diff --git a/.gitignore b/.gitignore
index 8c7e4a9..51664b3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,5 +1,5 @@
 # file: ~/.gitignore_global
 .DS_Store
-.idea
-src/*
-tests/*
\ No newline at end of file
+.idea/
+venv
+__pycache__/
\ No newline at end of file
diff --git a/README.md b/README.md
index bae7cb8..4727a71 100644
--- a/README.md
+++ b/README.md
@@ -1,2 +1,2 @@
-# Hello world
-This is repository for OTUS course.
+# WorkWithData
+This is the third task from the OTUS course.
diff --git a/__init__.py b/__init__.py
new file mode 100644
index 0000000..e03d808
--- /dev/null
+++ b/__init__.py
@@ -0,0 +1,9 @@
+import os
+
+CURRENT_DIR = os.path.dirname(__file__)
+FILES_DIR = os.path.join(CURRENT_DIR, 'files')
+
+
+def get_path(filename: str) -> str:
+    # Build the absolute path to a data file inside ./files
+    return os.path.join(FILES_DIR, filename)
diff --git a/files/books.csv b/files/books.csv
new file mode 100644
index 0000000..e8fd926
--- /dev/null
+++ b/files/books.csv
@@ -0,0 +1,212 @@
+Title,Author,Genre,Pages,Publisher
+Fundamentals of Wavelets,"Goswami, Jaideva",signal_processing,228,Wiley
+Data Smart,"Foreman, John",data_science,235,Wiley
+God Created the Integers,"Hawking, Stephen",mathematics,197,Penguin
+Superfreakonomics,"Dubner, Stephen",economics,179,HarperCollins
+Orientalism,"Said, Edward",history,197,Penguin
+"Nature of Statistical Learning Theory, The","Vapnik, Vladimir",data_science,230,Springer
+Integration of the Indian States,"Menon, V P",history,217,Orient Blackswan
+"Drunkard's Walk, The","Mlodinow, Leonard",science,197,Penguin
+Image Processing & Mathematical Morphology,"Shih, Frank",signal_processing,241,CRC
+How to Think Like Sherlock Holmes,"Konnikova, Maria",psychology,240,Penguin
+Data Scientists at Work,Sebastian Gutierrez,data_science,230,Apress
+Slaughterhouse Five,"Vonnegut, Kurt",fiction,198,Random House
+Birth of a Theorem,"Villani, Cedric",mathematics,234,Bodley Head
+Structure & Interpretation of Computer Programs,"Sussman, Gerald",computer_science,240,MIT Press
+"Age of Wrath, The","Eraly, Abraham",history,238,Penguin
+"Trial, The","Kafka, Frank",fiction,198,Random House
+Statistical Decision Theory',"Pratt, John",data_science,236,MIT Press
+Data Mining Handbook,"Nisbet, Robert",data_science,242,Apress
+"New Machiavelli, The","Wells, H. 
G.",fiction,180,Penguin +Physics & Philosophy,"Heisenberg, Werner",science,197,Penguin +Making Software,"Oram, Andy",computer_science,232,O'Reilly +"Analysis, Vol I","Tao, Terence",mathematics,248,HBA +Machine Learning for Hackers,"Conway, Drew",data_science,233,O'Reilly +"Signal and the Noise, The","Silver, Nate",data_science,233,Penguin +Python for Data Analysis,"McKinney, Wes",data_science,233,O'Reilly +Introduction to Algorithms,"Cormen, Thomas",computer_science,234,MIT Press +"Beautiful and the Damned, The","Deb, Siddhartha",nonfiction,198,Penguin +"Outsider, The","Camus, Albert",fiction,198,Penguin +"Complete Sherlock Holmes, The - Vol I","Doyle, Arthur Conan",fiction,176,Random House +"Complete Sherlock Holmes, The - Vol II","Doyle, Arthur Conan",fiction,176,Random House +"Wealth of Nations, The","Smith, Adam",economics,175,Random House +"Pillars of the Earth, The","Follett, Ken",fiction,176,Random House +Mein Kampf,"Hitler, Adolf",nonfiction,212,Rupa +"Tao of Physics, The","Capra, Fritjof",science,179,Penguin +Surely You're Joking Mr Feynman,"Feynman, Richard",science,198,Random House +"Farewell to Arms, A","Hemingway, Ernest",fiction,179,Rupa +"Veteran, The","Forsyth, Frederick",fiction,177,Transworld +False Impressions,"Archer, Jeffery",fiction,177,Pan +"Last Lecture, The","Pausch, Randy",nonfiction,197,Hyperion +Return of the Primitive,"Rand, Ayn",philosophy,202,Penguin +Jurassic Park,"Crichton, Michael",fiction,174,Random House +"Russian Journal, A","Steinbeck, John",nonfiction,196,Penguin +Tales of Mystery and Imagination,"Poe, Edgar Allen",fiction,172,HarperCollins +Freakonomics,"Dubner, Stephen",economics,197,Penguin +"Hidden Connections, The","Capra, Fritjof",science,197,HarperCollins +"Story of Philosophy, The","Durant, Will",philosophy,170,Pocket +Asami Asami,"Deshpande, P L",fiction,205,Mauj +Journal of a Novel,"Steinbeck, John",fiction,196,Penguin +Once There Was a War,"Steinbeck, John",nonfiction,196,Penguin +"Moon is Down, The","Steinbeck, John",fiction,196,Penguin +"Brethren, The","Grisham, John",fiction,174,Random House +In a Free State,"Naipaul, V. S.",fiction,196,Rupa +Catch 22,"Heller, Joseph",fiction,178,Random House +"Complete Mastermind, The",BBC,nonfiction,178,BBC +Dylan on Dylan,"Dylan, Bob",nonfiction,197,Random House +Soft Computing & Intelligent Systems,"Gupta, Madan",data_science,242,Elsevier +Textbook of Economic Theory,"Stonier, Alfred",economics,242,Pearson +Econometric Analysis,"Greene, W. 
H.",economics,242,Pearson +Learning OpenCV,"Bradsky, Gary",data_science,232,O'Reilly +Data Structures Using C & C++,"Tanenbaum, Andrew",computer_science,235,Prentice Hall +"Computer Vision, A Modern Approach","Forsyth, David",data_science,255,Pearson +Principles of Communication Systems,"Taub, Schilling",computer_science,240,TMH +Let Us C,"Kanetkar, Yashwant",computer_science,213,Prentice Hall +"Amulet of Samarkand, The","Stroud, Jonathan",fiction,179,Random House +Crime and Punishment,"Dostoevsky, Fyodor",fiction,180,Penguin +Angels & Demons,"Brown, Dan",fiction,178,Random House +"Argumentative Indian, The","Sen, Amartya",nonfiction,209,Picador +Sea of Poppies,"Ghosh, Amitav",fiction,197,Penguin +"Idea of Justice, The","Sen, Amartya",nonfiction,212,Penguin +"Raisin in the Sun, A","Hansberry, Lorraine",fiction,175,Penguin +All the President's Men,"Woodward, Bob",history,177,Random House +"Prisoner of Birth, A","Archer, Jeffery",fiction,176,Pan +Scoop!,"Nayar, Kuldip",history,216,HarperCollins +Ahe Manohar Tari,"Deshpande, Sunita",nonfiction,213,Mauj +"Last Mughal, The","Dalrymple, William",history,199,Penguin +"Social Choice & Welfare, Vol 39 No. 1",Various,economics,235,Springer +Radiowaril Bhashane & Shrutika,"Deshpande, P L",nonfiction,213,Mauj +Gun Gayin Awadi,"Deshpande, P L",nonfiction,212,Mauj +Aghal Paghal,"Deshpande, P L",nonfiction,212,Mauj +Maqta-e-Ghalib,"Garg, Sanjay",fiction,221,Mauj +Beyond Degrees,,nonfiction,222,HarperCollins +Manasa,"Kale, V P",nonfiction,213,Mauj +India from Midnight to Milennium,"Tharoor, Shashi",history,198,Penguin +"World's Greatest Trials, The",,history,210, +"Great Indian Novel, The","Tharoor, Shashi",fiction,198,Penguin +O Jerusalem!,"Lapierre, Dominique",history,217,vikas +"City of Joy, The","Lapierre, Dominique",fiction,177,vikas +Freedom at Midnight,"Lapierre, Dominique",history,167,vikas +"Winter of Our Discontent, The","Steinbeck, John",fiction,196,Penguin +On Education,"Russell, Bertrand",philosophy,203,Routledge +Free Will,"Harris, Sam",philosophy,203,FreePress +Bookless in Baghdad,"Tharoor, Shashi",nonfiction,206,Penguin +"Case of the Lame Canary, The","Gardner, Earle Stanley",fiction,179, +"Theory of Everything, The","Hawking, Stephen",science,217,Jaico +New Markets & Other Essays,"Drucker, Peter",economics,176,Penguin +Electric Universe,"Bodanis, David",science,201,Penguin +"Hunchback of Notre Dame, The","Hugo, Victor",fiction,175,Random House +Burning Bright,"Steinbeck, John",fiction,175,Penguin +"Age of Discontuinity, The","Drucker, Peter",economics,178,Random House +Doctor in the Nude,"Gordon, Richard",fiction,179,Penguin +Down and Out in Paris & London,"Orwell, George",nonfiction,179,Penguin +Identity & Violence,"Sen, Amartya",philosophy,219,Penguin +Beyond the Three Seas,"Dalrymple, William",history,197,Random House +"World's Greatest Short Stories, The",,fiction,217,Jaico +Talking Straight,"Iacoca, Lee",nonfiction,175, +"Maugham's Collected Short Stories, Vol 3","Maugham, William S",fiction,171,Vintage +"Phantom of Manhattan, The","Forsyth, Frederick",fiction,180, +Ashenden of The British Agent,"Maugham, William S",fiction,160,Vintage +Zen & The Art of Motorcycle Maintenance,"Pirsig, Robert",philosophy,172,Vintage +"Great War for Civilization, The","Fisk, Robert",history,197,HarperCollins +We the Living,"Rand, Ayn",fiction,178,Penguin +"Artist and the Mathematician, The","Aczel, Amir",science,186,HighStakes +History of Western Philosophy,"Russell, Bertrand",philosophy,213,Routledge +Selected Short Stories,,fiction,215,Jaico 
+Rationality & Freedom,"Sen, Amartya",economics,213,Springer +Clash of Civilizations and Remaking of the World Order,"Huntington, Samuel",history,228,Simon&Schuster +Uncommon Wisdom,"Capra, Fritjof",nonfiction,197,Fontana +One,"Bach, Richard",nonfiction,172,Dell +Karl Marx Biography,,nonfiction,162, +To Sir With Love,Braithwaite,fiction,197,Penguin +Half A Life,"Naipaul, V S",fiction,196, +"Discovery of India, The","Nehru, Jawaharlal",history,230, +Apulki,"Deshpande, P L",nonfiction,211, +Unpopular Essays,"Russell, Bertrand",philosophy,198, +"Deceiver, The","Forsyth, Frederick",fiction,178, +Veil: Secret Wars of the CIA,"Woodward, Bob",history,171, +Char Shabda,"Deshpande, P L",nonfiction,214, +Rosy is My Relative,"Durrell, Gerald",fiction,176, +"Moon and Sixpence, The","Maugham, William S",fiction,180, +Political Philosophers,,philosophy,162, +"Short History of the World, A","Wells, H G",history,197, +"Trembling of a Leaf, The","Maugham, William S",fiction,205, +Doctor on the Brain,"Gordon, Richard",fiction,204, +Simpsons & Their Mathematical Secrets,"Singh, Simon",science,233, +Pattern Classification,"Duda, Hart",data_science,241, +From Beirut to Jerusalem,"Friedman, Thomas",history,202, +"Code Book, The","Singh, Simon",science,197, +"Age of the Warrior, The","Fisk, Robert",history,197, +Final Crisis,,comic,257, +"Killing Joke, The",,comic,283, +Flashpoint,,comic,265, +Batman Earth One,,comic,265, +Crisis on Infinite Earths,,comic,258, +"Numbers Behind Numb3rs, The","Devlin, Keith",science,202, +Superman Earth One - 1,,comic,259, +Superman Earth One - 2,,comic,258, +Justice League: Throne of Atlantis,,comic,258, +Justice League: The Villain's Journey,,comic,258, +"Death of Superman, The",,comic,258, +History of the DC Universe,,comic,258, +Batman: The Long Halloween,,comic,258, +"Life in Letters, A","Steinbeck, John",nonfiction,196, +"Information, The","Gleick, James",science,233, +"Journal of Economics, vol 106 No 3",,economics,235, +Elements of Information Theory,"Thomas, Joy",data_science,229, +Power Electronics - Rashid,"Rashid, Muhammad",computer_science,235, +Power Electronics - Mohan,"Mohan, Ned",computer_science,237, +Neural Networks,"Haykin, Simon",data_science,240, +"Grapes of Wrath, The","Steinbeck, John",fiction,196, +Vyakti ani Valli,"Deshpande, P L",nonfiction,211, +Statistical Learning Theory,"Vapnik, Vladimir",data_science,228, +Empire of the Mughal - The Tainted Throne,"Rutherford, Alex",history,180, +Empire of the Mughal - Brothers at War,"Rutherford, Alex",history,180, +Empire of the Mughal - Ruler of the World,"Rutherford, Alex",history,180, +Empire of the Mughal - The Serpent's Tooth,"Rutherford, Alex",history,180, +Empire of the Mughal - Raiders from the North,"Rutherford, Alex",history,180, +Mossad,"Baz-Zohar, Michael",history,236, +Jim Corbett Omnibus,"Corbett, Jim",nonfiction,223, +20000 Leagues Under the Sea,"Verne, Jules",fiction,190, +Batatyachi Chal,Deshpande P L,fiction,200, +Hafasavnuk,Deshpande P L,fiction,211, +Urlasurla,Deshpande P L,fiction,211, +Pointers in C,"Kanetkar, Yashwant",computer_science,213, +"Cathedral and the Bazaar, The","Raymond, Eric",computer_science,217, +Design with OpAmps,"Franco, Sergio",computer_science,240, +Think Complexity,"Downey, Allen",data_science,230, +"Devil's Advocate, The","West, Morris",fiction,178, +Ayn Rand Answers,"Rand, Ayn",philosophy,203, +Philosophy: Who Needs It,"Rand, Ayn",philosophy,171, +"World's Great Thinkers, The",,philosophy,189, +Data Analysis with Open Source Tools,"Janert, Phillip",data_science,230, 
+Broca's Brain,"Sagan, Carl",science,174, +Men of Mathematics,"Bell, E T",mathematics,217, +Oxford book of Modern Science Writing,"Dawkins, Richard",science,240, +"Justice, Judiciary and Democracy","Ranjan, Sudhanshu",philosophy,224, +"Arthashastra, The",Kautiyla,philosophy,214, +We the People,Palkhivala,philosophy,216, +We the Nation,Palkhivala,philosophy,216, +"Courtroom Genius, The",Sorabjee,nonfiction,217, +Dongri to Dubai,"Zaidi, Hussain",nonfiction,216, +"History of England, Foundation","Ackroyd, Peter",history,197, +City of Djinns,"Dalrymple, William",history,198, +India's Legal System,Nariman,nonfiction,177, +More Tears to Cry,"Sassoon, Jean",fiction,235, +"Ropemaker, The","Dickinson, Peter",fiction,196, +Angels & Demons,"Brown, Dan",fiction,170, +"Judge, The",,fiction,170, +"Attorney, The",,fiction,170, +"Prince, The",Machiavelli,philosophy,173, +Eyeless in Gaza,"Huxley, Aldous",fiction,180, +Tales of Beedle the Bard,"Rowling, J K",fiction,184, +Girl with the Dragon Tattoo,"Larsson, Steig",fiction,179, +Girl who kicked the Hornet's Nest,"Larsson, Steig",fiction,179, +Girl who played with Fire,"Larsson, Steig",fiction,179, +Batman Handbook,,comic,270, +Murphy's Law,,nonfiction,178, +Structure and Randomness,"Tao, Terence",mathematics,252, +Image Processing with MATLAB,"Eddins, Steve",signal_processing,241, +Animal Farm,"Orwell, George",fiction,180, +"Idiot, The","Dostoevsky, Fyodor",fiction,197, +"Christmas Carol, A","Dickens, Charles",fiction,196, \ No newline at end of file diff --git a/files/result.json b/files/result.json new file mode 100644 index 0000000..3846678 --- /dev/null +++ b/files/result.json @@ -0,0 +1,1492 @@ +[ + { + "name": "Lolita Lynn", + "gender": "female", + "address": "389 Neptune Avenue, Belfair, Iowa, 6116", + "age": 34, + "books": [ + { + "Title": "Fundamentals of Wavelets", + "Author": "Goswami, Jaideva", + "Pages": "228", + "Genre": "signal_processing" + }, + { + "Title": "Data Smart", + "Author": "Foreman, John", + "Pages": "235", + "Genre": "data_science" + }, + { + "Title": "God Created the Integers", + "Author": "Hawking, Stephen", + "Pages": "197", + "Genre": "mathematics" + }, + { + "Title": "Superfreakonomics", + "Author": "Dubner, Stephen", + "Pages": "179", + "Genre": "economics" + }, + { + "Title": "Orientalism", + "Author": "Said, Edward", + "Pages": "197", + "Genre": "history" + }, + { + "Title": "Nature of Statistical Learning Theory, The", + "Author": "Vapnik, Vladimir", + "Pages": "230", + "Genre": "data_science" + }, + { + "Title": "Integration of the Indian States", + "Author": "Menon, V P", + "Pages": "217", + "Genre": "history" + }, + { + "Title": "Drunkard's Walk, The", + "Author": "Mlodinow, Leonard", + "Pages": "197", + "Genre": "science" + } + ] + }, + { + "name": "Tonia Hurst", + "gender": "female", + "address": "917 Terrace Place, Urbana, Idaho, 684", + "age": 31, + "books": [ + { + "Title": "Image Processing & Mathematical Morphology", + "Author": "Shih, Frank", + "Pages": "241", + "Genre": "signal_processing" + }, + { + "Title": "How to Think Like Sherlock Holmes", + "Author": "Konnikova, Maria", + "Pages": "240", + "Genre": "psychology" + }, + { + "Title": "Data Scientists at Work", + "Author": "Sebastian Gutierrez", + "Pages": "230", + "Genre": "data_science" + }, + { + "Title": "Slaughterhouse Five", + "Author": "Vonnegut, Kurt", + "Pages": "198", + "Genre": "fiction" + }, + { + "Title": "Birth of a Theorem", + "Author": "Villani, Cedric", + "Pages": "234", + "Genre": "mathematics" + }, + { + "Title": "Structure & 
Interpretation of Computer Programs", + "Author": "Sussman, Gerald", + "Pages": "240", + "Genre": "computer_science" + }, + { + "Title": "Age of Wrath, The", + "Author": "Eraly, Abraham", + "Pages": "238", + "Genre": "history" + }, + { + "Title": "Trial, The", + "Author": "Kafka, Frank", + "Pages": "198", + "Genre": "fiction" + } + ] + }, + { + "name": "Brooks Bright", + "gender": "male", + "address": "901 Mermaid Avenue, Wyoming, Marshall Islands, 8506", + "age": 39, + "books": [ + { + "Title": "Statistical Decision Theory'", + "Author": "Pratt, John", + "Pages": "236", + "Genre": "data_science" + }, + { + "Title": "Data Mining Handbook", + "Author": "Nisbet, Robert", + "Pages": "242", + "Genre": "data_science" + }, + { + "Title": "New Machiavelli, The", + "Author": "Wells, H. G.", + "Pages": "180", + "Genre": "fiction" + }, + { + "Title": "Physics & Philosophy", + "Author": "Heisenberg, Werner", + "Pages": "197", + "Genre": "science" + }, + { + "Title": "Making Software", + "Author": "Oram, Andy", + "Pages": "232", + "Genre": "computer_science" + }, + { + "Title": "Analysis, Vol I", + "Author": "Tao, Terence", + "Pages": "248", + "Genre": "mathematics" + }, + { + "Title": "Machine Learning for Hackers", + "Author": "Conway, Drew", + "Pages": "233", + "Genre": "data_science" + }, + { + "Title": "Signal and the Noise, The", + "Author": "Silver, Nate", + "Pages": "233", + "Genre": "data_science" + } + ] + }, + { + "name": "Kathrine Sharp", + "gender": "female", + "address": "989 Huron Street, Talpa, Utah, 7018", + "age": 40, + "books": [ + { + "Title": "Python for Data Analysis", + "Author": "McKinney, Wes", + "Pages": "233", + "Genre": "data_science" + }, + { + "Title": "Introduction to Algorithms", + "Author": "Cormen, Thomas", + "Pages": "234", + "Genre": "computer_science" + }, + { + "Title": "Beautiful and the Damned, The", + "Author": "Deb, Siddhartha", + "Pages": "198", + "Genre": "nonfiction" + }, + { + "Title": "Outsider, The", + "Author": "Camus, Albert", + "Pages": "198", + "Genre": "fiction" + }, + { + "Title": "Complete Sherlock Holmes, The - Vol I", + "Author": "Doyle, Arthur Conan", + "Pages": "176", + "Genre": "fiction" + }, + { + "Title": "Complete Sherlock Holmes, The - Vol II", + "Author": "Doyle, Arthur Conan", + "Pages": "176", + "Genre": "fiction" + }, + { + "Title": "Wealth of Nations, The", + "Author": "Smith, Adam", + "Pages": "175", + "Genre": "economics" + }, + { + "Title": "Pillars of the Earth, The", + "Author": "Follett, Ken", + "Pages": "176", + "Genre": "fiction" + } + ] + }, + { + "name": "Shawn Harrell", + "gender": "female", + "address": "534 Hinsdale Street, Albany, Palau, 3291", + "age": 34, + "books": [ + { + "Title": "Mein Kampf", + "Author": "Hitler, Adolf", + "Pages": "212", + "Genre": "nonfiction" + }, + { + "Title": "Tao of Physics, The", + "Author": "Capra, Fritjof", + "Pages": "179", + "Genre": "science" + }, + { + "Title": "Surely You're Joking Mr Feynman", + "Author": "Feynman, Richard", + "Pages": "198", + "Genre": "science" + }, + { + "Title": "Farewell to Arms, A", + "Author": "Hemingway, Ernest", + "Pages": "179", + "Genre": "fiction" + }, + { + "Title": "Veteran, The", + "Author": "Forsyth, Frederick", + "Pages": "177", + "Genre": "fiction" + }, + { + "Title": "False Impressions", + "Author": "Archer, Jeffery", + "Pages": "177", + "Genre": "fiction" + }, + { + "Title": "Last Lecture, The", + "Author": "Pausch, Randy", + "Pages": "197", + "Genre": "nonfiction" + }, + { + "Title": "Return of the Primitive", + "Author": "Rand, Ayn", + 
"Pages": "202", + "Genre": "philosophy" + } + ] + }, + { + "name": "Amy Casey", + "gender": "female", + "address": "589 Townsend Street, Hiseville, Connecticut, 7082", + "age": 31, + "books": [ + { + "Title": "Jurassic Park", + "Author": "Crichton, Michael", + "Pages": "174", + "Genre": "fiction" + }, + { + "Title": "Russian Journal, A", + "Author": "Steinbeck, John", + "Pages": "196", + "Genre": "nonfiction" + }, + { + "Title": "Tales of Mystery and Imagination", + "Author": "Poe, Edgar Allen", + "Pages": "172", + "Genre": "fiction" + }, + { + "Title": "Freakonomics", + "Author": "Dubner, Stephen", + "Pages": "197", + "Genre": "economics" + }, + { + "Title": "Hidden Connections, The", + "Author": "Capra, Fritjof", + "Pages": "197", + "Genre": "science" + }, + { + "Title": "Story of Philosophy, The", + "Author": "Durant, Will", + "Pages": "170", + "Genre": "philosophy" + }, + { + "Title": "Asami Asami", + "Author": "Deshpande, P L", + "Pages": "205", + "Genre": "fiction" + }, + { + "Title": "Journal of a Novel", + "Author": "Steinbeck, John", + "Pages": "196", + "Genre": "fiction" + } + ] + }, + { + "name": "Lorena Mejia", + "gender": "female", + "address": "614 High Street, Blanford, Maryland, 2776", + "age": 24, + "books": [ + { + "Title": "Once There Was a War", + "Author": "Steinbeck, John", + "Pages": "196", + "Genre": "nonfiction" + }, + { + "Title": "Moon is Down, The", + "Author": "Steinbeck, John", + "Pages": "196", + "Genre": "fiction" + }, + { + "Title": "Brethren, The", + "Author": "Grisham, John", + "Pages": "174", + "Genre": "fiction" + }, + { + "Title": "In a Free State", + "Author": "Naipaul, V. S.", + "Pages": "196", + "Genre": "fiction" + }, + { + "Title": "Catch 22", + "Author": "Heller, Joseph", + "Pages": "178", + "Genre": "fiction" + }, + { + "Title": "Complete Mastermind, The", + "Author": "BBC", + "Pages": "178", + "Genre": "nonfiction" + }, + { + "Title": "Dylan on Dylan", + "Author": "Dylan, Bob", + "Pages": "197", + "Genre": "nonfiction" + }, + { + "Title": "Soft Computing & Intelligent Systems", + "Author": "Gupta, Madan", + "Pages": "242", + "Genre": "data_science" + } + ] + }, + { + "name": "Allyson Wilkins", + "gender": "female", + "address": "572 Downing Street, Ivanhoe, American Samoa, 8235", + "age": 29, + "books": [ + { + "Title": "Textbook of Economic Theory", + "Author": "Stonier, Alfred", + "Pages": "242", + "Genre": "economics" + }, + { + "Title": "Econometric Analysis", + "Author": "Greene, W. 
H.", + "Pages": "242", + "Genre": "economics" + }, + { + "Title": "Learning OpenCV", + "Author": "Bradsky, Gary", + "Pages": "232", + "Genre": "data_science" + }, + { + "Title": "Data Structures Using C & C++", + "Author": "Tanenbaum, Andrew", + "Pages": "235", + "Genre": "computer_science" + }, + { + "Title": "Computer Vision, A Modern Approach", + "Author": "Forsyth, David", + "Pages": "255", + "Genre": "data_science" + }, + { + "Title": "Principles of Communication Systems", + "Author": "Taub, Schilling", + "Pages": "240", + "Genre": "computer_science" + }, + { + "Title": "Let Us C", + "Author": "Kanetkar, Yashwant", + "Pages": "213", + "Genre": "computer_science" + }, + { + "Title": "Amulet of Samarkand, The", + "Author": "Stroud, Jonathan", + "Pages": "179", + "Genre": "fiction" + } + ] + }, + { + "name": "Mays Reed", + "gender": "male", + "address": "306 Georgia Avenue, Hall, New Mexico, 1402", + "age": 38, + "books": [ + { + "Title": "Crime and Punishment", + "Author": "Dostoevsky, Fyodor", + "Pages": "180", + "Genre": "fiction" + }, + { + "Title": "Angels & Demons", + "Author": "Brown, Dan", + "Pages": "178", + "Genre": "fiction" + }, + { + "Title": "Argumentative Indian, The", + "Author": "Sen, Amartya", + "Pages": "209", + "Genre": "nonfiction" + }, + { + "Title": "Sea of Poppies", + "Author": "Ghosh, Amitav", + "Pages": "197", + "Genre": "fiction" + }, + { + "Title": "Idea of Justice, The", + "Author": "Sen, Amartya", + "Pages": "212", + "Genre": "nonfiction" + }, + { + "Title": "Raisin in the Sun, A", + "Author": "Hansberry, Lorraine", + "Pages": "175", + "Genre": "fiction" + }, + { + "Title": "All the President's Men", + "Author": "Woodward, Bob", + "Pages": "177", + "Genre": "history" + }, + { + "Title": "Prisoner of Birth, A", + "Author": "Archer, Jeffery", + "Pages": "176", + "Genre": "fiction" + } + ] + }, + { + "name": "Katherine Mayer", + "gender": "female", + "address": "640 Prescott Place, Curtice, Kansas, 3395", + "age": 27, + "books": [ + { + "Title": "Scoop!", + "Author": "Nayar, Kuldip", + "Pages": "216", + "Genre": "history" + }, + { + "Title": "Ahe Manohar Tari", + "Author": "Deshpande, Sunita", + "Pages": "213", + "Genre": "nonfiction" + }, + { + "Title": "Last Mughal, The", + "Author": "Dalrymple, William", + "Pages": "199", + "Genre": "history" + }, + { + "Title": "Social Choice & Welfare, Vol 39 No. 
1", + "Author": "Various", + "Pages": "235", + "Genre": "economics" + }, + { + "Title": "Radiowaril Bhashane & Shrutika", + "Author": "Deshpande, P L", + "Pages": "213", + "Genre": "nonfiction" + }, + { + "Title": "Gun Gayin Awadi", + "Author": "Deshpande, P L", + "Pages": "212", + "Genre": "nonfiction" + }, + { + "Title": "Aghal Paghal", + "Author": "Deshpande, P L", + "Pages": "212", + "Genre": "nonfiction" + }, + { + "Title": "Maqta-e-Ghalib", + "Author": "Garg, Sanjay", + "Pages": "221", + "Genre": "fiction" + } + ] + }, + { + "name": "Kelly Byers", + "gender": "female", + "address": "865 Revere Place, Homeland, Arizona, 232", + "age": 35, + "books": [ + { + "Title": "Beyond Degrees", + "Author": "", + "Pages": "222", + "Genre": "nonfiction" + }, + { + "Title": "Manasa", + "Author": "Kale, V P", + "Pages": "213", + "Genre": "nonfiction" + }, + { + "Title": "India from Midnight to Milennium", + "Author": "Tharoor, Shashi", + "Pages": "198", + "Genre": "history" + }, + { + "Title": "World's Greatest Trials, The", + "Author": "", + "Pages": "210", + "Genre": "history" + }, + { + "Title": "Great Indian Novel, The", + "Author": "Tharoor, Shashi", + "Pages": "198", + "Genre": "fiction" + }, + { + "Title": "O Jerusalem!", + "Author": "Lapierre, Dominique", + "Pages": "217", + "Genre": "history" + }, + { + "Title": "City of Joy, The", + "Author": "Lapierre, Dominique", + "Pages": "177", + "Genre": "fiction" + }, + { + "Title": "Freedom at Midnight", + "Author": "Lapierre, Dominique", + "Pages": "167", + "Genre": "history" + } + ] + }, + { + "name": "Schwartz Carey", + "gender": "male", + "address": "860 Centre Street, Hiwasse, Nevada, 2819", + "age": 32, + "books": [ + { + "Title": "Winter of Our Discontent, The", + "Author": "Steinbeck, John", + "Pages": "196", + "Genre": "fiction" + }, + { + "Title": "On Education", + "Author": "Russell, Bertrand", + "Pages": "203", + "Genre": "philosophy" + }, + { + "Title": "Free Will", + "Author": "Harris, Sam", + "Pages": "203", + "Genre": "philosophy" + }, + { + "Title": "Bookless in Baghdad", + "Author": "Tharoor, Shashi", + "Pages": "206", + "Genre": "nonfiction" + }, + { + "Title": "Case of the Lame Canary, The", + "Author": "Gardner, Earle Stanley", + "Pages": "179", + "Genre": "fiction" + }, + { + "Title": "Theory of Everything, The", + "Author": "Hawking, Stephen", + "Pages": "217", + "Genre": "science" + }, + { + "Title": "New Markets & Other Essays", + "Author": "Drucker, Peter", + "Pages": "176", + "Genre": "economics" + }, + { + "Title": "Electric Universe", + "Author": "Bodanis, David", + "Pages": "201", + "Genre": "science" + } + ] + }, + { + "name": "Kay Beasley", + "gender": "female", + "address": "358 Sutton Street, Bellamy, Ohio, 8845", + "age": 34, + "books": [ + { + "Title": "Hunchback of Notre Dame, The", + "Author": "Hugo, Victor", + "Pages": "175", + "Genre": "fiction" + }, + { + "Title": "Burning Bright", + "Author": "Steinbeck, John", + "Pages": "175", + "Genre": "fiction" + }, + { + "Title": "Age of Discontuinity, The", + "Author": "Drucker, Peter", + "Pages": "178", + "Genre": "economics" + }, + { + "Title": "Doctor in the Nude", + "Author": "Gordon, Richard", + "Pages": "179", + "Genre": "fiction" + }, + { + "Title": "Down and Out in Paris & London", + "Author": "Orwell, George", + "Pages": "179", + "Genre": "nonfiction" + }, + { + "Title": "Identity & Violence", + "Author": "Sen, Amartya", + "Pages": "219", + "Genre": "philosophy" + }, + { + "Title": "Beyond the Three Seas", + "Author": "Dalrymple, William", + "Pages": "197", 
+ "Genre": "history" + }, + { + "Title": "World's Greatest Short Stories, The", + "Author": "", + "Pages": "217", + "Genre": "fiction" + } + ] + }, + { + "name": "Robbins Gordon", + "gender": "male", + "address": "610 Langham Street, Boykin, Guam, 6688", + "age": 23, + "books": [ + { + "Title": "Talking Straight", + "Author": "Iacoca, Lee", + "Pages": "175", + "Genre": "nonfiction" + }, + { + "Title": "Maugham's Collected Short Stories, Vol 3", + "Author": "Maugham, William S", + "Pages": "171", + "Genre": "fiction" + }, + { + "Title": "Phantom of Manhattan, The", + "Author": "Forsyth, Frederick", + "Pages": "180", + "Genre": "fiction" + }, + { + "Title": "Ashenden of The British Agent", + "Author": "Maugham, William S", + "Pages": "160", + "Genre": "fiction" + }, + { + "Title": "Zen & The Art of Motorcycle Maintenance", + "Author": "Pirsig, Robert", + "Pages": "172", + "Genre": "philosophy" + }, + { + "Title": "Great War for Civilization, The", + "Author": "Fisk, Robert", + "Pages": "197", + "Genre": "history" + }, + { + "Title": "We the Living", + "Author": "Rand, Ayn", + "Pages": "178", + "Genre": "fiction" + }, + { + "Title": "Artist and the Mathematician, The", + "Author": "Aczel, Amir", + "Pages": "186", + "Genre": "science" + } + ] + }, + { + "name": "Hillary Bauer", + "gender": "female", + "address": "951 Cumberland Street, Alleghenyville, Oregon, 7073", + "age": 39, + "books": [ + { + "Title": "History of Western Philosophy", + "Author": "Russell, Bertrand", + "Pages": "213", + "Genre": "philosophy" + }, + { + "Title": "Selected Short Stories", + "Author": "", + "Pages": "215", + "Genre": "fiction" + }, + { + "Title": "Rationality & Freedom", + "Author": "Sen, Amartya", + "Pages": "213", + "Genre": "economics" + }, + { + "Title": "Clash of Civilizations and Remaking of the World Order", + "Author": "Huntington, Samuel", + "Pages": "228", + "Genre": "history" + }, + { + "Title": "Uncommon Wisdom", + "Author": "Capra, Fritjof", + "Pages": "197", + "Genre": "nonfiction" + }, + { + "Title": "One", + "Author": "Bach, Richard", + "Pages": "172", + "Genre": "nonfiction" + }, + { + "Title": "Karl Marx Biography", + "Author": "", + "Pages": "162", + "Genre": "nonfiction" + }, + { + "Title": "To Sir With Love", + "Author": "Braithwaite", + "Pages": "197", + "Genre": "fiction" + } + ] + }, + { + "name": "Ruiz Phelps", + "gender": "male", + "address": "836 Troutman Street, Harborton, Kentucky, 4030", + "age": 36, + "books": [ + { + "Title": "Half A Life", + "Author": "Naipaul, V S", + "Pages": "196", + "Genre": "fiction" + }, + { + "Title": "Discovery of India, The", + "Author": "Nehru, Jawaharlal", + "Pages": "230", + "Genre": "history" + }, + { + "Title": "Apulki", + "Author": "Deshpande, P L", + "Pages": "211", + "Genre": "nonfiction" + }, + { + "Title": "Unpopular Essays", + "Author": "Russell, Bertrand", + "Pages": "198", + "Genre": "philosophy" + }, + { + "Title": "Deceiver, The", + "Author": "Forsyth, Frederick", + "Pages": "178", + "Genre": "fiction" + }, + { + "Title": "Veil: Secret Wars of the CIA", + "Author": "Woodward, Bob", + "Pages": "171", + "Genre": "history" + }, + { + "Title": "Char Shabda", + "Author": "Deshpande, P L", + "Pages": "214", + "Genre": "nonfiction" + } + ] + }, + { + "name": "Carolina Bryant", + "gender": "female", + "address": "377 Middagh Street, Ellerslie, Nebraska, 2644", + "age": 31, + "books": [ + { + "Title": "Rosy is My Relative", + "Author": "Durrell, Gerald", + "Pages": "176", + "Genre": "fiction" + }, + { + "Title": "Moon and Sixpence, The", + 
"Author": "Maugham, William S", + "Pages": "180", + "Genre": "fiction" + }, + { + "Title": "Political Philosophers", + "Author": "", + "Pages": "162", + "Genre": "philosophy" + }, + { + "Title": "Short History of the World, A", + "Author": "Wells, H G", + "Pages": "197", + "Genre": "history" + }, + { + "Title": "Trembling of a Leaf, The", + "Author": "Maugham, William S", + "Pages": "205", + "Genre": "fiction" + }, + { + "Title": "Doctor on the Brain", + "Author": "Gordon, Richard", + "Pages": "204", + "Genre": "fiction" + }, + { + "Title": "Simpsons & Their Mathematical Secrets", + "Author": "Singh, Simon", + "Pages": "233", + "Genre": "science" + } + ] + }, + { + "name": "Sosa Lee", + "gender": "male", + "address": "364 Holly Street, Omar, California, 5140", + "age": 31, + "books": [ + { + "Title": "Pattern Classification", + "Author": "Duda, Hart", + "Pages": "241", + "Genre": "data_science" + }, + { + "Title": "From Beirut to Jerusalem", + "Author": "Friedman, Thomas", + "Pages": "202", + "Genre": "history" + }, + { + "Title": "Code Book, The", + "Author": "Singh, Simon", + "Pages": "197", + "Genre": "science" + }, + { + "Title": "Age of the Warrior, The", + "Author": "Fisk, Robert", + "Pages": "197", + "Genre": "history" + }, + { + "Title": "Final Crisis", + "Author": "", + "Pages": "257", + "Genre": "comic" + }, + { + "Title": "Killing Joke, The", + "Author": "", + "Pages": "283", + "Genre": "comic" + }, + { + "Title": "Flashpoint", + "Author": "", + "Pages": "265", + "Genre": "comic" + } + ] + }, + { + "name": "Lorna Scott", + "gender": "female", + "address": "878 Marconi Place, Gerton, Alabama, 845", + "age": 24, + "books": [ + { + "Title": "Batman Earth One", + "Author": "", + "Pages": "265", + "Genre": "comic" + }, + { + "Title": "Crisis on Infinite Earths", + "Author": "", + "Pages": "258", + "Genre": "comic" + }, + { + "Title": "Numbers Behind Numb3rs, The", + "Author": "Devlin, Keith", + "Pages": "202", + "Genre": "science" + }, + { + "Title": "Superman Earth One - 1", + "Author": "", + "Pages": "259", + "Genre": "comic" + }, + { + "Title": "Superman Earth One - 2", + "Author": "", + "Pages": "258", + "Genre": "comic" + }, + { + "Title": "Justice League: Throne of Atlantis", + "Author": "", + "Pages": "258", + "Genre": "comic" + }, + { + "Title": "Justice League: The Villain's Journey", + "Author": "", + "Pages": "258", + "Genre": "comic" + } + ] + }, + { + "name": "Bernard Holden", + "gender": "male", + "address": "674 Pine Street, Conestoga, Mississippi, 4727", + "age": 32, + "books": [ + { + "Title": "Death of Superman, The", + "Author": "", + "Pages": "258", + "Genre": "comic" + }, + { + "Title": "History of the DC Universe", + "Author": "", + "Pages": "258", + "Genre": "comic" + }, + { + "Title": "Batman: The Long Halloween", + "Author": "", + "Pages": "258", + "Genre": "comic" + }, + { + "Title": "Life in Letters, A", + "Author": "Steinbeck, John", + "Pages": "196", + "Genre": "nonfiction" + }, + { + "Title": "Information, The", + "Author": "Gleick, James", + "Pages": "233", + "Genre": "science" + }, + { + "Title": "Journal of Economics, vol 106 No 3", + "Author": "", + "Pages": "235", + "Genre": "economics" + }, + { + "Title": "Elements of Information Theory", + "Author": "Thomas, Joy", + "Pages": "229", + "Genre": "data_science" + } + ] + }, + { + "name": "Craft Shields", + "gender": "male", + "address": "586 Java Street, Catherine, Arkansas, 1445", + "age": 31, + "books": [ + { + "Title": "Power Electronics - Rashid", + "Author": "Rashid, Muhammad", + "Pages": "235", 
+ "Genre": "computer_science" + }, + { + "Title": "Power Electronics - Mohan", + "Author": "Mohan, Ned", + "Pages": "237", + "Genre": "computer_science" + }, + { + "Title": "Neural Networks", + "Author": "Haykin, Simon", + "Pages": "240", + "Genre": "data_science" + }, + { + "Title": "Grapes of Wrath, The", + "Author": "Steinbeck, John", + "Pages": "196", + "Genre": "fiction" + }, + { + "Title": "Vyakti ani Valli", + "Author": "Deshpande, P L", + "Pages": "211", + "Genre": "nonfiction" + }, + { + "Title": "Statistical Learning Theory", + "Author": "Vapnik, Vladimir", + "Pages": "228", + "Genre": "data_science" + }, + { + "Title": "Empire of the Mughal - The Tainted Throne", + "Author": "Rutherford, Alex", + "Pages": "180", + "Genre": "history" + } + ] + }, + { + "name": "Mara English", + "gender": "female", + "address": "324 Herkimer Court, Boomer, Delaware, 5367", + "age": 23, + "books": [ + { + "Title": "Empire of the Mughal - Brothers at War", + "Author": "Rutherford, Alex", + "Pages": "180", + "Genre": "history" + }, + { + "Title": "Empire of the Mughal - Ruler of the World", + "Author": "Rutherford, Alex", + "Pages": "180", + "Genre": "history" + }, + { + "Title": "Empire of the Mughal - The Serpent's Tooth", + "Author": "Rutherford, Alex", + "Pages": "180", + "Genre": "history" + }, + { + "Title": "Empire of the Mughal - Raiders from the North", + "Author": "Rutherford, Alex", + "Pages": "180", + "Genre": "history" + }, + { + "Title": "Mossad", + "Author": "Baz-Zohar, Michael", + "Pages": "236", + "Genre": "history" + }, + { + "Title": "Jim Corbett Omnibus", + "Author": "Corbett, Jim", + "Pages": "223", + "Genre": "nonfiction" + }, + { + "Title": "20000 Leagues Under the Sea", + "Author": "Verne, Jules", + "Pages": "190", + "Genre": "fiction" + } + ] + }, + { + "name": "Fisher Levy", + "gender": "male", + "address": "540 Adler Place, Hachita, Federated States Of Micronesia, 9894", + "age": 30, + "books": [ + { + "Title": "Batatyachi Chal", + "Author": "Deshpande P L", + "Pages": "200", + "Genre": "fiction" + }, + { + "Title": "Hafasavnuk", + "Author": "Deshpande P L", + "Pages": "211", + "Genre": "fiction" + }, + { + "Title": "Urlasurla", + "Author": "Deshpande P L", + "Pages": "211", + "Genre": "fiction" + }, + { + "Title": "Pointers in C", + "Author": "Kanetkar, Yashwant", + "Pages": "213", + "Genre": "computer_science" + }, + { + "Title": "Cathedral and the Bazaar, The", + "Author": "Raymond, Eric", + "Pages": "217", + "Genre": "computer_science" + }, + { + "Title": "Design with OpAmps", + "Author": "Franco, Sergio", + "Pages": "240", + "Genre": "computer_science" + }, + { + "Title": "Think Complexity", + "Author": "Downey, Allen", + "Pages": "230", + "Genre": "data_science" + } + ] + }, + { + "name": "Cecelia Snyder", + "gender": "female", + "address": "236 Anchorage Place, Odessa, Michigan, 6314", + "age": 25, + "books": [ + { + "Title": "Devil's Advocate, The", + "Author": "West, Morris", + "Pages": "178", + "Genre": "fiction" + }, + { + "Title": "Ayn Rand Answers", + "Author": "Rand, Ayn", + "Pages": "203", + "Genre": "philosophy" + }, + { + "Title": "Philosophy: Who Needs It", + "Author": "Rand, Ayn", + "Pages": "171", + "Genre": "philosophy" + }, + { + "Title": "World's Great Thinkers, The", + "Author": "", + "Pages": "189", + "Genre": "philosophy" + }, + { + "Title": "Data Analysis with Open Source Tools", + "Author": "Janert, Phillip", + "Pages": "230", + "Genre": "data_science" + }, + { + "Title": "Broca's Brain", + "Author": "Sagan, Carl", + "Pages": "174", + "Genre": 
"science" + }, + { + "Title": "Men of Mathematics", + "Author": "Bell, E T", + "Pages": "217", + "Genre": "mathematics" + } + ] + }, + { + "name": "Nina Kaufman", + "gender": "female", + "address": "538 Ashford Street, Boling, West Virginia, 7840", + "age": 22, + "books": [ + { + "Title": "Oxford book of Modern Science Writing", + "Author": "Dawkins, Richard", + "Pages": "240", + "Genre": "science" + }, + { + "Title": "Justice, Judiciary and Democracy", + "Author": "Ranjan, Sudhanshu", + "Pages": "224", + "Genre": "philosophy" + }, + { + "Title": "Arthashastra, The", + "Author": "Kautiyla", + "Pages": "214", + "Genre": "philosophy" + }, + { + "Title": "We the People", + "Author": "Palkhivala", + "Pages": "216", + "Genre": "philosophy" + }, + { + "Title": "We the Nation", + "Author": "Palkhivala", + "Pages": "216", + "Genre": "philosophy" + }, + { + "Title": "Courtroom Genius, The", + "Author": "Sorabjee", + "Pages": "217", + "Genre": "nonfiction" + }, + { + "Title": "Dongri to Dubai", + "Author": "Zaidi, Hussain", + "Pages": "216", + "Genre": "nonfiction" + } + ] + }, + { + "name": "Dillard Branch", + "gender": "male", + "address": "225 Hampton Avenue, Bethany, Pennsylvania, 8056", + "age": 37, + "books": [ + { + "Title": "History of England, Foundation", + "Author": "Ackroyd, Peter", + "Pages": "197", + "Genre": "history" + }, + { + "Title": "City of Djinns", + "Author": "Dalrymple, William", + "Pages": "198", + "Genre": "history" + }, + { + "Title": "India's Legal System", + "Author": "Nariman", + "Pages": "177", + "Genre": "nonfiction" + }, + { + "Title": "More Tears to Cry", + "Author": "Sassoon, Jean", + "Pages": "235", + "Genre": "fiction" + }, + { + "Title": "Ropemaker, The", + "Author": "Dickinson, Peter", + "Pages": "196", + "Genre": "fiction" + }, + { + "Title": "Angels & Demons", + "Author": "Brown, Dan", + "Pages": "170", + "Genre": "fiction" + }, + { + "Title": "Judge, The", + "Author": "", + "Pages": "170", + "Genre": "fiction" + } + ] + }, + { + "name": "Dyer Bartlett", + "gender": "male", + "address": "880 Meadow Street, Seymour, Puerto Rico, 7921", + "age": 33, + "books": [ + { + "Title": "Attorney, The", + "Author": "", + "Pages": "170", + "Genre": "fiction" + }, + { + "Title": "Prince, The", + "Author": "Machiavelli", + "Pages": "173", + "Genre": "philosophy" + }, + { + "Title": "Eyeless in Gaza", + "Author": "Huxley, Aldous", + "Pages": "180", + "Genre": "fiction" + }, + { + "Title": "Tales of Beedle the Bard", + "Author": "Rowling, J K", + "Pages": "184", + "Genre": "fiction" + }, + { + "Title": "Girl with the Dragon Tattoo", + "Author": "Larsson, Steig", + "Pages": "179", + "Genre": "fiction" + }, + { + "Title": "Girl who kicked the Hornet's Nest", + "Author": "Larsson, Steig", + "Pages": "179", + "Genre": "fiction" + }, + { + "Title": "Girl who played with Fire", + "Author": "Larsson, Steig", + "Pages": "179", + "Genre": "fiction" + } + ] + }, + { + "name": "Tyler Dotson", + "gender": "male", + "address": "220 Herkimer Place, Turpin, Oklahoma, 4468", + "age": 20, + "books": [ + { + "Title": "Batman Handbook", + "Author": "", + "Pages": "270", + "Genre": "comic" + }, + { + "Title": "Murphy's Law", + "Author": "", + "Pages": "178", + "Genre": "nonfiction" + }, + { + "Title": "Structure and Randomness", + "Author": "Tao, Terence", + "Pages": "252", + "Genre": "mathematics" + }, + { + "Title": "Image Processing with MATLAB", + "Author": "Eddins, Steve", + "Pages": "241", + "Genre": "signal_processing" + }, + { + "Title": "Animal Farm", + "Author": "Orwell, George", + 
"Pages": "180", + "Genre": "fiction" + }, + { + "Title": "Idiot, The", + "Author": "Dostoevsky, Fyodor", + "Pages": "197", + "Genre": "fiction" + }, + { + "Title": "Christmas Carol, A", + "Author": "Dickens, Charles", + "Pages": "196", + "Genre": "fiction" + } + ] + } +] \ No newline at end of file diff --git a/files/users.json b/files/users.json new file mode 100644 index 0000000..2b2a059 --- /dev/null +++ b/files/users.json @@ -0,0 +1,1262 @@ +[ + { + "_id": "5e2696e561fdc6df60d43b5f", + "index": 0, + "guid": "3e518b31-20f0-4dea-8de8-039af5afbd33", + "isActive": false, + "balance": "$3,646.47", + "picture": "http://placehold.it/32x32", + "age": 34, + "eyeColor": "brown", + "name": "Lolita Lynn", + "gender": "female", + "company": "HIVEDOM", + "email": "lolitalynn@hivedom.com", + "phone": "+1 (842) 513-2979", + "address": "389 Neptune Avenue, Belfair, Iowa, 6116", + "about": "Ea irure labore culpa proident sint cupidatat minim laboris labore eu exercitation aliqua duis aute. Consectetur pariatur commodo enim pariatur mollit. Laborum nisi cillum do consectetur laboris nulla id laboris eu voluptate sit consequat commodo aute. Ad minim eiusmod pariatur non cupidatat esse fugiat et laborum ullamco commodo. Sint fugiat enim elit pariatur consequat ipsum Lorem qui qui Lorem proident mollit culpa. In enim commodo culpa nostrud reprehenderit nostrud incididunt elit labore. Aute proident mollit pariatur proident enim commodo.\r\n", + "registered": "2014-03-19T10:39:24 -06:00", + "latitude": 0.246756, + "longitude": -96.404056, + "tags": [ + "ad", + "ut", + "do", + "dolor", + "qui", + "quis", + "enim" + ], + "friends": [ + { + "id": 0, + "name": "Joan Weaver" + }, + { + "id": 1, + "name": "Morris Wheeler" + }, + { + "id": 2, + "name": "Morton Noble" + } + ], + "greeting": "Hello, Lolita Lynn! You have 2 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "5e2696e5e41b533b9c781cbb", + "index": 1, + "guid": "fee7342e-ad05-40bd-9275-e6333a593131", + "isActive": false, + "balance": "$3,182.86", + "picture": "http://placehold.it/32x32", + "age": 31, + "eyeColor": "brown", + "name": "Tonia Hurst", + "gender": "female", + "company": "VIOCULAR", + "email": "toniahurst@viocular.com", + "phone": "+1 (810) 518-2428", + "address": "917 Terrace Place, Urbana, Idaho, 684", + "about": "Veniam non labore Lorem eiusmod. Quis culpa commodo officia ipsum dolore incididunt tempor in dolor aliqua et ad culpa. In magna amet tempor occaecat consectetur occaecat. Ex exercitation deserunt elit incididunt. Cupidatat labore id eiusmod cupidatat laboris proident duis irure reprehenderit dolor est esse sunt veniam. Qui Lorem voluptate duis aliqua aute esse exercitation laborum.\r\n", + "registered": "2017-05-10T08:41:24 -05:00", + "latitude": -18.252788, + "longitude": -24.432542, + "tags": [ + "aute", + "dolor", + "ipsum", + "veniam", + "culpa", + "voluptate", + "ipsum" + ], + "friends": [ + { + "id": 0, + "name": "Haney Kirkland" + }, + { + "id": 1, + "name": "Karla Vazquez" + }, + { + "id": 2, + "name": "Marcia Michael" + } + ], + "greeting": "Hello, Tonia Hurst! 
You have 8 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "5e2696e56de4bbafd54d3587", + "index": 2, + "guid": "ab87ddbf-2758-4395-9ac3-1b9aaf896cd6", + "isActive": false, + "balance": "$2,863.22", + "picture": "http://placehold.it/32x32", + "age": 39, + "eyeColor": "green", + "name": "Brooks Bright", + "gender": "male", + "company": "DIGITALUS", + "email": "brooksbright@digitalus.com", + "phone": "+1 (888) 446-2993", + "address": "901 Mermaid Avenue, Wyoming, Marshall Islands, 8506", + "about": "Fugiat ad elit elit voluptate anim dolore nisi ipsum. Fugiat proident consequat eu irure. Eiusmod ut aliqua magna dolor irure Lorem adipisicing. Consequat amet eu ad occaecat aute commodo Lorem nisi exercitation.\r\n", + "registered": "2014-01-01T09:01:40 -06:00", + "latitude": -89.99231, + "longitude": 31.869389, + "tags": [ + "sunt", + "laborum", + "reprehenderit", + "laborum", + "voluptate", + "do", + "consectetur" + ], + "friends": [ + { + "id": 0, + "name": "Megan Stevens" + }, + { + "id": 1, + "name": "Jerry Bond" + }, + { + "id": 2, + "name": "Nieves Kelley" + } + ], + "greeting": "Hello, Brooks Bright! You have 8 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "5e2696e53c90e9cbfff5aa41", + "index": 3, + "guid": "707adcec-495d-4c8b-90cd-0ef9c6a2c8e6", + "isActive": false, + "balance": "$1,710.89", + "picture": "http://placehold.it/32x32", + "age": 40, + "eyeColor": "brown", + "name": "Kathrine Sharp", + "gender": "female", + "company": "QUIZMO", + "email": "kathrinesharp@quizmo.com", + "phone": "+1 (906) 445-2366", + "address": "989 Huron Street, Talpa, Utah, 7018", + "about": "Eu sunt velit ex amet ipsum Lorem quis culpa. Ea tempor velit est esse nisi excepteur duis. Excepteur esse ut eu cupidatat. Incididunt aute ex dolor ipsum deserunt tempor ipsum veniam laboris. Ullamco fugiat ea ad in dolore deserunt consequat ea quis consequat. Eu magna cupidatat excepteur minim non Lorem eiusmod amet do.\r\n", + "registered": "2016-07-30T11:33:52 -05:00", + "latitude": -76.647766, + "longitude": -168.393827, + "tags": [ + "occaecat", + "minim", + "sit", + "consectetur", + "ad", + "excepteur", + "ipsum" + ], + "friends": [ + { + "id": 0, + "name": "Pitts Ayers" + }, + { + "id": 1, + "name": "Bray Valentine" + }, + { + "id": 2, + "name": "Jerri Petty" + } + ], + "greeting": "Hello, Kathrine Sharp! You have 7 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "5e2696e5ecad926b76309b06", + "index": 4, + "guid": "cb05ebc0-8340-4859-a65e-d30ba1118770", + "isActive": false, + "balance": "$1,627.82", + "picture": "http://placehold.it/32x32", + "age": 34, + "eyeColor": "brown", + "name": "Shawn Harrell", + "gender": "female", + "company": "DECRATEX", + "email": "shawnharrell@decratex.com", + "phone": "+1 (969) 470-3922", + "address": "534 Hinsdale Street, Albany, Palau, 3291", + "about": "Excepteur est sint qui laboris. Eiusmod laborum ad labore deserunt consequat nostrud ut quis minim esse sit. Lorem anim exercitation proident cillum laborum amet nisi. Veniam laboris id laboris non id occaecat dolore. Nostrud laborum duis nostrud duis ullamco adipisicing ipsum proident sint elit Lorem et veniam minim. Eu Lorem ex ullamco do laboris cupidatat excepteur fugiat incididunt consequat id sint consectetur. 
Consequat aliquip nostrud amet ut aliquip aliquip.\r\n", + "registered": "2014-04-01T02:32:56 -06:00", + "latitude": 27.108566, + "longitude": 40.576595, + "tags": [ + "aute", + "officia", + "officia", + "deserunt", + "aute", + "tempor", + "dolore" + ], + "friends": [ + { + "id": 0, + "name": "Valentine Campbell" + }, + { + "id": 1, + "name": "Blanca Pate" + }, + { + "id": 2, + "name": "Letitia Melendez" + } + ], + "greeting": "Hello, Shawn Harrell! You have 8 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "5e2696e5334cde0ce1af76c8", + "index": 5, + "guid": "2ec05944-911e-48be-8ad4-81e9af05ac45", + "isActive": true, + "balance": "$1,287.51", + "picture": "http://placehold.it/32x32", + "age": 31, + "eyeColor": "blue", + "name": "Amy Casey", + "gender": "female", + "company": "JAMNATION", + "email": "amycasey@jamnation.com", + "phone": "+1 (899) 475-3362", + "address": "589 Townsend Street, Hiseville, Connecticut, 7082", + "about": "Est sunt dolor ullamco mollit ut aliqua voluptate irure exercitation. Dolor veniam adipisicing irure dolor est anim Lorem labore duis consequat exercitation quis elit. Irure ipsum ullamco do cupidatat pariatur qui excepteur proident voluptate et enim. Reprehenderit minim non cupidatat quis dolore proident duis nulla irure cillum fugiat sit dolor. Enim cupidatat ut sint magna.\r\n", + "registered": "2014-09-26T02:53:09 -06:00", + "latitude": 89.778477, + "longitude": 8.75994, + "tags": [ + "veniam", + "adipisicing", + "ullamco", + "aute", + "non", + "et", + "consectetur" + ], + "friends": [ + { + "id": 0, + "name": "Kim Hatfield" + }, + { + "id": 1, + "name": "Stephens Morin" + }, + { + "id": 2, + "name": "Gilmore Kim" + } + ], + "greeting": "Hello, Amy Casey! You have 5 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "5e2696e54e08309d9f5ea63b", + "index": 6, + "guid": "35700ec0-91f8-4dd8-8bfd-aef59442044d", + "isActive": true, + "balance": "$3,160.67", + "picture": "http://placehold.it/32x32", + "age": 24, + "eyeColor": "blue", + "name": "Lorena Mejia", + "gender": "female", + "company": "PLASMOX", + "email": "lorenamejia@plasmox.com", + "phone": "+1 (869) 594-2065", + "address": "614 High Street, Blanford, Maryland, 2776", + "about": "Dolor adipisicing nisi aute consectetur aute minim. Ea laborum velit velit in fugiat ea. Do sit et sunt excepteur irure ut ullamco occaecat cupidatat ut proident aliquip cillum sint.\r\n", + "registered": "2014-08-19T05:40:08 -06:00", + "latitude": 3.816982, + "longitude": 76.209376, + "tags": [ + "commodo", + "adipisicing", + "enim", + "commodo", + "commodo", + "labore", + "dolor" + ], + "friends": [ + { + "id": 0, + "name": "Leslie Stanton" + }, + { + "id": 1, + "name": "Terrell Figueroa" + }, + { + "id": 2, + "name": "Alyssa Lindsey" + } + ], + "greeting": "Hello, Lorena Mejia! You have 4 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "5e2696e55cea2cc6a4e23c98", + "index": 7, + "guid": "55e77c44-b44e-4223-8161-66df6ca941d9", + "isActive": true, + "balance": "$1,493.51", + "picture": "http://placehold.it/32x32", + "age": 29, + "eyeColor": "blue", + "name": "Allyson Wilkins", + "gender": "female", + "company": "COMTOURS", + "email": "allysonwilkins@comtours.com", + "phone": "+1 (870) 535-2589", + "address": "572 Downing Street, Ivanhoe, American Samoa, 8235", + "about": "Exercitation exercitation ullamco veniam aute nulla dolore aliqua. Duis laboris nulla aliqua incididunt cupidatat velit mollit Lorem aliquip commodo et. 
Nisi dolore nulla dolor nulla id sunt amet labore commodo Lorem nostrud. Irure dolor laboris commodo irure proident sint reprehenderit aute qui est nulla duis sit. Esse anim labore aute qui. Ea exercitation reprehenderit commodo anim. Tempor cupidatat minim laborum sit ullamco cupidatat culpa nulla aliquip dolor reprehenderit quis.\r\n", + "registered": "2015-10-17T05:38:48 -05:00", + "latitude": -1.577721, + "longitude": 118.761559, + "tags": [ + "velit", + "laboris", + "officia", + "dolor", + "veniam", + "consectetur", + "reprehenderit" + ], + "friends": [ + { + "id": 0, + "name": "Norris Wilder" + }, + { + "id": 1, + "name": "Heidi Holmes" + }, + { + "id": 2, + "name": "Lindsey Gibson" + } + ], + "greeting": "Hello, Allyson Wilkins! You have 9 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "5e2696e5075f72eb75b9f19f", + "index": 8, + "guid": "8f815668-9fba-4cde-9476-4d060fa8e417", + "isActive": false, + "balance": "$2,010.78", + "picture": "http://placehold.it/32x32", + "age": 38, + "eyeColor": "brown", + "name": "Mays Reed", + "gender": "male", + "company": "COMTENT", + "email": "maysreed@comtent.com", + "phone": "+1 (948) 567-2624", + "address": "306 Georgia Avenue, Hall, New Mexico, 1402", + "about": "Incididunt ipsum Lorem dolore pariatur nulla dolor commodo commodo do. Veniam do id consectetur pariatur. Qui aliquip sint id dolore minim mollit fugiat Lorem quis labore duis magna excepteur. Nostrud sint proident eiusmod nulla.\r\n", + "registered": "2014-04-05T02:16:43 -06:00", + "latitude": 65.296414, + "longitude": -120.359175, + "tags": [ + "reprehenderit", + "cupidatat", + "nostrud", + "veniam", + "et", + "nisi", + "quis" + ], + "friends": [ + { + "id": 0, + "name": "Mcguire Hodge" + }, + { + "id": 1, + "name": "Miles Cohen" + }, + { + "id": 2, + "name": "Washington Gonzalez" + } + ], + "greeting": "Hello, Mays Reed! You have 10 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "5e2696e5e1e3178a031395a4", + "index": 9, + "guid": "7171e4d7-ae00-4304-94b2-37de7e9952f7", + "isActive": true, + "balance": "$3,003.39", + "picture": "http://placehold.it/32x32", + "age": 27, + "eyeColor": "green", + "name": "Katherine Mayer", + "gender": "female", + "company": "EXTRAGEN", + "email": "katherinemayer@extragen.com", + "phone": "+1 (942) 598-2468", + "address": "640 Prescott Place, Curtice, Kansas, 3395", + "about": "Id eiusmod aliqua officia consequat irure amet sint consequat exercitation adipisicing ex Lorem commodo dolore. Nulla aliqua culpa amet tempor incididunt tempor sint veniam esse tempor anim. Commodo proident non excepteur dolore occaecat sint. Consectetur ullamco Lorem incididunt eu culpa reprehenderit sunt. Amet magna magna excepteur cupidatat enim est tempor ad aliquip. Laboris irure dolore proident nulla ut. Ut laboris exercitation proident laborum et sint ex labore exercitation consectetur consequat.\r\n", + "registered": "2017-02-25T07:32:15 -05:00", + "latitude": 45.747595, + "longitude": -34.060273, + "tags": [ + "nisi", + "proident", + "aliqua", + "et", + "non", + "aute", + "incididunt" + ], + "friends": [ + { + "id": 0, + "name": "Kellie Cervantes" + }, + { + "id": 1, + "name": "Keller Kline" + }, + { + "id": 2, + "name": "Ross Robles" + } + ], + "greeting": "Hello, Katherine Mayer! 
You have 3 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "5e2696e56c1380ffbcb766a7", + "index": 10, + "guid": "1d769418-d13a-4283-badb-1d99cb0231bd", + "isActive": true, + "balance": "$1,684.33", + "picture": "http://placehold.it/32x32", + "age": 35, + "eyeColor": "brown", + "name": "Kelly Byers", + "gender": "female", + "company": "ENAUT", + "email": "kellybyers@enaut.com", + "phone": "+1 (845) 467-2830", + "address": "865 Revere Place, Homeland, Arizona, 232", + "about": "Fugiat esse duis fugiat do velit voluptate. Irure sit velit cillum deserunt consequat. Do dolore quis duis minim reprehenderit.\r\n", + "registered": "2016-01-13T07:21:31 -05:00", + "latitude": -20.437555, + "longitude": 78.241131, + "tags": [ + "magna", + "tempor", + "sint", + "eu", + "laborum", + "Lorem", + "dolor" + ], + "friends": [ + { + "id": 0, + "name": "Laurel Matthews" + }, + { + "id": 1, + "name": "Griffin Reilly" + }, + { + "id": 2, + "name": "Finley Wood" + } + ], + "greeting": "Hello, Kelly Byers! You have 1 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "5e2696e55c3307acf755d71b", + "index": 11, + "guid": "4ffe0127-2536-439e-bb29-f5a72659a356", + "isActive": true, + "balance": "$3,866.05", + "picture": "http://placehold.it/32x32", + "age": 32, + "eyeColor": "blue", + "name": "Schwartz Carey", + "gender": "male", + "company": "GEOSTELE", + "email": "schwartzcarey@geostele.com", + "phone": "+1 (969) 531-2988", + "address": "860 Centre Street, Hiwasse, Nevada, 2819", + "about": "Proident commodo laborum ullamco consequat consequat nulla occaecat veniam magna laborum. Nostrud veniam magna excepteur in eiusmod in in in ullamco laboris laborum. Eiusmod voluptate id culpa anim tempor magna exercitation ad pariatur do et laboris esse.\r\n", + "registered": "2016-07-11T11:32:39 -05:00", + "latitude": 11.840936, + "longitude": -154.689887, + "tags": [ + "et", + "laborum", + "proident", + "enim", + "laboris", + "voluptate", + "anim" + ], + "friends": [ + { + "id": 0, + "name": "May Richards" + }, + { + "id": 1, + "name": "Carmella Burks" + }, + { + "id": 2, + "name": "Erma Buchanan" + } + ], + "greeting": "Hello, Schwartz Carey! You have 5 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "5e2696e5eee7e916099e072b", + "index": 12, + "guid": "a237124b-f53e-4784-85d7-cb29018183bc", + "isActive": false, + "balance": "$2,697.81", + "picture": "http://placehold.it/32x32", + "age": 34, + "eyeColor": "blue", + "name": "Kay Beasley", + "gender": "female", + "company": "ECOLIGHT", + "email": "kaybeasley@ecolight.com", + "phone": "+1 (808) 461-2509", + "address": "358 Sutton Street, Bellamy, Ohio, 8845", + "about": "Anim ea qui cillum nostrud minim. Dolor qui mollit excepteur amet magna excepteur sunt ullamco sint proident laborum. Ea duis aliqua laborum veniam veniam consectetur sit. Sit aute nisi ipsum fugiat consequat. Magna mollit adipisicing tempor sint.\r\n", + "registered": "2018-01-14T08:28:19 -05:00", + "latitude": 64.653955, + "longitude": -162.499918, + "tags": [ + "nisi", + "excepteur", + "magna", + "voluptate", + "velit", + "dolore", + "et" + ], + "friends": [ + { + "id": 0, + "name": "Fanny Preston" + }, + { + "id": 1, + "name": "Roth Harris" + }, + { + "id": 2, + "name": "Rosalie Conley" + } + ], + "greeting": "Hello, Kay Beasley! 
You have 1 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "5e2696e53a0787549dc666b1", + "index": 13, + "guid": "c03d97a3-548e-400e-8ab9-8d4fdbc7c18a", + "isActive": true, + "balance": "$1,596.10", + "picture": "http://placehold.it/32x32", + "age": 23, + "eyeColor": "brown", + "name": "Robbins Gordon", + "gender": "male", + "company": "EXTREMO", + "email": "robbinsgordon@extremo.com", + "phone": "+1 (985) 456-3139", + "address": "610 Langham Street, Boykin, Guam, 6688", + "about": "Adipisicing ad magna deserunt ea duis. Incididunt aute non enim adipisicing do nisi aliqua consequat esse sit occaecat nulla irure. Lorem sunt adipisicing ad eiusmod cupidatat sunt adipisicing. Eiusmod nisi pariatur amet est aliquip excepteur consectetur minim id velit excepteur Lorem amet. Nostrud reprehenderit labore aute exercitation ullamco laboris irure do culpa magna proident fugiat id dolor.\r\n", + "registered": "2019-11-08T10:06:39 -05:00", + "latitude": 68.329626, + "longitude": 126.389329, + "tags": [ + "in", + "esse", + "sunt", + "aliquip", + "pariatur", + "mollit", + "qui" + ], + "friends": [ + { + "id": 0, + "name": "Luisa Jenkins" + }, + { + "id": 1, + "name": "Sharpe Kirby" + }, + { + "id": 2, + "name": "Alana Spence" + } + ], + "greeting": "Hello, Robbins Gordon! You have 8 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "5e2696e5789ee072ba71b2a5", + "index": 14, + "guid": "e4c9c853-1bd7-470b-ab40-2de483756f5f", + "isActive": false, + "balance": "$3,233.71", + "picture": "http://placehold.it/32x32", + "age": 39, + "eyeColor": "blue", + "name": "Hillary Bauer", + "gender": "female", + "company": "SENMAO", + "email": "hillarybauer@senmao.com", + "phone": "+1 (967) 525-2943", + "address": "951 Cumberland Street, Alleghenyville, Oregon, 7073", + "about": "Aliquip sint consectetur ex commodo minim non aute mollit reprehenderit deserunt. Eu officia labore ad laborum fugiat nisi dolor amet cupidatat velit voluptate. Quis aliqua commodo magna adipisicing in officia mollit amet velit fugiat minim velit. Consectetur tempor culpa aliquip exercitation eiusmod labore eiusmod deserunt cupidatat. Et voluptate officia consectetur pariatur.\r\n", + "registered": "2014-05-08T09:30:42 -06:00", + "latitude": 54.363979, + "longitude": 19.351786, + "tags": [ + "amet", + "dolore", + "non", + "cupidatat", + "do", + "magna", + "exercitation" + ], + "friends": [ + { + "id": 0, + "name": "Glass Vang" + }, + { + "id": 1, + "name": "Charlotte Summers" + }, + { + "id": 2, + "name": "Toni Walton" + } + ], + "greeting": "Hello, Hillary Bauer! You have 6 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "5e2696e52b8620e2cbb90c83", + "index": 15, + "guid": "6c2989b6-6b1b-429d-a735-c929d679ae2a", + "isActive": true, + "balance": "$3,683.70", + "picture": "http://placehold.it/32x32", + "age": 36, + "eyeColor": "brown", + "name": "Ruiz Phelps", + "gender": "male", + "company": "DOGNOSIS", + "email": "ruizphelps@dognosis.com", + "phone": "+1 (860) 558-3280", + "address": "836 Troutman Street, Harborton, Kentucky, 4030", + "about": "Aute cillum esse adipisicing do reprehenderit ea velit reprehenderit velit consectetur labore. Sunt nostrud pariatur in ea fugiat. Velit elit cupidatat commodo eiusmod fugiat sint proident minim. Est fugiat voluptate incididunt sunt voluptate incididunt qui. Irure minim Lorem tempor irure irure ex tempor in adipisicing nisi occaecat nulla ea veniam. Nulla ut exercitation id mollit aliquip aliqua mollit irure aliqua aliqua id incididunt tempor. 
Pariatur incididunt incididunt excepteur non proident tempor tempor.\r\n", + "registered": "2017-01-24T12:12:16 -05:00", + "latitude": -21.117289, + "longitude": 138.033558, + "tags": [ + "duis", + "aute", + "quis", + "deserunt", + "cupidatat", + "labore", + "adipisicing" + ], + "friends": [ + { + "id": 0, + "name": "Regina Horn" + }, + { + "id": 1, + "name": "Daisy Schroeder" + }, + { + "id": 2, + "name": "Nora Sullivan" + } + ], + "greeting": "Hello, Ruiz Phelps! You have 7 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "5e2696e56b574aef6b6da396", + "index": 16, + "guid": "5ea84c0a-66f5-46dc-a1ba-0eae9b9c9550", + "isActive": false, + "balance": "$3,242.86", + "picture": "http://placehold.it/32x32", + "age": 31, + "eyeColor": "green", + "name": "Carolina Bryant", + "gender": "female", + "company": "SENTIA", + "email": "carolinabryant@sentia.com", + "phone": "+1 (971) 600-3113", + "address": "377 Middagh Street, Ellerslie, Nebraska, 2644", + "about": "Exercitation irure aute esse culpa anim id aliqua fugiat dolore Lorem Lorem. Aliquip eu non proident dolor commodo voluptate nisi minim elit. Non eu mollit duis amet dolore. Ex ipsum cillum est mollit eiusmod excepteur nostrud et adipisicing adipisicing reprehenderit voluptate proident irure. Eu Lorem laboris aliqua irure excepteur irure sit eu.\r\n", + "registered": "2014-01-12T05:13:52 -06:00", + "latitude": -62.525677, + "longitude": 135.054493, + "tags": [ + "laboris", + "consequat", + "ut", + "magna", + "minim", + "ex", + "eu" + ], + "friends": [ + { + "id": 0, + "name": "Burke Price" + }, + { + "id": 1, + "name": "Lambert Weeks" + }, + { + "id": 2, + "name": "Brown Berg" + } + ], + "greeting": "Hello, Carolina Bryant! You have 5 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "5e2696e578f40fefcf216b80", + "index": 17, + "guid": "cf11331e-e9ba-4a34-a849-6512d731e683", + "isActive": false, + "balance": "$1,031.54", + "picture": "http://placehold.it/32x32", + "age": 31, + "eyeColor": "blue", + "name": "Sosa Lee", + "gender": "male", + "company": "NORSUL", + "email": "sosalee@norsul.com", + "phone": "+1 (894) 490-2333", + "address": "364 Holly Street, Omar, California, 5140", + "about": "Esse nulla eu veniam qui labore nostrud enim cupidatat. Incididunt in fugiat elit sunt anim id. Ea minim enim nulla officia aute commodo occaecat occaecat ipsum irure ex laborum cupidatat. Cillum est enim consectetur sunt aute duis incididunt incididunt aliqua ea proident deserunt. Irure velit nisi reprehenderit do ea aliquip reprehenderit et et veniam. Et occaecat ut enim aliqua ut velit veniam commodo deserunt irure ipsum laboris labore consectetur.\r\n", + "registered": "2019-04-29T05:27:50 -05:00", + "latitude": -27.638349, + "longitude": -51.382204, + "tags": [ + "exercitation", + "occaecat", + "in", + "est", + "commodo", + "ad", + "fugiat" + ], + "friends": [ + { + "id": 0, + "name": "Henrietta Horton" + }, + { + "id": 1, + "name": "Barnes Burton" + }, + { + "id": 2, + "name": "Robertson Nichols" + } + ], + "greeting": "Hello, Sosa Lee! 
You have 10 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "5e2696e5e8dba3478fdbf7f9", + "index": 18, + "guid": "deeaba55-b5a4-4ec1-bfb3-0a3cd0e153b0", + "isActive": false, + "balance": "$1,732.82", + "picture": "http://placehold.it/32x32", + "age": 24, + "eyeColor": "brown", + "name": "Lorna Scott", + "gender": "female", + "company": "QNEKT", + "email": "lornascott@qnekt.com", + "phone": "+1 (837) 559-2342", + "address": "878 Marconi Place, Gerton, Alabama, 845", + "about": "Pariatur dolore dolor tempor duis. Ex deserunt non anim ea nulla nisi mollit cupidatat sunt consequat. Fugiat tempor deserunt proident incididunt qui cillum nostrud adipisicing id aute tempor sint deserunt. Aliqua aliquip mollit duis occaecat est veniam pariatur est laboris Lorem ad ullamco ea.\r\n", + "registered": "2018-10-14T07:23:51 -05:00", + "latitude": -27.99673, + "longitude": 86.876157, + "tags": [ + "qui", + "et", + "irure", + "minim", + "reprehenderit", + "occaecat", + "pariatur" + ], + "friends": [ + { + "id": 0, + "name": "Natalia Keller" + }, + { + "id": 1, + "name": "Cardenas Mckee" + }, + { + "id": 2, + "name": "Kenya Hutchinson" + } + ], + "greeting": "Hello, Lorna Scott! You have 1 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "5e2696e569c3cd480a1daead", + "index": 19, + "guid": "061c9516-d41e-43da-a1ee-c9ce14828170", + "isActive": true, + "balance": "$2,778.30", + "picture": "http://placehold.it/32x32", + "age": 32, + "eyeColor": "brown", + "name": "Bernard Holden", + "gender": "male", + "company": "ESCHOIR", + "email": "bernardholden@eschoir.com", + "phone": "+1 (980) 435-2632", + "address": "674 Pine Street, Conestoga, Mississippi, 4727", + "about": "Quis labore laboris aute tempor est voluptate. Ipsum est est ut officia elit quis consectetur aliquip deserunt est occaecat officia adipisicing. Ex officia duis Lorem reprehenderit id exercitation fugiat qui dolor. Incididunt adipisicing anim culpa adipisicing id proident laborum ad culpa eiusmod elit. Qui Lorem commodo ut est nostrud est.\r\n", + "registered": "2019-11-18T01:20:14 -05:00", + "latitude": 51.959052, + "longitude": 175.310976, + "tags": [ + "tempor", + "sit", + "non", + "non", + "culpa", + "fugiat", + "aliqua" + ], + "friends": [ + { + "id": 0, + "name": "Karen Bradshaw" + }, + { + "id": 1, + "name": "Taylor Barnes" + }, + { + "id": 2, + "name": "Calhoun Ingram" + } + ], + "greeting": "Hello, Bernard Holden! You have 3 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "5e2696e51cfb1999ffd17c67", + "index": 20, + "guid": "f0e3fb48-51db-4cdd-a6a3-a4fef837ea75", + "isActive": false, + "balance": "$3,720.10", + "picture": "http://placehold.it/32x32", + "age": 31, + "eyeColor": "blue", + "name": "Craft Shields", + "gender": "male", + "company": "INSURON", + "email": "craftshields@insuron.com", + "phone": "+1 (879) 578-2536", + "address": "586 Java Street, Catherine, Arkansas, 1445", + "about": "Occaecat quis excepteur sint dolor culpa. Lorem ullamco exercitation magna incididunt labore. Laborum veniam officia ipsum veniam reprehenderit. Dolor ex amet officia nostrud.\r\n", + "registered": "2017-01-21T02:39:55 -05:00", + "latitude": -62.079599, + "longitude": 4.346601, + "tags": [ + "in", + "eiusmod", + "consequat", + "aliquip", + "adipisicing", + "velit", + "esse" + ], + "friends": [ + { + "id": 0, + "name": "White Morton" + }, + { + "id": 1, + "name": "Huff Whitaker" + }, + { + "id": 2, + "name": "Estrada Chen" + } + ], + "greeting": "Hello, Craft Shields! 
You have 7 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "5e2696e5b9a69e99b9e1fd0c", + "index": 21, + "guid": "1752dd9e-a96b-44a8-9d12-50f5d54b31c8", + "isActive": true, + "balance": "$3,542.41", + "picture": "http://placehold.it/32x32", + "age": 23, + "eyeColor": "blue", + "name": "Mara English", + "gender": "female", + "company": "CYCLONICA", + "email": "maraenglish@cyclonica.com", + "phone": "+1 (827) 458-2276", + "address": "324 Herkimer Court, Boomer, Delaware, 5367", + "about": "Deserunt duis laborum commodo officia veniam quis labore in veniam excepteur id ut sint nostrud. Veniam commodo velit anim officia. Non ut exercitation labore ut irure quis id Lorem dolor deserunt consequat aute amet fugiat.\r\n", + "registered": "2015-08-15T09:17:15 -05:00", + "latitude": -30.226127, + "longitude": 143.730353, + "tags": [ + "cillum", + "qui", + "reprehenderit", + "consectetur", + "mollit", + "culpa", + "Lorem" + ], + "friends": [ + { + "id": 0, + "name": "Sykes Pena" + }, + { + "id": 1, + "name": "Ingrid Francis" + }, + { + "id": 2, + "name": "Burris Mcleod" + } + ], + "greeting": "Hello, Mara English! You have 6 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "5e2696e50ae7507c7b0224f2", + "index": 22, + "guid": "bf6188ac-09be-45b2-ac85-c24e87160893", + "isActive": true, + "balance": "$3,859.14", + "picture": "http://placehold.it/32x32", + "age": 30, + "eyeColor": "brown", + "name": "Fisher Levy", + "gender": "male", + "company": "XLEEN", + "email": "fisherlevy@xleen.com", + "phone": "+1 (840) 471-3795", + "address": "540 Adler Place, Hachita, Federated States Of Micronesia, 9894", + "about": "Consequat nostrud tempor excepteur proident nisi laboris mollit reprehenderit nisi. Irure cillum exercitation eu in exercitation Lorem eiusmod do non magna nulla cillum sit nisi. Pariatur voluptate elit adipisicing et magna incididunt commodo aliqua ad ea mollit ex sit cupidatat. Et anim anim in ipsum anim dolore enim nulla.\r\n", + "registered": "2015-07-08T06:21:03 -05:00", + "latitude": -5.525898, + "longitude": 146.347582, + "tags": [ + "ipsum", + "laboris", + "consequat", + "sunt", + "reprehenderit", + "commodo", + "id" + ], + "friends": [ + { + "id": 0, + "name": "Franks Houston" + }, + { + "id": 1, + "name": "Wolfe Pollard" + }, + { + "id": 2, + "name": "Louisa Sykes" + } + ], + "greeting": "Hello, Fisher Levy! You have 10 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "5e2696e50618b76e973b472c", + "index": 23, + "guid": "3f460658-984c-4f21-bb34-427ccc86089c", + "isActive": false, + "balance": "$3,537.44", + "picture": "http://placehold.it/32x32", + "age": 25, + "eyeColor": "blue", + "name": "Cecelia Snyder", + "gender": "female", + "company": "LETPRO", + "email": "ceceliasnyder@letpro.com", + "phone": "+1 (814) 429-2909", + "address": "236 Anchorage Place, Odessa, Michigan, 6314", + "about": "Occaecat proident anim nisi aliquip pariatur ex in laborum minim aliquip. Dolore consectetur laborum Lorem sint ullamco. Exercitation dolore pariatur ea aliqua commodo elit minim cillum id.\r\n", + "registered": "2015-12-15T08:51:43 -05:00", + "latitude": -4.951759, + "longitude": 128.468969, + "tags": [ + "sit", + "ullamco", + "irure", + "esse", + "laborum", + "qui", + "occaecat" + ], + "friends": [ + { + "id": 0, + "name": "Erica Pacheco" + }, + { + "id": 1, + "name": "Shields Phillips" + }, + { + "id": 2, + "name": "Delores Mcfadden" + } + ], + "greeting": "Hello, Cecelia Snyder! 
You have 5 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "5e2696e55158d54bef1e8084", + "index": 24, + "guid": "abc8c35c-08c1-40f2-9cf3-dfc80eb8dacc", + "isActive": false, + "balance": "$2,243.26", + "picture": "http://placehold.it/32x32", + "age": 22, + "eyeColor": "blue", + "name": "Nina Kaufman", + "gender": "female", + "company": "BLEENDOT", + "email": "ninakaufman@bleendot.com", + "phone": "+1 (889) 529-2118", + "address": "538 Ashford Street, Boling, West Virginia, 7840", + "about": "Tempor tempor irure pariatur pariatur magna nostrud mollit deserunt. Cillum eiusmod irure velit proident veniam qui occaecat qui quis sint duis consectetur. Aliquip ullamco sit laborum irure. Labore sit culpa enim do amet. Labore eiusmod aliqua officia aute do ut.\r\n", + "registered": "2018-08-14T03:11:42 -05:00", + "latitude": 7.711409, + "longitude": 59.54525, + "tags": [ + "aute", + "deserunt", + "incididunt", + "est", + "cillum", + "in", + "adipisicing" + ], + "friends": [ + { + "id": 0, + "name": "Knox Wiggins" + }, + { + "id": 1, + "name": "Mckenzie Atkinson" + }, + { + "id": 2, + "name": "Christensen Velasquez" + } + ], + "greeting": "Hello, Nina Kaufman! You have 3 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "5e2696e5c113d7c5468253e8", + "index": 25, + "guid": "578a92d3-802b-46af-b887-fe410399cb56", + "isActive": true, + "balance": "$2,201.57", + "picture": "http://placehold.it/32x32", + "age": 37, + "eyeColor": "blue", + "name": "Dillard Branch", + "gender": "male", + "company": "ZOINAGE", + "email": "dillardbranch@zoinage.com", + "phone": "+1 (810) 440-3306", + "address": "225 Hampton Avenue, Bethany, Pennsylvania, 8056", + "about": "Non ex labore sit elit ullamco et reprehenderit in. Reprehenderit reprehenderit mollit nisi magna elit nulla aliquip magna. Adipisicing ex nostrud dolore cillum excepteur ut duis. Ipsum qui dolore pariatur excepteur qui. Cupidatat eu proident ipsum ut. Tempor deserunt irure consequat reprehenderit occaecat. Ex dolore ullamco duis id consectetur anim ipsum ea est veniam deserunt cillum.\r\n", + "registered": "2017-12-02T01:17:11 -05:00", + "latitude": 54.320747, + "longitude": -98.69116, + "tags": [ + "nisi", + "ea", + "mollit", + "velit", + "aliqua", + "fugiat", + "Lorem" + ], + "friends": [ + { + "id": 0, + "name": "Lisa Bishop" + }, + { + "id": 1, + "name": "Bertha Lott" + }, + { + "id": 2, + "name": "Dora Fitzpatrick" + } + ], + "greeting": "Hello, Dillard Branch! You have 8 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "5e2696e5ce5ce9b29c226e8a", + "index": 26, + "guid": "d699547d-6156-4476-ba9f-9ea1f4992595", + "isActive": true, + "balance": "$1,383.56", + "picture": "http://placehold.it/32x32", + "age": 33, + "eyeColor": "blue", + "name": "Dyer Bartlett", + "gender": "male", + "company": "KNEEDLES", + "email": "dyerbartlett@kneedles.com", + "phone": "+1 (964) 466-3619", + "address": "880 Meadow Street, Seymour, Puerto Rico, 7921", + "about": "Duis in qui officia enim irure proident reprehenderit excepteur ut est voluptate deserunt sunt laboris. Ullamco incididunt pariatur eu esse nostrud aute ex do. 
Laborum ea qui anim dolor commodo cillum cupidatat in.\r\n", + "registered": "2015-05-31T06:22:45 -05:00", + "latitude": 86.391749, + "longitude": 39.208117, + "tags": [ + "ea", + "exercitation", + "excepteur", + "elit", + "velit", + "enim", + "elit" + ], + "friends": [ + { + "id": 0, + "name": "Tyson Hardy" + }, + { + "id": 1, + "name": "Owen Rogers" + }, + { + "id": 2, + "name": "Georgia Merrill" + } + ], + "greeting": "Hello, Dyer Bartlett! You have 10 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "5e2696e5562c047a17bc553e", + "index": 27, + "guid": "d38794d0-356b-416e-8d65-a42126fd9c55", + "isActive": true, + "balance": "$1,950.14", + "picture": "http://placehold.it/32x32", + "age": 20, + "eyeColor": "brown", + "name": "Tyler Dotson", + "gender": "male", + "company": "COMBOGENE", + "email": "tylerdotson@combogene.com", + "phone": "+1 (915) 409-2491", + "address": "220 Herkimer Place, Turpin, Oklahoma, 4468", + "about": "Ad cupidatat labore duis id incididunt pariatur. Officia cillum labore reprehenderit aliqua id incididunt velit ea adipisicing esse dolore minim. Aute duis velit duis consectetur commodo cillum occaecat mollit anim. Sit exercitation ullamco dolor elit laboris amet sunt pariatur. Voluptate elit cupidatat fugiat incididunt sint Lorem officia proident ex Lorem non. Ipsum ut non exercitation nulla proident esse officia quis excepteur in cupidatat sint.\r\n", + "registered": "2014-08-12T01:21:54 -06:00", + "latitude": -74.786151, + "longitude": 10.688776, + "tags": [ + "veniam", + "cillum", + "eiusmod", + "esse", + "proident", + "excepteur", + "tempor" + ], + "friends": [ + { + "id": 0, + "name": "Rodriquez Huffman" + }, + { + "id": 1, + "name": "Cantu Mack" + }, + { + "id": 2, + "name": "Wooten Stephenson" + } + ], + "greeting": "Hello, Tyler Dotson! 
You have 7 unread messages.", + "favoriteFruit": "strawberry" + } +] \ No newline at end of file diff --git a/main.py b/main.py deleted file mode 100644 index dace689..0000000 --- a/main.py +++ /dev/null @@ -1,3 +0,0 @@ -print('Hello world!') - - diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..3275e42 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,6 @@ +pytest==7.1.2 +pytest-cov==3.0.0 +tox==3.25.0 +pytest-xdist==2.5.0 +parameterized==0.8.1 +pytest-rerunfailures==10.2 \ No newline at end of file diff --git a/src/reader.py b/src/reader.py new file mode 100644 index 0000000..3a8a391 --- /dev/null +++ b/src/reader.py @@ -0,0 +1,50 @@ +import csv +import json +from __init__ import get_path + +# Read the data from the CSV and JSON files +books_file = get_path('books.csv') +with open(books_file, 'r') as f: + books = list(csv.DictReader(f)) + +users_file = get_path('users.json') +with open(users_file, 'r') as f: + users = json.load(f) + +inform_users = [] +for person in users: + in_user = { + "name": person["name"], + "gender": person["gender"], + "address": person["address"], + "age": person["age"], + "books": [] + } + inform_users.append(in_user) + +short_books = [] +for book in books: + about_book = { + "Title": book["Title"], + "Author": book["Author"], + "Pages": book["Pages"], + "Genre": book["Genre"] + } + short_books.append(about_book) + +books_count = len(short_books) // len(inform_users) +residuals = len(short_books) % len(inform_users) +book_index = 0 +for user in inform_users: + user_books = short_books[book_index:book_index + books_count] + if residuals > 0: + user_books.append(short_books[book_index + books_count]) + residuals -= 1 + book_index += 1 + + user["books"] = user_books + book_index += books_count + +# Write the result to a JSON file +with open(get_path('result.json'), 'w') as f: + json.dump(inform_users, f, indent=4) diff --git a/venv/.gitignore b/venv/.gitignore deleted file mode 100644 index d173f9d..0000000 --- a/venv/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -# created by virtualenv automatically -* -.idea/ -venv/ \ No newline at end of file diff --git a/venv/bin/activate b/venv/bin/activate index 2392099..14ee19d 100644 --- a/venv/bin/activate +++ b/venv/bin/activate @@ -1,41 +1,35 @@ # This file must be used with "source bin/activate" *from bash* # you cannot run it directly - -if [ "${BASH_SOURCE-}" = "$0" ]; then - echo "You must source this script: \$ source $0" >&2 - exit 33 -fi - deactivate () { - unset -f pydoc >/dev/null 2>&1 || true - # reset old environment variables - # ! [ -z ${VAR+_} ] returns true if VAR is declared at all - if ! [ -z "${_OLD_VIRTUAL_PATH:+_}" ] ; then - PATH="$_OLD_VIRTUAL_PATH" + if [ -n "${_OLD_VIRTUAL_PATH:-}" ] ; then + PATH="${_OLD_VIRTUAL_PATH:-}" export PATH unset _OLD_VIRTUAL_PATH fi - if ! [ -z "${_OLD_VIRTUAL_PYTHONHOME+_}" ] ; then - PYTHONHOME="$_OLD_VIRTUAL_PYTHONHOME" + if [ -n "${_OLD_VIRTUAL_PYTHONHOME:-}" ] ; then + PYTHONHOME="${_OLD_VIRTUAL_PYTHONHOME:-}" export PYTHONHOME unset _OLD_VIRTUAL_PYTHONHOME fi - # The hash command must be called to get it to forget past - # commands. Without forgetting past commands the $PATH changes - # we made may not be respected - hash -r 2>/dev/null + # This should detect bash and zsh, which have a hash command that must + be called to get it to forget past commands. 
Without forgetting + # past commands the $PATH changes we made may not be respected + if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then + hash -r 2> /dev/null + fi - if ! [ -z "${_OLD_VIRTUAL_PS1+_}" ] ; then - PS1="$_OLD_VIRTUAL_PS1" + if [ -n "${_OLD_VIRTUAL_PS1:-}" ] ; then + PS1="${_OLD_VIRTUAL_PS1:-}" export PS1 unset _OLD_VIRTUAL_PS1 fi unset VIRTUAL_ENV - if [ ! "${1-}" = "nondestructive" ] ; then + unset VIRTUAL_ENV_PROMPT + if [ ! "${1:-}" = "nondestructive" ] ; then # Self destruct! unset -f deactivate fi @@ -44,10 +38,7 @@ deactivate () { # unset irrelevant variables deactivate nondestructive -VIRTUAL_ENV='/home/poveteva/PycharmProjects/HelloWorld/venv' -if ([ "$OSTYPE" = "cygwin" ] || [ "$OSTYPE" = "msys" ]) && $(command -v cygpath &> /dev/null) ; then - VIRTUAL_ENV=$(cygpath -u "$VIRTUAL_ENV") -fi +VIRTUAL_ENV="/home/olesya2/PycharmProjects/OtusCourse/venv" export VIRTUAL_ENV _OLD_VIRTUAL_PATH="$PATH" @@ -55,29 +46,24 @@ PATH="$VIRTUAL_ENV/bin:$PATH" export PATH # unset PYTHONHOME if set -if ! [ -z "${PYTHONHOME+_}" ] ; then - _OLD_VIRTUAL_PYTHONHOME="$PYTHONHOME" +# this will fail if PYTHONHOME is set to the empty string (which is bad anyway) +# could use `if (set -u; : $PYTHONHOME) ;` in bash +if [ -n "${PYTHONHOME:-}" ] ; then + _OLD_VIRTUAL_PYTHONHOME="${PYTHONHOME:-}" unset PYTHONHOME fi -if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT-}" ] ; then - _OLD_VIRTUAL_PS1="${PS1-}" - if [ "x" != x ] ; then - PS1="() ${PS1-}" - else - PS1="(`basename \"$VIRTUAL_ENV\"`) ${PS1-}" - fi +if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT:-}" ] ; then + _OLD_VIRTUAL_PS1="${PS1:-}" + PS1="(venv) ${PS1:-}" export PS1 + VIRTUAL_ENV_PROMPT="(venv) " + export VIRTUAL_ENV_PROMPT fi -# Make sure to unalias pydoc if it's already there -alias pydoc 2>/dev/null >/dev/null && unalias pydoc || true - -pydoc () { - python -m pydoc "$@" -} - -# The hash command must be called to get it to forget past -# commands. Without forgetting past commands the $PATH changes -# we made may not be respected -hash -r 2>/dev/null +# This should detect bash and zsh, which have a hash command that must +# be called to get it to forget past commands. Without forgetting +# past commands the $PATH changes we made may not be respected +if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then + hash -r 2> /dev/null +fi diff --git a/venv/bin/activate.csh b/venv/bin/activate.csh index cb69843..d71a197 100644 --- a/venv/bin/activate.csh +++ b/venv/bin/activate.csh @@ -1,55 +1,26 @@ # This file must be used with "source bin/activate.csh" *from csh*. # You cannot run it directly. # Created by Davide Di Blasi . +# Ported to Python 3.3 venv by Andrew Svetlov -set newline='\ -' - -alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH:q" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT:q" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; test "\!:*" != "nondestructive" && unalias deactivate && unalias pydoc' +alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; unsetenv VIRTUAL_ENV_PROMPT; test "\!:*" != "nondestructive" && unalias deactivate' # Unset irrelevant variables. 
deactivate nondestructive -setenv VIRTUAL_ENV '/home/poveteva/PycharmProjects/HelloWorld/venv' - -set _OLD_VIRTUAL_PATH="$PATH:q" -setenv PATH "$VIRTUAL_ENV:q/bin:$PATH:q" - +setenv VIRTUAL_ENV "/home/olesya2/PycharmProjects/OtusCourse/venv" +set _OLD_VIRTUAL_PATH="$PATH" +setenv PATH "$VIRTUAL_ENV/bin:$PATH" -if ('' != "") then - set env_name = '() ' -else - set env_name = '('"$VIRTUAL_ENV:t:q"') ' -endif -if ( $?VIRTUAL_ENV_DISABLE_PROMPT ) then - if ( $VIRTUAL_ENV_DISABLE_PROMPT == "" ) then - set do_prompt = "1" - else - set do_prompt = "0" - endif -else - set do_prompt = "1" -endif +set _OLD_VIRTUAL_PROMPT="$prompt" -if ( $do_prompt == "1" ) then - # Could be in a non-interactive environment, - # in which case, $prompt is undefined and we wouldn't - # care about the prompt anyway. - if ( $?prompt ) then - set _OLD_VIRTUAL_PROMPT="$prompt:q" - if ( "$prompt:q" =~ *"$newline:q"* ) then - : - else - set prompt = "$env_name:q$prompt:q" - endif - endif +if (! "$?VIRTUAL_ENV_DISABLE_PROMPT") then + set prompt = "(venv) $prompt" + setenv VIRTUAL_ENV_PROMPT "(venv) " endif -unset env_name -unset do_prompt - alias pydoc python -m pydoc rehash diff --git a/venv/bin/activate.fish b/venv/bin/activate.fish index e9a8565..5b6fe97 100644 --- a/venv/bin/activate.fish +++ b/venv/bin/activate.fish @@ -1,100 +1,69 @@ -# This file must be used using `source bin/activate.fish` *within a running fish ( http://fishshell.com ) session*. -# Do not run it directly. +# This file must be used with "source /bin/activate.fish" *from fish* +# (https://fishshell.com/); you cannot run it directly. -function _bashify_path -d "Converts a fish path to something bash can recognize" - set fishy_path $argv - set bashy_path $fishy_path[1] - for path_part in $fishy_path[2..-1] - set bashy_path "$bashy_path:$path_part" - end - echo $bashy_path -end - -function _fishify_path -d "Converts a bash path to something fish can recognize" - echo $argv | tr ':' '\n' -end - -function deactivate -d 'Exit virtualenv mode and return to the normal environment.' +function deactivate -d "Exit virtual environment and return to normal shell environment" # reset old environment variables if test -n "$_OLD_VIRTUAL_PATH" - # https://github.com/fish-shell/fish-shell/issues/436 altered PATH handling - if test (echo $FISH_VERSION | head -c 1) -lt 3 - set -gx PATH (_fishify_path "$_OLD_VIRTUAL_PATH") - else - set -gx PATH $_OLD_VIRTUAL_PATH - end + set -gx PATH $_OLD_VIRTUAL_PATH set -e _OLD_VIRTUAL_PATH end - if test -n "$_OLD_VIRTUAL_PYTHONHOME" - set -gx PYTHONHOME "$_OLD_VIRTUAL_PYTHONHOME" + set -gx PYTHONHOME $_OLD_VIRTUAL_PYTHONHOME set -e _OLD_VIRTUAL_PYTHONHOME end if test -n "$_OLD_FISH_PROMPT_OVERRIDE" - and functions -q _old_fish_prompt - # Set an empty local `$fish_function_path` to allow the removal of `fish_prompt` using `functions -e`. - set -l fish_function_path - - # Erase virtualenv's `fish_prompt` and restore the original. - functions -e fish_prompt - functions -c _old_fish_prompt fish_prompt - functions -e _old_fish_prompt set -e _OLD_FISH_PROMPT_OVERRIDE + # prevents error when using nested fish instances (Issue #93858) + if functions -q _old_fish_prompt + functions -e fish_prompt + functions -c _old_fish_prompt fish_prompt + functions -e _old_fish_prompt + end end set -e VIRTUAL_ENV - - if test "$argv[1]" != 'nondestructive' + set -e VIRTUAL_ENV_PROMPT + if test "$argv[1]" != "nondestructive" # Self-destruct! 
- functions -e pydoc functions -e deactivate - functions -e _bashify_path - functions -e _fishify_path end end # Unset irrelevant variables. deactivate nondestructive -set -gx VIRTUAL_ENV '/home/poveteva/PycharmProjects/HelloWorld/venv' +set -gx VIRTUAL_ENV "/home/olesya2/PycharmProjects/OtusCourse/venv" -# https://github.com/fish-shell/fish-shell/issues/436 altered PATH handling -if test (echo $FISH_VERSION | head -c 1) -lt 3 - set -gx _OLD_VIRTUAL_PATH (_bashify_path $PATH) -else - set -gx _OLD_VIRTUAL_PATH $PATH -end -set -gx PATH "$VIRTUAL_ENV"'/bin' $PATH +set -gx _OLD_VIRTUAL_PATH $PATH +set -gx PATH "$VIRTUAL_ENV/bin" $PATH -# Unset `$PYTHONHOME` if set. +# Unset PYTHONHOME if set. if set -q PYTHONHOME set -gx _OLD_VIRTUAL_PYTHONHOME $PYTHONHOME set -e PYTHONHOME end -function pydoc - python -m pydoc $argv -end - if test -z "$VIRTUAL_ENV_DISABLE_PROMPT" - # Copy the current `fish_prompt` function as `_old_fish_prompt`. + # fish uses a function instead of an env var to generate the prompt. + + # Save the current fish_prompt function as the function _old_fish_prompt. functions -c fish_prompt _old_fish_prompt + # With the original prompt function renamed, we can override with our own. function fish_prompt - # Run the user's prompt first; it might depend on (pipe)status. - set -l prompt (_old_fish_prompt) + # Save the return status of the last command. + set -l old_status $status - # Prompt override provided? - # If not, just prepend the environment name. - if test -n '' - printf '(%s) ' '' - else - printf '(%s) ' (basename "$VIRTUAL_ENV") - end + # Output the venv prompt; color taken from the blue of the Python logo. + printf "%s%s%s" (set_color 4B8BBE) "(venv) " (set_color normal) - string join -- \n $prompt # handle multi-line prompts + # Restore the return status of the previous command. + echo "exit $old_status" | . + # Output the original/"old" prompt. 
+ _old_fish_prompt end set -gx _OLD_FISH_PROMPT_OVERRIDE "$VIRTUAL_ENV" + set -gx VIRTUAL_ENV_PROMPT "(venv) " end diff --git a/venv/bin/activate.nu b/venv/bin/activate.nu deleted file mode 100644 index 0a77ad2..0000000 --- a/venv/bin/activate.nu +++ /dev/null @@ -1,92 +0,0 @@ -# This command prepares the required environment variables -def-env activate-virtualenv [] { - def is-string [x] { - ($x | describe) == 'string' - } - - def has-env [name: string] { - $name in (env).name - } - - let is_windows = ((sys).host.name | str downcase) == 'windows' - let virtual_env = '/home/poveteva/PycharmProjects/HelloWorld/venv' - let bin = 'bin' - let path_sep = ':' - let path_name = if $is_windows { - if (has-env 'Path') { - 'Path' - } else { - 'PATH' - } - } else { - 'PATH' - } - - let old_path = ( - if $is_windows { - if (has-env 'Path') { - $env.Path - } else { - $env.PATH - } - } else { - $env.PATH - } | if (is-string $in) { - # if Path/PATH is a string, make it a list - $in | split row $path_sep | path expand - } else { - $in - } - ) - - let venv_path = ([$virtual_env $bin] | path join) - let new_path = ($old_path | prepend $venv_path | str collect $path_sep) - - # Creating the new prompt for the session - let virtual_prompt = if ('' == '') { - $'(char lparen)($virtual_env | path basename)(char rparen) ' - } else { - '() ' - } - - # Back up the old prompt builder - let old_prompt_command = if (has-env 'VIRTUAL_ENV') && (has-env '_OLD_PROMPT_COMMAND') { - $env._OLD_PROMPT_COMMAND - } else { - if (has-env 'PROMPT_COMMAND') { - $env.PROMPT_COMMAND - } else { - '' - } - } - - # If there is no default prompt, then only the env is printed in the prompt - let new_prompt = if (has-env 'PROMPT_COMMAND') { - if ($old_prompt_command | describe) == 'block' { - { $'($virtual_prompt)(do $old_prompt_command)' } - } else { - { $'($virtual_prompt)($old_prompt_command)' } - } - } else { - { $'($virtual_prompt)' } - } - - # Environment variables that will be batched loaded to the virtual env - let new_env = { - $path_name : $new_path - VIRTUAL_ENV : $virtual_env - _OLD_VIRTUAL_PATH : ($old_path | str collect $path_sep) - _OLD_PROMPT_COMMAND : $old_prompt_command - PROMPT_COMMAND : $new_prompt - VIRTUAL_PROMPT : $virtual_prompt - } - - # Activate the environment variables - load-env $new_env -} - -# Activate the virtualenv -activate-virtualenv - -alias pydoc = python -m pydoc -alias deactivate = source '/home/poveteva/PycharmProjects/HelloWorld/venv/bin/deactivate.nu' diff --git a/venv/bin/deactivate.nu b/venv/bin/deactivate.nu deleted file mode 100644 index 4dd132c..0000000 --- a/venv/bin/deactivate.nu +++ /dev/null @@ -1,32 +0,0 @@ -def-env deactivate-virtualenv [] { - def has-env [name: string] { - $name in (env).name - } - - let is_windows = ((sys).host.name | str downcase) == 'windows' - - let path_name = if $is_windows { - if (has-env 'Path') { - 'Path' - } else { - 'PATH' - } - } else { - 'PATH' - } - - load-env { $path_name : $env._OLD_VIRTUAL_PATH } - - let-env PROMPT_COMMAND = $env._OLD_PROMPT_COMMAND - - # Hiding the environment variables that were created when activating the env - hide _OLD_VIRTUAL_PATH - hide _OLD_PROMPT_COMMAND - hide VIRTUAL_ENV - hide VIRTUAL_PROMPT -} - -deactivate-virtualenv - -hide pydoc -hide deactivate diff --git a/venv/bin/pip b/venv/bin/pip index 686f1ab..4d7e243 100755 --- a/venv/bin/pip +++ b/venv/bin/pip @@ -1,4 +1,4 @@ -#!/home/poveteva/PycharmProjects/HelloWorld/venv/bin/python +#!/home/olesya2/PycharmProjects/OtusCourse/venv/bin/python3 # -*- coding: utf-8 -*- import re 
import sys diff --git a/venv/bin/pip-3.10 b/venv/bin/pip-3.10 deleted file mode 100755 index 686f1ab..0000000 --- a/venv/bin/pip-3.10 +++ /dev/null @@ -1,8 +0,0 @@ -#!/home/poveteva/PycharmProjects/HelloWorld/venv/bin/python -# -*- coding: utf-8 -*- -import re -import sys -from pip._internal.cli.main import main -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) - sys.exit(main()) diff --git a/venv/bin/pip3 b/venv/bin/pip3 index 686f1ab..4d7e243 100755 --- a/venv/bin/pip3 +++ b/venv/bin/pip3 @@ -1,4 +1,4 @@ -#!/home/poveteva/PycharmProjects/HelloWorld/venv/bin/python +#!/home/olesya2/PycharmProjects/OtusCourse/venv/bin/python3 # -*- coding: utf-8 -*- import re import sys diff --git a/venv/bin/pip3.10 b/venv/bin/pip3.10 index 686f1ab..4d7e243 100755 --- a/venv/bin/pip3.10 +++ b/venv/bin/pip3.10 @@ -1,4 +1,4 @@ -#!/home/poveteva/PycharmProjects/HelloWorld/venv/bin/python +#!/home/olesya2/PycharmProjects/OtusCourse/venv/bin/python3 # -*- coding: utf-8 -*- import re import sys diff --git a/venv/bin/python b/venv/bin/python index 5202249..b8a0adb 120000 --- a/venv/bin/python +++ b/venv/bin/python @@ -1 +1 @@ -/usr/bin/python3.10 \ No newline at end of file +python3 \ No newline at end of file diff --git a/venv/bin/python3 b/venv/bin/python3 index d8654aa..ae65fda 120000 --- a/venv/bin/python3 +++ b/venv/bin/python3 @@ -1 +1 @@ -python \ No newline at end of file +/usr/bin/python3 \ No newline at end of file diff --git a/venv/bin/python3.10 b/venv/bin/python3.10 index d8654aa..b8a0adb 120000 --- a/venv/bin/python3.10 +++ b/venv/bin/python3.10 @@ -1 +1 @@ -python \ No newline at end of file +python3 \ No newline at end of file diff --git a/venv/bin/wheel b/venv/bin/wheel deleted file mode 100755 index 14240bf..0000000 --- a/venv/bin/wheel +++ /dev/null @@ -1,8 +0,0 @@ -#!/home/poveteva/PycharmProjects/HelloWorld/venv/bin/python -# -*- coding: utf-8 -*- -import re -import sys -from wheel.cli import main -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) - sys.exit(main()) diff --git a/venv/bin/wheel-3.10 b/venv/bin/wheel-3.10 deleted file mode 100755 index 14240bf..0000000 --- a/venv/bin/wheel-3.10 +++ /dev/null @@ -1,8 +0,0 @@ -#!/home/poveteva/PycharmProjects/HelloWorld/venv/bin/python -# -*- coding: utf-8 -*- -import re -import sys -from wheel.cli import main -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) - sys.exit(main()) diff --git a/venv/bin/wheel3 b/venv/bin/wheel3 deleted file mode 100755 index 14240bf..0000000 --- a/venv/bin/wheel3 +++ /dev/null @@ -1,8 +0,0 @@ -#!/home/poveteva/PycharmProjects/HelloWorld/venv/bin/python -# -*- coding: utf-8 -*- -import re -import sys -from wheel.cli import main -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) - sys.exit(main()) diff --git a/venv/bin/wheel3.10 b/venv/bin/wheel3.10 deleted file mode 100755 index 14240bf..0000000 --- a/venv/bin/wheel3.10 +++ /dev/null @@ -1,8 +0,0 @@ -#!/home/poveteva/PycharmProjects/HelloWorld/venv/bin/python -# -*- coding: utf-8 -*- -import re -import sys -from wheel.cli import main -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) - sys.exit(main()) diff --git a/venv/lib/python3.10/site-packages/_distutils_hack/__init__.py b/venv/lib/python3.10/site-packages/_distutils_hack/__init__.py index f987a53..f707416 100644 --- a/venv/lib/python3.10/site-packages/_distutils_hack/__init__.py +++ 
b/venv/lib/python3.10/site-packages/_distutils_hack/__init__.py @@ -1,11 +1,18 @@ -# don't import any costly modules import sys import os +import re +import importlib +import warnings is_pypy = '__pypy__' in sys.builtin_module_names +warnings.filterwarnings('ignore', + r'.+ distutils\b.+ deprecated', + DeprecationWarning) + + def warn_distutils_present(): if 'distutils' not in sys.modules: return @@ -13,29 +20,20 @@ def warn_distutils_present(): # PyPy for 3.6 unconditionally imports distutils, so bypass the warning # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250 return - import warnings - warnings.warn( "Distutils was imported before Setuptools, but importing Setuptools " "also replaces the `distutils` module in `sys.modules`. This may lead " "to undesirable behaviors or errors. To avoid these issues, avoid " "using distutils directly, ensure that setuptools is installed in the " "traditional way (e.g. not an editable install), and/or make sure " - "that setuptools is always imported before distutils." - ) + "that setuptools is always imported before distutils.") def clear_distutils(): if 'distutils' not in sys.modules: return - import warnings - warnings.warn("Setuptools is replacing distutils.") - mods = [ - name - for name in sys.modules - if name == "distutils" or name.startswith("distutils.") - ] + mods = [name for name in sys.modules if re.match(r'distutils\b', name)] for name in mods: del sys.modules[name] @@ -44,25 +42,23 @@ def enabled(): """ Allow selection of distutils by environment variable. """ - which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'local') + which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'stdlib') return which == 'local' def ensure_local_distutils(): - import importlib - clear_distutils() # With the DistutilsMetaFinder in place, # perform an import to cause distutils to be # loaded from setuptools._distutils. Ref #2906. - with shim(): - importlib.import_module('distutils') + add_shim() + importlib.import_module('distutils') + remove_shim() # check that submodules load as expected core = importlib.import_module('distutils.core') assert '_distutils' in core.__file__, core.__file__ - assert 'setuptools._distutils.log' not in sys.modules def do_override(): @@ -77,19 +73,9 @@ def do_override(): ensure_local_distutils() -class _TrivialRe: - def __init__(self, *patterns): - self._patterns = patterns - - def match(self, string): - return all(pat in string for pat in self._patterns) - - class DistutilsMetaFinder: def find_spec(self, fullname, path, target=None): - # optimization: only consider top level modules and those - # found in the CPython test suite. - if path is not None and not fullname.startswith('test.'): + if path is not None: return method_name = 'spec_for_{fullname}'.format(**locals()) @@ -97,45 +83,18 @@ def find_spec(self, fullname, path, target=None): return method() def spec_for_distutils(self): - if self.is_cpython(): - return - - import importlib import importlib.abc import importlib.util - try: - mod = importlib.import_module('setuptools._distutils') - except Exception: - # There are a couple of cases where setuptools._distutils - # may not be present: - # - An older Setuptools without a local distutils is - # taking precedence. Ref #2957. - # - Path manipulation during sitecustomize removes - # setuptools from the path but only after the hook - # has been loaded. Ref #2980. - # In either case, fall back to stdlib behavior. 
- return - class DistutilsLoader(importlib.abc.Loader): + def create_module(self, spec): - mod.__name__ = 'distutils' - return mod + return importlib.import_module('setuptools._distutils') def exec_module(self, module): pass - return importlib.util.spec_from_loader( - 'distutils', DistutilsLoader(), origin=mod.__file__ - ) - - @staticmethod - def is_cpython(): - """ - Suppress supplying distutils for CPython (build and tests). - Ref #2965 and #3007. - """ - return os.path.isfile('pybuilddir.txt') + return importlib.util.spec_from_loader('distutils', DistutilsLoader()) def spec_for_pip(self): """ @@ -147,71 +106,22 @@ def spec_for_pip(self): clear_distutils() self.spec_for_distutils = lambda: None - @classmethod - def pip_imported_during_build(cls): + @staticmethod + def pip_imported_during_build(): """ Detect if pip is being imported in a build script. Ref #2355. """ import traceback - return any( - cls.frame_file_is_setup(frame) for frame, line in traceback.walk_stack(None) + frame.f_globals['__file__'].endswith('setup.py') + for frame, line in traceback.walk_stack(None) ) - @staticmethod - def frame_file_is_setup(frame): - """ - Return True if the indicated frame suggests a setup.py file. - """ - # some frames may not have __file__ (#2940) - return frame.f_globals.get('__file__', '').endswith('setup.py') - - def spec_for_sensitive_tests(self): - """ - Ensure stdlib distutils when running select tests under CPython. - - python/cpython#91169 - """ - clear_distutils() - self.spec_for_distutils = lambda: None - - sensitive_tests = ( - [ - 'test.test_distutils', - 'test.test_peg_generator', - 'test.test_importlib', - ] - if sys.version_info < (3, 10) - else [ - 'test.test_distutils', - ] - ) - - -for name in DistutilsMetaFinder.sensitive_tests: - setattr( - DistutilsMetaFinder, - f'spec_for_{name}', - DistutilsMetaFinder.spec_for_sensitive_tests, - ) - DISTUTILS_FINDER = DistutilsMetaFinder() def add_shim(): - DISTUTILS_FINDER in sys.meta_path or insert_shim() - - -class shim: - def __enter__(self): - insert_shim() - - def __exit__(self, exc, value, tb): - remove_shim() - - -def insert_shim(): sys.meta_path.insert(0, DISTUTILS_FINDER) diff --git a/venv/lib/python3.10/site-packages/_pytest/__init__.py b/venv/lib/python3.10/site-packages/_pytest/__init__.py new file mode 100644 index 0000000..8a406c5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/__init__.py @@ -0,0 +1,9 @@ +__all__ = ["__version__", "version_tuple"] + +try: + from ._version import version as __version__, version_tuple +except ImportError: # pragma: no cover + # broken installation, we don't even try + # unknown only works because we do poor mans version compare + __version__ = "unknown" + version_tuple = (0, 0, "unknown") # type:ignore[assignment] diff --git a/venv/lib/python3.10/site-packages/_pytest/_argcomplete.py b/venv/lib/python3.10/site-packages/_pytest/_argcomplete.py new file mode 100644 index 0000000..120f09f --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/_argcomplete.py @@ -0,0 +1,116 @@ +"""Allow bash-completion for argparse with argcomplete if installed. + +Needs argcomplete>=0.5.6 for python 3.2/3.3 (older versions fail +to find the magic string, so _ARGCOMPLETE env. var is never set, and +this does not need special code). + +Function try_argcomplete(parser) should be called directly before +the call to ArgumentParser.parse_args(). 
+ +The filescompleter is what you normally would use on the positional +arguments specification, in order to get "dirname/" after "dirn" +instead of the default "dirname ": + + optparser.add_argument(Config._file_or_dir, nargs='*').completer=filescompleter + +Other, application specific, completers should go in the file +doing the add_argument calls as they need to be specified as .completer +attributes as well. (If argcomplete is not installed, the function the +attribute points to will not be used). + +SPEEDUP +======= + +The generic argcomplete script for bash-completion +(/etc/bash_completion.d/python-argcomplete.sh) +uses a python program to determine startup script generated by pip. +You can speed up completion somewhat by changing this script to include + # PYTHON_ARGCOMPLETE_OK +so the python-argcomplete-check-easy-install-script does not +need to be called to find the entry point of the code and see if that is +marked with PYTHON_ARGCOMPLETE_OK. + +INSTALL/DEBUGGING +================= + +To include this support in another application that has setup.py generated +scripts: + +- Add the line: + # PYTHON_ARGCOMPLETE_OK + near the top of the main python entry point. + +- Include in the file calling parse_args(): + from _argcomplete import try_argcomplete, filescompleter + Call try_argcomplete just before parse_args(), and optionally add + filescompleter to the positional arguments' add_argument(). + +If things do not work right away: + +- Switch on argcomplete debugging with (also helpful when doing custom + completers): + export _ARC_DEBUG=1 + +- Run: + python-argcomplete-check-easy-install-script $(which appname) + echo $? + will echo 0 if the magic line has been found, 1 if not. + +- Sometimes it helps to find early on errors using: + _ARGCOMPLETE=1 _ARC_DEBUG=1 appname + which should throw a KeyError: 'COMPLINE' (which is properly set by the + global argcomplete script). +""" +import argparse +import os +import sys +from glob import glob +from typing import Any +from typing import List +from typing import Optional + + +class FastFilesCompleter: + """Fast file completer class.""" + + def __init__(self, directories: bool = True) -> None: + self.directories = directories + + def __call__(self, prefix: str, **kwargs: Any) -> List[str]: + # Only called on non option completions. + if os.path.sep in prefix[1:]: + prefix_dir = len(os.path.dirname(prefix) + os.path.sep) + else: + prefix_dir = 0 + completion = [] + globbed = [] + if "*" not in prefix and "?" not in prefix: + # We are on unix, otherwise no bash. + if not prefix or prefix[-1] == os.path.sep: + globbed.extend(glob(prefix + ".*")) + prefix += "*" + globbed.extend(glob(prefix)) + for x in sorted(globbed): + if os.path.isdir(x): + x += "/" + # Append stripping the prefix (like bash, not like compgen). 
+ completion.append(x[prefix_dir:]) + return completion + + +if os.environ.get("_ARGCOMPLETE"): + try: + import argcomplete.completers + except ImportError: + sys.exit(-1) + filescompleter: Optional[FastFilesCompleter] = FastFilesCompleter() + + def try_argcomplete(parser: argparse.ArgumentParser) -> None: + argcomplete.autocomplete(parser, always_complete_options=False) + +else: + + def try_argcomplete(parser: argparse.ArgumentParser) -> None: + pass + + filescompleter = None diff --git a/venv/lib/python3.10/site-packages/_pytest/_code/__init__.py b/venv/lib/python3.10/site-packages/_pytest/_code/__init__.py new file mode 100644 index 0000000..511d0dd --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/_code/__init__.py @@ -0,0 +1,22 @@ +"""Python inspection/code generation API.""" +from .code import Code +from .code import ExceptionInfo +from .code import filter_traceback +from .code import Frame +from .code import getfslineno +from .code import Traceback +from .code import TracebackEntry +from .source import getrawcode +from .source import Source + +__all__ = [ + "Code", + "ExceptionInfo", + "filter_traceback", + "Frame", + "getfslineno", + "getrawcode", + "Traceback", + "TracebackEntry", + "Source", +] diff --git a/venv/lib/python3.10/site-packages/_pytest/_code/code.py b/venv/lib/python3.10/site-packages/_pytest/_code/code.py new file mode 100644 index 0000000..5b758a8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/_code/code.py @@ -0,0 +1,1274 @@ +import ast +import inspect +import os +import re +import sys +import traceback +from inspect import CO_VARARGS +from inspect import CO_VARKEYWORDS +from io import StringIO +from pathlib import Path +from traceback import format_exception_only +from types import CodeType +from types import FrameType +from types import TracebackType +from typing import Any +from typing import Callable +from typing import ClassVar +from typing import Dict +from typing import Generic +from typing import Iterable +from typing import List +from typing import Mapping +from typing import Optional +from typing import overload +from typing import Pattern +from typing import Sequence +from typing import Set +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union +from weakref import ref + +import attr +import pluggy + +import _pytest +from _pytest._code.source import findsource +from _pytest._code.source import getrawcode +from _pytest._code.source import getstatementrange_ast +from _pytest._code.source import Source +from _pytest._io import TerminalWriter +from _pytest._io.saferepr import safeformat +from _pytest._io.saferepr import saferepr +from _pytest.compat import final +from _pytest.compat import get_real_func +from _pytest.deprecated import check_ispytest +from _pytest.pathlib import absolutepath +from _pytest.pathlib import bestrelpath + +if TYPE_CHECKING: + from typing_extensions import Literal + from typing_extensions import SupportsIndex + from weakref import ReferenceType + + _TracebackStyle = Literal["long", "short", "line", "no", "native", "value", "auto"] + + +class Code: + """Wrapper around Python code objects.""" + + __slots__ = ("raw",) + + def __init__(self, obj: CodeType) -> None: + self.raw = obj + + @classmethod + def from_function(cls, obj: object) -> "Code": + return cls(getrawcode(obj)) + + def __eq__(self, other): + return self.raw == other.raw + + # Ignore type because of https://github.com/python/mypy/issues/4266. 
+ __hash__ = None # type: ignore + + @property + def firstlineno(self) -> int: + return self.raw.co_firstlineno - 1 + + @property + def name(self) -> str: + return self.raw.co_name + + @property + def path(self) -> Union[Path, str]: + """Return a path object pointing to source code, or an ``str`` in + case of ``OSError`` / non-existing file.""" + if not self.raw.co_filename: + return "" + try: + p = absolutepath(self.raw.co_filename) + # maybe don't try this checking + if not p.exists(): + raise OSError("path check failed.") + return p + except OSError: + # XXX maybe try harder like the weird logic + # in the standard lib [linecache.updatecache] does? + return self.raw.co_filename + + @property + def fullsource(self) -> Optional["Source"]: + """Return a _pytest._code.Source object for the full source file of the code.""" + full, _ = findsource(self.raw) + return full + + def source(self) -> "Source": + """Return a _pytest._code.Source object for the code object's source only.""" + # return source only for that part of code + return Source(self.raw) + + def getargs(self, var: bool = False) -> Tuple[str, ...]: + """Return a tuple with the argument names for the code object. + + If 'var' is set True also return the names of the variable and + keyword arguments when present. + """ + # Handy shortcut for getting args. + raw = self.raw + argcount = raw.co_argcount + if var: + argcount += raw.co_flags & CO_VARARGS + argcount += raw.co_flags & CO_VARKEYWORDS + return raw.co_varnames[:argcount] + + +class Frame: + """Wrapper around a Python frame holding f_locals and f_globals + in which expressions can be evaluated.""" + + __slots__ = ("raw",) + + def __init__(self, frame: FrameType) -> None: + self.raw = frame + + @property + def lineno(self) -> int: + return self.raw.f_lineno - 1 + + @property + def f_globals(self) -> Dict[str, Any]: + return self.raw.f_globals + + @property + def f_locals(self) -> Dict[str, Any]: + return self.raw.f_locals + + @property + def code(self) -> Code: + return Code(self.raw.f_code) + + @property + def statement(self) -> "Source": + """Statement this frame is at.""" + if self.code.fullsource is None: + return Source("") + return self.code.fullsource.getstatement(self.lineno) + + def eval(self, code, **vars): + """Evaluate 'code' in the frame. + + 'vars' are optional additional local variables. + + Returns the result of the evaluation. + """ + f_locals = self.f_locals.copy() + f_locals.update(vars) + return eval(code, self.f_globals, f_locals) + + def repr(self, object: object) -> str: + """Return a 'safe' (non-recursive, one-line) string repr for 'object'.""" + return saferepr(object) + + def getargs(self, var: bool = False): + """Return a list of tuples (name, value) for all arguments. + + If 'var' is set True, also include the variable and keyword arguments + when present. 
+ """ + retval = [] + for arg in self.code.getargs(var): + try: + retval.append((arg, self.f_locals[arg])) + except KeyError: + pass # this can occur when using Psyco + return retval + + +class TracebackEntry: + """A single entry in a Traceback.""" + + __slots__ = ("_rawentry", "_excinfo", "_repr_style") + + def __init__( + self, + rawentry: TracebackType, + excinfo: Optional["ReferenceType[ExceptionInfo[BaseException]]"] = None, + ) -> None: + self._rawentry = rawentry + self._excinfo = excinfo + self._repr_style: Optional['Literal["short", "long"]'] = None + + @property + def lineno(self) -> int: + return self._rawentry.tb_lineno - 1 + + def set_repr_style(self, mode: "Literal['short', 'long']") -> None: + assert mode in ("short", "long") + self._repr_style = mode + + @property + def frame(self) -> Frame: + return Frame(self._rawentry.tb_frame) + + @property + def relline(self) -> int: + return self.lineno - self.frame.code.firstlineno + + def __repr__(self) -> str: + return "" % (self.frame.code.path, self.lineno + 1) + + @property + def statement(self) -> "Source": + """_pytest._code.Source object for the current statement.""" + source = self.frame.code.fullsource + assert source is not None + return source.getstatement(self.lineno) + + @property + def path(self) -> Union[Path, str]: + """Path to the source code.""" + return self.frame.code.path + + @property + def locals(self) -> Dict[str, Any]: + """Locals of underlying frame.""" + return self.frame.f_locals + + def getfirstlinesource(self) -> int: + return self.frame.code.firstlineno + + def getsource( + self, astcache: Optional[Dict[Union[str, Path], ast.AST]] = None + ) -> Optional["Source"]: + """Return failing source code.""" + # we use the passed in astcache to not reparse asttrees + # within exception info printing + source = self.frame.code.fullsource + if source is None: + return None + key = astnode = None + if astcache is not None: + key = self.frame.code.path + if key is not None: + astnode = astcache.get(key, None) + start = self.getfirstlinesource() + try: + astnode, _, end = getstatementrange_ast( + self.lineno, source, astnode=astnode + ) + except SyntaxError: + end = self.lineno + 1 + else: + if key is not None and astcache is not None: + astcache[key] = astnode + return source[start:end] + + source = property(getsource) + + def ishidden(self) -> bool: + """Return True if the current frame has a var __tracebackhide__ + resolving to True. + + If __tracebackhide__ is a callable, it gets called with the + ExceptionInfo instance and can decide whether to hide the traceback. + + Mostly for internal use. + """ + tbh: Union[ + bool, Callable[[Optional[ExceptionInfo[BaseException]]], bool] + ] = False + for maybe_ns_dct in (self.frame.f_locals, self.frame.f_globals): + # in normal cases, f_locals and f_globals are dictionaries + # however via `exec(...)` / `eval(...)` they can be other types + # (even incorrect types!). + # as such, we suppress all exceptions while accessing __tracebackhide__ + try: + tbh = maybe_ns_dct["__tracebackhide__"] + except Exception: + pass + else: + break + if tbh and callable(tbh): + return tbh(None if self._excinfo is None else self._excinfo()) + return tbh + + def __str__(self) -> str: + name = self.frame.code.name + try: + line = str(self.statement).lstrip() + except KeyboardInterrupt: + raise + except BaseException: + line = "???" + # This output does not quite match Python's repr for traceback entries, + # but changing it to do so would break certain plugins. 
See + # https://github.com/pytest-dev/pytest/pull/7535/ for details. + return " File %r:%d in %s\n %s\n" % ( + str(self.path), + self.lineno + 1, + name, + line, + ) + + @property + def name(self) -> str: + """co_name of underlying code.""" + return self.frame.code.raw.co_name + + +class Traceback(List[TracebackEntry]): + """Traceback objects encapsulate and offer higher level access to Traceback entries.""" + + def __init__( + self, + tb: Union[TracebackType, Iterable[TracebackEntry]], + excinfo: Optional["ReferenceType[ExceptionInfo[BaseException]]"] = None, + ) -> None: + """Initialize from given python traceback object and ExceptionInfo.""" + self._excinfo = excinfo + if isinstance(tb, TracebackType): + + def f(cur: TracebackType) -> Iterable[TracebackEntry]: + cur_: Optional[TracebackType] = cur + while cur_ is not None: + yield TracebackEntry(cur_, excinfo=excinfo) + cur_ = cur_.tb_next + + super().__init__(f(tb)) + else: + super().__init__(tb) + + def cut( + self, + path: Optional[Union["os.PathLike[str]", str]] = None, + lineno: Optional[int] = None, + firstlineno: Optional[int] = None, + excludepath: Optional["os.PathLike[str]"] = None, + ) -> "Traceback": + """Return a Traceback instance wrapping part of this Traceback. + + By providing any combination of path, lineno and firstlineno, the + first frame to start the to-be-returned traceback is determined. + + This allows cutting the first part of a Traceback instance e.g. + for formatting reasons (removing some uninteresting bits that deal + with handling of the exception/traceback). + """ + path_ = None if path is None else os.fspath(path) + excludepath_ = None if excludepath is None else os.fspath(excludepath) + for x in self: + code = x.frame.code + codepath = code.path + if path is not None and str(codepath) != path_: + continue + if ( + excludepath is not None + and isinstance(codepath, Path) + and excludepath_ in (str(p) for p in codepath.parents) # type: ignore[operator] + ): + continue + if lineno is not None and x.lineno != lineno: + continue + if firstlineno is not None and x.frame.code.firstlineno != firstlineno: + continue + return Traceback(x._rawentry, self._excinfo) + return self + + @overload + def __getitem__(self, key: "SupportsIndex") -> TracebackEntry: + ... + + @overload + def __getitem__(self, key: slice) -> "Traceback": + ... + + def __getitem__( + self, key: Union["SupportsIndex", slice] + ) -> Union[TracebackEntry, "Traceback"]: + if isinstance(key, slice): + return self.__class__(super().__getitem__(key)) + else: + return super().__getitem__(key) + + def filter( + self, fn: Callable[[TracebackEntry], bool] = lambda x: not x.ishidden() + ) -> "Traceback": + """Return a Traceback instance with certain items removed + + fn is a function that gets a single argument, a TracebackEntry + instance, and should return True when the item should be added + to the Traceback, False when not. + + By default this removes all the TracebackEntries which are hidden + (see ishidden() above). 
+ """ + return Traceback(filter(fn, self), self._excinfo) + + def getcrashentry(self) -> TracebackEntry: + """Return last non-hidden traceback entry that lead to the exception of a traceback.""" + for i in range(-1, -len(self) - 1, -1): + entry = self[i] + if not entry.ishidden(): + return entry + return self[-1] + + def recursionindex(self) -> Optional[int]: + """Return the index of the frame/TracebackEntry where recursion originates if + appropriate, None if no recursion occurred.""" + cache: Dict[Tuple[Any, int, int], List[Dict[str, Any]]] = {} + for i, entry in enumerate(self): + # id for the code.raw is needed to work around + # the strange metaprogramming in the decorator lib from pypi + # which generates code objects that have hash/value equality + # XXX needs a test + key = entry.frame.code.path, id(entry.frame.code.raw), entry.lineno + # print "checking for recursion at", key + values = cache.setdefault(key, []) + if values: + f = entry.frame + loc = f.f_locals + for otherloc in values: + if otherloc == loc: + return i + values.append(entry.frame.f_locals) + return None + + +E = TypeVar("E", bound=BaseException, covariant=True) + + +@final +@attr.s(repr=False, init=False, auto_attribs=True) +class ExceptionInfo(Generic[E]): + """Wraps sys.exc_info() objects and offers help for navigating the traceback.""" + + _assert_start_repr: ClassVar = "AssertionError('assert " + + _excinfo: Optional[Tuple[Type["E"], "E", TracebackType]] + _striptext: str + _traceback: Optional[Traceback] + + def __init__( + self, + excinfo: Optional[Tuple[Type["E"], "E", TracebackType]], + striptext: str = "", + traceback: Optional[Traceback] = None, + *, + _ispytest: bool = False, + ) -> None: + check_ispytest(_ispytest) + self._excinfo = excinfo + self._striptext = striptext + self._traceback = traceback + + @classmethod + def from_exc_info( + cls, + exc_info: Tuple[Type[E], E, TracebackType], + exprinfo: Optional[str] = None, + ) -> "ExceptionInfo[E]": + """Return an ExceptionInfo for an existing exc_info tuple. + + .. warning:: + + Experimental API + + :param exprinfo: + A text string helping to determine if we should strip + ``AssertionError`` from the output. Defaults to the exception + message/``__str__()``. + """ + _striptext = "" + if exprinfo is None and isinstance(exc_info[1], AssertionError): + exprinfo = getattr(exc_info[1], "msg", None) + if exprinfo is None: + exprinfo = saferepr(exc_info[1]) + if exprinfo and exprinfo.startswith(cls._assert_start_repr): + _striptext = "AssertionError: " + + return cls(exc_info, _striptext, _ispytest=True) + + @classmethod + def from_current( + cls, exprinfo: Optional[str] = None + ) -> "ExceptionInfo[BaseException]": + """Return an ExceptionInfo matching the current traceback. + + .. warning:: + + Experimental API + + :param exprinfo: + A text string helping to determine if we should strip + ``AssertionError`` from the output. Defaults to the exception + message/``__str__()``. 
+ """ + tup = sys.exc_info() + assert tup[0] is not None, "no current exception" + assert tup[1] is not None, "no current exception" + assert tup[2] is not None, "no current exception" + exc_info = (tup[0], tup[1], tup[2]) + return ExceptionInfo.from_exc_info(exc_info, exprinfo) + + @classmethod + def for_later(cls) -> "ExceptionInfo[E]": + """Return an unfilled ExceptionInfo.""" + return cls(None, _ispytest=True) + + def fill_unfilled(self, exc_info: Tuple[Type[E], E, TracebackType]) -> None: + """Fill an unfilled ExceptionInfo created with ``for_later()``.""" + assert self._excinfo is None, "ExceptionInfo was already filled" + self._excinfo = exc_info + + @property + def type(self) -> Type[E]: + """The exception class.""" + assert ( + self._excinfo is not None + ), ".type can only be used after the context manager exits" + return self._excinfo[0] + + @property + def value(self) -> E: + """The exception value.""" + assert ( + self._excinfo is not None + ), ".value can only be used after the context manager exits" + return self._excinfo[1] + + @property + def tb(self) -> TracebackType: + """The exception raw traceback.""" + assert ( + self._excinfo is not None + ), ".tb can only be used after the context manager exits" + return self._excinfo[2] + + @property + def typename(self) -> str: + """The type name of the exception.""" + assert ( + self._excinfo is not None + ), ".typename can only be used after the context manager exits" + return self.type.__name__ + + @property + def traceback(self) -> Traceback: + """The traceback.""" + if self._traceback is None: + self._traceback = Traceback(self.tb, excinfo=ref(self)) + return self._traceback + + @traceback.setter + def traceback(self, value: Traceback) -> None: + self._traceback = value + + def __repr__(self) -> str: + if self._excinfo is None: + return "" + return "<{} {} tblen={}>".format( + self.__class__.__name__, saferepr(self._excinfo[1]), len(self.traceback) + ) + + def exconly(self, tryshort: bool = False) -> str: + """Return the exception as a string. + + When 'tryshort' resolves to True, and the exception is an + AssertionError, only the actual exception part of the exception + representation is returned (so 'AssertionError: ' is removed from + the beginning). + """ + lines = format_exception_only(self.type, self.value) + text = "".join(lines) + text = text.rstrip() + if tryshort: + if text.startswith(self._striptext): + text = text[len(self._striptext) :] + return text + + def errisinstance( + self, exc: Union[Type[BaseException], Tuple[Type[BaseException], ...]] + ) -> bool: + """Return True if the exception is an instance of exc. + + Consider using ``isinstance(excinfo.value, exc)`` instead. + """ + return isinstance(self.value, exc) + + def _getreprcrash(self) -> "ReprFileLocation": + exconly = self.exconly(tryshort=True) + entry = self.traceback.getcrashentry() + path, lineno = entry.frame.code.raw.co_filename, entry.lineno + return ReprFileLocation(path, lineno + 1, exconly) + + def getrepr( + self, + showlocals: bool = False, + style: "_TracebackStyle" = "long", + abspath: bool = False, + tbfilter: bool = True, + funcargs: bool = False, + truncate_locals: bool = True, + chain: bool = True, + ) -> Union["ReprExceptionInfo", "ExceptionChainRepr"]: + """Return str()able representation of this exception info. + + :param bool showlocals: + Show locals per traceback entry. + Ignored if ``style=="native"``. + + :param str style: + long|short|no|native|value traceback style. 
+ + :param bool abspath: + If paths should be changed to absolute or left unchanged. + + :param bool tbfilter: + Hide entries that contain a local variable ``__tracebackhide__==True``. + Ignored if ``style=="native"``. + + :param bool funcargs: + Show fixtures ("funcargs" for legacy purposes) per traceback entry. + + :param bool truncate_locals: + With ``showlocals==True``, make sure locals can be safely represented as strings. + + :param bool chain: + If chained exceptions in Python 3 should be shown. + + .. versionchanged:: 3.9 + + Added the ``chain`` parameter. + """ + if style == "native": + return ReprExceptionInfo( + ReprTracebackNative( + traceback.format_exception( + self.type, self.value, self.traceback[0]._rawentry + ) + ), + self._getreprcrash(), + ) + + fmt = FormattedExcinfo( + showlocals=showlocals, + style=style, + abspath=abspath, + tbfilter=tbfilter, + funcargs=funcargs, + truncate_locals=truncate_locals, + chain=chain, + ) + return fmt.repr_excinfo(self) + + def match(self, regexp: Union[str, Pattern[str]]) -> "Literal[True]": + """Check whether the regular expression `regexp` matches the string + representation of the exception using :func:`python:re.search`. + + If it matches `True` is returned, otherwise an `AssertionError` is raised. + """ + __tracebackhide__ = True + msg = "Regex pattern {!r} does not match {!r}." + if regexp == str(self.value): + msg += " Did you mean to `re.escape()` the regex?" + assert re.search(regexp, str(self.value)), msg.format(regexp, str(self.value)) + # Return True to allow for "assert excinfo.match()". + return True + + +@attr.s(auto_attribs=True) +class FormattedExcinfo: + """Presenting information about failing Functions and Generators.""" + + # for traceback entries + flow_marker: ClassVar = ">" + fail_marker: ClassVar = "E" + + showlocals: bool = False + style: "_TracebackStyle" = "long" + abspath: bool = True + tbfilter: bool = True + funcargs: bool = False + truncate_locals: bool = True + chain: bool = True + astcache: Dict[Union[str, Path], ast.AST] = attr.ib( + factory=dict, init=False, repr=False + ) + + def _getindent(self, source: "Source") -> int: + # Figure out indent for the given source. 
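Both ``getrepr`` and ``match`` are reachable from test code through the ``ExceptionInfo`` object that ``pytest.raises`` yields; a minimal usage sketch (the test body is illustrative, not part of this diff):

    import pytest

    def test_match_and_getrepr():
        with pytest.raises(ValueError) as excinfo:
            raise ValueError("bad value 42")
        excinfo.match(r"bad value \d+")          # re.search; raises AssertionError on mismatch
        report = excinfo.getrepr(style="short")  # str()able failure representation
        assert "ValueError" in str(report)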
+        try:
+            s = str(source.getstatement(len(source) - 1))
+        except KeyboardInterrupt:
+            raise
+        except BaseException:
+            try:
+                s = str(source[-1])
+            except KeyboardInterrupt:
+                raise
+            except BaseException:
+                return 0
+        return 4 + (len(s) - len(s.lstrip()))
+
+    def _getentrysource(self, entry: TracebackEntry) -> Optional["Source"]:
+        source = entry.getsource(self.astcache)
+        if source is not None:
+            source = source.deindent()
+        return source
+
+    def repr_args(self, entry: TracebackEntry) -> Optional["ReprFuncArgs"]:
+        if self.funcargs:
+            args = []
+            for argname, argvalue in entry.frame.getargs(var=True):
+                args.append((argname, saferepr(argvalue)))
+            return ReprFuncArgs(args)
+        return None
+
+    def get_source(
+        self,
+        source: Optional["Source"],
+        line_index: int = -1,
+        excinfo: Optional[ExceptionInfo[BaseException]] = None,
+        short: bool = False,
+    ) -> List[str]:
+        """Return formatted and marked up source lines."""
+        lines = []
+        if source is None or line_index >= len(source.lines):
+            source = Source("???")
+            line_index = 0
+        if line_index < 0:
+            line_index += len(source)
+        space_prefix = "    "
+        if short:
+            lines.append(space_prefix + source.lines[line_index].strip())
+        else:
+            for line in source.lines[:line_index]:
+                lines.append(space_prefix + line)
+            lines.append(self.flow_marker + "   " + source.lines[line_index])
+            for line in source.lines[line_index + 1 :]:
+                lines.append(space_prefix + line)
+        if excinfo is not None:
+            indent = 4 if short else self._getindent(source)
+            lines.extend(self.get_exconly(excinfo, indent=indent, markall=True))
+        return lines
+
+    def get_exconly(
+        self,
+        excinfo: ExceptionInfo[BaseException],
+        indent: int = 4,
+        markall: bool = False,
+    ) -> List[str]:
+        lines = []
+        indentstr = " " * indent
+        # Get the real exception information out.
+        exlines = excinfo.exconly(tryshort=True).split("\n")
+        failindent = self.fail_marker + indentstr[1:]
+        for line in exlines:
+            lines.append(failindent + line)
+            if not markall:
+                failindent = indentstr
+        return lines
+
+    def repr_locals(self, locals: Mapping[str, object]) -> Optional["ReprLocals"]:
+        if self.showlocals:
+            lines = []
+            keys = [loc for loc in locals if loc[0] != "@"]
+            keys.sort()
+            for name in keys:
+                value = locals[name]
+                if name == "__builtins__":
+                    lines.append("__builtins__ = <builtins>")
+                else:
+                    # This formatting could all be handled by the
+                    # _repr() function, which is only reprlib.Repr in
+                    # disguise, so is very configurable.
+ if self.truncate_locals: + str_repr = saferepr(value) + else: + str_repr = safeformat(value) + # if len(str_repr) < 70 or not isinstance(value, (list, tuple, dict)): + lines.append(f"{name:<10} = {str_repr}") + # else: + # self._line("%-10s =\\" % (name,)) + # # XXX + # pprint.pprint(value, stream=self.excinfowriter) + return ReprLocals(lines) + return None + + def repr_traceback_entry( + self, + entry: TracebackEntry, + excinfo: Optional[ExceptionInfo[BaseException]] = None, + ) -> "ReprEntry": + lines: List[str] = [] + style = entry._repr_style if entry._repr_style is not None else self.style + if style in ("short", "long"): + source = self._getentrysource(entry) + if source is None: + source = Source("???") + line_index = 0 + else: + line_index = entry.lineno - entry.getfirstlinesource() + short = style == "short" + reprargs = self.repr_args(entry) if not short else None + s = self.get_source(source, line_index, excinfo, short=short) + lines.extend(s) + if short: + message = "in %s" % (entry.name) + else: + message = excinfo and excinfo.typename or "" + entry_path = entry.path + path = self._makepath(entry_path) + reprfileloc = ReprFileLocation(path, entry.lineno + 1, message) + localsrepr = self.repr_locals(entry.locals) + return ReprEntry(lines, reprargs, localsrepr, reprfileloc, style) + elif style == "value": + if excinfo: + lines.extend(str(excinfo.value).split("\n")) + return ReprEntry(lines, None, None, None, style) + else: + if excinfo: + lines.extend(self.get_exconly(excinfo, indent=4)) + return ReprEntry(lines, None, None, None, style) + + def _makepath(self, path: Union[Path, str]) -> str: + if not self.abspath and isinstance(path, Path): + try: + np = bestrelpath(Path.cwd(), path) + except OSError: + return str(path) + if len(np) < len(str(path)): + return np + return str(path) + + def repr_traceback(self, excinfo: ExceptionInfo[BaseException]) -> "ReprTraceback": + traceback = excinfo.traceback + if self.tbfilter: + traceback = traceback.filter() + + if isinstance(excinfo.value, RecursionError): + traceback, extraline = self._truncate_recursive_traceback(traceback) + else: + extraline = None + + last = traceback[-1] + entries = [] + if self.style == "value": + reprentry = self.repr_traceback_entry(last, excinfo) + entries.append(reprentry) + return ReprTraceback(entries, None, style=self.style) + + for index, entry in enumerate(traceback): + einfo = (last == entry) and excinfo or None + reprentry = self.repr_traceback_entry(entry, einfo) + entries.append(reprentry) + return ReprTraceback(entries, extraline, style=self.style) + + def _truncate_recursive_traceback( + self, traceback: Traceback + ) -> Tuple[Traceback, Optional[str]]: + """Truncate the given recursive traceback trying to find the starting + point of the recursion. + + The detection is done by going through each traceback entry and + finding the point in which the locals of the frame are equal to the + locals of a previous frame (see ``recursionindex()``). + + Handle the situation where the recursion process might raise an + exception (for example comparing numpy arrays using equality raises a + TypeError), in which case we do our best to warn the user of the + error and show a limited traceback. + """ + try: + recursionindex = traceback.recursionindex() + except Exception as e: + max_frames = 10 + extraline: Optional[str] = ( + "!!! 
Recursion error detected, but an error occurred locating the origin of recursion.\n" + " The following exception happened when comparing locals in the stack frame:\n" + " {exc_type}: {exc_msg}\n" + " Displaying first and last {max_frames} stack frames out of {total}." + ).format( + exc_type=type(e).__name__, + exc_msg=str(e), + max_frames=max_frames, + total=len(traceback), + ) + # Type ignored because adding two instances of a List subtype + # currently incorrectly has type List instead of the subtype. + traceback = traceback[:max_frames] + traceback[-max_frames:] # type: ignore + else: + if recursionindex is not None: + extraline = "!!! Recursion detected (same locals & position)" + traceback = traceback[: recursionindex + 1] + else: + extraline = None + + return traceback, extraline + + def repr_excinfo( + self, excinfo: ExceptionInfo[BaseException] + ) -> "ExceptionChainRepr": + repr_chain: List[ + Tuple[ReprTraceback, Optional[ReprFileLocation], Optional[str]] + ] = [] + e: Optional[BaseException] = excinfo.value + excinfo_: Optional[ExceptionInfo[BaseException]] = excinfo + descr = None + seen: Set[int] = set() + while e is not None and id(e) not in seen: + seen.add(id(e)) + if excinfo_: + reprtraceback = self.repr_traceback(excinfo_) + reprcrash: Optional[ReprFileLocation] = ( + excinfo_._getreprcrash() if self.style != "value" else None + ) + else: + # Fallback to native repr if the exception doesn't have a traceback: + # ExceptionInfo objects require a full traceback to work. + reprtraceback = ReprTracebackNative( + traceback.format_exception(type(e), e, None) + ) + reprcrash = None + + repr_chain += [(reprtraceback, reprcrash, descr)] + if e.__cause__ is not None and self.chain: + e = e.__cause__ + excinfo_ = ( + ExceptionInfo.from_exc_info((type(e), e, e.__traceback__)) + if e.__traceback__ + else None + ) + descr = "The above exception was the direct cause of the following exception:" + elif ( + e.__context__ is not None and not e.__suppress_context__ and self.chain + ): + e = e.__context__ + excinfo_ = ( + ExceptionInfo.from_exc_info((type(e), e, e.__traceback__)) + if e.__traceback__ + else None + ) + descr = "During handling of the above exception, another exception occurred:" + else: + e = None + repr_chain.reverse() + return ExceptionChainRepr(repr_chain) + + +@attr.s(eq=False, auto_attribs=True) +class TerminalRepr: + def __str__(self) -> str: + # FYI this is called from pytest-xdist's serialization of exception + # information. + io = StringIO() + tw = TerminalWriter(file=io) + self.toterminal(tw) + return io.getvalue().strip() + + def __repr__(self) -> str: + return f"<{self.__class__} instance at {id(self):0x}>" + + def toterminal(self, tw: TerminalWriter) -> None: + raise NotImplementedError() + + +# This class is abstract -- only subclasses are instantiated. +@attr.s(eq=False) +class ExceptionRepr(TerminalRepr): + # Provided by subclasses. 
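The ``__cause__`` / ``__context__`` walk in ``repr_excinfo`` above follows plain Python chaining semantics; a short refresher on what triggers each description string it emits:

    try:
        try:
            {}["missing"]
        except KeyError as exc:
            raise RuntimeError("wrapper") from exc  # sets __cause__: the "direct cause" text
    except RuntimeError as final:
        assert isinstance(final.__cause__, KeyError)
        # raising without "from" inside an except block sets __context__ instead,
        # which maps to the "During handling of the above exception" text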
+    reprcrash: Optional["ReprFileLocation"]
+    reprtraceback: "ReprTraceback"
+
+    def __attrs_post_init__(self) -> None:
+        self.sections: List[Tuple[str, str, str]] = []
+
+    def addsection(self, name: str, content: str, sep: str = "-") -> None:
+        self.sections.append((name, content, sep))
+
+    def toterminal(self, tw: TerminalWriter) -> None:
+        for name, content, sep in self.sections:
+            tw.sep(sep, name)
+            tw.line(content)
+
+
+@attr.s(eq=False, auto_attribs=True)
+class ExceptionChainRepr(ExceptionRepr):
+    chain: Sequence[Tuple["ReprTraceback", Optional["ReprFileLocation"], Optional[str]]]
+
+    def __attrs_post_init__(self) -> None:
+        super().__attrs_post_init__()
+        # reprcrash and reprtraceback of the outermost (the newest) exception
+        # in the chain.
+        self.reprtraceback = self.chain[-1][0]
+        self.reprcrash = self.chain[-1][1]
+
+    def toterminal(self, tw: TerminalWriter) -> None:
+        for element in self.chain:
+            element[0].toterminal(tw)
+            if element[2] is not None:
+                tw.line("")
+                tw.line(element[2], yellow=True)
+        super().toterminal(tw)
+
+
+@attr.s(eq=False, auto_attribs=True)
+class ReprExceptionInfo(ExceptionRepr):
+    reprtraceback: "ReprTraceback"
+    reprcrash: "ReprFileLocation"
+
+    def toterminal(self, tw: TerminalWriter) -> None:
+        self.reprtraceback.toterminal(tw)
+        super().toterminal(tw)
+
+
+@attr.s(eq=False, auto_attribs=True)
+class ReprTraceback(TerminalRepr):
+    reprentries: Sequence[Union["ReprEntry", "ReprEntryNative"]]
+    extraline: Optional[str]
+    style: "_TracebackStyle"
+
+    entrysep: ClassVar = "_ "
+
+    def toterminal(self, tw: TerminalWriter) -> None:
+        # The entries might have different styles.
+        for i, entry in enumerate(self.reprentries):
+            if entry.style == "long":
+                tw.line("")
+            entry.toterminal(tw)
+            if i < len(self.reprentries) - 1:
+                next_entry = self.reprentries[i + 1]
+                if (
+                    entry.style == "long"
+                    or entry.style == "short"
+                    and next_entry.style == "long"
+                ):
+                    tw.sep(self.entrysep)
+
+        if self.extraline:
+            tw.line(self.extraline)
+
+
+class ReprTracebackNative(ReprTraceback):
+    def __init__(self, tblines: Sequence[str]) -> None:
+        self.style = "native"
+        self.reprentries = [ReprEntryNative(tblines)]
+        self.extraline = None
+
+
+@attr.s(eq=False, auto_attribs=True)
+class ReprEntryNative(TerminalRepr):
+    lines: Sequence[str]
+
+    style: ClassVar["_TracebackStyle"] = "native"
+
+    def toterminal(self, tw: TerminalWriter) -> None:
+        tw.write("".join(self.lines))
+
+
+@attr.s(eq=False, auto_attribs=True)
+class ReprEntry(TerminalRepr):
+    lines: Sequence[str]
+    reprfuncargs: Optional["ReprFuncArgs"]
+    reprlocals: Optional["ReprLocals"]
+    reprfileloc: Optional["ReprFileLocation"]
+    style: "_TracebackStyle"
+
+    def _write_entry_lines(self, tw: TerminalWriter) -> None:
+        """Write the source code portions of a list of traceback entries with syntax highlighting.
+
+        Usually entries are lines like these:
+
+            "    x = 1"
+            ">   assert x == 2"
+            "E   assert 1 == 2"
+
+        This function takes care of rendering the "source" portions of it (the lines without
+        the "E" prefix) using syntax highlighting, taking care to not highlighting the ">"
+        character, as doing so might break line continuations.
+ """ + + if not self.lines: + return + + # separate indents and source lines that are not failures: we want to + # highlight the code but not the indentation, which may contain markers + # such as "> assert 0" + fail_marker = f"{FormattedExcinfo.fail_marker} " + indent_size = len(fail_marker) + indents: List[str] = [] + source_lines: List[str] = [] + failure_lines: List[str] = [] + for index, line in enumerate(self.lines): + is_failure_line = line.startswith(fail_marker) + if is_failure_line: + # from this point on all lines are considered part of the failure + failure_lines.extend(self.lines[index:]) + break + else: + if self.style == "value": + source_lines.append(line) + else: + indents.append(line[:indent_size]) + source_lines.append(line[indent_size:]) + + tw._write_source(source_lines, indents) + + # failure lines are always completely red and bold + for line in failure_lines: + tw.line(line, bold=True, red=True) + + def toterminal(self, tw: TerminalWriter) -> None: + if self.style == "short": + assert self.reprfileloc is not None + self.reprfileloc.toterminal(tw) + self._write_entry_lines(tw) + if self.reprlocals: + self.reprlocals.toterminal(tw, indent=" " * 8) + return + + if self.reprfuncargs: + self.reprfuncargs.toterminal(tw) + + self._write_entry_lines(tw) + + if self.reprlocals: + tw.line("") + self.reprlocals.toterminal(tw) + if self.reprfileloc: + if self.lines: + tw.line("") + self.reprfileloc.toterminal(tw) + + def __str__(self) -> str: + return "{}\n{}\n{}".format( + "\n".join(self.lines), self.reprlocals, self.reprfileloc + ) + + +@attr.s(eq=False, auto_attribs=True) +class ReprFileLocation(TerminalRepr): + path: str = attr.ib(converter=str) + lineno: int + message: str + + def toterminal(self, tw: TerminalWriter) -> None: + # Filename and lineno output for each entry, using an output format + # that most editors understand. + msg = self.message + i = msg.find("\n") + if i != -1: + msg = msg[:i] + tw.write(self.path, bold=True, red=True) + tw.line(f":{self.lineno}: {msg}") + + +@attr.s(eq=False, auto_attribs=True) +class ReprLocals(TerminalRepr): + lines: Sequence[str] + + def toterminal(self, tw: TerminalWriter, indent="") -> None: + for line in self.lines: + tw.line(indent + line) + + +@attr.s(eq=False, auto_attribs=True) +class ReprFuncArgs(TerminalRepr): + args: Sequence[Tuple[str, object]] + + def toterminal(self, tw: TerminalWriter) -> None: + if self.args: + linesofar = "" + for name, value in self.args: + ns = f"{name} = {value}" + if len(ns) + len(linesofar) + 2 > tw.fullwidth: + if linesofar: + tw.line(linesofar) + linesofar = ns + else: + if linesofar: + linesofar += ", " + ns + else: + linesofar = ns + if linesofar: + tw.line(linesofar) + tw.line("") + + +def getfslineno(obj: object) -> Tuple[Union[str, Path], int]: + """Return source location (path, lineno) for the given object. + + If the source cannot be determined return ("", -1). + + The line number is 0-based. + """ + # xxx let decorators etc specify a sane ordering + # NOTE: this used to be done in _pytest.compat.getfslineno, initially added + # in 6ec13a2b9. It ("place_as") appears to be something very custom. 
+ obj = get_real_func(obj) + if hasattr(obj, "place_as"): + obj = obj.place_as # type: ignore[attr-defined] + + try: + code = Code.from_function(obj) + except TypeError: + try: + fn = inspect.getsourcefile(obj) or inspect.getfile(obj) # type: ignore[arg-type] + except TypeError: + return "", -1 + + fspath = fn and absolutepath(fn) or "" + lineno = -1 + if fspath: + try: + _, lineno = findsource(obj) + except OSError: + pass + return fspath, lineno + + return code.path, code.firstlineno + + +# Relative paths that we use to filter traceback entries from appearing to the user; +# see filter_traceback. +# note: if we need to add more paths than what we have now we should probably use a list +# for better maintenance. + +_PLUGGY_DIR = Path(pluggy.__file__.rstrip("oc")) +# pluggy is either a package or a single module depending on the version +if _PLUGGY_DIR.name == "__init__.py": + _PLUGGY_DIR = _PLUGGY_DIR.parent +_PYTEST_DIR = Path(_pytest.__file__).parent + + +def filter_traceback(entry: TracebackEntry) -> bool: + """Return True if a TracebackEntry instance should be included in tracebacks. + + We hide traceback entries of: + + * dynamically generated code (no code to show up for it); + * internal traceback from pytest or its internal libraries, py and pluggy. + """ + # entry.path might sometimes return a str object when the entry + # points to dynamically generated code. + # See https://bitbucket.org/pytest-dev/py/issues/71. + raw_filename = entry.frame.code.raw.co_filename + is_generated = "<" in raw_filename and ">" in raw_filename + if is_generated: + return False + + # entry.path might point to a non-existing file, in which case it will + # also return a str object. See #1133. + p = Path(entry.path) + + parents = p.parents + if _PLUGGY_DIR in parents: + return False + if _PYTEST_DIR in parents: + return False + + return True diff --git a/venv/lib/python3.10/site-packages/_pytest/_code/source.py b/venv/lib/python3.10/site-packages/_pytest/_code/source.py new file mode 100644 index 0000000..208cfb8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/_code/source.py @@ -0,0 +1,217 @@ +import ast +import inspect +import textwrap +import tokenize +import types +import warnings +from bisect import bisect_right +from typing import Iterable +from typing import Iterator +from typing import List +from typing import Optional +from typing import overload +from typing import Tuple +from typing import Union + + +class Source: + """An immutable object holding a source code fragment. + + When using Source(...), the source lines are deindented. + """ + + def __init__(self, obj: object = None) -> None: + if not obj: + self.lines: List[str] = [] + elif isinstance(obj, Source): + self.lines = obj.lines + elif isinstance(obj, (tuple, list)): + self.lines = deindent(x.rstrip("\n") for x in obj) + elif isinstance(obj, str): + self.lines = deindent(obj.split("\n")) + else: + try: + rawcode = getrawcode(obj) + src = inspect.getsource(rawcode) + except TypeError: + src = inspect.getsource(obj) # type: ignore[arg-type] + self.lines = deindent(src.split("\n")) + + def __eq__(self, other: object) -> bool: + if not isinstance(other, Source): + return NotImplemented + return self.lines == other.lines + + # Ignore type because of https://github.com/python/mypy/issues/4266. + __hash__ = None # type: ignore + + @overload + def __getitem__(self, key: int) -> str: + ... + + @overload + def __getitem__(self, key: slice) -> "Source": + ... 
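Observable behaviour of the ``Source`` container defined in this hunk, assuming it is importable from the vendored path (a sketch, not a test from this diff):

    from _pytest._code.source import Source

    s = Source(["    if x:", "        y = 1"])
    assert s.lines == ["if x:", "    y = 1"]      # list input is deindented on construction
    assert str(s.getstatement(1)) == "    y = 1"  # statement containing 0-based line 1
    assert len(s[0:1]) == 1                       # slicing returns a new Source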
+ + def __getitem__(self, key: Union[int, slice]) -> Union[str, "Source"]: + if isinstance(key, int): + return self.lines[key] + else: + if key.step not in (None, 1): + raise IndexError("cannot slice a Source with a step") + newsource = Source() + newsource.lines = self.lines[key.start : key.stop] + return newsource + + def __iter__(self) -> Iterator[str]: + return iter(self.lines) + + def __len__(self) -> int: + return len(self.lines) + + def strip(self) -> "Source": + """Return new Source object with trailing and leading blank lines removed.""" + start, end = 0, len(self) + while start < end and not self.lines[start].strip(): + start += 1 + while end > start and not self.lines[end - 1].strip(): + end -= 1 + source = Source() + source.lines[:] = self.lines[start:end] + return source + + def indent(self, indent: str = " " * 4) -> "Source": + """Return a copy of the source object with all lines indented by the + given indent-string.""" + newsource = Source() + newsource.lines = [(indent + line) for line in self.lines] + return newsource + + def getstatement(self, lineno: int) -> "Source": + """Return Source statement which contains the given linenumber + (counted from 0).""" + start, end = self.getstatementrange(lineno) + return self[start:end] + + def getstatementrange(self, lineno: int) -> Tuple[int, int]: + """Return (start, end) tuple which spans the minimal statement region + which containing the given lineno.""" + if not (0 <= lineno < len(self)): + raise IndexError("lineno out of range") + ast, start, end = getstatementrange_ast(lineno, self) + return start, end + + def deindent(self) -> "Source": + """Return a new Source object deindented.""" + newsource = Source() + newsource.lines[:] = deindent(self.lines) + return newsource + + def __str__(self) -> str: + return "\n".join(self.lines) + + +# +# helper functions +# + + +def findsource(obj) -> Tuple[Optional[Source], int]: + try: + sourcelines, lineno = inspect.findsource(obj) + except Exception: + return None, -1 + source = Source() + source.lines = [line.rstrip() for line in sourcelines] + return source, lineno + + +def getrawcode(obj: object, trycall: bool = True) -> types.CodeType: + """Return code object for given function.""" + try: + return obj.__code__ # type: ignore[attr-defined,no-any-return] + except AttributeError: + pass + if trycall: + call = getattr(obj, "__call__", None) + if call and not isinstance(obj, type): + return getrawcode(call, trycall=False) + raise TypeError(f"could not get code object for {obj!r}") + + +def deindent(lines: Iterable[str]) -> List[str]: + return textwrap.dedent("\n".join(lines)).splitlines() + + +def get_statement_startend2(lineno: int, node: ast.AST) -> Tuple[int, Optional[int]]: + # Flatten all statements and except handlers into one lineno-list. + # AST's line numbers start indexing at 1. + values: List[int] = [] + for x in ast.walk(node): + if isinstance(x, (ast.stmt, ast.ExceptHandler)): + # Before Python 3.8, the lineno of a decorated class or function pointed at the decorator. + # Since Python 3.8, the lineno points to the class/def, so need to include the decorators. + if isinstance(x, (ast.ClassDef, ast.FunctionDef, ast.AsyncFunctionDef)): + for d in x.decorator_list: + values.append(d.lineno - 1) + values.append(x.lineno - 1) + for name in ("finalbody", "orelse"): + val: Optional[List[ast.stmt]] = getattr(x, name, None) + if val: + # Treat the finally/orelse part as its own statement. 
+ values.append(val[0].lineno - 1 - 1) + values.sort() + insert_index = bisect_right(values, lineno) + start = values[insert_index - 1] + if insert_index >= len(values): + end = None + else: + end = values[insert_index] + return start, end + + +def getstatementrange_ast( + lineno: int, + source: Source, + assertion: bool = False, + astnode: Optional[ast.AST] = None, +) -> Tuple[ast.AST, int, int]: + if astnode is None: + content = str(source) + # See #4260: + # Don't produce duplicate warnings when compiling source to find AST. + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + astnode = ast.parse(content, "source", "exec") + + start, end = get_statement_startend2(lineno, astnode) + # We need to correct the end: + # - ast-parsing strips comments + # - there might be empty lines + # - we might have lesser indented code blocks at the end + if end is None: + end = len(source.lines) + + if end > start + 1: + # Make sure we don't span differently indented code blocks + # by using the BlockFinder helper used which inspect.getsource() uses itself. + block_finder = inspect.BlockFinder() + # If we start with an indented line, put blockfinder to "started" mode. + block_finder.started = source.lines[start][0].isspace() + it = ((x + "\n") for x in source.lines[start:end]) + try: + for tok in tokenize.generate_tokens(lambda: next(it)): + block_finder.tokeneater(*tok) + except (inspect.EndOfBlock, IndentationError): + end = block_finder.last + start + except Exception: + pass + + # The end might still point to a comment or empty line, correct it. + while end: + line = source.lines[end - 1].lstrip() + if line.startswith("#") or not line: + end -= 1 + else: + break + return astnode, start, end diff --git a/venv/lib/python3.10/site-packages/_pytest/_io/__init__.py b/venv/lib/python3.10/site-packages/_pytest/_io/__init__.py new file mode 100644 index 0000000..db001e9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/_io/__init__.py @@ -0,0 +1,8 @@ +from .terminalwriter import get_terminal_width +from .terminalwriter import TerminalWriter + + +__all__ = [ + "TerminalWriter", + "get_terminal_width", +] diff --git a/venv/lib/python3.10/site-packages/_pytest/_io/saferepr.py b/venv/lib/python3.10/site-packages/_pytest/_io/saferepr.py new file mode 100644 index 0000000..e7ff5ca --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/_io/saferepr.py @@ -0,0 +1,153 @@ +import pprint +import reprlib +from typing import Any +from typing import Dict +from typing import IO +from typing import Optional + + +def _try_repr_or_str(obj: object) -> str: + try: + return repr(obj) + except (KeyboardInterrupt, SystemExit): + raise + except BaseException: + return f'{type(obj).__name__}("{obj}")' + + +def _format_repr_exception(exc: BaseException, obj: object) -> str: + try: + exc_info = _try_repr_or_str(exc) + except (KeyboardInterrupt, SystemExit): + raise + except BaseException as exc: + exc_info = f"unpresentable exception ({_try_repr_or_str(exc)})" + return "<[{} raised in repr()] {} object at 0x{:x}>".format( + exc_info, type(obj).__name__, id(obj) + ) + + +def _ellipsize(s: str, maxsize: int) -> str: + if len(s) > maxsize: + i = max(0, (maxsize - 3) // 2) + j = max(0, maxsize - 3 - i) + return s[:i] + "..." + s[len(s) - j :] + return s + + +class SafeRepr(reprlib.Repr): + """ + repr.Repr that limits the resulting size of repr() and includes + information on exceptions raised during the call. 
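The class begun above backs the module-level ``saferepr()`` defined further down in this file; its contract in miniature (the ``Broken`` class is illustrative):

    from _pytest._io.saferepr import SafeRepr

    class Broken:
        def __repr__(self):
            raise RuntimeError("boom")

    sr = SafeRepr(maxsize=60)
    print(sr.repr(Broken()))   # "<[RuntimeError('boom') raised in repr()] Broken object at 0x...>"
    print(sr.repr("x" * 500))  # full repr is computed, then ellipsized to roughly 60 chars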
+ """ + + def __init__(self, maxsize: Optional[int]) -> None: + """ + :param maxsize: + If not None, will truncate the resulting repr to that specific size, using ellipsis + somewhere in the middle to hide the extra text. + If None, will not impose any size limits on the returning repr. + """ + super().__init__() + # ``maxstring`` is used by the superclass, and needs to be an int; using a + # very large number in case maxsize is None, meaning we want to disable + # truncation. + self.maxstring = maxsize if maxsize is not None else 1_000_000_000 + self.maxsize = maxsize + + def repr(self, x: object) -> str: + try: + s = super().repr(x) + except (KeyboardInterrupt, SystemExit): + raise + except BaseException as exc: + s = _format_repr_exception(exc, x) + if self.maxsize is not None: + s = _ellipsize(s, self.maxsize) + return s + + def repr_instance(self, x: object, level: int) -> str: + try: + s = repr(x) + except (KeyboardInterrupt, SystemExit): + raise + except BaseException as exc: + s = _format_repr_exception(exc, x) + if self.maxsize is not None: + s = _ellipsize(s, self.maxsize) + return s + + +def safeformat(obj: object) -> str: + """Return a pretty printed string for the given object. + + Failing __repr__ functions of user instances will be represented + with a short exception info. + """ + try: + return pprint.pformat(obj) + except Exception as exc: + return _format_repr_exception(exc, obj) + + +# Maximum size of overall repr of objects to display during assertion errors. +DEFAULT_REPR_MAX_SIZE = 240 + + +def saferepr(obj: object, maxsize: Optional[int] = DEFAULT_REPR_MAX_SIZE) -> str: + """Return a size-limited safe repr-string for the given object. + + Failing __repr__ functions of user instances will be represented + with a short exception info and 'saferepr' generally takes + care to never raise exceptions itself. + + This function is a wrapper around the Repr/reprlib functionality of the + stdlib. + """ + return SafeRepr(maxsize).repr(obj) + + +class AlwaysDispatchingPrettyPrinter(pprint.PrettyPrinter): + """PrettyPrinter that always dispatches (regardless of width).""" + + def _format( + self, + object: object, + stream: IO[str], + indent: int, + allowance: int, + context: Dict[int, Any], + level: int, + ) -> None: + # Type ignored because _dispatch is private. + p = self._dispatch.get(type(object).__repr__, None) # type: ignore[attr-defined] + + objid = id(object) + if objid in context or p is None: + # Type ignored because _format is private. 
+ super()._format( # type: ignore[misc] + object, + stream, + indent, + allowance, + context, + level, + ) + return + + context[objid] = 1 + p(self, object, stream, indent, allowance, context, level + 1) + del context[objid] + + +def _pformat_dispatch( + object: object, + indent: int = 1, + width: int = 80, + depth: Optional[int] = None, + *, + compact: bool = False, +) -> str: + return AlwaysDispatchingPrettyPrinter( + indent=indent, width=width, depth=depth, compact=compact + ).pformat(object) diff --git a/venv/lib/python3.10/site-packages/_pytest/_io/terminalwriter.py b/venv/lib/python3.10/site-packages/_pytest/_io/terminalwriter.py new file mode 100644 index 0000000..379035d --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/_io/terminalwriter.py @@ -0,0 +1,233 @@ +"""Helper functions for writing to terminals and files.""" +import os +import shutil +import sys +from typing import Optional +from typing import Sequence +from typing import TextIO + +from .wcwidth import wcswidth +from _pytest.compat import final + + +# This code was initially copied from py 1.8.1, file _io/terminalwriter.py. + + +def get_terminal_width() -> int: + width, _ = shutil.get_terminal_size(fallback=(80, 24)) + + # The Windows get_terminal_size may be bogus, let's sanify a bit. + if width < 40: + width = 80 + + return width + + +def should_do_markup(file: TextIO) -> bool: + if os.environ.get("PY_COLORS") == "1": + return True + if os.environ.get("PY_COLORS") == "0": + return False + if "NO_COLOR" in os.environ: + return False + if "FORCE_COLOR" in os.environ: + return True + return ( + hasattr(file, "isatty") and file.isatty() and os.environ.get("TERM") != "dumb" + ) + + +@final +class TerminalWriter: + _esctable = dict( + black=30, + red=31, + green=32, + yellow=33, + blue=34, + purple=35, + cyan=36, + white=37, + Black=40, + Red=41, + Green=42, + Yellow=43, + Blue=44, + Purple=45, + Cyan=46, + White=47, + bold=1, + light=2, + blink=5, + invert=7, + ) + + def __init__(self, file: Optional[TextIO] = None) -> None: + if file is None: + file = sys.stdout + if hasattr(file, "isatty") and file.isatty() and sys.platform == "win32": + try: + import colorama + except ImportError: + pass + else: + file = colorama.AnsiToWin32(file).stream + assert file is not None + self._file = file + self.hasmarkup = should_do_markup(file) + self._current_line = "" + self._terminal_width: Optional[int] = None + self.code_highlight = True + + @property + def fullwidth(self) -> int: + if self._terminal_width is not None: + return self._terminal_width + return get_terminal_width() + + @fullwidth.setter + def fullwidth(self, value: int) -> None: + self._terminal_width = value + + @property + def width_of_current_line(self) -> int: + """Return an estimate of the width so far in the current line.""" + return wcswidth(self._current_line) + + def markup(self, text: str, **markup: bool) -> str: + for name in markup: + if name not in self._esctable: + raise ValueError(f"unknown markup: {name!r}") + if self.hasmarkup: + esc = [self._esctable[name] for name, on in markup.items() if on] + if esc: + text = "".join("\x1b[%sm" % cod for cod in esc) + text + "\x1b[0m" + return text + + def sep( + self, + sepchar: str, + title: Optional[str] = None, + fullwidth: Optional[int] = None, + **markup: bool, + ) -> None: + if fullwidth is None: + fullwidth = self.fullwidth + # The goal is to have the line be as long as possible + # under the condition that len(line) <= fullwidth. 
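The effect of ``sep()`` here for a fixed width, on a non-Windows platform where no column is reserved (an illustrative sketch):

    from _pytest._io import TerminalWriter

    tw = TerminalWriter()
    tw.fullwidth = 20
    tw.sep("=", "title")  # writes a separator like "====== title =======" padded toward the full width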
+ if sys.platform == "win32": + # If we print in the last column on windows we are on a + # new line but there is no way to verify/neutralize this + # (we may not know the exact line width). + # So let's be defensive to avoid empty lines in the output. + fullwidth -= 1 + if title is not None: + # we want 2 + 2*len(fill) + len(title) <= fullwidth + # i.e. 2 + 2*len(sepchar)*N + len(title) <= fullwidth + # 2*len(sepchar)*N <= fullwidth - len(title) - 2 + # N <= (fullwidth - len(title) - 2) // (2*len(sepchar)) + N = max((fullwidth - len(title) - 2) // (2 * len(sepchar)), 1) + fill = sepchar * N + line = f"{fill} {title} {fill}" + else: + # we want len(sepchar)*N <= fullwidth + # i.e. N <= fullwidth // len(sepchar) + line = sepchar * (fullwidth // len(sepchar)) + # In some situations there is room for an extra sepchar at the right, + # in particular if we consider that with a sepchar like "_ " the + # trailing space is not important at the end of the line. + if len(line) + len(sepchar.rstrip()) <= fullwidth: + line += sepchar.rstrip() + + self.line(line, **markup) + + def write(self, msg: str, *, flush: bool = False, **markup: bool) -> None: + if msg: + current_line = msg.rsplit("\n", 1)[-1] + if "\n" in msg: + self._current_line = current_line + else: + self._current_line += current_line + + msg = self.markup(msg, **markup) + + try: + self._file.write(msg) + except UnicodeEncodeError: + # Some environments don't support printing general Unicode + # strings, due to misconfiguration or otherwise; in that case, + # print the string escaped to ASCII. + # When the Unicode situation improves we should consider + # letting the error propagate instead of masking it (see #7475 + # for one brief attempt). + msg = msg.encode("unicode-escape").decode("ascii") + self._file.write(msg) + + if flush: + self.flush() + + def line(self, s: str = "", **markup: bool) -> None: + self.write(s, **markup) + self.write("\n") + + def flush(self) -> None: + self._file.flush() + + def _write_source(self, lines: Sequence[str], indents: Sequence[str] = ()) -> None: + """Write lines of source code possibly highlighted. + + Keeping this private for now because the API is clunky. We should discuss how + to evolve the terminal writer so we can have more precise color support, for example + being able to write part of a line in one color and the rest in another, and so on. + """ + if indents and len(indents) != len(lines): + raise ValueError( + "indents size ({}) should have same size as lines ({})".format( + len(indents), len(lines) + ) + ) + if not indents: + indents = [""] * len(lines) + source = "\n".join(lines) + new_lines = self._highlight(source).splitlines() + for indent, new_line in zip(indents, new_lines): + self.line(indent + new_line) + + def _highlight(self, source: str) -> str: + """Highlight the given source code if we have markup support.""" + from _pytest.config.exceptions import UsageError + + if not self.hasmarkup or not self.code_highlight: + return source + try: + from pygments.formatters.terminal import TerminalFormatter + from pygments.lexers.python import PythonLexer + from pygments import highlight + import pygments.util + except ImportError: + return source + else: + try: + highlighted: str = highlight( + source, + PythonLexer(), + TerminalFormatter( + bg=os.getenv("PYTEST_THEME_MODE", "dark"), + style=os.getenv("PYTEST_THEME"), + ), + ) + return highlighted + except pygments.util.ClassNotFound: + raise UsageError( + "PYTEST_THEME environment variable had an invalid value: '{}'. 
" + "Only valid pygment styles are allowed.".format( + os.getenv("PYTEST_THEME") + ) + ) + except pygments.util.OptionError: + raise UsageError( + "PYTEST_THEME_MODE environment variable had an invalid value: '{}'. " + "The only allowed values are 'dark' and 'light'.".format( + os.getenv("PYTEST_THEME_MODE") + ) + ) diff --git a/venv/lib/python3.10/site-packages/_pytest/_io/wcwidth.py b/venv/lib/python3.10/site-packages/_pytest/_io/wcwidth.py new file mode 100644 index 0000000..e5c7bf4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/_io/wcwidth.py @@ -0,0 +1,55 @@ +import unicodedata +from functools import lru_cache + + +@lru_cache(100) +def wcwidth(c: str) -> int: + """Determine how many columns are needed to display a character in a terminal. + + Returns -1 if the character is not printable. + Returns 0, 1 or 2 for other characters. + """ + o = ord(c) + + # ASCII fast path. + if 0x20 <= o < 0x07F: + return 1 + + # Some Cf/Zp/Zl characters which should be zero-width. + if ( + o == 0x0000 + or 0x200B <= o <= 0x200F + or 0x2028 <= o <= 0x202E + or 0x2060 <= o <= 0x2063 + ): + return 0 + + category = unicodedata.category(c) + + # Control characters. + if category == "Cc": + return -1 + + # Combining characters with zero width. + if category in ("Me", "Mn"): + return 0 + + # Full/Wide east asian characters. + if unicodedata.east_asian_width(c) in ("F", "W"): + return 2 + + return 1 + + +def wcswidth(s: str) -> int: + """Determine how many columns are needed to display a string in a terminal. + + Returns -1 if the string contains non-printable characters. + """ + width = 0 + for c in unicodedata.normalize("NFC", s): + wc = wcwidth(c) + if wc < 0: + return -1 + width += wc + return width diff --git a/venv/lib/python3.10/site-packages/_pytest/_version.py b/venv/lib/python3.10/site-packages/_pytest/_version.py new file mode 100644 index 0000000..9094161 --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/_version.py @@ -0,0 +1,5 @@ +# coding: utf-8 +# file generated by setuptools_scm +# don't change, don't track in version control +version = '7.1.2' +version_tuple = (7, 1, 2) diff --git a/venv/lib/python3.10/site-packages/_pytest/assertion/__init__.py b/venv/lib/python3.10/site-packages/_pytest/assertion/__init__.py new file mode 100644 index 0000000..480a26a --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/assertion/__init__.py @@ -0,0 +1,181 @@ +"""Support for presenting detailed information in failing assertions.""" +import sys +from typing import Any +from typing import Generator +from typing import List +from typing import Optional +from typing import TYPE_CHECKING + +from _pytest.assertion import rewrite +from _pytest.assertion import truncate +from _pytest.assertion import util +from _pytest.assertion.rewrite import assertstate_key +from _pytest.config import Config +from _pytest.config import hookimpl +from _pytest.config.argparsing import Parser +from _pytest.nodes import Item + +if TYPE_CHECKING: + from _pytest.main import Session + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("debugconfig") + group.addoption( + "--assert", + action="store", + dest="assertmode", + choices=("rewrite", "plain"), + default="rewrite", + metavar="MODE", + help=( + "Control assertion debugging tools.\n" + "'plain' performs no assertion debugging.\n" + "'rewrite' (the default) rewrites assert statements in test modules" + " on import to provide assert expression information." 
+ ), + ) + parser.addini( + "enable_assertion_pass_hook", + type="bool", + default=False, + help="Enables the pytest_assertion_pass hook." + "Make sure to delete any previously generated pyc cache files.", + ) + + +def register_assert_rewrite(*names: str) -> None: + """Register one or more module names to be rewritten on import. + + This function will make sure that this module or all modules inside + the package will get their assert statements rewritten. + Thus you should make sure to call this before the module is + actually imported, usually in your __init__.py if you are a plugin + using a package. + + :raises TypeError: If the given module names are not strings. + """ + for name in names: + if not isinstance(name, str): + msg = "expected module names as *args, got {0} instead" # type: ignore[unreachable] + raise TypeError(msg.format(repr(names))) + for hook in sys.meta_path: + if isinstance(hook, rewrite.AssertionRewritingHook): + importhook = hook + break + else: + # TODO(typing): Add a protocol for mark_rewrite() and use it + # for importhook and for PytestPluginManager.rewrite_hook. + importhook = DummyRewriteHook() # type: ignore + importhook.mark_rewrite(*names) + + +class DummyRewriteHook: + """A no-op import hook for when rewriting is disabled.""" + + def mark_rewrite(self, *names: str) -> None: + pass + + +class AssertionState: + """State for the assertion plugin.""" + + def __init__(self, config: Config, mode) -> None: + self.mode = mode + self.trace = config.trace.root.get("assertion") + self.hook: Optional[rewrite.AssertionRewritingHook] = None + + +def install_importhook(config: Config) -> rewrite.AssertionRewritingHook: + """Try to install the rewrite hook, raise SystemError if it fails.""" + config.stash[assertstate_key] = AssertionState(config, "rewrite") + config.stash[assertstate_key].hook = hook = rewrite.AssertionRewritingHook(config) + sys.meta_path.insert(0, hook) + config.stash[assertstate_key].trace("installed rewrite import hook") + + def undo() -> None: + hook = config.stash[assertstate_key].hook + if hook is not None and hook in sys.meta_path: + sys.meta_path.remove(hook) + + config.add_cleanup(undo) + return hook + + +def pytest_collection(session: "Session") -> None: + # This hook is only called when test modules are collected + # so for example not in the managing process of pytest-xdist + # (which does not collect test modules). + assertstate = session.config.stash.get(assertstate_key, None) + if assertstate: + if assertstate.hook is not None: + assertstate.hook.set_session(session) + + +@hookimpl(tryfirst=True, hookwrapper=True) +def pytest_runtest_protocol(item: Item) -> Generator[None, None, None]: + """Setup the pytest_assertrepr_compare and pytest_assertion_pass hooks. + + The rewrite module will use util._reprcompare if it exists to use custom + reporting via the pytest_assertrepr_compare hook. This sets up this custom + comparison for the test. + """ + + ihook = item.ihook + + def callbinrepr(op, left: object, right: object) -> Optional[str]: + """Call the pytest_assertrepr_compare hook and prepare the result. + + This uses the first result from the hook and then ensures the + following: + * Overly verbose explanations are truncated unless configured otherwise + (eg. if running in verbose mode). + * Embedded newlines are escaped to help util.format_explanation() + later. + * If the rewrite mode is used embedded %-characters are replaced + to protect later % formatting. 
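``register_assert_rewrite`` defined above is the same function re-exported as ``pytest.register_assert_rewrite``; typical use from a plugin package's ``__init__.py``, before the helper module is imported (the module name is hypothetical):

    import pytest

    pytest.register_assert_rewrite("myplugin.helpers")
    from myplugin import helpers  # asserts in helpers.py are now rewritten on import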
+ + The result can be formatted by util.format_explanation() for + pretty printing. + """ + hook_result = ihook.pytest_assertrepr_compare( + config=item.config, op=op, left=left, right=right + ) + for new_expl in hook_result: + if new_expl: + new_expl = truncate.truncate_if_required(new_expl, item) + new_expl = [line.replace("\n", "\\n") for line in new_expl] + res = "\n~".join(new_expl) + if item.config.getvalue("assertmode") == "rewrite": + res = res.replace("%", "%%") + return res + return None + + saved_assert_hooks = util._reprcompare, util._assertion_pass + util._reprcompare = callbinrepr + util._config = item.config + + if ihook.pytest_assertion_pass.get_hookimpls(): + + def call_assertion_pass_hook(lineno: int, orig: str, expl: str) -> None: + ihook.pytest_assertion_pass(item=item, lineno=lineno, orig=orig, expl=expl) + + util._assertion_pass = call_assertion_pass_hook + + yield + + util._reprcompare, util._assertion_pass = saved_assert_hooks + util._config = None + + +def pytest_sessionfinish(session: "Session") -> None: + assertstate = session.config.stash.get(assertstate_key, None) + if assertstate: + if assertstate.hook is not None: + assertstate.hook.set_session(None) + + +def pytest_assertrepr_compare( + config: Config, op: str, left: Any, right: Any +) -> Optional[List[str]]: + return util.assertrepr_compare(config=config, op=op, left=left, right=right) diff --git a/venv/lib/python3.10/site-packages/_pytest/assertion/rewrite.py b/venv/lib/python3.10/site-packages/_pytest/assertion/rewrite.py new file mode 100644 index 0000000..8109676 --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/assertion/rewrite.py @@ -0,0 +1,1129 @@ +"""Rewrite assertion AST to produce nice error messages.""" +import ast +import errno +import functools +import importlib.abc +import importlib.machinery +import importlib.util +import io +import itertools +import marshal +import os +import struct +import sys +import tokenize +import types +from pathlib import Path +from pathlib import PurePath +from typing import Callable +from typing import Dict +from typing import IO +from typing import Iterable +from typing import Iterator +from typing import List +from typing import Optional +from typing import Sequence +from typing import Set +from typing import Tuple +from typing import TYPE_CHECKING +from typing import Union + +from _pytest._io.saferepr import DEFAULT_REPR_MAX_SIZE +from _pytest._io.saferepr import saferepr +from _pytest._version import version +from _pytest.assertion import util +from _pytest.assertion.util import ( # noqa: F401 + format_explanation as _format_explanation, +) +from _pytest.config import Config +from _pytest.main import Session +from _pytest.pathlib import absolutepath +from _pytest.pathlib import fnmatch_ex +from _pytest.stash import StashKey + +if TYPE_CHECKING: + from _pytest.assertion import AssertionState + + +assertstate_key = StashKey["AssertionState"]() + + +# pytest caches rewritten pycs in pycache dirs +PYTEST_TAG = f"{sys.implementation.cache_tag}-pytest-{version}" +PYC_EXT = ".py" + (__debug__ and "c" or "o") +PYC_TAIL = "." 
+ PYTEST_TAG + PYC_EXT + + +class AssertionRewritingHook(importlib.abc.MetaPathFinder, importlib.abc.Loader): + """PEP302/PEP451 import hook which rewrites asserts.""" + + def __init__(self, config: Config) -> None: + self.config = config + try: + self.fnpats = config.getini("python_files") + except ValueError: + self.fnpats = ["test_*.py", "*_test.py"] + self.session: Optional[Session] = None + self._rewritten_names: Dict[str, Path] = {} + self._must_rewrite: Set[str] = set() + # flag to guard against trying to rewrite a pyc file while we are already writing another pyc file, + # which might result in infinite recursion (#3506) + self._writing_pyc = False + self._basenames_to_check_rewrite = {"conftest"} + self._marked_for_rewrite_cache: Dict[str, bool] = {} + self._session_paths_checked = False + + def set_session(self, session: Optional[Session]) -> None: + self.session = session + self._session_paths_checked = False + + # Indirection so we can mock calls to find_spec originated from the hook during testing + _find_spec = importlib.machinery.PathFinder.find_spec + + def find_spec( + self, + name: str, + path: Optional[Sequence[Union[str, bytes]]] = None, + target: Optional[types.ModuleType] = None, + ) -> Optional[importlib.machinery.ModuleSpec]: + if self._writing_pyc: + return None + state = self.config.stash[assertstate_key] + if self._early_rewrite_bailout(name, state): + return None + state.trace("find_module called for: %s" % name) + + # Type ignored because mypy is confused about the `self` binding here. + spec = self._find_spec(name, path) # type: ignore + if ( + # the import machinery could not find a file to import + spec is None + # this is a namespace package (without `__init__.py`) + # there's nothing to rewrite there + or spec.origin is None + # we can only rewrite source files + or not isinstance(spec.loader, importlib.machinery.SourceFileLoader) + # if the file doesn't exist, we can't rewrite it + or not os.path.exists(spec.origin) + ): + return None + else: + fn = spec.origin + + if not self._should_rewrite(name, fn, state): + return None + + return importlib.util.spec_from_file_location( + name, + fn, + loader=self, + submodule_search_locations=spec.submodule_search_locations, + ) + + def create_module( + self, spec: importlib.machinery.ModuleSpec + ) -> Optional[types.ModuleType]: + return None # default behaviour is fine + + def exec_module(self, module: types.ModuleType) -> None: + assert module.__spec__ is not None + assert module.__spec__.origin is not None + fn = Path(module.__spec__.origin) + state = self.config.stash[assertstate_key] + + self._rewritten_names[module.__name__] = fn + + # The requested module looks like a test file, so rewrite it. This is + # the most magical part of the process: load the source, rewrite the + # asserts, and load the rewritten source. We also cache the rewritten + # module code in a special pyc. We must be aware of the possibility of + # concurrent pytest processes rewriting and loading pycs. To avoid + # tricky race conditions, we maintain the following invariant: The + # cached pyc is always a complete, valid pyc. Operations on it must be + # atomic. POSIX's atomic rename comes in handy. 
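The cache file those invariants protect is named from the interpreter tag plus the vendored pytest version, so for this venv (CPython 3.10, pytest 7.1.2):

    # sys.implementation.cache_tag -> "cpython-310"
    # PYTEST_TAG                   -> "cpython-310-pytest-7.1.2"
    # a rewritten foo.py is cached as __pycache__/foo.cpython-310-pytest-7.1.2.pyc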
+ write = not sys.dont_write_bytecode + cache_dir = get_cache_dir(fn) + if write: + ok = try_makedirs(cache_dir) + if not ok: + write = False + state.trace(f"read only directory: {cache_dir}") + + cache_name = fn.name[:-3] + PYC_TAIL + pyc = cache_dir / cache_name + # Notice that even if we're in a read-only directory, I'm going + # to check for a cached pyc. This may not be optimal... + co = _read_pyc(fn, pyc, state.trace) + if co is None: + state.trace(f"rewriting {fn!r}") + source_stat, co = _rewrite_test(fn, self.config) + if write: + self._writing_pyc = True + try: + _write_pyc(state, co, source_stat, pyc) + finally: + self._writing_pyc = False + else: + state.trace(f"found cached rewritten pyc for {fn}") + exec(co, module.__dict__) + + def _early_rewrite_bailout(self, name: str, state: "AssertionState") -> bool: + """A fast way to get out of rewriting modules. + + Profiling has shown that the call to PathFinder.find_spec (inside of + the find_spec from this class) is a major slowdown, so, this method + tries to filter what we're sure won't be rewritten before getting to + it. + """ + if self.session is not None and not self._session_paths_checked: + self._session_paths_checked = True + for initial_path in self.session._initialpaths: + # Make something as c:/projects/my_project/path.py -> + # ['c:', 'projects', 'my_project', 'path.py'] + parts = str(initial_path).split(os.path.sep) + # add 'path' to basenames to be checked. + self._basenames_to_check_rewrite.add(os.path.splitext(parts[-1])[0]) + + # Note: conftest already by default in _basenames_to_check_rewrite. + parts = name.split(".") + if parts[-1] in self._basenames_to_check_rewrite: + return False + + # For matching the name it must be as if it was a filename. + path = PurePath(os.path.sep.join(parts) + ".py") + + for pat in self.fnpats: + # if the pattern contains subdirectories ("tests/**.py" for example) we can't bail out based + # on the name alone because we need to match against the full path + if os.path.dirname(pat): + return False + if fnmatch_ex(pat, path): + return False + + if self._is_marked_for_rewrite(name, state): + return False + + state.trace(f"early skip of rewriting module: {name}") + return True + + def _should_rewrite(self, name: str, fn: str, state: "AssertionState") -> bool: + # always rewrite conftest files + if os.path.basename(fn) == "conftest.py": + state.trace(f"rewriting conftest file: {fn!r}") + return True + + if self.session is not None: + if self.session.isinitpath(absolutepath(fn)): + state.trace(f"matched test file (was specified on cmdline): {fn!r}") + return True + + # modules not passed explicitly on the command line are only + # rewritten if they match the naming convention for test files + fn_path = PurePath(fn) + for pat in self.fnpats: + if fnmatch_ex(pat, fn_path): + state.trace(f"matched test file {fn!r}") + return True + + return self._is_marked_for_rewrite(name, state) + + def _is_marked_for_rewrite(self, name: str, state: "AssertionState") -> bool: + try: + return self._marked_for_rewrite_cache[name] + except KeyError: + for marked in self._must_rewrite: + if name == marked or name.startswith(marked + "."): + state.trace(f"matched marked file {name!r} (from {marked!r})") + self._marked_for_rewrite_cache[name] = True + return True + + self._marked_for_rewrite_cache[name] = False + return False + + def mark_rewrite(self, *names: str) -> None: + """Mark import names as needing to be rewritten. 
+
+ The named module or package as well as any nested modules will
+ be rewritten on import.
+ """
+ already_imported = (
+ set(names).intersection(sys.modules).difference(self._rewritten_names)
+ )
+ for name in already_imported:
+ mod = sys.modules[name]
+ if not AssertionRewriter.is_rewrite_disabled(
+ mod.__doc__ or ""
+ ) and not isinstance(mod.__loader__, type(self)):
+ self._warn_already_imported(name)
+ self._must_rewrite.update(names)
+ self._marked_for_rewrite_cache.clear()
+
+ def _warn_already_imported(self, name: str) -> None:
+ from _pytest.warning_types import PytestAssertRewriteWarning
+
+ self.config.issue_config_time_warning(
+ PytestAssertRewriteWarning(
+ "Module already imported so cannot be rewritten: %s" % name
+ ),
+ stacklevel=5,
+ )
+
+ def get_data(self, pathname: Union[str, bytes]) -> bytes:
+ """Optional PEP302 get_data API."""
+ with open(pathname, "rb") as f:
+ return f.read()
+
+ if sys.version_info >= (3, 10):
+
+ def get_resource_reader(self, name: str) -> importlib.abc.TraversableResources: # type: ignore
+ if sys.version_info < (3, 11):
+ from importlib.readers import FileReader
+ else:
+ from importlib.resources.readers import FileReader
+
+ return FileReader(types.SimpleNamespace(path=self._rewritten_names[name]))
+
+
+def _write_pyc_fp(
+ fp: IO[bytes], source_stat: os.stat_result, co: types.CodeType
+) -> None:
+ # Technically, we don't have to have the same pyc format as
+ # (C)Python, since these "pycs" should never be seen by builtin
+ # import. However, there's little reason to deviate.
+ fp.write(importlib.util.MAGIC_NUMBER)
+ # https://www.python.org/dev/peps/pep-0552/
+ flags = b"\x00\x00\x00\x00"
+ fp.write(flags)
+ # as of now, bytecode header expects 32-bit numbers for size and mtime (#4903)
+ mtime = int(source_stat.st_mtime) & 0xFFFFFFFF
+ size = source_stat.st_size & 0xFFFFFFFF
+ # "<LL" stands for 2 unsigned longs, little-endian.
+ fp.write(struct.pack("<LL", mtime, size))
+ fp.write(marshal.dumps(co))
+
+
+if sys.platform == "win32":
+ from atomicwrites import atomic_write
+
+ def _write_pyc(
+ state: "AssertionState",
+ co: types.CodeType,
+ source_stat: os.stat_result,
+ pyc: Path,
+ ) -> bool:
+ try:
+ with atomic_write(os.fspath(pyc), mode="wb", overwrite=True) as fp:
+ _write_pyc_fp(fp, source_stat, co)
+ except OSError as e:
+ state.trace(f"error writing pyc file at {pyc}: {e}")
+ # we ignore any failure to write the cache file
+ # there are many reasons, permission-denied, pycache dir being a
+ # file etc.
+ return False
+ return True
+
+else:
+
+ def _write_pyc(
+ state: "AssertionState",
+ co: types.CodeType,
+ source_stat: os.stat_result,
+ pyc: Path,
+ ) -> bool:
+ proc_pyc = f"{pyc}.{os.getpid()}"
+ try:
+ fp = open(proc_pyc, "wb")
+ except OSError as e:
+ state.trace(f"error writing pyc file at {proc_pyc}: errno={e.errno}")
+ return False
+
+ try:
+ _write_pyc_fp(fp, source_stat, co)
+ os.rename(proc_pyc, pyc)
+ except OSError as e:
+ state.trace(f"error writing pyc file at {pyc}: {e}")
+ # we ignore any failure to write the cache file
+ # there are many reasons, permission-denied, pycache dir being a
+ # file etc.
+ return False
+ finally:
+ fp.close()
+ return True
+
+
+def _rewrite_test(fn: Path, config: Config) -> Tuple[os.stat_result, types.CodeType]:
+ """Read and rewrite *fn* and return the code object."""
+ stat = os.stat(fn)
+ source = fn.read_bytes()
+ strfn = str(fn)
+ tree = ast.parse(source, filename=strfn)
+ rewrite_asserts(tree, source, strfn, config)
+ co = compile(tree, strfn, "exec", dont_inherit=True)
+ return stat, co
+
+
+def _read_pyc(
+ source: Path, pyc: Path, trace: Callable[[str], None] = lambda x: None
+) -> Optional[types.CodeType]:
+ """Possibly read a pytest pyc containing rewritten code.
+
+ Return rewritten code if successful or None if not.
+ """ + try: + fp = open(pyc, "rb") + except OSError: + return None + with fp: + try: + stat_result = os.stat(source) + mtime = int(stat_result.st_mtime) + size = stat_result.st_size + data = fp.read(16) + except OSError as e: + trace(f"_read_pyc({source}): OSError {e}") + return None + # Check for invalid or out of date pyc file. + if len(data) != (16): + trace("_read_pyc(%s): invalid pyc (too short)" % source) + return None + if data[:4] != importlib.util.MAGIC_NUMBER: + trace("_read_pyc(%s): invalid pyc (bad magic number)" % source) + return None + if data[4:8] != b"\x00\x00\x00\x00": + trace("_read_pyc(%s): invalid pyc (unsupported flags)" % source) + return None + mtime_data = data[8:12] + if int.from_bytes(mtime_data, "little") != mtime & 0xFFFFFFFF: + trace("_read_pyc(%s): out of date" % source) + return None + size_data = data[12:16] + if int.from_bytes(size_data, "little") != size & 0xFFFFFFFF: + trace("_read_pyc(%s): invalid pyc (incorrect size)" % source) + return None + try: + co = marshal.load(fp) + except Exception as e: + trace(f"_read_pyc({source}): marshal.load error {e}") + return None + if not isinstance(co, types.CodeType): + trace("_read_pyc(%s): not a code object" % source) + return None + return co + + +def rewrite_asserts( + mod: ast.Module, + source: bytes, + module_path: Optional[str] = None, + config: Optional[Config] = None, +) -> None: + """Rewrite the assert statements in mod.""" + AssertionRewriter(module_path, config, source).run(mod) + + +def _saferepr(obj: object) -> str: + r"""Get a safe repr of an object for assertion error messages. + + The assertion formatting (util.format_explanation()) requires + newlines to be escaped since they are a special character for it. + Normally assertion.util.format_explanation() does this but for a + custom repr it is possible to contain one of the special escape + sequences, especially '\n{' and '\n}' are likely to be present in + JSON reprs. + """ + maxsize = _get_maxsize_for_saferepr(util._config) + return saferepr(obj, maxsize=maxsize).replace("\n", "\\n") + + +def _get_maxsize_for_saferepr(config: Optional[Config]) -> Optional[int]: + """Get `maxsize` configuration for saferepr based on the given config object.""" + verbosity = config.getoption("verbose") if config is not None else 0 + if verbosity >= 2: + return None + if verbosity >= 1: + return DEFAULT_REPR_MAX_SIZE * 10 + return DEFAULT_REPR_MAX_SIZE + + +def _format_assertmsg(obj: object) -> str: + r"""Format the custom assertion message given. + + For strings this simply replaces newlines with '\n~' so that + util.format_explanation() will preserve them instead of escaping + newlines. For other objects saferepr() is used first. + """ + # reprlib appears to have a bug which means that if a string + # contains a newline it gets escaped, however if an object has a + # .__repr__() which contains newlines it does not get escaped. + # However in either case we want to preserve the newline. 
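+ # E.g. a plain string message "first\nsecond" becomes
+ # "first\n~second" below, which format_explanation() later renders
+ # as a real line break instead of a literal "\n".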
+ replaces = [("\n", "\n~"), ("%", "%%")] + if not isinstance(obj, str): + obj = saferepr(obj) + replaces.append(("\\n", "\n~")) + + for r1, r2 in replaces: + obj = obj.replace(r1, r2) + + return obj + + +def _should_repr_global_name(obj: object) -> bool: + if callable(obj): + return False + + try: + return not hasattr(obj, "__name__") + except Exception: + return True + + +def _format_boolop(explanations: Iterable[str], is_or: bool) -> str: + explanation = "(" + (is_or and " or " or " and ").join(explanations) + ")" + return explanation.replace("%", "%%") + + +def _call_reprcompare( + ops: Sequence[str], + results: Sequence[bool], + expls: Sequence[str], + each_obj: Sequence[object], +) -> str: + for i, res, expl in zip(range(len(ops)), results, expls): + try: + done = not res + except Exception: + done = True + if done: + break + if util._reprcompare is not None: + custom = util._reprcompare(ops[i], each_obj[i], each_obj[i + 1]) + if custom is not None: + return custom + return expl + + +def _call_assertion_pass(lineno: int, orig: str, expl: str) -> None: + if util._assertion_pass is not None: + util._assertion_pass(lineno, orig, expl) + + +def _check_if_assertion_pass_impl() -> bool: + """Check if any plugins implement the pytest_assertion_pass hook + in order not to generate explanation unnecessarily (might be expensive).""" + return True if util._assertion_pass else False + + +UNARY_MAP = {ast.Not: "not %s", ast.Invert: "~%s", ast.USub: "-%s", ast.UAdd: "+%s"} + +BINOP_MAP = { + ast.BitOr: "|", + ast.BitXor: "^", + ast.BitAnd: "&", + ast.LShift: "<<", + ast.RShift: ">>", + ast.Add: "+", + ast.Sub: "-", + ast.Mult: "*", + ast.Div: "/", + ast.FloorDiv: "//", + ast.Mod: "%%", # escaped for string formatting + ast.Eq: "==", + ast.NotEq: "!=", + ast.Lt: "<", + ast.LtE: "<=", + ast.Gt: ">", + ast.GtE: ">=", + ast.Pow: "**", + ast.Is: "is", + ast.IsNot: "is not", + ast.In: "in", + ast.NotIn: "not in", + ast.MatMult: "@", +} + + +def traverse_node(node: ast.AST) -> Iterator[ast.AST]: + """Recursively yield node and all its children in depth-first order.""" + yield node + for child in ast.iter_child_nodes(node): + yield from traverse_node(child) + + +@functools.lru_cache(maxsize=1) +def _get_assertion_exprs(src: bytes) -> Dict[int, str]: + """Return a mapping from {lineno: "assertion test expression"}.""" + ret: Dict[int, str] = {} + + depth = 0 + lines: List[str] = [] + assert_lineno: Optional[int] = None + seen_lines: Set[int] = set() + + def _write_and_reset() -> None: + nonlocal depth, lines, assert_lineno, seen_lines + assert assert_lineno is not None + ret[assert_lineno] = "".join(lines).rstrip().rstrip("\\") + depth = 0 + lines = [] + assert_lineno = None + seen_lines = set() + + tokens = tokenize.tokenize(io.BytesIO(src).readline) + for tp, source, (lineno, offset), _, line in tokens: + if tp == tokenize.NAME and source == "assert": + assert_lineno = lineno + elif assert_lineno is not None: + # keep track of depth for the assert-message `,` lookup + if tp == tokenize.OP and source in "([{": + depth += 1 + elif tp == tokenize.OP and source in ")]}": + depth -= 1 + + if not lines: + lines.append(line[offset:]) + seen_lines.add(lineno) + # a non-nested comma separates the expression from the message + elif depth == 0 and tp == tokenize.OP and source == ",": + # one line assert with message + if lineno in seen_lines and len(lines) == 1: + offset_in_trimmed = offset + len(lines[-1]) - len(line) + lines[-1] = lines[-1][:offset_in_trimmed] + # multi-line assert with message + elif lineno in 
seen_lines: + lines[-1] = lines[-1][:offset] + # multi line assert with escapd newline before message + else: + lines.append(line[:offset]) + _write_and_reset() + elif tp in {tokenize.NEWLINE, tokenize.ENDMARKER}: + _write_and_reset() + elif lines and lineno not in seen_lines: + lines.append(line) + seen_lines.add(lineno) + + return ret + + +class AssertionRewriter(ast.NodeVisitor): + """Assertion rewriting implementation. + + The main entrypoint is to call .run() with an ast.Module instance, + this will then find all the assert statements and rewrite them to + provide intermediate values and a detailed assertion error. See + http://pybites.blogspot.be/2011/07/behind-scenes-of-pytests-new-assertion.html + for an overview of how this works. + + The entry point here is .run() which will iterate over all the + statements in an ast.Module and for each ast.Assert statement it + finds call .visit() with it. Then .visit_Assert() takes over and + is responsible for creating new ast statements to replace the + original assert statement: it rewrites the test of an assertion + to provide intermediate values and replace it with an if statement + which raises an assertion error with a detailed explanation in + case the expression is false and calls pytest_assertion_pass hook + if expression is true. + + For this .visit_Assert() uses the visitor pattern to visit all the + AST nodes of the ast.Assert.test field, each visit call returning + an AST node and the corresponding explanation string. During this + state is kept in several instance attributes: + + :statements: All the AST statements which will replace the assert + statement. + + :variables: This is populated by .variable() with each variable + used by the statements so that they can all be set to None at + the end of the statements. + + :variable_counter: Counter to create new unique variables needed + by statements. Variables are created using .variable() and + have the form of "@py_assert0". + + :expl_stmts: The AST statements which will be executed to get + data from the assertion. This is the code which will construct + the detailed assertion message that is used in the AssertionError + or for the pytest_assertion_pass hook. + + :explanation_specifiers: A dict filled by .explanation_param() + with %-formatting placeholders and their corresponding + expressions to use in the building of an assertion message. + This is used by .pop_format_context() to build a message. + + :stack: A stack of the explanation_specifiers dicts maintained by + .push_format_context() and .pop_format_context() which allows + to build another %-formatted string while already building one. + + This state is reset on every new assert statement visited and used + by the other visitors. + """ + + def __init__( + self, module_path: Optional[str], config: Optional[Config], source: bytes + ) -> None: + super().__init__() + self.module_path = module_path + self.config = config + if config is not None: + self.enable_assertion_pass_hook = config.getini( + "enable_assertion_pass_hook" + ) + else: + self.enable_assertion_pass_hook = False + self.source = source + + def run(self, mod: ast.Module) -> None: + """Find all assert statements in *mod* and rewrite them.""" + if not mod.body: + # Nothing to do. + return + + # We'll insert some special imports at the top of the module, but after any + # docstrings and __future__ imports, so first figure out where that is. 
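+ # E.g. for a module that opens with a docstring followed by
+ # "from __future__ import annotations", the loop below stops with
+ # pos == 2, so the imports land right after those two statements.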
+ doc = getattr(mod, "docstring", None) + expect_docstring = doc is None + if doc is not None and self.is_rewrite_disabled(doc): + return + pos = 0 + lineno = 1 + for item in mod.body: + if ( + expect_docstring + and isinstance(item, ast.Expr) + and isinstance(item.value, ast.Str) + ): + doc = item.value.s + if self.is_rewrite_disabled(doc): + return + expect_docstring = False + elif ( + isinstance(item, ast.ImportFrom) + and item.level == 0 + and item.module == "__future__" + ): + pass + else: + break + pos += 1 + # Special case: for a decorated function, set the lineno to that of the + # first decorator, not the `def`. Issue #4984. + if isinstance(item, ast.FunctionDef) and item.decorator_list: + lineno = item.decorator_list[0].lineno + else: + lineno = item.lineno + # Now actually insert the special imports. + if sys.version_info >= (3, 10): + aliases = [ + ast.alias("builtins", "@py_builtins", lineno=lineno, col_offset=0), + ast.alias( + "_pytest.assertion.rewrite", + "@pytest_ar", + lineno=lineno, + col_offset=0, + ), + ] + else: + aliases = [ + ast.alias("builtins", "@py_builtins"), + ast.alias("_pytest.assertion.rewrite", "@pytest_ar"), + ] + imports = [ + ast.Import([alias], lineno=lineno, col_offset=0) for alias in aliases + ] + mod.body[pos:pos] = imports + + # Collect asserts. + nodes: List[ast.AST] = [mod] + while nodes: + node = nodes.pop() + for name, field in ast.iter_fields(node): + if isinstance(field, list): + new: List[ast.AST] = [] + for i, child in enumerate(field): + if isinstance(child, ast.Assert): + # Transform assert. + new.extend(self.visit(child)) + else: + new.append(child) + if isinstance(child, ast.AST): + nodes.append(child) + setattr(node, name, new) + elif ( + isinstance(field, ast.AST) + # Don't recurse into expressions as they can't contain + # asserts. + and not isinstance(field, ast.expr) + ): + nodes.append(field) + + @staticmethod + def is_rewrite_disabled(docstring: str) -> bool: + return "PYTEST_DONT_REWRITE" in docstring + + def variable(self) -> str: + """Get a new variable.""" + # Use a character invalid in python identifiers to avoid clashing. + name = "@py_assert" + str(next(self.variable_counter)) + self.variables.append(name) + return name + + def assign(self, expr: ast.expr) -> ast.Name: + """Give *expr* a name.""" + name = self.variable() + self.statements.append(ast.Assign([ast.Name(name, ast.Store())], expr)) + return ast.Name(name, ast.Load()) + + def display(self, expr: ast.expr) -> ast.expr: + """Call saferepr on the expression.""" + return self.helper("_saferepr", expr) + + def helper(self, name: str, *args: ast.expr) -> ast.expr: + """Call a helper in this module.""" + py_name = ast.Name("@pytest_ar", ast.Load()) + attr = ast.Attribute(py_name, name, ast.Load()) + return ast.Call(attr, list(args), []) + + def builtin(self, name: str) -> ast.Attribute: + """Return the builtin called *name*.""" + builtin_name = ast.Name("@py_builtins", ast.Load()) + return ast.Attribute(builtin_name, name, ast.Load()) + + def explanation_param(self, expr: ast.expr) -> str: + """Return a new named %-formatting placeholder for expr. + + This creates a %-formatting placeholder for expr in the + current formatting context, e.g. ``%(py0)s``. The placeholder + and expr are placed in the current format context so that it + can be used on the next call to .pop_format_context(). 
+ """ + specifier = "py" + str(next(self.variable_counter)) + self.explanation_specifiers[specifier] = expr + return "%(" + specifier + ")s" + + def push_format_context(self) -> None: + """Create a new formatting context. + + The format context is used for when an explanation wants to + have a variable value formatted in the assertion message. In + this case the value required can be added using + .explanation_param(). Finally .pop_format_context() is used + to format a string of %-formatted values as added by + .explanation_param(). + """ + self.explanation_specifiers: Dict[str, ast.expr] = {} + self.stack.append(self.explanation_specifiers) + + def pop_format_context(self, expl_expr: ast.expr) -> ast.Name: + """Format the %-formatted string with current format context. + + The expl_expr should be an str ast.expr instance constructed from + the %-placeholders created by .explanation_param(). This will + add the required code to format said string to .expl_stmts and + return the ast.Name instance of the formatted string. + """ + current = self.stack.pop() + if self.stack: + self.explanation_specifiers = self.stack[-1] + keys = [ast.Str(key) for key in current.keys()] + format_dict = ast.Dict(keys, list(current.values())) + form = ast.BinOp(expl_expr, ast.Mod(), format_dict) + name = "@py_format" + str(next(self.variable_counter)) + if self.enable_assertion_pass_hook: + self.format_variables.append(name) + self.expl_stmts.append(ast.Assign([ast.Name(name, ast.Store())], form)) + return ast.Name(name, ast.Load()) + + def generic_visit(self, node: ast.AST) -> Tuple[ast.Name, str]: + """Handle expressions we don't have custom code for.""" + assert isinstance(node, ast.expr) + res = self.assign(node) + return res, self.explanation_param(self.display(res)) + + def visit_Assert(self, assert_: ast.Assert) -> List[ast.stmt]: + """Return the AST statements to replace the ast.Assert instance. + + This rewrites the test of an assertion to provide + intermediate values and replace it with an if statement which + raises an assertion error with a detailed explanation in case + the expression is false. + """ + if isinstance(assert_.test, ast.Tuple) and len(assert_.test.elts) >= 1: + from _pytest.warning_types import PytestAssertRewriteWarning + import warnings + + # TODO: This assert should not be needed. + assert self.module_path is not None + warnings.warn_explicit( + PytestAssertRewriteWarning( + "assertion is always true, perhaps remove parentheses?" + ), + category=None, + filename=self.module_path, + lineno=assert_.lineno, + ) + + self.statements: List[ast.stmt] = [] + self.variables: List[str] = [] + self.variable_counter = itertools.count() + + if self.enable_assertion_pass_hook: + self.format_variables: List[str] = [] + + self.stack: List[Dict[str, ast.expr]] = [] + self.expl_stmts: List[ast.stmt] = [] + self.push_format_context() + # Rewrite assert into a bunch of statements. 
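+ # Simplified sketch (the real output also captures the operands
+ # for the explanation): "assert a == b" becomes roughly
+ # @py_assert2 = a == b
+ # if not @py_assert2:
+ #     raise AssertionError(@pytest_ar._format_explanation(...))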
+ top_condition, explanation = self.visit(assert_.test) + + negation = ast.UnaryOp(ast.Not(), top_condition) + + if self.enable_assertion_pass_hook: # Experimental pytest_assertion_pass hook + msg = self.pop_format_context(ast.Str(explanation)) + + # Failed + if assert_.msg: + assertmsg = self.helper("_format_assertmsg", assert_.msg) + gluestr = "\n>assert " + else: + assertmsg = ast.Str("") + gluestr = "assert " + err_explanation = ast.BinOp(ast.Str(gluestr), ast.Add(), msg) + err_msg = ast.BinOp(assertmsg, ast.Add(), err_explanation) + err_name = ast.Name("AssertionError", ast.Load()) + fmt = self.helper("_format_explanation", err_msg) + exc = ast.Call(err_name, [fmt], []) + raise_ = ast.Raise(exc, None) + statements_fail = [] + statements_fail.extend(self.expl_stmts) + statements_fail.append(raise_) + + # Passed + fmt_pass = self.helper("_format_explanation", msg) + orig = _get_assertion_exprs(self.source)[assert_.lineno] + hook_call_pass = ast.Expr( + self.helper( + "_call_assertion_pass", + ast.Num(assert_.lineno), + ast.Str(orig), + fmt_pass, + ) + ) + # If any hooks implement assert_pass hook + hook_impl_test = ast.If( + self.helper("_check_if_assertion_pass_impl"), + self.expl_stmts + [hook_call_pass], + [], + ) + statements_pass = [hook_impl_test] + + # Test for assertion condition + main_test = ast.If(negation, statements_fail, statements_pass) + self.statements.append(main_test) + if self.format_variables: + variables = [ + ast.Name(name, ast.Store()) for name in self.format_variables + ] + clear_format = ast.Assign(variables, ast.NameConstant(None)) + self.statements.append(clear_format) + + else: # Original assertion rewriting + # Create failure message. + body = self.expl_stmts + self.statements.append(ast.If(negation, body, [])) + if assert_.msg: + assertmsg = self.helper("_format_assertmsg", assert_.msg) + explanation = "\n>assert " + explanation + else: + assertmsg = ast.Str("") + explanation = "assert " + explanation + template = ast.BinOp(assertmsg, ast.Add(), ast.Str(explanation)) + msg = self.pop_format_context(template) + fmt = self.helper("_format_explanation", msg) + err_name = ast.Name("AssertionError", ast.Load()) + exc = ast.Call(err_name, [fmt], []) + raise_ = ast.Raise(exc, None) + + body.append(raise_) + + # Clear temporary variables by setting them to None. + if self.variables: + variables = [ast.Name(name, ast.Store()) for name in self.variables] + clear = ast.Assign(variables, ast.NameConstant(None)) + self.statements.append(clear) + # Fix locations (line numbers/column offsets). + for stmt in self.statements: + for node in traverse_node(stmt): + ast.copy_location(node, assert_) + return self.statements + + def visit_Name(self, name: ast.Name) -> Tuple[ast.Name, str]: + # Display the repr of the name if it's a local variable or + # _should_repr_global_name() thinks it's acceptable. 
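+ # E.g. for "assert x" the failure message shows repr(x) when x is
+ # a local (or an acceptable global), and just the bare name "x"
+ # otherwise.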
+ locs = ast.Call(self.builtin("locals"), [], []) + inlocs = ast.Compare(ast.Str(name.id), [ast.In()], [locs]) + dorepr = self.helper("_should_repr_global_name", name) + test = ast.BoolOp(ast.Or(), [inlocs, dorepr]) + expr = ast.IfExp(test, self.display(name), ast.Str(name.id)) + return name, self.explanation_param(expr) + + def visit_BoolOp(self, boolop: ast.BoolOp) -> Tuple[ast.Name, str]: + res_var = self.variable() + expl_list = self.assign(ast.List([], ast.Load())) + app = ast.Attribute(expl_list, "append", ast.Load()) + is_or = int(isinstance(boolop.op, ast.Or)) + body = save = self.statements + fail_save = self.expl_stmts + levels = len(boolop.values) - 1 + self.push_format_context() + # Process each operand, short-circuiting if needed. + for i, v in enumerate(boolop.values): + if i: + fail_inner: List[ast.stmt] = [] + # cond is set in a prior loop iteration below + self.expl_stmts.append(ast.If(cond, fail_inner, [])) # noqa + self.expl_stmts = fail_inner + self.push_format_context() + res, expl = self.visit(v) + body.append(ast.Assign([ast.Name(res_var, ast.Store())], res)) + expl_format = self.pop_format_context(ast.Str(expl)) + call = ast.Call(app, [expl_format], []) + self.expl_stmts.append(ast.Expr(call)) + if i < levels: + cond: ast.expr = res + if is_or: + cond = ast.UnaryOp(ast.Not(), cond) + inner: List[ast.stmt] = [] + self.statements.append(ast.If(cond, inner, [])) + self.statements = body = inner + self.statements = save + self.expl_stmts = fail_save + expl_template = self.helper("_format_boolop", expl_list, ast.Num(is_or)) + expl = self.pop_format_context(expl_template) + return ast.Name(res_var, ast.Load()), self.explanation_param(expl) + + def visit_UnaryOp(self, unary: ast.UnaryOp) -> Tuple[ast.Name, str]: + pattern = UNARY_MAP[unary.op.__class__] + operand_res, operand_expl = self.visit(unary.operand) + res = self.assign(ast.UnaryOp(unary.op, operand_res)) + return res, pattern % (operand_expl,) + + def visit_BinOp(self, binop: ast.BinOp) -> Tuple[ast.Name, str]: + symbol = BINOP_MAP[binop.op.__class__] + left_expr, left_expl = self.visit(binop.left) + right_expr, right_expl = self.visit(binop.right) + explanation = f"({left_expl} {symbol} {right_expl})" + res = self.assign(ast.BinOp(left_expr, binop.op, right_expr)) + return res, explanation + + def visit_Call(self, call: ast.Call) -> Tuple[ast.Name, str]: + new_func, func_expl = self.visit(call.func) + arg_expls = [] + new_args = [] + new_kwargs = [] + for arg in call.args: + res, expl = self.visit(arg) + arg_expls.append(expl) + new_args.append(res) + for keyword in call.keywords: + res, expl = self.visit(keyword.value) + new_kwargs.append(ast.keyword(keyword.arg, res)) + if keyword.arg: + arg_expls.append(keyword.arg + "=" + expl) + else: # **args have `arg` keywords with an .arg of None + arg_expls.append("**" + expl) + + expl = "{}({})".format(func_expl, ", ".join(arg_expls)) + new_call = ast.Call(new_func, new_args, new_kwargs) + res = self.assign(new_call) + res_expl = self.explanation_param(self.display(res)) + outer_expl = f"{res_expl}\n{{{res_expl} = {expl}\n}}" + return res, outer_expl + + def visit_Starred(self, starred: ast.Starred) -> Tuple[ast.Starred, str]: + # A Starred node can appear in a function call. 
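+ # E.g. in "assert f(*args)" the explanation for args is rendered
+ # with a leading "*" inside the call's argument list.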
+ res, expl = self.visit(starred.value) + new_starred = ast.Starred(res, starred.ctx) + return new_starred, "*" + expl + + def visit_Attribute(self, attr: ast.Attribute) -> Tuple[ast.Name, str]: + if not isinstance(attr.ctx, ast.Load): + return self.generic_visit(attr) + value, value_expl = self.visit(attr.value) + res = self.assign(ast.Attribute(value, attr.attr, ast.Load())) + res_expl = self.explanation_param(self.display(res)) + pat = "%s\n{%s = %s.%s\n}" + expl = pat % (res_expl, res_expl, value_expl, attr.attr) + return res, expl + + def visit_Compare(self, comp: ast.Compare) -> Tuple[ast.expr, str]: + self.push_format_context() + left_res, left_expl = self.visit(comp.left) + if isinstance(comp.left, (ast.Compare, ast.BoolOp)): + left_expl = f"({left_expl})" + res_variables = [self.variable() for i in range(len(comp.ops))] + load_names = [ast.Name(v, ast.Load()) for v in res_variables] + store_names = [ast.Name(v, ast.Store()) for v in res_variables] + it = zip(range(len(comp.ops)), comp.ops, comp.comparators) + expls = [] + syms = [] + results = [left_res] + for i, op, next_operand in it: + next_res, next_expl = self.visit(next_operand) + if isinstance(next_operand, (ast.Compare, ast.BoolOp)): + next_expl = f"({next_expl})" + results.append(next_res) + sym = BINOP_MAP[op.__class__] + syms.append(ast.Str(sym)) + expl = f"{left_expl} {sym} {next_expl}" + expls.append(ast.Str(expl)) + res_expr = ast.Compare(left_res, [op], [next_res]) + self.statements.append(ast.Assign([store_names[i]], res_expr)) + left_res, left_expl = next_res, next_expl + # Use pytest.assertion.util._reprcompare if that's available. + expl_call = self.helper( + "_call_reprcompare", + ast.Tuple(syms, ast.Load()), + ast.Tuple(load_names, ast.Load()), + ast.Tuple(expls, ast.Load()), + ast.Tuple(results, ast.Load()), + ) + if len(comp.ops) > 1: + res: ast.expr = ast.BoolOp(ast.And(), load_names) + else: + res = load_names[0] + return res, self.explanation_param(self.pop_format_context(expl_call)) + + +def try_makedirs(cache_dir: Path) -> bool: + """Attempt to create the given directory and sub-directories exist. + + Returns True if successful or if it already exists. + """ + try: + os.makedirs(cache_dir, exist_ok=True) + except (FileNotFoundError, NotADirectoryError, FileExistsError): + # One of the path components was not a directory: + # - we're in a zip file + # - it is a file + return False + except PermissionError: + return False + except OSError as e: + # as of now, EROFS doesn't have an equivalent OSError-subclass + if e.errno == errno.EROFS: + return False + raise + return True + + +def get_cache_dir(file_path: Path) -> Path: + """Return the cache directory to write .pyc files for the given .py file path.""" + if sys.version_info >= (3, 8) and sys.pycache_prefix: + # given: + # prefix = '/tmp/pycs' + # path = '/home/user/proj/test_app.py' + # we want: + # '/tmp/pycs/home/user/proj' + return Path(sys.pycache_prefix) / Path(*file_path.parts[1:-1]) + else: + # classic pycache directory + return file_path.parent / "__pycache__" diff --git a/venv/lib/python3.10/site-packages/_pytest/assertion/truncate.py b/venv/lib/python3.10/site-packages/_pytest/assertion/truncate.py new file mode 100644 index 0000000..ce148dc --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/assertion/truncate.py @@ -0,0 +1,94 @@ +"""Utilities for truncating assertion output. + +Current default behaviour is to truncate assertion explanations at +~8 terminal lines, unless running in "-vv" mode or running on CI. 
+""" +from typing import List +from typing import Optional + +from _pytest.assertion import util +from _pytest.nodes import Item + + +DEFAULT_MAX_LINES = 8 +DEFAULT_MAX_CHARS = 8 * 80 +USAGE_MSG = "use '-vv' to show" + + +def truncate_if_required( + explanation: List[str], item: Item, max_length: Optional[int] = None +) -> List[str]: + """Truncate this assertion explanation if the given test item is eligible.""" + if _should_truncate_item(item): + return _truncate_explanation(explanation) + return explanation + + +def _should_truncate_item(item: Item) -> bool: + """Whether or not this test item is eligible for truncation.""" + verbose = item.config.option.verbose + return verbose < 2 and not util.running_on_ci() + + +def _truncate_explanation( + input_lines: List[str], + max_lines: Optional[int] = None, + max_chars: Optional[int] = None, +) -> List[str]: + """Truncate given list of strings that makes up the assertion explanation. + + Truncates to either 8 lines, or 640 characters - whichever the input reaches + first. The remaining lines will be replaced by a usage message. + """ + + if max_lines is None: + max_lines = DEFAULT_MAX_LINES + if max_chars is None: + max_chars = DEFAULT_MAX_CHARS + + # Check if truncation required + input_char_count = len("".join(input_lines)) + if len(input_lines) <= max_lines and input_char_count <= max_chars: + return input_lines + + # Truncate first to max_lines, and then truncate to max_chars if max_chars + # is exceeded. + truncated_explanation = input_lines[:max_lines] + truncated_explanation = _truncate_by_char_count(truncated_explanation, max_chars) + + # Add ellipsis to final line + truncated_explanation[-1] = truncated_explanation[-1] + "..." + + # Append useful message to explanation + truncated_line_count = len(input_lines) - len(truncated_explanation) + truncated_line_count += 1 # Account for the part-truncated final line + msg = "...Full output truncated" + if truncated_line_count == 1: + msg += f" ({truncated_line_count} line hidden)" + else: + msg += f" ({truncated_line_count} lines hidden)" + msg += f", {USAGE_MSG}" + truncated_explanation.extend(["", str(msg)]) + return truncated_explanation + + +def _truncate_by_char_count(input_lines: List[str], max_chars: int) -> List[str]: + # Check if truncation required + if len("".join(input_lines)) <= max_chars: + return input_lines + + # Find point at which input length exceeds total allowed length + iterated_char_count = 0 + for iterated_index, input_line in enumerate(input_lines): + if iterated_char_count + len(input_line) > max_chars: + break + iterated_char_count += len(input_line) + + # Create truncated explanation with modified final line + truncated_result = input_lines[:iterated_index] + final_line = input_lines[iterated_index] + if final_line: + final_line_truncate_point = max_chars - iterated_char_count + final_line = final_line[:final_line_truncate_point] + truncated_result.append(final_line) + return truncated_result diff --git a/venv/lib/python3.10/site-packages/_pytest/assertion/util.py b/venv/lib/python3.10/site-packages/_pytest/assertion/util.py new file mode 100644 index 0000000..b1f1687 --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/assertion/util.py @@ -0,0 +1,509 @@ +"""Utilities for assertion debugging.""" +import collections.abc +import os +import pprint +from typing import AbstractSet +from typing import Any +from typing import Callable +from typing import Iterable +from typing import List +from typing import Mapping +from typing import Optional +from typing 
import Sequence + +import _pytest._code +from _pytest import outcomes +from _pytest._io.saferepr import _pformat_dispatch +from _pytest._io.saferepr import safeformat +from _pytest._io.saferepr import saferepr +from _pytest.config import Config + +# The _reprcompare attribute on the util module is used by the new assertion +# interpretation code and assertion rewriter to detect this plugin was +# loaded and in turn call the hooks defined here as part of the +# DebugInterpreter. +_reprcompare: Optional[Callable[[str, object, object], Optional[str]]] = None + +# Works similarly as _reprcompare attribute. Is populated with the hook call +# when pytest_runtest_setup is called. +_assertion_pass: Optional[Callable[[int, str, str], None]] = None + +# Config object which is assigned during pytest_runtest_protocol. +_config: Optional[Config] = None + + +def format_explanation(explanation: str) -> str: + r"""Format an explanation. + + Normally all embedded newlines are escaped, however there are + three exceptions: \n{, \n} and \n~. The first two are intended + cover nested explanations, see function and attribute explanations + for examples (.visit_Call(), visit_Attribute()). The last one is + for when one explanation needs to span multiple lines, e.g. when + displaying diffs. + """ + lines = _split_explanation(explanation) + result = _format_lines(lines) + return "\n".join(result) + + +def _split_explanation(explanation: str) -> List[str]: + r"""Return a list of individual lines in the explanation. + + This will return a list of lines split on '\n{', '\n}' and '\n~'. + Any other newlines will be escaped and appear in the line as the + literal '\n' characters. + """ + raw_lines = (explanation or "").split("\n") + lines = [raw_lines[0]] + for values in raw_lines[1:]: + if values and values[0] in ["{", "}", "~", ">"]: + lines.append(values) + else: + lines[-1] += "\\n" + values + return lines + + +def _format_lines(lines: Sequence[str]) -> List[str]: + """Format the individual lines. + + This will replace the '{', '}' and '~' characters of our mini formatting + language with the proper 'where ...', 'and ...' and ' + ...' text, taking + care of indentation along the way. + + Return a list of formatted lines. 
+ """
+ result = list(lines[:1])
+ stack = [0]
+ stackcnt = [0]
+ for line in lines[1:]:
+ if line.startswith("{"):
+ if stackcnt[-1]:
+ s = "and "
+ else:
+ s = "where "
+ stack.append(len(result))
+ stackcnt[-1] += 1
+ stackcnt.append(0)
+ result.append(" +" + " " * (len(stack) - 1) + s + line[1:])
+ elif line.startswith("}"):
+ stack.pop()
+ stackcnt.pop()
+ result[stack[-1]] += line[1:]
+ else:
+ assert line[0] in ["~", ">"]
+ stack[-1] += 1
+ indent = len(stack) if line.startswith("~") else len(stack) - 1
+ result.append(" " * indent + line[1:])
+ assert len(stack) == 1
+ return result
+
+
+def issequence(x: Any) -> bool:
+ return isinstance(x, collections.abc.Sequence) and not isinstance(x, str)
+
+
+def istext(x: Any) -> bool:
+ return isinstance(x, str)
+
+
+def isdict(x: Any) -> bool:
+ return isinstance(x, dict)
+
+
+def isset(x: Any) -> bool:
+ return isinstance(x, (set, frozenset))
+
+
+def isnamedtuple(obj: Any) -> bool:
+ return isinstance(obj, tuple) and getattr(obj, "_fields", None) is not None
+
+
+def isdatacls(obj: Any) -> bool:
+ return getattr(obj, "__dataclass_fields__", None) is not None
+
+
+def isattrs(obj: Any) -> bool:
+ return getattr(obj, "__attrs_attrs__", None) is not None
+
+
+def isiterable(obj: Any) -> bool:
+ try:
+ iter(obj)
+ return not istext(obj)
+ except TypeError:
+ return False
+
+
+def has_default_eq(
+ obj: object,
+) -> bool:
+ """Check if an instance of an object contains the default eq
+
+ First, we check if the object's __eq__ attribute has __code__,
+ if so, we check the equality of the method code filename (__code__.co_filename)
+ to the default one generated by the dataclass and attr module
+ for dataclasses the default co_filename is "<string>", for attrs classes the __eq__ should contain "attrs generated eq"
+ """
+ # inspired from https://github.com/willmcgugan/rich/blob/07d51ffc1aee6f16bd2e5a25b4e82850fb9ed778/rich/pretty.py#L68
+ if hasattr(obj.__eq__, "__code__") and hasattr(obj.__eq__.__code__, "co_filename"):
+ code_filename = obj.__eq__.__code__.co_filename
+
+ if isattrs(obj):
+ return "attrs generated eq" in code_filename
+
+ return code_filename == "<string>" # data class
+ return True
+
+
+def assertrepr_compare(config, op: str, left: Any, right: Any) -> Optional[List[str]]:
+ """Return specialised explanations for some operators/operands."""
+ verbose = config.getoption("verbose")
+ if verbose > 1:
+ left_repr = safeformat(left)
+ right_repr = safeformat(right)
+ else:
+ # XXX: "15 chars indentation" is wrong
+ # ("E AssertionError: assert "); should use term width.
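+ # E.g. for op == "==" this budget works out to
+ # (80 - 15 - 2 - 2) // 2 == 30 characters per side.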
+ maxsize = ( + 80 - 15 - len(op) - 2 + ) // 2 # 15 chars indentation, 1 space around op + left_repr = saferepr(left, maxsize=maxsize) + right_repr = saferepr(right, maxsize=maxsize) + + summary = f"{left_repr} {op} {right_repr}" + + explanation = None + try: + if op == "==": + explanation = _compare_eq_any(left, right, verbose) + elif op == "not in": + if istext(left) and istext(right): + explanation = _notin_text(left, right, verbose) + except outcomes.Exit: + raise + except Exception: + explanation = [ + "(pytest_assertion plugin: representation of details failed: {}.".format( + _pytest._code.ExceptionInfo.from_current()._getreprcrash() + ), + " Probably an object has a faulty __repr__.)", + ] + + if not explanation: + return None + + return [summary] + explanation + + +def _compare_eq_any(left: Any, right: Any, verbose: int = 0) -> List[str]: + explanation = [] + if istext(left) and istext(right): + explanation = _diff_text(left, right, verbose) + else: + from _pytest.python_api import ApproxBase + + if isinstance(left, ApproxBase) or isinstance(right, ApproxBase): + # Although the common order should be obtained == expected, this ensures both ways + approx_side = left if isinstance(left, ApproxBase) else right + other_side = right if isinstance(left, ApproxBase) else left + + explanation = approx_side._repr_compare(other_side) + elif type(left) == type(right) and ( + isdatacls(left) or isattrs(left) or isnamedtuple(left) + ): + # Note: unlike dataclasses/attrs, namedtuples compare only the + # field values, not the type or field names. But this branch + # intentionally only handles the same-type case, which was often + # used in older code bases before dataclasses/attrs were available. + explanation = _compare_eq_cls(left, right, verbose) + elif issequence(left) and issequence(right): + explanation = _compare_eq_sequence(left, right, verbose) + elif isset(left) and isset(right): + explanation = _compare_eq_set(left, right, verbose) + elif isdict(left) and isdict(right): + explanation = _compare_eq_dict(left, right, verbose) + + if isiterable(left) and isiterable(right): + expl = _compare_eq_iterable(left, right, verbose) + explanation.extend(expl) + + return explanation + + +def _diff_text(left: str, right: str, verbose: int = 0) -> List[str]: + """Return the explanation for the diff between text. + + Unless --verbose is used this will skip leading and trailing + characters which are identical to keep the diff minimal. 
+ """ + from difflib import ndiff + + explanation: List[str] = [] + + if verbose < 1: + i = 0 # just in case left or right has zero length + for i in range(min(len(left), len(right))): + if left[i] != right[i]: + break + if i > 42: + i -= 10 # Provide some context + explanation = [ + "Skipping %s identical leading characters in diff, use -v to show" % i + ] + left = left[i:] + right = right[i:] + if len(left) == len(right): + for i in range(len(left)): + if left[-i] != right[-i]: + break + if i > 42: + i -= 10 # Provide some context + explanation += [ + "Skipping {} identical trailing " + "characters in diff, use -v to show".format(i) + ] + left = left[:-i] + right = right[:-i] + keepends = True + if left.isspace() or right.isspace(): + left = repr(str(left)) + right = repr(str(right)) + explanation += ["Strings contain only whitespace, escaping them using repr()"] + # "right" is the expected base against which we compare "left", + # see https://github.com/pytest-dev/pytest/issues/3333 + explanation += [ + line.strip("\n") + for line in ndiff(right.splitlines(keepends), left.splitlines(keepends)) + ] + return explanation + + +def _surrounding_parens_on_own_lines(lines: List[str]) -> None: + """Move opening/closing parenthesis/bracket to own lines.""" + opening = lines[0][:1] + if opening in ["(", "[", "{"]: + lines[0] = " " + lines[0][1:] + lines[:] = [opening] + lines + closing = lines[-1][-1:] + if closing in [")", "]", "}"]: + lines[-1] = lines[-1][:-1] + "," + lines[:] = lines + [closing] + + +def _compare_eq_iterable( + left: Iterable[Any], right: Iterable[Any], verbose: int = 0 +) -> List[str]: + if verbose <= 0 and not running_on_ci(): + return ["Use -v to get more diff"] + # dynamic import to speedup pytest + import difflib + + left_formatting = pprint.pformat(left).splitlines() + right_formatting = pprint.pformat(right).splitlines() + + # Re-format for different output lengths. 
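+ # If pprint folded the two sides onto different line counts,
+ # re-render both with the same dispatcher so the ndiff below
+ # compares like with like.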
+ lines_left = len(left_formatting) + lines_right = len(right_formatting) + if lines_left != lines_right: + left_formatting = _pformat_dispatch(left).splitlines() + right_formatting = _pformat_dispatch(right).splitlines() + + if lines_left > 1 or lines_right > 1: + _surrounding_parens_on_own_lines(left_formatting) + _surrounding_parens_on_own_lines(right_formatting) + + explanation = ["Full diff:"] + # "right" is the expected base against which we compare "left", + # see https://github.com/pytest-dev/pytest/issues/3333 + explanation.extend( + line.rstrip() for line in difflib.ndiff(right_formatting, left_formatting) + ) + return explanation + + +def _compare_eq_sequence( + left: Sequence[Any], right: Sequence[Any], verbose: int = 0 +) -> List[str]: + comparing_bytes = isinstance(left, bytes) and isinstance(right, bytes) + explanation: List[str] = [] + len_left = len(left) + len_right = len(right) + for i in range(min(len_left, len_right)): + if left[i] != right[i]: + if comparing_bytes: + # when comparing bytes, we want to see their ascii representation + # instead of their numeric values (#5260) + # using a slice gives us the ascii representation: + # >>> s = b'foo' + # >>> s[0] + # 102 + # >>> s[0:1] + # b'f' + left_value = left[i : i + 1] + right_value = right[i : i + 1] + else: + left_value = left[i] + right_value = right[i] + + explanation += [f"At index {i} diff: {left_value!r} != {right_value!r}"] + break + + if comparing_bytes: + # when comparing bytes, it doesn't help to show the "sides contain one or more + # items" longer explanation, so skip it + + return explanation + + len_diff = len_left - len_right + if len_diff: + if len_diff > 0: + dir_with_more = "Left" + extra = saferepr(left[len_right]) + else: + len_diff = 0 - len_diff + dir_with_more = "Right" + extra = saferepr(right[len_left]) + + if len_diff == 1: + explanation += [f"{dir_with_more} contains one more item: {extra}"] + else: + explanation += [ + "%s contains %d more items, first extra item: %s" + % (dir_with_more, len_diff, extra) + ] + return explanation + + +def _compare_eq_set( + left: AbstractSet[Any], right: AbstractSet[Any], verbose: int = 0 +) -> List[str]: + explanation = [] + diff_left = left - right + diff_right = right - left + if diff_left: + explanation.append("Extra items in the left set:") + for item in diff_left: + explanation.append(saferepr(item)) + if diff_right: + explanation.append("Extra items in the right set:") + for item in diff_right: + explanation.append(saferepr(item)) + return explanation + + +def _compare_eq_dict( + left: Mapping[Any, Any], right: Mapping[Any, Any], verbose: int = 0 +) -> List[str]: + explanation: List[str] = [] + set_left = set(left) + set_right = set(right) + common = set_left.intersection(set_right) + same = {k: left[k] for k in common if left[k] == right[k]} + if same and verbose < 2: + explanation += ["Omitting %s identical items, use -vv to show" % len(same)] + elif same: + explanation += ["Common items:"] + explanation += pprint.pformat(same).splitlines() + diff = {k for k in common if left[k] != right[k]} + if diff: + explanation += ["Differing items:"] + for k in diff: + explanation += [saferepr({k: left[k]}) + " != " + saferepr({k: right[k]})] + extra_left = set_left - set_right + len_extra_left = len(extra_left) + if len_extra_left: + explanation.append( + "Left contains %d more item%s:" + % (len_extra_left, "" if len_extra_left == 1 else "s") + ) + explanation.extend( + pprint.pformat({k: left[k] for k in extra_left}).splitlines() + ) + extra_right = 
set_right - set_left + len_extra_right = len(extra_right) + if len_extra_right: + explanation.append( + "Right contains %d more item%s:" + % (len_extra_right, "" if len_extra_right == 1 else "s") + ) + explanation.extend( + pprint.pformat({k: right[k] for k in extra_right}).splitlines() + ) + return explanation + + +def _compare_eq_cls(left: Any, right: Any, verbose: int) -> List[str]: + if not has_default_eq(left): + return [] + if isdatacls(left): + import dataclasses + + all_fields = dataclasses.fields(left) + fields_to_check = [info.name for info in all_fields if info.compare] + elif isattrs(left): + all_fields = left.__attrs_attrs__ + fields_to_check = [field.name for field in all_fields if getattr(field, "eq")] + elif isnamedtuple(left): + fields_to_check = left._fields + else: + assert False + + indent = " " + same = [] + diff = [] + for field in fields_to_check: + if getattr(left, field) == getattr(right, field): + same.append(field) + else: + diff.append(field) + + explanation = [] + if same or diff: + explanation += [""] + if same and verbose < 2: + explanation.append("Omitting %s identical items, use -vv to show" % len(same)) + elif same: + explanation += ["Matching attributes:"] + explanation += pprint.pformat(same).splitlines() + if diff: + explanation += ["Differing attributes:"] + explanation += pprint.pformat(diff).splitlines() + for field in diff: + field_left = getattr(left, field) + field_right = getattr(right, field) + explanation += [ + "", + "Drill down into differing attribute %s:" % field, + ("%s%s: %r != %r") % (indent, field, field_left, field_right), + ] + explanation += [ + indent + line + for line in _compare_eq_any(field_left, field_right, verbose) + ] + return explanation + + +def _notin_text(term: str, text: str, verbose: int = 0) -> List[str]: + index = text.find(term) + head = text[:index] + tail = text[index + len(term) :] + correct_text = head + tail + diff = _diff_text(text, correct_text, verbose) + newdiff = ["%s is contained here:" % saferepr(term, maxsize=42)] + for line in diff: + if line.startswith("Skipping"): + continue + if line.startswith("- "): + continue + if line.startswith("+ "): + newdiff.append(" " + line[2:]) + else: + newdiff.append(line) + return newdiff + + +def running_on_ci() -> bool: + """Check if we're currently running on a CI system.""" + env_vars = ["CI", "BUILD_NUMBER"] + return any(var in os.environ for var in env_vars) diff --git a/venv/lib/python3.10/site-packages/_pytest/cacheprovider.py b/venv/lib/python3.10/site-packages/_pytest/cacheprovider.py new file mode 100644 index 0000000..681d02b --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/cacheprovider.py @@ -0,0 +1,580 @@ +"""Implementation of the cache provider.""" +# This plugin was not named "cache" to avoid conflicts with the external +# pytest-cache version. 
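+# Illustrative use of the `cache` fixture this plugin provides
+# (the key and helper below are hypothetical):
+#
+# def test_expensive(cache):
+#     value = cache.get("example/answer", None)
+#     if value is None:
+#         value = compute_answer()  # hypothetical expensive call
+#         cache.set("example/answer", value)
+#     assert value == 42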
+import json +import os +from pathlib import Path +from typing import Dict +from typing import Generator +from typing import Iterable +from typing import List +from typing import Optional +from typing import Set +from typing import Union + +import attr + +from .pathlib import resolve_from_str +from .pathlib import rm_rf +from .reports import CollectReport +from _pytest import nodes +from _pytest._io import TerminalWriter +from _pytest.compat import final +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config import hookimpl +from _pytest.config.argparsing import Parser +from _pytest.deprecated import check_ispytest +from _pytest.fixtures import fixture +from _pytest.fixtures import FixtureRequest +from _pytest.main import Session +from _pytest.python import Module +from _pytest.python import Package +from _pytest.reports import TestReport + + +README_CONTENT = """\ +# pytest cache directory # + +This directory contains data from the pytest's cache plugin, +which provides the `--lf` and `--ff` options, as well as the `cache` fixture. + +**Do not** commit this to version control. + +See [the docs](https://docs.pytest.org/en/stable/how-to/cache.html) for more information. +""" + +CACHEDIR_TAG_CONTENT = b"""\ +Signature: 8a477f597d28d172789f06886806bc55 +# This file is a cache directory tag created by pytest. +# For information about cache directory tags, see: +# https://bford.info/cachedir/spec.html +""" + + +@final +@attr.s(init=False, auto_attribs=True) +class Cache: + _cachedir: Path = attr.ib(repr=False) + _config: Config = attr.ib(repr=False) + + # Sub-directory under cache-dir for directories created by `mkdir()`. + _CACHE_PREFIX_DIRS = "d" + + # Sub-directory under cache-dir for values created by `set()`. + _CACHE_PREFIX_VALUES = "v" + + def __init__( + self, cachedir: Path, config: Config, *, _ispytest: bool = False + ) -> None: + check_ispytest(_ispytest) + self._cachedir = cachedir + self._config = config + + @classmethod + def for_config(cls, config: Config, *, _ispytest: bool = False) -> "Cache": + """Create the Cache instance for a Config. + + :meta private: + """ + check_ispytest(_ispytest) + cachedir = cls.cache_dir_from_config(config, _ispytest=True) + if config.getoption("cacheclear") and cachedir.is_dir(): + cls.clear_cache(cachedir, _ispytest=True) + return cls(cachedir, config, _ispytest=True) + + @classmethod + def clear_cache(cls, cachedir: Path, _ispytest: bool = False) -> None: + """Clear the sub-directories used to hold cached directories and values. + + :meta private: + """ + check_ispytest(_ispytest) + for prefix in (cls._CACHE_PREFIX_DIRS, cls._CACHE_PREFIX_VALUES): + d = cachedir / prefix + if d.is_dir(): + rm_rf(d) + + @staticmethod + def cache_dir_from_config(config: Config, *, _ispytest: bool = False) -> Path: + """Get the path to the cache directory for a Config. + + :meta private: + """ + check_ispytest(_ispytest) + return resolve_from_str(config.getini("cache_dir"), config.rootpath) + + def warn(self, fmt: str, *, _ispytest: bool = False, **args: object) -> None: + """Issue a cache warning. + + :meta private: + """ + check_ispytest(_ispytest) + import warnings + from _pytest.warning_types import PytestCacheWarning + + warnings.warn( + PytestCacheWarning(fmt.format(**args) if args else fmt), + self._config.hook, + stacklevel=3, + ) + + def mkdir(self, name: str) -> Path: + """Return a directory path object with the given name. + + If the directory does not yet exist, it will be created. 
You can use + it to manage files to e.g. store/retrieve database dumps across test + sessions. + + .. versionadded:: 7.0 + + :param name: + Must be a string not containing a ``/`` separator. + Make sure the name contains your plugin or application + identifiers to prevent clashes with other cache users. + """ + path = Path(name) + if len(path.parts) > 1: + raise ValueError("name is not allowed to contain path separators") + res = self._cachedir.joinpath(self._CACHE_PREFIX_DIRS, path) + res.mkdir(exist_ok=True, parents=True) + return res + + def _getvaluepath(self, key: str) -> Path: + return self._cachedir.joinpath(self._CACHE_PREFIX_VALUES, Path(key)) + + def get(self, key: str, default): + """Return the cached value for the given key. + + If no value was yet cached or the value cannot be read, the specified + default is returned. + + :param key: + Must be a ``/`` separated value. Usually the first + name is the name of your plugin or your application. + :param default: + The value to return in case of a cache-miss or invalid cache value. + """ + path = self._getvaluepath(key) + try: + with path.open("r") as f: + return json.load(f) + except (ValueError, OSError): + return default + + def set(self, key: str, value: object) -> None: + """Save value for the given key. + + :param key: + Must be a ``/`` separated value. Usually the first + name is the name of your plugin or your application. + :param value: + Must be of any combination of basic python types, + including nested types like lists of dictionaries. + """ + path = self._getvaluepath(key) + try: + if path.parent.is_dir(): + cache_dir_exists_already = True + else: + cache_dir_exists_already = self._cachedir.exists() + path.parent.mkdir(exist_ok=True, parents=True) + except OSError: + self.warn("could not create cache path {path}", path=path, _ispytest=True) + return + if not cache_dir_exists_already: + self._ensure_supporting_files() + data = json.dumps(value, indent=2) + try: + f = path.open("w") + except OSError: + self.warn("cache could not write path {path}", path=path, _ispytest=True) + else: + with f: + f.write(data) + + def _ensure_supporting_files(self) -> None: + """Create supporting files in the cache dir that are not really part of the cache.""" + readme_path = self._cachedir / "README.md" + readme_path.write_text(README_CONTENT) + + gitignore_path = self._cachedir.joinpath(".gitignore") + msg = "# Created by pytest automatically.\n*\n" + gitignore_path.write_text(msg, encoding="UTF-8") + + cachedir_tag_path = self._cachedir.joinpath("CACHEDIR.TAG") + cachedir_tag_path.write_bytes(CACHEDIR_TAG_CONTENT) + + +class LFPluginCollWrapper: + def __init__(self, lfplugin: "LFPlugin") -> None: + self.lfplugin = lfplugin + self._collected_at_least_one_failure = False + + @hookimpl(hookwrapper=True) + def pytest_make_collect_report(self, collector: nodes.Collector): + if isinstance(collector, Session): + out = yield + res: CollectReport = out.get_result() + + # Sort any lf-paths to the beginning. + lf_paths = self.lfplugin._last_failed_paths + + res.result = sorted( + res.result, + # use stable sort to priorize last failed + key=lambda x: x.path in lf_paths, + reverse=True, + ) + return + + elif isinstance(collector, Module): + if collector.path in self.lfplugin._last_failed_paths: + out = yield + res = out.get_result() + result = res.result + lastfailed = self.lfplugin.lastfailed + + # Only filter with known failures. 
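+ # i.e. let modules pass through untouched until the first module
+ # that actually contains a recorded failure is seen; only then
+ # start skipping unrelated files via LFPluginCollSkipfiles.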
+ if not self._collected_at_least_one_failure: + if not any(x.nodeid in lastfailed for x in result): + return + self.lfplugin.config.pluginmanager.register( + LFPluginCollSkipfiles(self.lfplugin), "lfplugin-collskip" + ) + self._collected_at_least_one_failure = True + + session = collector.session + result[:] = [ + x + for x in result + if x.nodeid in lastfailed + # Include any passed arguments (not trivial to filter). + or session.isinitpath(x.path) + # Keep all sub-collectors. + or isinstance(x, nodes.Collector) + ] + return + yield + + +class LFPluginCollSkipfiles: + def __init__(self, lfplugin: "LFPlugin") -> None: + self.lfplugin = lfplugin + + @hookimpl + def pytest_make_collect_report( + self, collector: nodes.Collector + ) -> Optional[CollectReport]: + # Packages are Modules, but _last_failed_paths only contains + # test-bearing paths and doesn't try to include the paths of their + # packages, so don't filter them. + if isinstance(collector, Module) and not isinstance(collector, Package): + if collector.path not in self.lfplugin._last_failed_paths: + self.lfplugin._skipped_files += 1 + + return CollectReport( + collector.nodeid, "passed", longrepr=None, result=[] + ) + return None + + +class LFPlugin: + """Plugin which implements the --lf (run last-failing) option.""" + + def __init__(self, config: Config) -> None: + self.config = config + active_keys = "lf", "failedfirst" + self.active = any(config.getoption(key) for key in active_keys) + assert config.cache + self.lastfailed: Dict[str, bool] = config.cache.get("cache/lastfailed", {}) + self._previously_failed_count: Optional[int] = None + self._report_status: Optional[str] = None + self._skipped_files = 0 # count skipped files during collection due to --lf + + if config.getoption("lf"): + self._last_failed_paths = self.get_last_failed_paths() + config.pluginmanager.register( + LFPluginCollWrapper(self), "lfplugin-collwrapper" + ) + + def get_last_failed_paths(self) -> Set[Path]: + """Return a set with all Paths()s of the previously failed nodeids.""" + rootpath = self.config.rootpath + result = {rootpath / nodeid.split("::")[0] for nodeid in self.lastfailed} + return {x for x in result if x.exists()} + + def pytest_report_collectionfinish(self) -> Optional[str]: + if self.active and self.config.getoption("verbose") >= 0: + return "run-last-failure: %s" % self._report_status + return None + + def pytest_runtest_logreport(self, report: TestReport) -> None: + if (report.when == "call" and report.passed) or report.skipped: + self.lastfailed.pop(report.nodeid, None) + elif report.failed: + self.lastfailed[report.nodeid] = True + + def pytest_collectreport(self, report: CollectReport) -> None: + passed = report.outcome in ("passed", "skipped") + if passed: + if report.nodeid in self.lastfailed: + self.lastfailed.pop(report.nodeid) + self.lastfailed.update((item.nodeid, True) for item in report.result) + else: + self.lastfailed[report.nodeid] = True + + @hookimpl(hookwrapper=True, tryfirst=True) + def pytest_collection_modifyitems( + self, config: Config, items: List[nodes.Item] + ) -> Generator[None, None, None]: + yield + + if not self.active: + return + + if self.lastfailed: + previously_failed = [] + previously_passed = [] + for item in items: + if item.nodeid in self.lastfailed: + previously_failed.append(item) + else: + previously_passed.append(item) + self._previously_failed_count = len(previously_failed) + + if not previously_failed: + # Running a subset of all tests with recorded failures + # only outside of it. 
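+                # [Editor's note] For example, running `pytest tests/test_new.py
+                # --lf` when all recorded failures live in other files yields a
+                # status like "2 known failures not in selected tests" (the
+                # count is hypothetical, shown for illustration).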
+ self._report_status = "%d known failures not in selected tests" % ( + len(self.lastfailed), + ) + else: + if self.config.getoption("lf"): + items[:] = previously_failed + config.hook.pytest_deselected(items=previously_passed) + else: # --failedfirst + items[:] = previously_failed + previously_passed + + noun = "failure" if self._previously_failed_count == 1 else "failures" + suffix = " first" if self.config.getoption("failedfirst") else "" + self._report_status = "rerun previous {count} {noun}{suffix}".format( + count=self._previously_failed_count, suffix=suffix, noun=noun + ) + + if self._skipped_files > 0: + files_noun = "file" if self._skipped_files == 1 else "files" + self._report_status += " (skipped {files} {files_noun})".format( + files=self._skipped_files, files_noun=files_noun + ) + else: + self._report_status = "no previously failed tests, " + if self.config.getoption("last_failed_no_failures") == "none": + self._report_status += "deselecting all items." + config.hook.pytest_deselected(items=items[:]) + items[:] = [] + else: + self._report_status += "not deselecting items." + + def pytest_sessionfinish(self, session: Session) -> None: + config = self.config + if config.getoption("cacheshow") or hasattr(config, "workerinput"): + return + + assert config.cache is not None + saved_lastfailed = config.cache.get("cache/lastfailed", {}) + if saved_lastfailed != self.lastfailed: + config.cache.set("cache/lastfailed", self.lastfailed) + + +class NFPlugin: + """Plugin which implements the --nf (run new-first) option.""" + + def __init__(self, config: Config) -> None: + self.config = config + self.active = config.option.newfirst + assert config.cache is not None + self.cached_nodeids = set(config.cache.get("cache/nodeids", [])) + + @hookimpl(hookwrapper=True, tryfirst=True) + def pytest_collection_modifyitems( + self, items: List[nodes.Item] + ) -> Generator[None, None, None]: + yield + + if self.active: + new_items: Dict[str, nodes.Item] = {} + other_items: Dict[str, nodes.Item] = {} + for item in items: + if item.nodeid not in self.cached_nodeids: + new_items[item.nodeid] = item + else: + other_items[item.nodeid] = item + + items[:] = self._get_increasing_order( + new_items.values() + ) + self._get_increasing_order(other_items.values()) + self.cached_nodeids.update(new_items) + else: + self.cached_nodeids.update(item.nodeid for item in items) + + def _get_increasing_order(self, items: Iterable[nodes.Item]) -> List[nodes.Item]: + return sorted(items, key=lambda item: item.path.stat().st_mtime, reverse=True) # type: ignore[no-any-return] + + def pytest_sessionfinish(self) -> None: + config = self.config + if config.getoption("cacheshow") or hasattr(config, "workerinput"): + return + + if config.getoption("collectonly"): + return + + assert config.cache is not None + config.cache.set("cache/nodeids", sorted(self.cached_nodeids)) + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("general") + group.addoption( + "--lf", + "--last-failed", + action="store_true", + dest="lf", + help="rerun only the tests that failed " + "at the last run (or all if none failed)", + ) + group.addoption( + "--ff", + "--failed-first", + action="store_true", + dest="failedfirst", + help="run all tests, but run the last failures first.\n" + "This may re-order tests and thus lead to " + "repeated fixture setup/teardown.", + ) + group.addoption( + "--nf", + "--new-first", + action="store_true", + dest="newfirst", + help="run tests from new files first, then the rest of the tests " + "sorted 
by file mtime", + ) + group.addoption( + "--cache-show", + action="append", + nargs="?", + dest="cacheshow", + help=( + "show cache contents, don't perform collection or tests. " + "Optional argument: glob (default: '*')." + ), + ) + group.addoption( + "--cache-clear", + action="store_true", + dest="cacheclear", + help="remove all cache contents at start of test run.", + ) + cache_dir_default = ".pytest_cache" + if "TOX_ENV_DIR" in os.environ: + cache_dir_default = os.path.join(os.environ["TOX_ENV_DIR"], cache_dir_default) + parser.addini("cache_dir", default=cache_dir_default, help="cache directory path.") + group.addoption( + "--lfnf", + "--last-failed-no-failures", + action="store", + dest="last_failed_no_failures", + choices=("all", "none"), + default="all", + help="which tests to run with no previously (known) failures.", + ) + + +def pytest_cmdline_main(config: Config) -> Optional[Union[int, ExitCode]]: + if config.option.cacheshow: + from _pytest.main import wrap_session + + return wrap_session(config, cacheshow) + return None + + +@hookimpl(tryfirst=True) +def pytest_configure(config: Config) -> None: + config.cache = Cache.for_config(config, _ispytest=True) + config.pluginmanager.register(LFPlugin(config), "lfplugin") + config.pluginmanager.register(NFPlugin(config), "nfplugin") + + +@fixture +def cache(request: FixtureRequest) -> Cache: + """Return a cache object that can persist state between testing sessions. + + cache.get(key, default) + cache.set(key, value) + + Keys must be ``/`` separated strings, where the first part is usually the + name of your plugin or application to avoid clashes with other cache users. + + Values can be any object handled by the json stdlib module. + """ + assert request.config.cache is not None + return request.config.cache + + +def pytest_report_header(config: Config) -> Optional[str]: + """Display cachedir with --cache-show and if non-default.""" + if config.option.verbose > 0 or config.getini("cache_dir") != ".pytest_cache": + assert config.cache is not None + cachedir = config.cache._cachedir + # TODO: evaluate generating upward relative paths + # starting with .., ../.. 
if sensible + + try: + displaypath = cachedir.relative_to(config.rootpath) + except ValueError: + displaypath = cachedir + return f"cachedir: {displaypath}" + return None + + +def cacheshow(config: Config, session: Session) -> int: + from pprint import pformat + + assert config.cache is not None + + tw = TerminalWriter() + tw.line("cachedir: " + str(config.cache._cachedir)) + if not config.cache._cachedir.is_dir(): + tw.line("cache is empty") + return 0 + + glob = config.option.cacheshow[0] + if glob is None: + glob = "*" + + dummy = object() + basedir = config.cache._cachedir + vdir = basedir / Cache._CACHE_PREFIX_VALUES + tw.sep("-", "cache values for %r" % glob) + for valpath in sorted(x for x in vdir.rglob(glob) if x.is_file()): + key = str(valpath.relative_to(vdir)) + val = config.cache.get(key, dummy) + if val is dummy: + tw.line("%s contains unreadable content, will be ignored" % key) + else: + tw.line("%s contains:" % key) + for line in pformat(val).splitlines(): + tw.line(" " + line) + + ddir = basedir / Cache._CACHE_PREFIX_DIRS + if ddir.is_dir(): + contents = sorted(ddir.rglob(glob)) + tw.sep("-", "cache directories for %r" % glob) + for p in contents: + # if p.is_dir(): + # print("%s/" % p.relative_to(basedir)) + if p.is_file(): + key = str(p.relative_to(basedir)) + tw.line(f"{key} is a file of length {p.stat().st_size:d}") + return 0 diff --git a/venv/lib/python3.10/site-packages/_pytest/capture.py b/venv/lib/python3.10/site-packages/_pytest/capture.py new file mode 100644 index 0000000..ee9de37 --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/capture.py @@ -0,0 +1,942 @@ +"""Per-test stdout/stderr capturing mechanism.""" +import contextlib +import functools +import io +import os +import sys +from io import UnsupportedOperation +from tempfile import TemporaryFile +from typing import Any +from typing import AnyStr +from typing import Generator +from typing import Generic +from typing import Iterator +from typing import Optional +from typing import TextIO +from typing import Tuple +from typing import TYPE_CHECKING +from typing import Union + +from _pytest.compat import final +from _pytest.config import Config +from _pytest.config import hookimpl +from _pytest.config.argparsing import Parser +from _pytest.deprecated import check_ispytest +from _pytest.fixtures import fixture +from _pytest.fixtures import SubRequest +from _pytest.nodes import Collector +from _pytest.nodes import File +from _pytest.nodes import Item + +if TYPE_CHECKING: + from typing_extensions import Literal + + _CaptureMethod = Literal["fd", "sys", "no", "tee-sys"] + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("general") + group._addoption( + "--capture", + action="store", + default="fd", + metavar="method", + choices=["fd", "sys", "no", "tee-sys"], + help="per-test capturing method: one of fd|sys|no|tee-sys.", + ) + group._addoption( + "-s", + action="store_const", + const="no", + dest="capture", + help="shortcut for --capture=no.", + ) + + +def _colorama_workaround() -> None: + """Ensure colorama is imported so that it attaches to the correct stdio + handles on Windows. + + colorama uses the terminal on import time. So if something does the + first import of colorama while I/O capture is active, colorama will + fail in various ways. + """ + if sys.platform.startswith("win32"): + try: + import colorama # noqa: F401 + except ImportError: + pass + + +def _windowsconsoleio_workaround(stream: TextIO) -> None: + """Workaround for Windows Unicode console handling. 
+ + Python 3.6 implemented Unicode console handling for Windows. This works + by reading/writing to the raw console handle using + ``{Read,Write}ConsoleW``. + + The problem is that we are going to ``dup2`` over the stdio file + descriptors when doing ``FDCapture`` and this will ``CloseHandle`` the + handles used by Python to write to the console. Though there is still some + weirdness and the console handle seems to only be closed randomly and not + on the first call to ``CloseHandle``, or maybe it gets reopened with the + same handle value when we suspend capturing. + + The workaround in this case will reopen stdio with a different fd which + also means a different handle by replicating the logic in + "Py_lifecycle.c:initstdio/create_stdio". + + :param stream: + In practice ``sys.stdout`` or ``sys.stderr``, but given + here as parameter for unittesting purposes. + + See https://github.com/pytest-dev/py/issues/103. + """ + if not sys.platform.startswith("win32") or hasattr(sys, "pypy_version_info"): + return + + # Bail out if ``stream`` doesn't seem like a proper ``io`` stream (#2666). + if not hasattr(stream, "buffer"): # type: ignore[unreachable] + return + + buffered = hasattr(stream.buffer, "raw") + raw_stdout = stream.buffer.raw if buffered else stream.buffer # type: ignore[attr-defined] + + if not isinstance(raw_stdout, io._WindowsConsoleIO): # type: ignore[attr-defined] + return + + def _reopen_stdio(f, mode): + if not buffered and mode[0] == "w": + buffering = 0 + else: + buffering = -1 + + return io.TextIOWrapper( + open(os.dup(f.fileno()), mode, buffering), + f.encoding, + f.errors, + f.newlines, + f.line_buffering, + ) + + sys.stdin = _reopen_stdio(sys.stdin, "rb") + sys.stdout = _reopen_stdio(sys.stdout, "wb") + sys.stderr = _reopen_stdio(sys.stderr, "wb") + + +@hookimpl(hookwrapper=True) +def pytest_load_initial_conftests(early_config: Config): + ns = early_config.known_args_namespace + if ns.capture == "fd": + _windowsconsoleio_workaround(sys.stdout) + _colorama_workaround() + pluginmanager = early_config.pluginmanager + capman = CaptureManager(ns.capture) + pluginmanager.register(capman, "capturemanager") + + # Make sure that capturemanager is properly reset at final shutdown. + early_config.add_cleanup(capman.stop_global_capturing) + + # Finally trigger conftest loading but while capturing (issue #93). + capman.start_global_capturing() + outcome = yield + capman.suspend_global_capture() + if outcome.excinfo is not None: + out, err = capman.read_global_capture() + sys.stdout.write(out) + sys.stderr.write(err) + + +# IO Helpers. + + +class EncodedFile(io.TextIOWrapper): + __slots__ = () + + @property + def name(self) -> str: + # Ensure that file.name is a string. Workaround for a Python bug + # fixed in >=3.7.4: https://bugs.python.org/issue36015 + return repr(self.buffer) + + @property + def mode(self) -> str: + # TextIOWrapper doesn't expose a mode, but at least some of our + # tests check it. 
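+        # [Editor's note] Illustrative: a buffer opened in mode "rb+" is
+        # reported by this property as "r+", i.e. the text-mode view of the
+        # same underlying file.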
+ return self.buffer.mode.replace("b", "") + + +class CaptureIO(io.TextIOWrapper): + def __init__(self) -> None: + super().__init__(io.BytesIO(), encoding="UTF-8", newline="", write_through=True) + + def getvalue(self) -> str: + assert isinstance(self.buffer, io.BytesIO) + return self.buffer.getvalue().decode("UTF-8") + + +class TeeCaptureIO(CaptureIO): + def __init__(self, other: TextIO) -> None: + self._other = other + super().__init__() + + def write(self, s: str) -> int: + super().write(s) + return self._other.write(s) + + +class DontReadFromInput: + encoding = None + + def read(self, *args): + raise OSError( + "pytest: reading from stdin while output is captured! Consider using `-s`." + ) + + readline = read + readlines = read + __next__ = read + + def __iter__(self): + return self + + def fileno(self) -> int: + raise UnsupportedOperation("redirected stdin is pseudofile, has no fileno()") + + def isatty(self) -> bool: + return False + + def close(self) -> None: + pass + + @property + def buffer(self): + return self + + +# Capture classes. + + +patchsysdict = {0: "stdin", 1: "stdout", 2: "stderr"} + + +class NoCapture: + EMPTY_BUFFER = None + __init__ = start = done = suspend = resume = lambda *args: None + + +class SysCaptureBinary: + + EMPTY_BUFFER = b"" + + def __init__(self, fd: int, tmpfile=None, *, tee: bool = False) -> None: + name = patchsysdict[fd] + self._old = getattr(sys, name) + self.name = name + if tmpfile is None: + if name == "stdin": + tmpfile = DontReadFromInput() + else: + tmpfile = CaptureIO() if not tee else TeeCaptureIO(self._old) + self.tmpfile = tmpfile + self._state = "initialized" + + def repr(self, class_name: str) -> str: + return "<{} {} _old={} _state={!r} tmpfile={!r}>".format( + class_name, + self.name, + hasattr(self, "_old") and repr(self._old) or "", + self._state, + self.tmpfile, + ) + + def __repr__(self) -> str: + return "<{} {} _old={} _state={!r} tmpfile={!r}>".format( + self.__class__.__name__, + self.name, + hasattr(self, "_old") and repr(self._old) or "", + self._state, + self.tmpfile, + ) + + def _assert_state(self, op: str, states: Tuple[str, ...]) -> None: + assert ( + self._state in states + ), "cannot {} in state {!r}: expected one of {}".format( + op, self._state, ", ".join(states) + ) + + def start(self) -> None: + self._assert_state("start", ("initialized",)) + setattr(sys, self.name, self.tmpfile) + self._state = "started" + + def snap(self): + self._assert_state("snap", ("started", "suspended")) + self.tmpfile.seek(0) + res = self.tmpfile.buffer.read() + self.tmpfile.seek(0) + self.tmpfile.truncate() + return res + + def done(self) -> None: + self._assert_state("done", ("initialized", "started", "suspended", "done")) + if self._state == "done": + return + setattr(sys, self.name, self._old) + del self._old + self.tmpfile.close() + self._state = "done" + + def suspend(self) -> None: + self._assert_state("suspend", ("started", "suspended")) + setattr(sys, self.name, self._old) + self._state = "suspended" + + def resume(self) -> None: + self._assert_state("resume", ("started", "suspended")) + if self._state == "started": + return + setattr(sys, self.name, self.tmpfile) + self._state = "started" + + def writeorg(self, data) -> None: + self._assert_state("writeorg", ("started", "suspended")) + self._old.flush() + self._old.buffer.write(data) + self._old.buffer.flush() + + +class SysCapture(SysCaptureBinary): + EMPTY_BUFFER = "" # type: ignore[assignment] + + def snap(self): + res = self.tmpfile.getvalue() + self.tmpfile.seek(0) + 
self.tmpfile.truncate() + return res + + def writeorg(self, data): + self._assert_state("writeorg", ("started", "suspended")) + self._old.write(data) + self._old.flush() + + +class FDCaptureBinary: + """Capture IO to/from a given OS-level file descriptor. + + snap() produces `bytes`. + """ + + EMPTY_BUFFER = b"" + + def __init__(self, targetfd: int) -> None: + self.targetfd = targetfd + + try: + os.fstat(targetfd) + except OSError: + # FD capturing is conceptually simple -- create a temporary file, + # redirect the FD to it, redirect back when done. But when the + # target FD is invalid it throws a wrench into this lovely scheme. + # + # Tests themselves shouldn't care if the FD is valid, FD capturing + # should work regardless of external circumstances. So falling back + # to just sys capturing is not a good option. + # + # Further complications are the need to support suspend() and the + # possibility of FD reuse (e.g. the tmpfile getting the very same + # target FD). The following approach is robust, I believe. + self.targetfd_invalid: Optional[int] = os.open(os.devnull, os.O_RDWR) + os.dup2(self.targetfd_invalid, targetfd) + else: + self.targetfd_invalid = None + self.targetfd_save = os.dup(targetfd) + + if targetfd == 0: + self.tmpfile = open(os.devnull) + self.syscapture = SysCapture(targetfd) + else: + self.tmpfile = EncodedFile( + TemporaryFile(buffering=0), + encoding="utf-8", + errors="replace", + newline="", + write_through=True, + ) + if targetfd in patchsysdict: + self.syscapture = SysCapture(targetfd, self.tmpfile) + else: + self.syscapture = NoCapture() + + self._state = "initialized" + + def __repr__(self) -> str: + return "<{} {} oldfd={} _state={!r} tmpfile={!r}>".format( + self.__class__.__name__, + self.targetfd, + self.targetfd_save, + self._state, + self.tmpfile, + ) + + def _assert_state(self, op: str, states: Tuple[str, ...]) -> None: + assert ( + self._state in states + ), "cannot {} in state {!r}: expected one of {}".format( + op, self._state, ", ".join(states) + ) + + def start(self) -> None: + """Start capturing on targetfd using memorized tmpfile.""" + self._assert_state("start", ("initialized",)) + os.dup2(self.tmpfile.fileno(), self.targetfd) + self.syscapture.start() + self._state = "started" + + def snap(self): + self._assert_state("snap", ("started", "suspended")) + self.tmpfile.seek(0) + res = self.tmpfile.buffer.read() + self.tmpfile.seek(0) + self.tmpfile.truncate() + return res + + def done(self) -> None: + """Stop capturing, restore streams, return original capture file, + seeked to position zero.""" + self._assert_state("done", ("initialized", "started", "suspended", "done")) + if self._state == "done": + return + os.dup2(self.targetfd_save, self.targetfd) + os.close(self.targetfd_save) + if self.targetfd_invalid is not None: + if self.targetfd_invalid != self.targetfd: + os.close(self.targetfd) + os.close(self.targetfd_invalid) + self.syscapture.done() + self.tmpfile.close() + self._state = "done" + + def suspend(self) -> None: + self._assert_state("suspend", ("started", "suspended")) + if self._state == "suspended": + return + self.syscapture.suspend() + os.dup2(self.targetfd_save, self.targetfd) + self._state = "suspended" + + def resume(self) -> None: + self._assert_state("resume", ("started", "suspended")) + if self._state == "started": + return + self.syscapture.resume() + os.dup2(self.tmpfile.fileno(), self.targetfd) + self._state = "started" + + def writeorg(self, data): + """Write to original file descriptor.""" + 
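+        # [Editor's note] `targetfd_save` is the os.dup()'ed copy of the
+        # original descriptor taken in __init__, so this write reaches the
+        # real stdout/stderr even while `targetfd` is redirected to tmpfile.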
        self._assert_state("writeorg", ("started", "suspended"))
+        os.write(self.targetfd_save, data)
+
+
+class FDCapture(FDCaptureBinary):
+    """Capture IO to/from a given OS-level file descriptor.
+
+    snap() produces text.
+    """
+
+    # Ignore type because it doesn't match the type in the superclass (bytes).
+    EMPTY_BUFFER = ""  # type: ignore
+
+    def snap(self):
+        self._assert_state("snap", ("started", "suspended"))
+        self.tmpfile.seek(0)
+        res = self.tmpfile.read()
+        self.tmpfile.seek(0)
+        self.tmpfile.truncate()
+        return res
+
+    def writeorg(self, data):
+        """Write to original file descriptor."""
+        super().writeorg(data.encode("utf-8"))  # XXX use encoding of original stream
+
+
+# MultiCapture
+
+
+# This class was a namedtuple, but due to mypy limitation[0] it could not be
+# made generic, so was replaced by a regular class which tries to emulate the
+# pertinent parts of a namedtuple. If the mypy limitation is ever lifted, can
+# make it a namedtuple again.
+# [0]: https://github.com/python/mypy/issues/685
+@final
+@functools.total_ordering
+class CaptureResult(Generic[AnyStr]):
+    """The result of :meth:`CaptureFixture.readouterr`."""
+
+    __slots__ = ("out", "err")
+
+    def __init__(self, out: AnyStr, err: AnyStr) -> None:
+        self.out: AnyStr = out
+        self.err: AnyStr = err
+
+    def __len__(self) -> int:
+        return 2
+
+    def __iter__(self) -> Iterator[AnyStr]:
+        return iter((self.out, self.err))
+
+    def __getitem__(self, item: int) -> AnyStr:
+        return tuple(self)[item]
+
+    def _replace(
+        self, *, out: Optional[AnyStr] = None, err: Optional[AnyStr] = None
+    ) -> "CaptureResult[AnyStr]":
+        return CaptureResult(
+            out=self.out if out is None else out, err=self.err if err is None else err
+        )
+
+    def count(self, value: AnyStr) -> int:
+        return tuple(self).count(value)
+
+    def index(self, value) -> int:
+        return tuple(self).index(value)
+
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, (CaptureResult, tuple)):
+            return NotImplemented
+        return tuple(self) == tuple(other)
+
+    def __hash__(self) -> int:
+        return hash(tuple(self))
+
+    def __lt__(self, other: object) -> bool:
+        if not isinstance(other, (CaptureResult, tuple)):
+            return NotImplemented
+        return tuple(self) < tuple(other)
+
+    def __repr__(self) -> str:
+        return f"CaptureResult(out={self.out!r}, err={self.err!r})"
+
+
+class MultiCapture(Generic[AnyStr]):
+    _state = None
+    _in_suspended = False
+
+    def __init__(self, in_, out, err) -> None:
+        self.in_ = in_
+        self.out = out
+        self.err = err
+
+    def __repr__(self) -> str:
+        return "<MultiCapture out={!r} err={!r} in_={!r} _state={!r} _in_suspended={!r}>".format(
+            self.out,
+            self.err,
+            self.in_,
+            self._state,
+            self._in_suspended,
+        )
+
+    def start_capturing(self) -> None:
+        self._state = "started"
+        if self.in_:
+            self.in_.start()
+        if self.out:
+            self.out.start()
+        if self.err:
+            self.err.start()
+
+    def pop_outerr_to_orig(self) -> Tuple[AnyStr, AnyStr]:
+        """Pop current snapshot out/err capture and flush to orig streams."""
+        out, err = self.readouterr()
+        if out:
+            self.out.writeorg(out)
+        if err:
+            self.err.writeorg(err)
+        return out, err
+
+    def suspend_capturing(self, in_: bool = False) -> None:
+        self._state = "suspended"
+        if self.out:
+            self.out.suspend()
+        if self.err:
+            self.err.suspend()
+        if in_ and self.in_:
+            self.in_.suspend()
+            self._in_suspended = True
+
+    def resume_capturing(self) -> None:
+        self._state = "started"
+        if self.out:
+            self.out.resume()
+        if self.err:
+            self.err.resume()
+        if self._in_suspended:
+            self.in_.resume()
+            self._in_suspended = False
+
+    def stop_capturing(self) -> None:
+        """Stop capturing and reset capturing streams."""
+        if self._state == "stopped":
+            raise ValueError("was already stopped")
+        self._state = "stopped"
+        if self.out:
+            self.out.done()
+        if self.err:
+            self.err.done()
+        if self.in_:
+            self.in_.done()
+
+    def is_started(self) -> bool:
+        """Whether actively capturing -- not suspended or stopped."""
+        return self._state == "started"
+
+    def readouterr(self) -> CaptureResult[AnyStr]:
+        out = self.out.snap() if self.out else ""
+        err = self.err.snap() if self.err else ""
+        return CaptureResult(out, err)
+
+
+def _get_multicapture(method: "_CaptureMethod") -> MultiCapture[str]:
+    if method == "fd":
+        return MultiCapture(in_=FDCapture(0), out=FDCapture(1), err=FDCapture(2))
+    elif method == "sys":
+        return MultiCapture(in_=SysCapture(0), out=SysCapture(1), err=SysCapture(2))
+    elif method == "no":
+        return MultiCapture(in_=None, out=None, err=None)
+    elif method == "tee-sys":
+        return MultiCapture(
+            in_=None, out=SysCapture(1, tee=True), err=SysCapture(2, tee=True)
+        )
+    raise ValueError(f"unknown capturing method: {method!r}")
+
+
+# CaptureManager and CaptureFixture
+
+
+class CaptureManager:
+    """The capture plugin.
+
+    Ensures that the appropriate capture method is enabled/disabled during
+    collection and each test phase (setup, call, teardown). After each of
+    those points, the captured output is obtained and attached to the
+    collection/runtest report.
+
+    There are two levels of capture:
+
+    * global: enabled by default and can be suppressed by the ``-s``
+      option. This is always enabled/disabled during collection and each test
+      phase.
+
+    * fixture: when a test function or one of its fixtures depends on the
+      ``capsys`` or ``capfd`` fixtures. In this case special handling is
+      needed to ensure the fixtures take precedence over the global capture.
+    """
+
+    def __init__(self, method: "_CaptureMethod") -> None:
+        self._method = method
+        self._global_capturing: Optional[MultiCapture[str]] = None
+        self._capture_fixture: Optional[CaptureFixture[Any]] = None
+
+    def __repr__(self) -> str:
+        return "<CaptureManager _method={!r} _global_capturing={!r} _capture_fixture={!r}>".format(
+            self._method, self._global_capturing, self._capture_fixture
+        )
+
+    def is_capturing(self) -> Union[str, bool]:
+        if self.is_globally_capturing():
+            return "global"
+        if self._capture_fixture:
+            return "fixture %s" % self._capture_fixture.request.fixturename
+        return False
+
+    # Global capturing control
+
+    def is_globally_capturing(self) -> bool:
+        return self._method != "no"
+
+    def start_global_capturing(self) -> None:
+        assert self._global_capturing is None
+        self._global_capturing = _get_multicapture(self._method)
+        self._global_capturing.start_capturing()
+
+    def stop_global_capturing(self) -> None:
+        if self._global_capturing is not None:
+            self._global_capturing.pop_outerr_to_orig()
+            self._global_capturing.stop_capturing()
+            self._global_capturing = None
+
+    def resume_global_capture(self) -> None:
+        # During teardown of the python process, and on rare occasions, capture
+        # attributes can be `None` while trying to resume global capture.
+        if self._global_capturing is not None:
+            self._global_capturing.resume_capturing()
+
+    def suspend_global_capture(self, in_: bool = False) -> None:
+        if self._global_capturing is not None:
+            self._global_capturing.suspend_capturing(in_=in_)
+
+    def suspend(self, in_: bool = False) -> None:
+        # Need to undo local capsys-et-al if it exists before disabling global capture.
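+        # [Editor's note] Order matters here: the fixture capture nests inside
+        # the global capture, so it is suspended first; resume() below mirrors
+        # this by resuming the global capture before the fixture.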
+ self.suspend_fixture() + self.suspend_global_capture(in_) + + def resume(self) -> None: + self.resume_global_capture() + self.resume_fixture() + + def read_global_capture(self) -> CaptureResult[str]: + assert self._global_capturing is not None + return self._global_capturing.readouterr() + + # Fixture Control + + def set_fixture(self, capture_fixture: "CaptureFixture[Any]") -> None: + if self._capture_fixture: + current_fixture = self._capture_fixture.request.fixturename + requested_fixture = capture_fixture.request.fixturename + capture_fixture.request.raiseerror( + "cannot use {} and {} at the same time".format( + requested_fixture, current_fixture + ) + ) + self._capture_fixture = capture_fixture + + def unset_fixture(self) -> None: + self._capture_fixture = None + + def activate_fixture(self) -> None: + """If the current item is using ``capsys`` or ``capfd``, activate + them so they take precedence over the global capture.""" + if self._capture_fixture: + self._capture_fixture._start() + + def deactivate_fixture(self) -> None: + """Deactivate the ``capsys`` or ``capfd`` fixture of this item, if any.""" + if self._capture_fixture: + self._capture_fixture.close() + + def suspend_fixture(self) -> None: + if self._capture_fixture: + self._capture_fixture._suspend() + + def resume_fixture(self) -> None: + if self._capture_fixture: + self._capture_fixture._resume() + + # Helper context managers + + @contextlib.contextmanager + def global_and_fixture_disabled(self) -> Generator[None, None, None]: + """Context manager to temporarily disable global and current fixture capturing.""" + do_fixture = self._capture_fixture and self._capture_fixture._is_started() + if do_fixture: + self.suspend_fixture() + do_global = self._global_capturing and self._global_capturing.is_started() + if do_global: + self.suspend_global_capture() + try: + yield + finally: + if do_global: + self.resume_global_capture() + if do_fixture: + self.resume_fixture() + + @contextlib.contextmanager + def item_capture(self, when: str, item: Item) -> Generator[None, None, None]: + self.resume_global_capture() + self.activate_fixture() + try: + yield + finally: + self.deactivate_fixture() + self.suspend_global_capture(in_=False) + + out, err = self.read_global_capture() + item.add_report_section(when, "stdout", out) + item.add_report_section(when, "stderr", err) + + # Hooks + + @hookimpl(hookwrapper=True) + def pytest_make_collect_report(self, collector: Collector): + if isinstance(collector, File): + self.resume_global_capture() + outcome = yield + self.suspend_global_capture() + out, err = self.read_global_capture() + rep = outcome.get_result() + if out: + rep.sections.append(("Captured stdout", out)) + if err: + rep.sections.append(("Captured stderr", err)) + else: + yield + + @hookimpl(hookwrapper=True) + def pytest_runtest_setup(self, item: Item) -> Generator[None, None, None]: + with self.item_capture("setup", item): + yield + + @hookimpl(hookwrapper=True) + def pytest_runtest_call(self, item: Item) -> Generator[None, None, None]: + with self.item_capture("call", item): + yield + + @hookimpl(hookwrapper=True) + def pytest_runtest_teardown(self, item: Item) -> Generator[None, None, None]: + with self.item_capture("teardown", item): + yield + + @hookimpl(tryfirst=True) + def pytest_keyboard_interrupt(self) -> None: + self.stop_global_capturing() + + @hookimpl(tryfirst=True) + def pytest_internalerror(self) -> None: + self.stop_global_capturing() + + +class CaptureFixture(Generic[AnyStr]): + """Object returned by the 
:fixture:`capsys`, :fixture:`capsysbinary`, + :fixture:`capfd` and :fixture:`capfdbinary` fixtures.""" + + def __init__( + self, captureclass, request: SubRequest, *, _ispytest: bool = False + ) -> None: + check_ispytest(_ispytest) + self.captureclass = captureclass + self.request = request + self._capture: Optional[MultiCapture[AnyStr]] = None + self._captured_out = self.captureclass.EMPTY_BUFFER + self._captured_err = self.captureclass.EMPTY_BUFFER + + def _start(self) -> None: + if self._capture is None: + self._capture = MultiCapture( + in_=None, + out=self.captureclass(1), + err=self.captureclass(2), + ) + self._capture.start_capturing() + + def close(self) -> None: + if self._capture is not None: + out, err = self._capture.pop_outerr_to_orig() + self._captured_out += out + self._captured_err += err + self._capture.stop_capturing() + self._capture = None + + def readouterr(self) -> CaptureResult[AnyStr]: + """Read and return the captured output so far, resetting the internal + buffer. + + :returns: + The captured content as a namedtuple with ``out`` and ``err`` + string attributes. + """ + captured_out, captured_err = self._captured_out, self._captured_err + if self._capture is not None: + out, err = self._capture.readouterr() + captured_out += out + captured_err += err + self._captured_out = self.captureclass.EMPTY_BUFFER + self._captured_err = self.captureclass.EMPTY_BUFFER + return CaptureResult(captured_out, captured_err) + + def _suspend(self) -> None: + """Suspend this fixture's own capturing temporarily.""" + if self._capture is not None: + self._capture.suspend_capturing() + + def _resume(self) -> None: + """Resume this fixture's own capturing temporarily.""" + if self._capture is not None: + self._capture.resume_capturing() + + def _is_started(self) -> bool: + """Whether actively capturing -- not disabled or closed.""" + if self._capture is not None: + return self._capture.is_started() + return False + + @contextlib.contextmanager + def disabled(self) -> Generator[None, None, None]: + """Temporarily disable capturing while inside the ``with`` block.""" + capmanager = self.request.config.pluginmanager.getplugin("capturemanager") + with capmanager.global_and_fixture_disabled(): + yield + + +# The fixtures. + + +@fixture +def capsys(request: SubRequest) -> Generator[CaptureFixture[str], None, None]: + """Enable text capturing of writes to ``sys.stdout`` and ``sys.stderr``. + + The captured output is made available via ``capsys.readouterr()`` method + calls, which return a ``(out, err)`` namedtuple. + ``out`` and ``err`` will be ``text`` objects. + """ + capman = request.config.pluginmanager.getplugin("capturemanager") + capture_fixture = CaptureFixture[str](SysCapture, request, _ispytest=True) + capman.set_fixture(capture_fixture) + capture_fixture._start() + yield capture_fixture + capture_fixture.close() + capman.unset_fixture() + + +@fixture +def capsysbinary(request: SubRequest) -> Generator[CaptureFixture[bytes], None, None]: + """Enable bytes capturing of writes to ``sys.stdout`` and ``sys.stderr``. + + The captured output is made available via ``capsysbinary.readouterr()`` + method calls, which return a ``(out, err)`` namedtuple. + ``out`` and ``err`` will be ``bytes`` objects. 
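+
+    Example (an illustrative sketch added by the editor, not upstream text)::
+
+        def test_output(capsysbinary):
+            print("hello")
+            captured = capsysbinary.readouterr()
+            assert captured.out == b"hello\n"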
+ """ + capman = request.config.pluginmanager.getplugin("capturemanager") + capture_fixture = CaptureFixture[bytes](SysCaptureBinary, request, _ispytest=True) + capman.set_fixture(capture_fixture) + capture_fixture._start() + yield capture_fixture + capture_fixture.close() + capman.unset_fixture() + + +@fixture +def capfd(request: SubRequest) -> Generator[CaptureFixture[str], None, None]: + """Enable text capturing of writes to file descriptors ``1`` and ``2``. + + The captured output is made available via ``capfd.readouterr()`` method + calls, which return a ``(out, err)`` namedtuple. + ``out`` and ``err`` will be ``text`` objects. + """ + capman = request.config.pluginmanager.getplugin("capturemanager") + capture_fixture = CaptureFixture[str](FDCapture, request, _ispytest=True) + capman.set_fixture(capture_fixture) + capture_fixture._start() + yield capture_fixture + capture_fixture.close() + capman.unset_fixture() + + +@fixture +def capfdbinary(request: SubRequest) -> Generator[CaptureFixture[bytes], None, None]: + """Enable bytes capturing of writes to file descriptors ``1`` and ``2``. + + The captured output is made available via ``capfd.readouterr()`` method + calls, which return a ``(out, err)`` namedtuple. + ``out`` and ``err`` will be ``byte`` objects. + """ + capman = request.config.pluginmanager.getplugin("capturemanager") + capture_fixture = CaptureFixture[bytes](FDCaptureBinary, request, _ispytest=True) + capman.set_fixture(capture_fixture) + capture_fixture._start() + yield capture_fixture + capture_fixture.close() + capman.unset_fixture() diff --git a/venv/lib/python3.10/site-packages/_pytest/compat.py b/venv/lib/python3.10/site-packages/_pytest/compat.py new file mode 100644 index 0000000..e4c2a5f --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/compat.py @@ -0,0 +1,405 @@ +"""Python version compatibility code.""" +import enum +import functools +import inspect +import os +import sys +from inspect import Parameter +from inspect import signature +from pathlib import Path +from typing import Any +from typing import Callable +from typing import Generic +from typing import Optional +from typing import Tuple +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +import attr +import py + +if TYPE_CHECKING: + from typing import NoReturn + from typing_extensions import Final + + +_T = TypeVar("_T") +_S = TypeVar("_S") + +#: constant to prepare valuing pylib path replacements/lazy proxies later on +# intended for removal in pytest 8.0 or 9.0 + +# fmt: off +# intentional space to create a fake difference for the verification +LEGACY_PATH = py.path. 
local +# fmt: on + + +def legacy_path(path: Union[str, "os.PathLike[str]"]) -> LEGACY_PATH: + """Internal wrapper to prepare lazy proxies for legacy_path instances""" + return LEGACY_PATH(path) + + +# fmt: off +# Singleton type for NOTSET, as described in: +# https://www.python.org/dev/peps/pep-0484/#support-for-singleton-types-in-unions +class NotSetType(enum.Enum): + token = 0 +NOTSET: "Final" = NotSetType.token # noqa: E305 +# fmt: on + +if sys.version_info >= (3, 8): + from importlib import metadata as importlib_metadata +else: + import importlib_metadata # noqa: F401 + + +def _format_args(func: Callable[..., Any]) -> str: + return str(signature(func)) + + +def is_generator(func: object) -> bool: + genfunc = inspect.isgeneratorfunction(func) + return genfunc and not iscoroutinefunction(func) + + +def iscoroutinefunction(func: object) -> bool: + """Return True if func is a coroutine function (a function defined with async + def syntax, and doesn't contain yield), or a function decorated with + @asyncio.coroutine. + + Note: copied and modified from Python 3.5's builtin couroutines.py to avoid + importing asyncio directly, which in turns also initializes the "logging" + module as a side-effect (see issue #8). + """ + return inspect.iscoroutinefunction(func) or getattr(func, "_is_coroutine", False) + + +def is_async_function(func: object) -> bool: + """Return True if the given function seems to be an async function or + an async generator.""" + return iscoroutinefunction(func) or inspect.isasyncgenfunction(func) + + +def getlocation(function, curdir: Optional[str] = None) -> str: + function = get_real_func(function) + fn = Path(inspect.getfile(function)) + lineno = function.__code__.co_firstlineno + if curdir is not None: + try: + relfn = fn.relative_to(curdir) + except ValueError: + pass + else: + return "%s:%d" % (relfn, lineno + 1) + return "%s:%d" % (fn, lineno + 1) + + +def num_mock_patch_args(function) -> int: + """Return number of arguments used up by mock arguments (if any).""" + patchings = getattr(function, "patchings", None) + if not patchings: + return 0 + + mock_sentinel = getattr(sys.modules.get("mock"), "DEFAULT", object()) + ut_mock_sentinel = getattr(sys.modules.get("unittest.mock"), "DEFAULT", object()) + + return len( + [ + p + for p in patchings + if not p.attribute_name + and (p.new is mock_sentinel or p.new is ut_mock_sentinel) + ] + ) + + +def getfuncargnames( + function: Callable[..., Any], + *, + name: str = "", + is_method: bool = False, + cls: Optional[type] = None, +) -> Tuple[str, ...]: + """Return the names of a function's mandatory arguments. + + Should return the names of all function arguments that: + * Aren't bound to an instance or type as in instance or class methods. + * Don't have default values. + * Aren't bound with functools.partial. + * Aren't replaced with mocks. + + The is_method and cls arguments indicate that the function should + be treated as a bound method even though it's not unless, only in + the case of cls, the function is a static method. + + The name parameter should be the original name in which the function was collected. + """ + # TODO(RonnyPfannschmidt): This function should be refactored when we + # revisit fixtures. The fixture mechanism should ask the node for + # the fixture names, and not try to obtain directly from the + # function object well after collection has occurred. + + # The parameters attribute of a Signature object contains an + # ordered mapping of parameter names to Parameter instances. 
This + # creates a tuple of the names of the parameters that don't have + # defaults. + try: + parameters = signature(function).parameters + except (ValueError, TypeError) as e: + from _pytest.outcomes import fail + + fail( + f"Could not determine arguments of {function!r}: {e}", + pytrace=False, + ) + + arg_names = tuple( + p.name + for p in parameters.values() + if ( + p.kind is Parameter.POSITIONAL_OR_KEYWORD + or p.kind is Parameter.KEYWORD_ONLY + ) + and p.default is Parameter.empty + ) + if not name: + name = function.__name__ + + # If this function should be treated as a bound method even though + # it's passed as an unbound method or function, remove the first + # parameter name. + if is_method or ( + # Not using `getattr` because we don't want to resolve the staticmethod. + # Not using `cls.__dict__` because we want to check the entire MRO. + cls + and not isinstance( + inspect.getattr_static(cls, name, default=None), staticmethod + ) + ): + arg_names = arg_names[1:] + # Remove any names that will be replaced with mocks. + if hasattr(function, "__wrapped__"): + arg_names = arg_names[num_mock_patch_args(function) :] + return arg_names + + +def get_default_arg_names(function: Callable[..., Any]) -> Tuple[str, ...]: + # Note: this code intentionally mirrors the code at the beginning of + # getfuncargnames, to get the arguments which were excluded from its result + # because they had default values. + return tuple( + p.name + for p in signature(function).parameters.values() + if p.kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY) + and p.default is not Parameter.empty + ) + + +_non_printable_ascii_translate_table = { + i: f"\\x{i:02x}" for i in range(128) if i not in range(32, 127) +} +_non_printable_ascii_translate_table.update( + {ord("\t"): "\\t", ord("\r"): "\\r", ord("\n"): "\\n"} +) + + +def _translate_non_printable(s: str) -> str: + return s.translate(_non_printable_ascii_translate_table) + + +STRING_TYPES = bytes, str + + +def _bytes_to_ascii(val: bytes) -> str: + return val.decode("ascii", "backslashreplace") + + +def ascii_escaped(val: Union[bytes, str]) -> str: + r"""If val is pure ASCII, return it as an str, otherwise, escape + bytes objects into a sequence of escaped bytes: + + b'\xc3\xb4\xc5\xd6' -> r'\xc3\xb4\xc5\xd6' + + and escapes unicode objects into a sequence of escaped unicode + ids, e.g.: + + r'4\nV\U00043efa\x0eMXWB\x1e\u3028\u15fd\xcd\U0007d944' + + Note: + The obvious "v.decode('unicode-escape')" will return + valid UTF-8 unicode if it finds them in bytes, but we + want to return escaped bytes for any byte, even if they match + a UTF-8 string. + """ + if isinstance(val, bytes): + ret = _bytes_to_ascii(val) + else: + ret = val.encode("unicode_escape").decode("ascii") + return _translate_non_printable(ret) + + +@attr.s +class _PytestWrapper: + """Dummy wrapper around a function object for internal use only. + + Used to correctly unwrap the underlying function object when we are + creating fixtures, because we wrap the function object ourselves with a + decorator to issue warnings when the fixture function is called directly. 
+ """ + + obj = attr.ib() + + +def get_real_func(obj): + """Get the real function object of the (possibly) wrapped object by + functools.wraps or functools.partial.""" + start_obj = obj + for i in range(100): + # __pytest_wrapped__ is set by @pytest.fixture when wrapping the fixture function + # to trigger a warning if it gets called directly instead of by pytest: we don't + # want to unwrap further than this otherwise we lose useful wrappings like @mock.patch (#3774) + new_obj = getattr(obj, "__pytest_wrapped__", None) + if isinstance(new_obj, _PytestWrapper): + obj = new_obj.obj + break + new_obj = getattr(obj, "__wrapped__", None) + if new_obj is None: + break + obj = new_obj + else: + from _pytest._io.saferepr import saferepr + + raise ValueError( + ("could not find real function of {start}\nstopped at {current}").format( + start=saferepr(start_obj), current=saferepr(obj) + ) + ) + if isinstance(obj, functools.partial): + obj = obj.func + return obj + + +def get_real_method(obj, holder): + """Attempt to obtain the real function object that might be wrapping + ``obj``, while at the same time returning a bound method to ``holder`` if + the original object was a bound method.""" + try: + is_method = hasattr(obj, "__func__") + obj = get_real_func(obj) + except Exception: # pragma: no cover + return obj + if is_method and hasattr(obj, "__get__") and callable(obj.__get__): + obj = obj.__get__(holder) + return obj + + +def getimfunc(func): + try: + return func.__func__ + except AttributeError: + return func + + +def safe_getattr(object: Any, name: str, default: Any) -> Any: + """Like getattr but return default upon any Exception or any OutcomeException. + + Attribute access can potentially fail for 'evil' Python objects. + See issue #214. + It catches OutcomeException because of #2490 (issue #580), new outcomes + are derived from BaseException instead of Exception (for more details + check #2707). + """ + from _pytest.outcomes import TEST_OUTCOME + + try: + return getattr(object, name, default) + except TEST_OUTCOME: + return default + + +def safe_isclass(obj: object) -> bool: + """Ignore any exception via isinstance on Python 3.""" + try: + return inspect.isclass(obj) + except Exception: + return False + + +if TYPE_CHECKING: + if sys.version_info >= (3, 8): + from typing import final as final + else: + from typing_extensions import final as final +elif sys.version_info >= (3, 8): + from typing import final as final +else: + + def final(f): + return f + + +if sys.version_info >= (3, 8): + from functools import cached_property as cached_property +else: + from typing import overload + from typing import Type + + class cached_property(Generic[_S, _T]): + __slots__ = ("func", "__doc__") + + def __init__(self, func: Callable[[_S], _T]) -> None: + self.func = func + self.__doc__ = func.__doc__ + + @overload + def __get__( + self, instance: None, owner: Optional[Type[_S]] = ... + ) -> "cached_property[_S, _T]": + ... + + @overload + def __get__(self, instance: _S, owner: Optional[Type[_S]] = ...) -> _T: + ... + + def __get__(self, instance, owner=None): + if instance is None: + return self + value = instance.__dict__[self.func.__name__] = self.func(instance) + return value + + +# Perform exhaustiveness checking. 
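+# [Editor's sketch] A runnable miniature of the pattern explained below; the
+# names are hypothetical and this is not upstream code:
+#
+#     from typing import Union
+#
+#     def describe(x: Union[int, str]) -> str:
+#         if isinstance(x, int):
+#             return "an int"
+#         elif isinstance(x, str):
+#             return "a str"
+#         else:
+#             return assert_never(x)  # mypy flags any unhandled variant
+#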
+# +# Consider this example: +# +# MyUnion = Union[int, str] +# +# def handle(x: MyUnion) -> int { +# if isinstance(x, int): +# return 1 +# elif isinstance(x, str): +# return 2 +# else: +# raise Exception('unreachable') +# +# Now suppose we add a new variant: +# +# MyUnion = Union[int, str, bytes] +# +# After doing this, we must remember ourselves to go and update the handle +# function to handle the new variant. +# +# With `assert_never` we can do better: +# +# // raise Exception('unreachable') +# return assert_never(x) +# +# Now, if we forget to handle the new variant, the type-checker will emit a +# compile-time error, instead of the runtime error we would have gotten +# previously. +# +# This also work for Enums (if you use `is` to compare) and Literals. +def assert_never(value: "NoReturn") -> "NoReturn": + assert False, f"Unhandled value: {value} ({type(value).__name__})" diff --git a/venv/lib/python3.10/site-packages/_pytest/config/__init__.py b/venv/lib/python3.10/site-packages/_pytest/config/__init__.py new file mode 100644 index 0000000..91ad3f0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/config/__init__.py @@ -0,0 +1,1693 @@ +"""Command line options, ini-file and conftest.py processing.""" +import argparse +import collections.abc +import copy +import enum +import inspect +import os +import re +import shlex +import sys +import types +import warnings +from functools import lru_cache +from pathlib import Path +from textwrap import dedent +from types import TracebackType +from typing import Any +from typing import Callable +from typing import cast +from typing import Dict +from typing import Generator +from typing import IO +from typing import Iterable +from typing import Iterator +from typing import List +from typing import Optional +from typing import Sequence +from typing import Set +from typing import TextIO +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import Union + +import attr +from pluggy import HookimplMarker +from pluggy import HookspecMarker +from pluggy import PluginManager + +import _pytest._code +import _pytest.deprecated +import _pytest.hookspec +from .exceptions import PrintHelp as PrintHelp +from .exceptions import UsageError as UsageError +from .findpaths import determine_setup +from _pytest._code import ExceptionInfo +from _pytest._code import filter_traceback +from _pytest._io import TerminalWriter +from _pytest.compat import final +from _pytest.compat import importlib_metadata +from _pytest.outcomes import fail +from _pytest.outcomes import Skipped +from _pytest.pathlib import absolutepath +from _pytest.pathlib import bestrelpath +from _pytest.pathlib import import_path +from _pytest.pathlib import ImportMode +from _pytest.pathlib import resolve_package_path +from _pytest.stash import Stash +from _pytest.warning_types import PytestConfigWarning + +if TYPE_CHECKING: + + from _pytest._code.code import _TracebackStyle + from _pytest.terminal import TerminalReporter + from .argparsing import Argument + + +_PluggyPlugin = object +"""A type to represent plugin objects. + +Plugins can be any namespace, so we can't narrow it down much, but we use an +alias to make the intent clear. + +Ideally this type would be provided by pluggy itself. +""" + + +hookimpl = HookimplMarker("pytest") +hookspec = HookspecMarker("pytest") + + +@final +class ExitCode(enum.IntEnum): + """Encodes the valid exit codes by pytest. + + Currently users and plugins may supply other exit codes as well. + + .. 
versionadded:: 5.0 + """ + + #: Tests passed. + OK = 0 + #: Tests failed. + TESTS_FAILED = 1 + #: pytest was interrupted. + INTERRUPTED = 2 + #: An internal error got in the way. + INTERNAL_ERROR = 3 + #: pytest was misused. + USAGE_ERROR = 4 + #: pytest couldn't find tests. + NO_TESTS_COLLECTED = 5 + + +class ConftestImportFailure(Exception): + def __init__( + self, + path: Path, + excinfo: Tuple[Type[Exception], Exception, TracebackType], + ) -> None: + super().__init__(path, excinfo) + self.path = path + self.excinfo = excinfo + + def __str__(self) -> str: + return "{}: {} (from {})".format( + self.excinfo[0].__name__, self.excinfo[1], self.path + ) + + +def filter_traceback_for_conftest_import_failure( + entry: _pytest._code.TracebackEntry, +) -> bool: + """Filter tracebacks entries which point to pytest internals or importlib. + + Make a special case for importlib because we use it to import test modules and conftest files + in _pytest.pathlib.import_path. + """ + return filter_traceback(entry) and "importlib" not in str(entry.path).split(os.sep) + + +def main( + args: Optional[Union[List[str], "os.PathLike[str]"]] = None, + plugins: Optional[Sequence[Union[str, _PluggyPlugin]]] = None, +) -> Union[int, ExitCode]: + """Perform an in-process test run. + + :param args: List of command line arguments. + :param plugins: List of plugin objects to be auto-registered during initialization. + + :returns: An exit code. + """ + try: + try: + config = _prepareconfig(args, plugins) + except ConftestImportFailure as e: + exc_info = ExceptionInfo.from_exc_info(e.excinfo) + tw = TerminalWriter(sys.stderr) + tw.line(f"ImportError while loading conftest '{e.path}'.", red=True) + exc_info.traceback = exc_info.traceback.filter( + filter_traceback_for_conftest_import_failure + ) + exc_repr = ( + exc_info.getrepr(style="short", chain=False) + if exc_info.traceback + else exc_info.exconly() + ) + formatted_tb = str(exc_repr) + for line in formatted_tb.splitlines(): + tw.line(line.rstrip(), red=True) + return ExitCode.USAGE_ERROR + else: + try: + ret: Union[ExitCode, int] = config.hook.pytest_cmdline_main( + config=config + ) + try: + return ExitCode(ret) + except ValueError: + return ret + finally: + config._ensure_unconfigure() + except UsageError as e: + tw = TerminalWriter(sys.stderr) + for msg in e.args: + tw.line(f"ERROR: {msg}\n", red=True) + return ExitCode.USAGE_ERROR + + +def console_main() -> int: + """The CLI entry point of pytest. + + This function is not meant for programmable use; use `main()` instead. + """ + # https://docs.python.org/3/library/signal.html#note-on-sigpipe + try: + code = main() + sys.stdout.flush() + return code + except BrokenPipeError: + # Python flushes standard streams on exit; redirect remaining output + # to devnull to avoid another BrokenPipeError at shutdown + devnull = os.open(os.devnull, os.O_WRONLY) + os.dup2(devnull, sys.stdout.fileno()) + return 1 # Python exits with error code 1 on EPIPE + + +class cmdline: # compatibility namespace + main = staticmethod(main) + + +def filename_arg(path: str, optname: str) -> str: + """Argparse type validator for filename arguments. + + :path: Path of filename. + :optname: Name of the option. + """ + if os.path.isdir(path): + raise UsageError(f"{optname} must be a filename, given: {path}") + return path + + +def directory_arg(path: str, optname: str) -> str: + """Argparse type validator for directory arguments. + + :path: Path of directory. + :optname: Name of the option. 
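+
+    Illustrative usage (editor's sketch; the option name and values are
+    examples only)::
+
+        directory_arg("/tmp", "--confcutdir")      # returns "/tmp"
+        directory_arg("setup.py", "--confcutdir")  # raises UsageError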
+ """ + if not os.path.isdir(path): + raise UsageError(f"{optname} must be a directory, given: {path}") + return path + + +# Plugins that cannot be disabled via "-p no:X" currently. +essential_plugins = ( + "mark", + "main", + "runner", + "fixtures", + "helpconfig", # Provides -p. +) + +default_plugins = essential_plugins + ( + "python", + "terminal", + "debugging", + "unittest", + "capture", + "skipping", + "legacypath", + "tmpdir", + "monkeypatch", + "recwarn", + "pastebin", + "nose", + "assertion", + "junitxml", + "doctest", + "cacheprovider", + "freeze_support", + "setuponly", + "setupplan", + "stepwise", + "warnings", + "logging", + "reports", + "python_path", + *(["unraisableexception", "threadexception"] if sys.version_info >= (3, 8) else []), + "faulthandler", +) + +builtin_plugins = set(default_plugins) +builtin_plugins.add("pytester") +builtin_plugins.add("pytester_assertions") + + +def get_config( + args: Optional[List[str]] = None, + plugins: Optional[Sequence[Union[str, _PluggyPlugin]]] = None, +) -> "Config": + # subsequent calls to main will create a fresh instance + pluginmanager = PytestPluginManager() + config = Config( + pluginmanager, + invocation_params=Config.InvocationParams( + args=args or (), + plugins=plugins, + dir=Path.cwd(), + ), + ) + + if args is not None: + # Handle any "-p no:plugin" args. + pluginmanager.consider_preparse(args, exclude_only=True) + + for spec in default_plugins: + pluginmanager.import_plugin(spec) + + return config + + +def get_plugin_manager() -> "PytestPluginManager": + """Obtain a new instance of the + :py:class:`pytest.PytestPluginManager`, with default plugins + already loaded. + + This function can be used by integration with other tools, like hooking + into pytest to run tests into an IDE. + """ + return get_config().pluginmanager + + +def _prepareconfig( + args: Optional[Union[List[str], "os.PathLike[str]"]] = None, + plugins: Optional[Sequence[Union[str, _PluggyPlugin]]] = None, +) -> "Config": + if args is None: + args = sys.argv[1:] + elif isinstance(args, os.PathLike): + args = [os.fspath(args)] + elif not isinstance(args, list): + msg = "`args` parameter expected to be a list of strings, got: {!r} (type: {})" + raise TypeError(msg.format(args, type(args))) + + config = get_config(args, plugins) + pluginmanager = config.pluginmanager + try: + if plugins: + for plugin in plugins: + if isinstance(plugin, str): + pluginmanager.consider_pluginarg(plugin) + else: + pluginmanager.register(plugin) + config = pluginmanager.hook.pytest_cmdline_parse( + pluginmanager=pluginmanager, args=args + ) + return config + except BaseException: + config._ensure_unconfigure() + raise + + +def _get_directory(path: Path) -> Path: + """Get the directory of a path - itself if already a directory.""" + if path.is_file(): + return path.parent + else: + return path + + +@final +class PytestPluginManager(PluginManager): + """A :py:class:`pluggy.PluginManager ` with + additional pytest-specific functionality: + + * Loading plugins from the command line, ``PYTEST_PLUGINS`` env variable and + ``pytest_plugins`` global variables found in plugins being loaded. + * ``conftest.py`` loading during start-up. + """ + + def __init__(self) -> None: + import _pytest.assertion + + super().__init__("pytest") + + # -- State related to local conftest plugins. + # All loaded conftest modules. + self._conftest_plugins: Set[types.ModuleType] = set() + # All conftest modules applicable for a directory. 
+ # This includes the directory's own conftest modules as well + # as those of its parent directories. + self._dirpath2confmods: Dict[Path, List[types.ModuleType]] = {} + # Cutoff directory above which conftests are no longer discovered. + self._confcutdir: Optional[Path] = None + # If set, conftest loading is skipped. + self._noconftest = False + + # _getconftestmodules()'s call to _get_directory() causes a stat + # storm when it's called potentially thousands of times in a test + # session (#9478), often with the same path, so cache it. + self._get_directory = lru_cache(256)(_get_directory) + + self._duplicatepaths: Set[Path] = set() + + # plugins that were explicitly skipped with pytest.skip + # list of (module name, skip reason) + # previously we would issue a warning when a plugin was skipped, but + # since we refactored warnings as first citizens of Config, they are + # just stored here to be used later. + self.skipped_plugins: List[Tuple[str, str]] = [] + + self.add_hookspecs(_pytest.hookspec) + self.register(self) + if os.environ.get("PYTEST_DEBUG"): + err: IO[str] = sys.stderr + encoding: str = getattr(err, "encoding", "utf8") + try: + err = open( + os.dup(err.fileno()), + mode=err.mode, + buffering=1, + encoding=encoding, + ) + except Exception: + pass + self.trace.root.setwriter(err.write) + self.enable_tracing() + + # Config._consider_importhook will set a real object if required. + self.rewrite_hook = _pytest.assertion.DummyRewriteHook() + # Used to know when we are importing conftests after the pytest_configure stage. + self._configured = False + + def parse_hookimpl_opts(self, plugin: _PluggyPlugin, name: str): + # pytest hooks are always prefixed with "pytest_", + # so we avoid accessing possibly non-readable attributes + # (see issue #1073). + if not name.startswith("pytest_"): + return + # Ignore names which can not be hooks. + if name == "pytest_plugins": + return + + method = getattr(plugin, name) + opts = super().parse_hookimpl_opts(plugin, name) + + # Consider only actual functions for hooks (#3775). + if not inspect.isroutine(method): + return + + # Collect unmarked hooks as long as they have the `pytest_' prefix. 
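+ # E.g. a plain ``def pytest_collection_modifyitems(items): ...`` in a
+ # conftest.py carries no hookimpl marks, so it falls through to the
+ # default-options path below.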
+ if opts is None and name.startswith("pytest_"): + opts = {} + if opts is not None: + # TODO: DeprecationWarning, people should use hookimpl + # https://github.com/pytest-dev/pytest/issues/4562 + known_marks = {m.name for m in getattr(method, "pytestmark", [])} + + for name in ("tryfirst", "trylast", "optionalhook", "hookwrapper"): + opts.setdefault(name, hasattr(method, name) or name in known_marks) + return opts + + def parse_hookspec_opts(self, module_or_class, name: str): + opts = super().parse_hookspec_opts(module_or_class, name) + if opts is None: + method = getattr(module_or_class, name) + + if name.startswith("pytest_"): + # todo: deprecate hookspec hacks + # https://github.com/pytest-dev/pytest/issues/4562 + known_marks = {m.name for m in getattr(method, "pytestmark", [])} + opts = { + "firstresult": hasattr(method, "firstresult") + or "firstresult" in known_marks, + "historic": hasattr(method, "historic") + or "historic" in known_marks, + } + return opts + + def register( + self, plugin: _PluggyPlugin, name: Optional[str] = None + ) -> Optional[str]: + if name in _pytest.deprecated.DEPRECATED_EXTERNAL_PLUGINS: + warnings.warn( + PytestConfigWarning( + "{} plugin has been merged into the core, " + "please remove it from your requirements.".format( + name.replace("_", "-") + ) + ) + ) + return None + ret: Optional[str] = super().register(plugin, name) + if ret: + self.hook.pytest_plugin_registered.call_historic( + kwargs=dict(plugin=plugin, manager=self) + ) + + if isinstance(plugin, types.ModuleType): + self.consider_module(plugin) + return ret + + def getplugin(self, name: str): + # Support deprecated naming because plugins (xdist e.g.) use it. + plugin: Optional[_PluggyPlugin] = self.get_plugin(name) + return plugin + + def hasplugin(self, name: str) -> bool: + """Return whether a plugin with the given name is registered.""" + return bool(self.get_plugin(name)) + + def pytest_configure(self, config: "Config") -> None: + """:meta private:""" + # XXX now that the pluginmanager exposes hookimpl(tryfirst...) + # we should remove tryfirst/trylast as markers. + config.addinivalue_line( + "markers", + "tryfirst: mark a hook implementation function such that the " + "plugin machinery will try to call it first/as early as possible.", + ) + config.addinivalue_line( + "markers", + "trylast: mark a hook implementation function such that the " + "plugin machinery will try to call it last/as late as possible.", + ) + self._configured = True + + # + # Internal API for local conftest plugin handling. + # + def _set_initial_conftests( + self, namespace: argparse.Namespace, rootpath: Path + ) -> None: + """Load initial conftest files given a preparsed "namespace". + + As conftest files may add their own command line options which have + arguments ('--my-opt somepath') we might get some false positives. + All builtin and 3rd party plugins will have been loaded, however, so + common options will not confuse our logic here. 
+ """ + current = Path.cwd() + self._confcutdir = ( + absolutepath(current / namespace.confcutdir) + if namespace.confcutdir + else None + ) + self._noconftest = namespace.noconftest + self._using_pyargs = namespace.pyargs + testpaths = namespace.file_or_dir + foundanchor = False + for testpath in testpaths: + path = str(testpath) + # remove node-id syntax + i = path.find("::") + if i != -1: + path = path[:i] + anchor = absolutepath(current / path) + if anchor.exists(): # we found some file object + self._try_load_conftest(anchor, namespace.importmode, rootpath) + foundanchor = True + if not foundanchor: + self._try_load_conftest(current, namespace.importmode, rootpath) + + def _is_in_confcutdir(self, path: Path) -> bool: + """Whether a path is within the confcutdir. + + When false, should not load conftest. + """ + if self._confcutdir is None: + return True + return path not in self._confcutdir.parents + + def _try_load_conftest( + self, anchor: Path, importmode: Union[str, ImportMode], rootpath: Path + ) -> None: + self._getconftestmodules(anchor, importmode, rootpath) + # let's also consider test* subdirs + if anchor.is_dir(): + for x in anchor.glob("test*"): + if x.is_dir(): + self._getconftestmodules(x, importmode, rootpath) + + def _getconftestmodules( + self, path: Path, importmode: Union[str, ImportMode], rootpath: Path + ) -> Sequence[types.ModuleType]: + if self._noconftest: + return [] + + directory = self._get_directory(path) + + # Optimization: avoid repeated searches in the same directory. + # Assumes always called with same importmode and rootpath. + existing_clist = self._dirpath2confmods.get(directory) + if existing_clist is not None: + return existing_clist + + # XXX these days we may rather want to use config.rootpath + # and allow users to opt into looking into the rootdir parent + # directories instead of requiring to specify confcutdir. 
+ clist = [] + for parent in reversed((directory, *directory.parents)): + if self._is_in_confcutdir(parent): + conftestpath = parent / "conftest.py" + if conftestpath.is_file(): + mod = self._importconftest(conftestpath, importmode, rootpath) + clist.append(mod) + self._dirpath2confmods[directory] = clist + return clist + + def _rget_with_confmod( + self, + name: str, + path: Path, + importmode: Union[str, ImportMode], + rootpath: Path, + ) -> Tuple[types.ModuleType, Any]: + modules = self._getconftestmodules(path, importmode, rootpath=rootpath) + for mod in reversed(modules): + try: + return mod, getattr(mod, name) + except AttributeError: + continue + raise KeyError(name) + + def _importconftest( + self, conftestpath: Path, importmode: Union[str, ImportMode], rootpath: Path + ) -> types.ModuleType: + existing = self.get_plugin(str(conftestpath)) + if existing is not None: + return cast(types.ModuleType, existing) + + pkgpath = resolve_package_path(conftestpath) + if pkgpath is None: + _ensure_removed_sysmodule(conftestpath.stem) + + try: + mod = import_path(conftestpath, mode=importmode, root=rootpath) + except Exception as e: + assert e.__traceback__ is not None + exc_info = (type(e), e, e.__traceback__) + raise ConftestImportFailure(conftestpath, exc_info) from e + + self._check_non_top_pytest_plugins(mod, conftestpath) + + self._conftest_plugins.add(mod) + dirpath = conftestpath.parent + if dirpath in self._dirpath2confmods: + for path, mods in self._dirpath2confmods.items(): + if dirpath in path.parents or path == dirpath: + assert mod not in mods + mods.append(mod) + self.trace(f"loading conftestmodule {mod!r}") + self.consider_conftest(mod) + return mod + + def _check_non_top_pytest_plugins( + self, + mod: types.ModuleType, + conftestpath: Path, + ) -> None: + if ( + hasattr(mod, "pytest_plugins") + and self._configured + and not self._using_pyargs + ): + msg = ( + "Defining 'pytest_plugins' in a non-top-level conftest is no longer supported:\n" + "It affects the entire test suite instead of just below the conftest as expected.\n" + " {}\n" + "Please move it to a top level conftest file at the rootdir:\n" + " {}\n" + "For more information, visit:\n" + " https://docs.pytest.org/en/stable/deprecations.html#pytest-plugins-in-non-top-level-conftest-files" + ) + fail(msg.format(conftestpath, self._confcutdir), pytrace=False) + + # + # API for bootstrapping plugin loading + # + # + + def consider_preparse( + self, args: Sequence[str], *, exclude_only: bool = False + ) -> None: + """:meta private:""" + i = 0 + n = len(args) + while i < n: + opt = args[i] + i += 1 + if isinstance(opt, str): + if opt == "-p": + try: + parg = args[i] + except IndexError: + return + i += 1 + elif opt.startswith("-p"): + parg = opt[2:] + else: + continue + if exclude_only and not parg.startswith("no:"): + continue + self.consider_pluginarg(parg) + + def consider_pluginarg(self, arg: str) -> None: + """:meta private:""" + if arg.startswith("no:"): + name = arg[3:] + if name in essential_plugins: + raise UsageError("plugin %s cannot be disabled" % name) + + # PR #4304: remove stepwise if cacheprovider is blocked. + if name == "cacheprovider": + self.set_blocked("stepwise") + self.set_blocked("pytest_stepwise") + + self.set_blocked(name) + if not name.startswith("pytest_"): + self.set_blocked("pytest_" + name) + else: + name = arg + # Unblock the plugin. None indicates that it has been blocked. + # There is no interface with pluggy for this. 
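+ # The -1 sentinel distinguishes "never registered" from "explicitly
+ # blocked" (stored as None): e.g. ``-p no:xdist -p xdist`` first blocks
+ # the plugin, then the second argument unblocks and imports it.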
+ if self._name2plugin.get(name, -1) is None: + del self._name2plugin[name] + if not name.startswith("pytest_"): + if self._name2plugin.get("pytest_" + name, -1) is None: + del self._name2plugin["pytest_" + name] + self.import_plugin(arg, consider_entry_points=True) + + def consider_conftest(self, conftestmodule: types.ModuleType) -> None: + """:meta private:""" + self.register(conftestmodule, name=conftestmodule.__file__) + + def consider_env(self) -> None: + """:meta private:""" + self._import_plugin_specs(os.environ.get("PYTEST_PLUGINS")) + + def consider_module(self, mod: types.ModuleType) -> None: + """:meta private:""" + self._import_plugin_specs(getattr(mod, "pytest_plugins", [])) + + def _import_plugin_specs( + self, spec: Union[None, types.ModuleType, str, Sequence[str]] + ) -> None: + plugins = _get_plugin_specs_as_list(spec) + for import_spec in plugins: + self.import_plugin(import_spec) + + def import_plugin(self, modname: str, consider_entry_points: bool = False) -> None: + """Import a plugin with ``modname``. + + If ``consider_entry_points`` is True, entry point names are also + considered to find a plugin. + """ + # Most often modname refers to builtin modules, e.g. "pytester", + # "terminal" or "capture". Those plugins are registered under their + # basename for historic purposes but must be imported with the + # _pytest prefix. + assert isinstance(modname, str), ( + "module name as text required, got %r" % modname + ) + if self.is_blocked(modname) or self.get_plugin(modname) is not None: + return + + importspec = "_pytest." + modname if modname in builtin_plugins else modname + self.rewrite_hook.mark_rewrite(importspec) + + if consider_entry_points: + loaded = self.load_setuptools_entrypoints("pytest11", name=modname) + if loaded: + return + + try: + __import__(importspec) + except ImportError as e: + raise ImportError( + f'Error importing plugin "{modname}": {e.args[0]}' + ).with_traceback(e.__traceback__) from e + + except Skipped as e: + self.skipped_plugins.append((modname, e.msg or "")) + else: + mod = sys.modules[importspec] + self.register(mod, modname) + + +def _get_plugin_specs_as_list( + specs: Union[None, types.ModuleType, str, Sequence[str]] +) -> List[str]: + """Parse a plugins specification into a list of plugin names.""" + # None means empty. + if specs is None: + return [] + # Workaround for #3899 - a submodule which happens to be called "pytest_plugins". + if isinstance(specs, types.ModuleType): + return [] + # Comma-separated list. + if isinstance(specs, str): + return specs.split(",") if specs else [] + # Direct specification. + if isinstance(specs, collections.abc.Sequence): + return list(specs) + raise UsageError( + "Plugins may be specified as a sequence or a ','-separated string of plugin names. Got: %r" + % specs + ) + + +def _ensure_removed_sysmodule(modname: str) -> None: + try: + del sys.modules[modname] + except KeyError: + pass + + +class Notset: + def __repr__(self): + return "" + + +notset = Notset() + + +def _iter_rewritable_modules(package_files: Iterable[str]) -> Iterator[str]: + """Given an iterable of file names in a source distribution, return the "names" that should + be marked for assertion rewrite. + + For example the package "pytest_mock/__init__.py" should be added as "pytest_mock" in + the assertion rewrite mechanism. + + This function has to deal with dist-info based distributions and egg based distributions + (which are still very much in use for "editable" installs). 
+ + Here are the file names as seen in a dist-info based distribution: + + pytest_mock/__init__.py + pytest_mock/_version.py + pytest_mock/plugin.py + pytest_mock.egg-info/PKG-INFO + + Here are the file names as seen in an egg based distribution: + + src/pytest_mock/__init__.py + src/pytest_mock/_version.py + src/pytest_mock/plugin.py + src/pytest_mock.egg-info/PKG-INFO + LICENSE + setup.py + + We have to take in account those two distribution flavors in order to determine which + names should be considered for assertion rewriting. + + More information: + https://github.com/pytest-dev/pytest-mock/issues/167 + """ + package_files = list(package_files) + seen_some = False + for fn in package_files: + is_simple_module = "/" not in fn and fn.endswith(".py") + is_package = fn.count("/") == 1 and fn.endswith("__init__.py") + if is_simple_module: + module_name, _ = os.path.splitext(fn) + # we ignore "setup.py" at the root of the distribution + if module_name != "setup": + seen_some = True + yield module_name + elif is_package: + package_name = os.path.dirname(fn) + seen_some = True + yield package_name + + if not seen_some: + # At this point we did not find any packages or modules suitable for assertion + # rewriting, so we try again by stripping the first path component (to account for + # "src" based source trees for example). + # This approach lets us have the common case continue to be fast, as egg-distributions + # are rarer. + new_package_files = [] + for fn in package_files: + parts = fn.split("/") + new_fn = "/".join(parts[1:]) + if new_fn: + new_package_files.append(new_fn) + if new_package_files: + yield from _iter_rewritable_modules(new_package_files) + + +def _args_converter(args: Iterable[str]) -> Tuple[str, ...]: + return tuple(args) + + +@final +class Config: + """Access to configuration values, pluginmanager and plugin hooks. + + :param PytestPluginManager pluginmanager: + A pytest PluginManager. + + :param InvocationParams invocation_params: + Object containing parameters regarding the :func:`pytest.main` + invocation. + """ + + @final + @attr.s(frozen=True, auto_attribs=True) + class InvocationParams: + """Holds parameters passed during :func:`pytest.main`. + + The object attributes are read-only. + + .. versionadded:: 5.1 + + .. note:: + + Note that the environment variable ``PYTEST_ADDOPTS`` and the ``addopts`` + ini option are handled by pytest, not being included in the ``args`` attribute. + + Plugins accessing ``InvocationParams`` must be aware of that. + """ + + args: Tuple[str, ...] = attr.ib(converter=_args_converter) + """The command-line arguments as passed to :func:`pytest.main`.""" + plugins: Optional[Sequence[Union[str, _PluggyPlugin]]] + """Extra plugins, might be `None`.""" + dir: Path + """The directory from which :func:`pytest.main` was invoked.""" + + def __init__( + self, + pluginmanager: PytestPluginManager, + *, + invocation_params: Optional[InvocationParams] = None, + ) -> None: + from .argparsing import Parser, FILE_OR_DIR + + if invocation_params is None: + invocation_params = self.InvocationParams( + args=(), plugins=None, dir=Path.cwd() + ) + + self.option = argparse.Namespace() + """Access to command line option as attributes. + + :type: argparse.Namespace + """ + + self.invocation_params = invocation_params + """The parameters with which pytest was invoked. 
+ + :type: InvocationParams + """ + + _a = FILE_OR_DIR + self._parser = Parser( + usage=f"%(prog)s [options] [{_a}] [{_a}] [...]", + processopt=self._processopt, + _ispytest=True, + ) + self.pluginmanager = pluginmanager + """The plugin manager handles plugin registration and hook invocation. + + :type: PytestPluginManager + """ + + self.stash = Stash() + """A place where plugins can store information on the config for their + own use. + + :type: Stash + """ + # Deprecated alias. Was never public. Can be removed in a few releases. + self._store = self.stash + + from .compat import PathAwareHookProxy + + self.trace = self.pluginmanager.trace.root.get("config") + self.hook = PathAwareHookProxy(self.pluginmanager.hook) + self._inicache: Dict[str, Any] = {} + self._override_ini: Sequence[str] = () + self._opt2dest: Dict[str, str] = {} + self._cleanup: List[Callable[[], None]] = [] + self.pluginmanager.register(self, "pytestconfig") + self._configured = False + self.hook.pytest_addoption.call_historic( + kwargs=dict(parser=self._parser, pluginmanager=self.pluginmanager) + ) + + if TYPE_CHECKING: + from _pytest.cacheprovider import Cache + + self.cache: Optional[Cache] = None + + @property + def rootpath(self) -> Path: + """The path to the :ref:`rootdir `. + + :type: pathlib.Path + + .. versionadded:: 6.1 + """ + return self._rootpath + + @property + def inipath(self) -> Optional[Path]: + """The path to the :ref:`configfile `. + + :type: Optional[pathlib.Path] + + .. versionadded:: 6.1 + """ + return self._inipath + + def add_cleanup(self, func: Callable[[], None]) -> None: + """Add a function to be called when the config object gets out of + use (usually coinciding with pytest_unconfigure).""" + self._cleanup.append(func) + + def _do_configure(self) -> None: + assert not self._configured + self._configured = True + with warnings.catch_warnings(): + warnings.simplefilter("default") + self.hook.pytest_configure.call_historic(kwargs=dict(config=self)) + + def _ensure_unconfigure(self) -> None: + if self._configured: + self._configured = False + self.hook.pytest_unconfigure(config=self) + self.hook.pytest_configure._call_history = [] + while self._cleanup: + fin = self._cleanup.pop() + fin() + + def get_terminal_writer(self) -> TerminalWriter: + terminalreporter: TerminalReporter = self.pluginmanager.get_plugin( + "terminalreporter" + ) + return terminalreporter._tw + + def pytest_cmdline_parse( + self, pluginmanager: PytestPluginManager, args: List[str] + ) -> "Config": + try: + self.parse(args) + except UsageError: + + # Handle --version and --help here in a minimal fashion. + # This gets done via helpconfig normally, but its + # pytest_cmdline_main is not called in case of errors. 
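+ # E.g. ``pytest --help`` combined with an invalid option still prints
+ # the help text (marked as minimal) before the UsageError is re-raised
+ # below.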
+ if getattr(self.option, "version", False) or "--version" in args: + from _pytest.helpconfig import showversion + + showversion(self) + elif ( + getattr(self.option, "help", False) or "--help" in args or "-h" in args + ): + self._parser._getparser().print_help() + sys.stdout.write( + "\nNOTE: displaying only minimal help due to UsageError.\n\n" + ) + + raise + + return self + + def notify_exception( + self, + excinfo: ExceptionInfo[BaseException], + option: Optional[argparse.Namespace] = None, + ) -> None: + if option and getattr(option, "fulltrace", False): + style: _TracebackStyle = "long" + else: + style = "native" + excrepr = excinfo.getrepr( + funcargs=True, showlocals=getattr(option, "showlocals", False), style=style + ) + res = self.hook.pytest_internalerror(excrepr=excrepr, excinfo=excinfo) + if not any(res): + for line in str(excrepr).split("\n"): + sys.stderr.write("INTERNALERROR> %s\n" % line) + sys.stderr.flush() + + def cwd_relative_nodeid(self, nodeid: str) -> str: + # nodeid's are relative to the rootpath, compute relative to cwd. + if self.invocation_params.dir != self.rootpath: + fullpath = self.rootpath / nodeid + nodeid = bestrelpath(self.invocation_params.dir, fullpath) + return nodeid + + @classmethod + def fromdictargs(cls, option_dict, args) -> "Config": + """Constructor usable for subprocesses.""" + config = get_config(args) + config.option.__dict__.update(option_dict) + config.parse(args, addopts=False) + for x in config.option.plugins: + config.pluginmanager.consider_pluginarg(x) + return config + + def _processopt(self, opt: "Argument") -> None: + for name in opt._short_opts + opt._long_opts: + self._opt2dest[name] = opt.dest + + if hasattr(opt, "default"): + if not hasattr(self.option, opt.dest): + setattr(self.option, opt.dest, opt.default) + + @hookimpl(trylast=True) + def pytest_load_initial_conftests(self, early_config: "Config") -> None: + self.pluginmanager._set_initial_conftests( + early_config.known_args_namespace, rootpath=early_config.rootpath + ) + + def _initini(self, args: Sequence[str]) -> None: + ns, unknown_args = self._parser.parse_known_and_unknown_args( + args, namespace=copy.copy(self.option) + ) + rootpath, inipath, inicfg = determine_setup( + ns.inifilename, + ns.file_or_dir + unknown_args, + rootdir_cmd_arg=ns.rootdir or None, + config=self, + ) + self._rootpath = rootpath + self._inipath = inipath + self.inicfg = inicfg + self._parser.extra_info["rootdir"] = str(self.rootpath) + self._parser.extra_info["inifile"] = str(self.inipath) + self._parser.addini("addopts", "extra command line options", "args") + self._parser.addini("minversion", "minimally required pytest version") + self._parser.addini( + "required_plugins", + "plugins that must be present for pytest to run", + type="args", + default=[], + ) + self._override_ini = ns.override_ini or () + + def _consider_importhook(self, args: Sequence[str]) -> None: + """Install the PEP 302 import hook if using assertion rewriting. + + Needs to parse the --assert= option from the commandline + and find all the installed plugins to mark them for rewriting + by the importhook. 
+ """ + ns, unknown_args = self._parser.parse_known_and_unknown_args(args) + mode = getattr(ns, "assertmode", "plain") + if mode == "rewrite": + import _pytest.assertion + + try: + hook = _pytest.assertion.install_importhook(self) + except SystemError: + mode = "plain" + else: + self._mark_plugins_for_rewrite(hook) + self._warn_about_missing_assertion(mode) + + def _mark_plugins_for_rewrite(self, hook) -> None: + """Given an importhook, mark for rewrite any top-level + modules or packages in the distribution package for + all pytest plugins.""" + self.pluginmanager.rewrite_hook = hook + + if os.environ.get("PYTEST_DISABLE_PLUGIN_AUTOLOAD"): + # We don't autoload from setuptools entry points, no need to continue. + return + + package_files = ( + str(file) + for dist in importlib_metadata.distributions() + if any(ep.group == "pytest11" for ep in dist.entry_points) + for file in dist.files or [] + ) + + for name in _iter_rewritable_modules(package_files): + hook.mark_rewrite(name) + + def _validate_args(self, args: List[str], via: str) -> List[str]: + """Validate known args.""" + self._parser._config_source_hint = via # type: ignore + try: + self._parser.parse_known_and_unknown_args( + args, namespace=copy.copy(self.option) + ) + finally: + del self._parser._config_source_hint # type: ignore + + return args + + def _preparse(self, args: List[str], addopts: bool = True) -> None: + if addopts: + env_addopts = os.environ.get("PYTEST_ADDOPTS", "") + if len(env_addopts): + args[:] = ( + self._validate_args(shlex.split(env_addopts), "via PYTEST_ADDOPTS") + + args + ) + self._initini(args) + if addopts: + args[:] = ( + self._validate_args(self.getini("addopts"), "via addopts config") + args + ) + + self.known_args_namespace = self._parser.parse_known_args( + args, namespace=copy.copy(self.option) + ) + self._checkversion() + self._consider_importhook(args) + self.pluginmanager.consider_preparse(args, exclude_only=False) + if not os.environ.get("PYTEST_DISABLE_PLUGIN_AUTOLOAD"): + # Don't autoload from setuptools entry point. Only explicitly specified + # plugins are going to be loaded. + self.pluginmanager.load_setuptools_entrypoints("pytest11") + self.pluginmanager.consider_env() + + self.known_args_namespace = self._parser.parse_known_args( + args, namespace=copy.copy(self.known_args_namespace) + ) + + self._validate_plugins() + self._warn_about_skipped_plugins() + + if self.known_args_namespace.strict: + self.issue_config_time_warning( + _pytest.deprecated.STRICT_OPTION, stacklevel=2 + ) + + if self.known_args_namespace.confcutdir is None and self.inipath is not None: + confcutdir = str(self.inipath.parent) + self.known_args_namespace.confcutdir = confcutdir + try: + self.hook.pytest_load_initial_conftests( + early_config=self, args=args, parser=self._parser + ) + except ConftestImportFailure as e: + if self.known_args_namespace.help or self.known_args_namespace.version: + # we don't want to prevent --help/--version to work + # so just let is pass and print a warning at the end + self.issue_config_time_warning( + PytestConfigWarning(f"could not load initial conftests: {e.path}"), + stacklevel=2, + ) + else: + raise + + @hookimpl(hookwrapper=True) + def pytest_collection(self) -> Generator[None, None, None]: + # Validate invalid ini keys after collection is done so we take in account + # options added by late-loading conftest files. 
+ yield + self._validate_config_options() + + def _checkversion(self) -> None: + import pytest + + minver = self.inicfg.get("minversion", None) + if minver: + # Imported lazily to improve start-up time. + from packaging.version import Version + + if not isinstance(minver, str): + raise pytest.UsageError( + "%s: 'minversion' must be a single value" % self.inipath + ) + + if Version(minver) > Version(pytest.__version__): + raise pytest.UsageError( + "%s: 'minversion' requires pytest-%s, actual pytest-%s'" + % ( + self.inipath, + minver, + pytest.__version__, + ) + ) + + def _validate_config_options(self) -> None: + for key in sorted(self._get_unknown_ini_keys()): + self._warn_or_fail_if_strict(f"Unknown config option: {key}\n") + + def _validate_plugins(self) -> None: + required_plugins = sorted(self.getini("required_plugins")) + if not required_plugins: + return + + # Imported lazily to improve start-up time. + from packaging.version import Version + from packaging.requirements import InvalidRequirement, Requirement + + plugin_info = self.pluginmanager.list_plugin_distinfo() + plugin_dist_info = {dist.project_name: dist.version for _, dist in plugin_info} + + missing_plugins = [] + for required_plugin in required_plugins: + try: + req = Requirement(required_plugin) + except InvalidRequirement: + missing_plugins.append(required_plugin) + continue + + if req.name not in plugin_dist_info: + missing_plugins.append(required_plugin) + elif not req.specifier.contains( + Version(plugin_dist_info[req.name]), prereleases=True + ): + missing_plugins.append(required_plugin) + + if missing_plugins: + raise UsageError( + "Missing required plugins: {}".format(", ".join(missing_plugins)), + ) + + def _warn_or_fail_if_strict(self, message: str) -> None: + if self.known_args_namespace.strict_config: + raise UsageError(message) + + self.issue_config_time_warning(PytestConfigWarning(message), stacklevel=3) + + def _get_unknown_ini_keys(self) -> List[str]: + parser_inicfg = self._parser._inidict + return [name for name in self.inicfg if name not in parser_inicfg] + + def parse(self, args: List[str], addopts: bool = True) -> None: + # Parse given cmdline arguments into this config object. + assert not hasattr( + self, "args" + ), "can only parse cmdline args at most once per Config object" + self.hook.pytest_addhooks.call_historic( + kwargs=dict(pluginmanager=self.pluginmanager) + ) + self._preparse(args, addopts=addopts) + # XXX deprecated hook: + self.hook.pytest_cmdline_preparse(config=self, args=args) + self._parser.after_preparse = True # type: ignore + try: + args = self._parser.parse_setoption( + args, self.option, namespace=self.option + ) + if not args: + if self.invocation_params.dir == self.rootpath: + args = self.getini("testpaths") + if not args: + args = [str(self.invocation_params.dir)] + self.args = args + except PrintHelp: + pass + + def issue_config_time_warning(self, warning: Warning, stacklevel: int) -> None: + """Issue and handle a warning during the "configure" stage. + + During ``pytest_configure`` we can't capture warnings using the ``catch_warnings_for_item`` + function because it is not possible to have hookwrappers around ``pytest_configure``. + + This function is mainly intended for plugins that need to issue warnings during + ``pytest_configure`` (or similar stages). + + :param warning: The warning instance. + :param stacklevel: stacklevel forwarded to warnings.warn. 
+ """ + if self.pluginmanager.is_blocked("warnings"): + return + + cmdline_filters = self.known_args_namespace.pythonwarnings or [] + config_filters = self.getini("filterwarnings") + + with warnings.catch_warnings(record=True) as records: + warnings.simplefilter("always", type(warning)) + apply_warning_filters(config_filters, cmdline_filters) + warnings.warn(warning, stacklevel=stacklevel) + + if records: + frame = sys._getframe(stacklevel - 1) + location = frame.f_code.co_filename, frame.f_lineno, frame.f_code.co_name + self.hook.pytest_warning_recorded.call_historic( + kwargs=dict( + warning_message=records[0], + when="config", + nodeid="", + location=location, + ) + ) + + def addinivalue_line(self, name: str, line: str) -> None: + """Add a line to an ini-file option. The option must have been + declared but might not yet be set in which case the line becomes + the first line in its value.""" + x = self.getini(name) + assert isinstance(x, list) + x.append(line) # modifies the cached list inline + + def getini(self, name: str): + """Return configuration value from an :ref:`ini file `. + + If the specified name hasn't been registered through a prior + :func:`parser.addini ` call (usually from a + plugin), a ValueError is raised. + """ + try: + return self._inicache[name] + except KeyError: + self._inicache[name] = val = self._getini(name) + return val + + # Meant for easy monkeypatching by legacypath plugin. + # Can be inlined back (with no cover removed) once legacypath is gone. + def _getini_unknown_type(self, name: str, type: str, value: Union[str, List[str]]): + msg = f"unknown configuration type: {type}" + raise ValueError(msg, value) # pragma: no cover + + def _getini(self, name: str): + try: + description, type, default = self._parser._inidict[name] + except KeyError as e: + raise ValueError(f"unknown configuration value: {name!r}") from e + override_value = self._get_override_ini_value(name) + if override_value is None: + try: + value = self.inicfg[name] + except KeyError: + if default is not None: + return default + if type is None: + return "" + return [] + else: + value = override_value + # Coerce the values based on types. + # + # Note: some coercions are only required if we are reading from .ini files, because + # the file format doesn't contain type information, but when reading from toml we will + # get either str or list of str values (see _parse_ini_config_from_pyproject_toml). + # For example: + # + # ini: + # a_line_list = "tests acceptance" + # in this case, we need to split the string to obtain a list of strings. + # + # toml: + # a_line_list = ["tests", "acceptance"] + # in this case, we already have a list ready to use. + # + if type == "paths": + # TODO: This assert is probably not valid in all cases. 
+ assert self.inipath is not None + dp = self.inipath.parent + input_values = shlex.split(value) if isinstance(value, str) else value + return [dp / x for x in input_values] + elif type == "args": + return shlex.split(value) if isinstance(value, str) else value + elif type == "linelist": + if isinstance(value, str): + return [t for t in map(lambda x: x.strip(), value.split("\n")) if t] + else: + return value + elif type == "bool": + return _strtobool(str(value).strip()) + elif type == "string": + return value + elif type is None: + return value + else: + return self._getini_unknown_type(name, type, value) + + def _getconftest_pathlist( + self, name: str, path: Path, rootpath: Path + ) -> Optional[List[Path]]: + try: + mod, relroots = self.pluginmanager._rget_with_confmod( + name, path, self.getoption("importmode"), rootpath + ) + except KeyError: + return None + assert mod.__file__ is not None + modpath = Path(mod.__file__).parent + values: List[Path] = [] + for relroot in relroots: + if isinstance(relroot, os.PathLike): + relroot = Path(relroot) + else: + relroot = relroot.replace("/", os.sep) + relroot = absolutepath(modpath / relroot) + values.append(relroot) + return values + + def _get_override_ini_value(self, name: str) -> Optional[str]: + value = None + # override_ini is a list of "ini=value" options. + # Always use the last item if multiple values are set for same ini-name, + # e.g. -o foo=bar1 -o foo=bar2 will set foo to bar2. + for ini_config in self._override_ini: + try: + key, user_ini_value = ini_config.split("=", 1) + except ValueError as e: + raise UsageError( + "-o/--override-ini expects option=value style (got: {!r}).".format( + ini_config + ) + ) from e + else: + if key == name: + value = user_ini_value + return value + + def getoption(self, name: str, default=notset, skip: bool = False): + """Return command line option value. + + :param name: Name of the option. You may also specify + the literal ``--OPT`` option instead of the "dest" option name. + :param default: Default value if no option of that name exists. + :param skip: If True, raise pytest.skip if option does not exists + or has a None value. + """ + name = self._opt2dest.get(name, name) + try: + val = getattr(self.option, name) + if val is None and skip: + raise AttributeError(name) + return val + except AttributeError as e: + if default is not notset: + return default + if skip: + import pytest + + pytest.skip(f"no {name!r} option found") + raise ValueError(f"no option named {name!r}") from e + + def getvalue(self, name: str, path=None): + """Deprecated, use getoption() instead.""" + return self.getoption(name) + + def getvalueorskip(self, name: str, path=None): + """Deprecated, use getoption(skip=True) instead.""" + return self.getoption(name, skip=True) + + def _warn_about_missing_assertion(self, mode: str) -> None: + if not _assertion_supported(): + if mode == "plain": + warning_text = ( + "ASSERTIONS ARE NOT EXECUTED" + " and FAILING TESTS WILL PASS. Are you" + " using python -O?" 
+ ) + else: + warning_text = ( + "assertions not in test modules or" + " plugins will be ignored" + " because assert statements are not executed " + "by the underlying Python interpreter " + "(are you using python -O?)\n" + ) + self.issue_config_time_warning( + PytestConfigWarning(warning_text), + stacklevel=3, + ) + + def _warn_about_skipped_plugins(self) -> None: + for module_name, msg in self.pluginmanager.skipped_plugins: + self.issue_config_time_warning( + PytestConfigWarning(f"skipped plugin {module_name!r}: {msg}"), + stacklevel=2, + ) + + +def _assertion_supported() -> bool: + try: + assert False + except AssertionError: + return True + else: + return False # type: ignore[unreachable] + + +def create_terminal_writer( + config: Config, file: Optional[TextIO] = None +) -> TerminalWriter: + """Create a TerminalWriter instance configured according to the options + in the config object. + + Every code which requires a TerminalWriter object and has access to a + config object should use this function. + """ + tw = TerminalWriter(file=file) + + if config.option.color == "yes": + tw.hasmarkup = True + elif config.option.color == "no": + tw.hasmarkup = False + + if config.option.code_highlight == "yes": + tw.code_highlight = True + elif config.option.code_highlight == "no": + tw.code_highlight = False + + return tw + + +def _strtobool(val: str) -> bool: + """Convert a string representation of truth to True or False. + + True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values + are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if + 'val' is anything else. + + .. note:: Copied from distutils.util. + """ + val = val.lower() + if val in ("y", "yes", "t", "true", "on", "1"): + return True + elif val in ("n", "no", "f", "false", "off", "0"): + return False + else: + raise ValueError(f"invalid truth value {val!r}") + + +@lru_cache(maxsize=50) +def parse_warning_filter( + arg: str, *, escape: bool +) -> Tuple["warnings._ActionKind", str, Type[Warning], str, int]: + """Parse a warnings filter string. + + This is copied from warnings._setoption with the following changes: + + * Does not apply the filter. + * Escaping is optional. + * Raises UsageError so we get nice error messages on failure. 
+ """ + __tracebackhide__ = True + error_template = dedent( + f"""\ + while parsing the following warning configuration: + + {arg} + + This error occurred: + + {{error}} + """ + ) + + parts = arg.split(":") + if len(parts) > 5: + doc_url = ( + "https://docs.python.org/3/library/warnings.html#describing-warning-filters" + ) + error = dedent( + f"""\ + Too many fields ({len(parts)}), expected at most 5 separated by colons: + + action:message:category:module:line + + For more information please consult: {doc_url} + """ + ) + raise UsageError(error_template.format(error=error)) + + while len(parts) < 5: + parts.append("") + action_, message, category_, module, lineno_ = (s.strip() for s in parts) + try: + action: "warnings._ActionKind" = warnings._getaction(action_) # type: ignore[attr-defined] + except warnings._OptionError as e: + raise UsageError(error_template.format(error=str(e))) + try: + category: Type[Warning] = _resolve_warning_category(category_) + except Exception: + exc_info = ExceptionInfo.from_current() + exception_text = exc_info.getrepr(style="native") + raise UsageError(error_template.format(error=exception_text)) + if message and escape: + message = re.escape(message) + if module and escape: + module = re.escape(module) + r"\Z" + if lineno_: + try: + lineno = int(lineno_) + if lineno < 0: + raise ValueError("number is negative") + except ValueError as e: + raise UsageError( + error_template.format(error=f"invalid lineno {lineno_!r}: {e}") + ) + else: + lineno = 0 + return action, message, category, module, lineno + + +def _resolve_warning_category(category: str) -> Type[Warning]: + """ + Copied from warnings._getcategory, but changed so it lets exceptions (specially ImportErrors) + propagate so we can get access to their tracebacks (#9218). + """ + __tracebackhide__ = True + if not category: + return Warning + + if "." not in category: + import builtins as m + + klass = category + else: + module, _, klass = category.rpartition(".") + m = __import__(module, None, None, [klass]) + cat = getattr(m, klass) + if not issubclass(cat, Warning): + raise UsageError(f"{cat} is not a Warning subclass") + return cast(Type[Warning], cat) + + +def apply_warning_filters( + config_filters: Iterable[str], cmdline_filters: Iterable[str] +) -> None: + """Applies pytest-configured filters to the warnings module""" + # Filters should have this precedence: cmdline options, config. + # Filters should be applied in the inverse order of precedence. 
+ for arg in config_filters: + warnings.filterwarnings(*parse_warning_filter(arg, escape=False)) + + for arg in cmdline_filters: + warnings.filterwarnings(*parse_warning_filter(arg, escape=True)) diff --git a/venv/lib/python3.10/site-packages/_pytest/config/argparsing.py b/venv/lib/python3.10/site-packages/_pytest/config/argparsing.py new file mode 100644 index 0000000..b0bb3f1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/config/argparsing.py @@ -0,0 +1,535 @@ +import argparse +import os +import sys +import warnings +from gettext import gettext +from typing import Any +from typing import Callable +from typing import cast +from typing import Dict +from typing import List +from typing import Mapping +from typing import Optional +from typing import Sequence +from typing import Tuple +from typing import TYPE_CHECKING +from typing import Union + +import _pytest._io +from _pytest.compat import final +from _pytest.config.exceptions import UsageError +from _pytest.deprecated import ARGUMENT_PERCENT_DEFAULT +from _pytest.deprecated import ARGUMENT_TYPE_STR +from _pytest.deprecated import ARGUMENT_TYPE_STR_CHOICE +from _pytest.deprecated import check_ispytest + +if TYPE_CHECKING: + from typing import NoReturn + from typing_extensions import Literal + +FILE_OR_DIR = "file_or_dir" + + +@final +class Parser: + """Parser for command line arguments and ini-file values. + + :ivar extra_info: Dict of generic param -> value to display in case + there's an error processing the command line arguments. + """ + + prog: Optional[str] = None + + def __init__( + self, + usage: Optional[str] = None, + processopt: Optional[Callable[["Argument"], None]] = None, + *, + _ispytest: bool = False, + ) -> None: + check_ispytest(_ispytest) + self._anonymous = OptionGroup("custom options", parser=self, _ispytest=True) + self._groups: List[OptionGroup] = [] + self._processopt = processopt + self._usage = usage + self._inidict: Dict[str, Tuple[str, Optional[str], Any]] = {} + self._ininames: List[str] = [] + self.extra_info: Dict[str, Any] = {} + + def processoption(self, option: "Argument") -> None: + if self._processopt: + if option.dest: + self._processopt(option) + + def getgroup( + self, name: str, description: str = "", after: Optional[str] = None + ) -> "OptionGroup": + """Get (or create) a named option Group. + + :name: Name of the option group. + :description: Long description for --help output. + :after: Name of another group, used for ordering --help output. + + The returned group object has an ``addoption`` method with the same + signature as :func:`parser.addoption ` but + will be shown in the respective group in the output of + ``pytest. --help``. + """ + for group in self._groups: + if group.name == name: + return group + group = OptionGroup(name, description, parser=self, _ispytest=True) + i = 0 + for i, grp in enumerate(self._groups): + if grp.name == after: + break + self._groups.insert(i + 1, group) + return group + + def addoption(self, *opts: str, **attrs: Any) -> None: + """Register a command line option. + + :opts: Option names, can be short or long options. + :attrs: Same attributes which the ``add_argument()`` function of the + `argparse library `_ + accepts. + + After command line parsing, options are available on the pytest config + object via ``config.option.NAME`` where ``NAME`` is usually set + by passing a ``dest`` attribute, for example + ``addoption("--long", dest="NAME", ...)``. 
+ """ + self._anonymous.addoption(*opts, **attrs) + + def parse( + self, + args: Sequence[Union[str, "os.PathLike[str]"]], + namespace: Optional[argparse.Namespace] = None, + ) -> argparse.Namespace: + from _pytest._argcomplete import try_argcomplete + + self.optparser = self._getparser() + try_argcomplete(self.optparser) + strargs = [os.fspath(x) for x in args] + return self.optparser.parse_args(strargs, namespace=namespace) + + def _getparser(self) -> "MyOptionParser": + from _pytest._argcomplete import filescompleter + + optparser = MyOptionParser(self, self.extra_info, prog=self.prog) + groups = self._groups + [self._anonymous] + for group in groups: + if group.options: + desc = group.description or group.name + arggroup = optparser.add_argument_group(desc) + for option in group.options: + n = option.names() + a = option.attrs() + arggroup.add_argument(*n, **a) + file_or_dir_arg = optparser.add_argument(FILE_OR_DIR, nargs="*") + # bash like autocompletion for dirs (appending '/') + # Type ignored because typeshed doesn't know about argcomplete. + file_or_dir_arg.completer = filescompleter # type: ignore + return optparser + + def parse_setoption( + self, + args: Sequence[Union[str, "os.PathLike[str]"]], + option: argparse.Namespace, + namespace: Optional[argparse.Namespace] = None, + ) -> List[str]: + parsedoption = self.parse(args, namespace=namespace) + for name, value in parsedoption.__dict__.items(): + setattr(option, name, value) + return cast(List[str], getattr(parsedoption, FILE_OR_DIR)) + + def parse_known_args( + self, + args: Sequence[Union[str, "os.PathLike[str]"]], + namespace: Optional[argparse.Namespace] = None, + ) -> argparse.Namespace: + """Parse and return a namespace object with known arguments at this point.""" + return self.parse_known_and_unknown_args(args, namespace=namespace)[0] + + def parse_known_and_unknown_args( + self, + args: Sequence[Union[str, "os.PathLike[str]"]], + namespace: Optional[argparse.Namespace] = None, + ) -> Tuple[argparse.Namespace, List[str]]: + """Parse and return a namespace object with known arguments, and + the remaining arguments unknown at this point.""" + optparser = self._getparser() + strargs = [os.fspath(x) for x in args] + return optparser.parse_known_args(strargs, namespace=namespace) + + def addini( + self, + name: str, + help: str, + type: Optional[ + "Literal['string', 'paths', 'pathlist', 'args', 'linelist', 'bool']" + ] = None, + default=None, + ) -> None: + """Register an ini-file option. + + :name: + Name of the ini-variable. + :type: + Type of the variable. Can be: + + * ``string``: a string + * ``bool``: a boolean + * ``args``: a list of strings, separated as in a shell + * ``linelist``: a list of strings, separated by line breaks + * ``paths``: a list of :class:`pathlib.Path`, separated as in a shell + * ``pathlist``: a list of ``py.path``, separated as in a shell + + .. versionadded:: 7.0 + The ``paths`` variable type. + + Defaults to ``string`` if ``None`` or not passed. + :default: + Default value if no ini-file option exists but is queried. + + The value of ini-variables can be retrieved via a call to + :py:func:`config.getini(name) `. 
+ """ + assert type in (None, "string", "paths", "pathlist", "args", "linelist", "bool") + self._inidict[name] = (help, type, default) + self._ininames.append(name) + + +class ArgumentError(Exception): + """Raised if an Argument instance is created with invalid or + inconsistent arguments.""" + + def __init__(self, msg: str, option: Union["Argument", str]) -> None: + self.msg = msg + self.option_id = str(option) + + def __str__(self) -> str: + if self.option_id: + return f"option {self.option_id}: {self.msg}" + else: + return self.msg + + +class Argument: + """Class that mimics the necessary behaviour of optparse.Option. + + It's currently a least effort implementation and ignoring choices + and integer prefixes. + + https://docs.python.org/3/library/optparse.html#optparse-standard-option-types + """ + + _typ_map = {"int": int, "string": str, "float": float, "complex": complex} + + def __init__(self, *names: str, **attrs: Any) -> None: + """Store parms in private vars for use in add_argument.""" + self._attrs = attrs + self._short_opts: List[str] = [] + self._long_opts: List[str] = [] + if "%default" in (attrs.get("help") or ""): + warnings.warn(ARGUMENT_PERCENT_DEFAULT, stacklevel=3) + try: + typ = attrs["type"] + except KeyError: + pass + else: + # This might raise a keyerror as well, don't want to catch that. + if isinstance(typ, str): + if typ == "choice": + warnings.warn( + ARGUMENT_TYPE_STR_CHOICE.format(typ=typ, names=names), + stacklevel=4, + ) + # argparse expects a type here take it from + # the type of the first element + attrs["type"] = type(attrs["choices"][0]) + else: + warnings.warn( + ARGUMENT_TYPE_STR.format(typ=typ, names=names), stacklevel=4 + ) + attrs["type"] = Argument._typ_map[typ] + # Used in test_parseopt -> test_parse_defaultgetter. + self.type = attrs["type"] + else: + self.type = typ + try: + # Attribute existence is tested in Config._processopt. + self.default = attrs["default"] + except KeyError: + pass + self._set_opt_strings(names) + dest: Optional[str] = attrs.get("dest") + if dest: + self.dest = dest + elif self._long_opts: + self.dest = self._long_opts[0][2:].replace("-", "_") + else: + try: + self.dest = self._short_opts[0][1:] + except IndexError as e: + self.dest = "???" # Needed for the error repr. + raise ArgumentError("need a long or short option", self) from e + + def names(self) -> List[str]: + return self._short_opts + self._long_opts + + def attrs(self) -> Mapping[str, Any]: + # Update any attributes set by processopt. + attrs = "default dest help".split() + attrs.append(self.dest) + for attr in attrs: + try: + self._attrs[attr] = getattr(self, attr) + except AttributeError: + pass + if self._attrs.get("help"): + a = self._attrs["help"] + a = a.replace("%default", "%(default)s") + # a = a.replace('%prog', '%(prog)s') + self._attrs["help"] = a + return self._attrs + + def _set_opt_strings(self, opts: Sequence[str]) -> None: + """Directly from optparse. + + Might not be necessary as this is passed to argparse later on. 
+ """ + for opt in opts: + if len(opt) < 2: + raise ArgumentError( + "invalid option string %r: " + "must be at least two characters long" % opt, + self, + ) + elif len(opt) == 2: + if not (opt[0] == "-" and opt[1] != "-"): + raise ArgumentError( + "invalid short option string %r: " + "must be of the form -x, (x any non-dash char)" % opt, + self, + ) + self._short_opts.append(opt) + else: + if not (opt[0:2] == "--" and opt[2] != "-"): + raise ArgumentError( + "invalid long option string %r: " + "must start with --, followed by non-dash" % opt, + self, + ) + self._long_opts.append(opt) + + def __repr__(self) -> str: + args: List[str] = [] + if self._short_opts: + args += ["_short_opts: " + repr(self._short_opts)] + if self._long_opts: + args += ["_long_opts: " + repr(self._long_opts)] + args += ["dest: " + repr(self.dest)] + if hasattr(self, "type"): + args += ["type: " + repr(self.type)] + if hasattr(self, "default"): + args += ["default: " + repr(self.default)] + return "Argument({})".format(", ".join(args)) + + +class OptionGroup: + """A group of options shown in its own section.""" + + def __init__( + self, + name: str, + description: str = "", + parser: Optional[Parser] = None, + *, + _ispytest: bool = False, + ) -> None: + check_ispytest(_ispytest) + self.name = name + self.description = description + self.options: List[Argument] = [] + self.parser = parser + + def addoption(self, *optnames: str, **attrs: Any) -> None: + """Add an option to this group. + + If a shortened version of a long option is specified, it will + be suppressed in the help. ``addoption('--twowords', '--two-words')`` + results in help showing ``--two-words`` only, but ``--twowords`` gets + accepted **and** the automatic destination is in ``args.twowords``. + """ + conflict = set(optnames).intersection( + name for opt in self.options for name in opt.names() + ) + if conflict: + raise ValueError("option names %s already added" % conflict) + option = Argument(*optnames, **attrs) + self._addoption_instance(option, shortupper=False) + + def _addoption(self, *optnames: str, **attrs: Any) -> None: + option = Argument(*optnames, **attrs) + self._addoption_instance(option, shortupper=True) + + def _addoption_instance(self, option: "Argument", shortupper: bool = False) -> None: + if not shortupper: + for opt in option._short_opts: + if opt[0] == "-" and opt[1].islower(): + raise ValueError("lowercase shortoptions reserved") + if self.parser: + self.parser.processoption(option) + self.options.append(option) + + +class MyOptionParser(argparse.ArgumentParser): + def __init__( + self, + parser: Parser, + extra_info: Optional[Dict[str, Any]] = None, + prog: Optional[str] = None, + ) -> None: + self._parser = parser + super().__init__( + prog=prog, + usage=parser._usage, + add_help=False, + formatter_class=DropShorterLongHelpFormatter, + allow_abbrev=False, + ) + # extra_info is a dict of (param -> value) to display if there's + # an usage error to provide more contextual information to the user. + self.extra_info = extra_info if extra_info else {} + + def error(self, message: str) -> "NoReturn": + """Transform argparse error message into UsageError.""" + msg = f"{self.prog}: error: {message}" + + if hasattr(self._parser, "_config_source_hint"): + # Type ignored because the attribute is set dynamically. + msg = f"{msg} ({self._parser._config_source_hint})" # type: ignore + + raise UsageError(self.format_usage() + msg) + + # Type ignored because typeshed has a very complex type in the superclass. 
+ def parse_args( # type: ignore + self, + args: Optional[Sequence[str]] = None, + namespace: Optional[argparse.Namespace] = None, + ) -> argparse.Namespace: + """Allow splitting of positional arguments.""" + parsed, unrecognized = self.parse_known_args(args, namespace) + if unrecognized: + for arg in unrecognized: + if arg and arg[0] == "-": + lines = ["unrecognized arguments: %s" % (" ".join(unrecognized))] + for k, v in sorted(self.extra_info.items()): + lines.append(f" {k}: {v}") + self.error("\n".join(lines)) + getattr(parsed, FILE_OR_DIR).extend(unrecognized) + return parsed + + if sys.version_info[:2] < (3, 9): # pragma: no cover + # Backport of https://github.com/python/cpython/pull/14316 so we can + # disable long --argument abbreviations without breaking short flags. + def _parse_optional( + self, arg_string: str + ) -> Optional[Tuple[Optional[argparse.Action], str, Optional[str]]]: + if not arg_string: + return None + if not arg_string[0] in self.prefix_chars: + return None + if arg_string in self._option_string_actions: + action = self._option_string_actions[arg_string] + return action, arg_string, None + if len(arg_string) == 1: + return None + if "=" in arg_string: + option_string, explicit_arg = arg_string.split("=", 1) + if option_string in self._option_string_actions: + action = self._option_string_actions[option_string] + return action, option_string, explicit_arg + if self.allow_abbrev or not arg_string.startswith("--"): + option_tuples = self._get_option_tuples(arg_string) + if len(option_tuples) > 1: + msg = gettext( + "ambiguous option: %(option)s could match %(matches)s" + ) + options = ", ".join(option for _, option, _ in option_tuples) + self.error(msg % {"option": arg_string, "matches": options}) + elif len(option_tuples) == 1: + (option_tuple,) = option_tuples + return option_tuple + if self._negative_number_matcher.match(arg_string): + if not self._has_negative_number_optionals: + return None + if " " in arg_string: + return None + return None, arg_string, None + + +class DropShorterLongHelpFormatter(argparse.HelpFormatter): + """Shorten help for long options that differ only in extra hyphens. + + - Collapse **long** options that are the same except for extra hyphens. + - Shortcut if there are only two options and one of them is a short one. + - Cache result on the action object as this is called at least 2 times. + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + # Use more accurate terminal width. 
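+ # argparse's default formatter assumes a fixed width; the real terminal
+ # width is queried instead unless the caller already passed one.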
+ if "width" not in kwargs: + kwargs["width"] = _pytest._io.get_terminal_width() + super().__init__(*args, **kwargs) + + def _format_action_invocation(self, action: argparse.Action) -> str: + orgstr = super()._format_action_invocation(action) + if orgstr and orgstr[0] != "-": # only optional arguments + return orgstr + res: Optional[str] = getattr(action, "_formatted_action_invocation", None) + if res: + return res + options = orgstr.split(", ") + if len(options) == 2 and (len(options[0]) == 2 or len(options[1]) == 2): + # a shortcut for '-h, --help' or '--abc', '-a' + action._formatted_action_invocation = orgstr # type: ignore + return orgstr + return_list = [] + short_long: Dict[str, str] = {} + for option in options: + if len(option) == 2 or option[2] == " ": + continue + if not option.startswith("--"): + raise ArgumentError( + 'long optional argument without "--": [%s]' % (option), option + ) + xxoption = option[2:] + shortened = xxoption.replace("-", "") + if shortened not in short_long or len(short_long[shortened]) < len( + xxoption + ): + short_long[shortened] = xxoption + # now short_long has been filled out to the longest with dashes + # **and** we keep the right option ordering from add_argument + for option in options: + if len(option) == 2 or option[2] == " ": + return_list.append(option) + if option[2:] == short_long.get(option.replace("-", "")): + return_list.append(option.replace(" ", "=", 1)) + formatted_action_invocation = ", ".join(return_list) + action._formatted_action_invocation = formatted_action_invocation # type: ignore + return formatted_action_invocation + + def _split_lines(self, text, width): + """Wrap lines after splitting on original newlines. + + This allows to have explicit line breaks in the help text. + """ + import textwrap + + lines = [] + for line in text.splitlines(): + lines.extend(textwrap.wrap(line.strip(), width)) + return lines diff --git a/venv/lib/python3.10/site-packages/_pytest/config/compat.py b/venv/lib/python3.10/site-packages/_pytest/config/compat.py new file mode 100644 index 0000000..ba267d2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/config/compat.py @@ -0,0 +1,71 @@ +import functools +import warnings +from pathlib import Path +from typing import Optional + +from ..compat import LEGACY_PATH +from ..compat import legacy_path +from ..deprecated import HOOK_LEGACY_PATH_ARG +from _pytest.nodes import _check_path + +# hookname: (Path, LEGACY_PATH) +imply_paths_hooks = { + "pytest_ignore_collect": ("collection_path", "path"), + "pytest_collect_file": ("file_path", "path"), + "pytest_pycollect_makemodule": ("module_path", "path"), + "pytest_report_header": ("start_path", "startdir"), + "pytest_report_collectionfinish": ("start_path", "startdir"), +} + + +class PathAwareHookProxy: + """ + this helper wraps around hook callers + until pluggy supports fixingcalls, this one will do + + it currently doesn't return full hook caller proxies for fixed hooks, + this may have to be changed later depending on bugs + """ + + def __init__(self, hook_caller): + self.__hook_caller = hook_caller + + def __dir__(self): + return dir(self.__hook_caller) + + def __getattr__(self, key, _wraps=functools.wraps): + hook = getattr(self.__hook_caller, key) + if key not in imply_paths_hooks: + self.__dict__[key] = hook + return hook + else: + path_var, fspath_var = imply_paths_hooks[key] + + @_wraps(hook) + def fixed_hook(**kw): + + path_value: Optional[Path] = kw.pop(path_var, None) + fspath_value: Optional[LEGACY_PATH] = kw.pop(fspath_var, None) + if 
fspath_value is not None: + warnings.warn( + HOOK_LEGACY_PATH_ARG.format( + pylib_path_arg=fspath_var, pathlib_path_arg=path_var + ), + stacklevel=2, + ) + if path_value is not None: + if fspath_value is not None: + _check_path(path_value, fspath_value) + else: + fspath_value = legacy_path(path_value) + else: + assert fspath_value is not None + path_value = Path(fspath_value) + + kw[path_var] = path_value + kw[fspath_var] = fspath_value + return hook(**kw) + + fixed_hook.__name__ = key + self.__dict__[key] = fixed_hook + return fixed_hook diff --git a/venv/lib/python3.10/site-packages/_pytest/config/exceptions.py b/venv/lib/python3.10/site-packages/_pytest/config/exceptions.py new file mode 100644 index 0000000..4f1320e --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/config/exceptions.py @@ -0,0 +1,11 @@ +from _pytest.compat import final + + +@final +class UsageError(Exception): + """Error in pytest usage or invocation.""" + + +class PrintHelp(Exception): + """Raised when pytest should print its help to skip the rest of the + argument parsing and validation.""" diff --git a/venv/lib/python3.10/site-packages/_pytest/config/findpaths.py b/venv/lib/python3.10/site-packages/_pytest/config/findpaths.py new file mode 100644 index 0000000..c082e65 --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/config/findpaths.py @@ -0,0 +1,213 @@ +import os +from pathlib import Path +from typing import Dict +from typing import Iterable +from typing import List +from typing import Optional +from typing import Sequence +from typing import Tuple +from typing import TYPE_CHECKING +from typing import Union + +import iniconfig + +from .exceptions import UsageError +from _pytest.outcomes import fail +from _pytest.pathlib import absolutepath +from _pytest.pathlib import commonpath + +if TYPE_CHECKING: + from . import Config + + +def _parse_ini_config(path: Path) -> iniconfig.IniConfig: + """Parse the given generic '.ini' file using legacy IniConfig parser, returning + the parsed object. + + Raise UsageError if the file cannot be parsed. + """ + try: + return iniconfig.IniConfig(str(path)) + except iniconfig.ParseError as exc: + raise UsageError(str(exc)) from exc + + +def load_config_dict_from_file( + filepath: Path, +) -> Optional[Dict[str, Union[str, List[str]]]]: + """Load pytest configuration from the given file path, if supported. + + Return None if the file does not contain valid pytest configuration. + """ + + # Configuration from ini files are obtained from the [pytest] section, if present. + if filepath.suffix == ".ini": + iniconfig = _parse_ini_config(filepath) + + if "pytest" in iniconfig: + return dict(iniconfig["pytest"].items()) + else: + # "pytest.ini" files are always the source of configuration, even if empty. + if filepath.name == "pytest.ini": + return {} + + # '.cfg' files are considered if they contain a "[tool:pytest]" section. + elif filepath.suffix == ".cfg": + iniconfig = _parse_ini_config(filepath) + + if "tool:pytest" in iniconfig.sections: + return dict(iniconfig["tool:pytest"].items()) + elif "pytest" in iniconfig.sections: + # If a setup.cfg contains a "[pytest]" section, we raise a failure to indicate users that + # plain "[pytest]" sections in setup.cfg files is no longer supported (#3086). + fail(CFG_PYTEST_SECTION.format(filename="setup.cfg"), pytrace=False) + + # '.toml' files are considered if they contain a [tool.pytest.ini_options] table. 
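+    # For example (illustrative, not part of this diff), a pyproject.toml
+    # might carry:
+    #
+    #     [tool.pytest.ini_options]
+    #     addopts = "-ra"
+    #     testpaths = ["tests"]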
+ elif filepath.suffix == ".toml": + import tomli + + toml_text = filepath.read_text(encoding="utf-8") + try: + config = tomli.loads(toml_text) + except tomli.TOMLDecodeError as exc: + raise UsageError(f"{filepath}: {exc}") from exc + + result = config.get("tool", {}).get("pytest", {}).get("ini_options", None) + if result is not None: + # TOML supports richer data types than ini files (strings, arrays, floats, ints, etc), + # however we need to convert all scalar values to str for compatibility with the rest + # of the configuration system, which expects strings only. + def make_scalar(v: object) -> Union[str, List[str]]: + return v if isinstance(v, list) else str(v) + + return {k: make_scalar(v) for k, v in result.items()} + + return None + + +def locate_config( + args: Iterable[Path], +) -> Tuple[Optional[Path], Optional[Path], Dict[str, Union[str, List[str]]]]: + """Search in the list of arguments for a valid ini-file for pytest, + and return a tuple of (rootdir, inifile, cfg-dict).""" + config_names = [ + "pytest.ini", + "pyproject.toml", + "tox.ini", + "setup.cfg", + ] + args = [x for x in args if not str(x).startswith("-")] + if not args: + args = [Path.cwd()] + for arg in args: + argpath = absolutepath(arg) + for base in (argpath, *argpath.parents): + for config_name in config_names: + p = base / config_name + if p.is_file(): + ini_config = load_config_dict_from_file(p) + if ini_config is not None: + return base, p, ini_config + return None, None, {} + + +def get_common_ancestor(paths: Iterable[Path]) -> Path: + common_ancestor: Optional[Path] = None + for path in paths: + if not path.exists(): + continue + if common_ancestor is None: + common_ancestor = path + else: + if common_ancestor in path.parents or path == common_ancestor: + continue + elif path in common_ancestor.parents: + common_ancestor = path + else: + shared = commonpath(path, common_ancestor) + if shared is not None: + common_ancestor = shared + if common_ancestor is None: + common_ancestor = Path.cwd() + elif common_ancestor.is_file(): + common_ancestor = common_ancestor.parent + return common_ancestor + + +def get_dirs_from_args(args: Iterable[str]) -> List[Path]: + def is_option(x: str) -> bool: + return x.startswith("-") + + def get_file_part_from_node_id(x: str) -> str: + return x.split("::")[0] + + def get_dir_from_path(path: Path) -> Path: + if path.is_dir(): + return path + return path.parent + + def safe_exists(path: Path) -> bool: + # This can throw on paths that contain characters unrepresentable at the OS level, + # or with invalid syntax on Windows (https://bugs.python.org/issue35306) + try: + return path.exists() + except OSError: + return False + + # These look like paths but may not exist + possible_paths = ( + absolutepath(get_file_part_from_node_id(arg)) + for arg in args + if not is_option(arg) + ) + + return [get_dir_from_path(path) for path in possible_paths if safe_exists(path)] + + +CFG_PYTEST_SECTION = "[pytest] section in {filename} files is no longer supported, change to [tool:pytest] instead." 
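+
+# Editorial sketch (hypothetical usage, not upstream pytest code): how the
+# helpers above fit together. `locate_config` walks each argument's ancestors
+# and returns the first directory that provides usable configuration:
+#
+#     from pathlib import Path
+#     rootdir, inipath, inicfg = locate_config([Path("tests/test_x.py")])
+#     # rootdir -> first ancestor holding pytest.ini, pyproject.toml,
+#     #            tox.ini or setup.cfg with pytest configuration
+#     # inicfg  -> parsed key/value dict, e.g. {"addopts": "-ra"}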
+ + +def determine_setup( + inifile: Optional[str], + args: Sequence[str], + rootdir_cmd_arg: Optional[str] = None, + config: Optional["Config"] = None, +) -> Tuple[Path, Optional[Path], Dict[str, Union[str, List[str]]]]: + rootdir = None + dirs = get_dirs_from_args(args) + if inifile: + inipath_ = absolutepath(inifile) + inipath: Optional[Path] = inipath_ + inicfg = load_config_dict_from_file(inipath_) or {} + if rootdir_cmd_arg is None: + rootdir = inipath_.parent + else: + ancestor = get_common_ancestor(dirs) + rootdir, inipath, inicfg = locate_config([ancestor]) + if rootdir is None and rootdir_cmd_arg is None: + for possible_rootdir in (ancestor, *ancestor.parents): + if (possible_rootdir / "setup.py").is_file(): + rootdir = possible_rootdir + break + else: + if dirs != [ancestor]: + rootdir, inipath, inicfg = locate_config(dirs) + if rootdir is None: + if config is not None: + cwd = config.invocation_params.dir + else: + cwd = Path.cwd() + rootdir = get_common_ancestor([cwd, ancestor]) + is_fs_root = os.path.splitdrive(str(rootdir))[1] == "/" + if is_fs_root: + rootdir = ancestor + if rootdir_cmd_arg: + rootdir = absolutepath(os.path.expandvars(rootdir_cmd_arg)) + if not rootdir.is_dir(): + raise UsageError( + "Directory '{}' not found. Check your '--rootdir' option.".format( + rootdir + ) + ) + assert rootdir is not None + return rootdir, inipath, inicfg or {} diff --git a/venv/lib/python3.10/site-packages/_pytest/debugging.py b/venv/lib/python3.10/site-packages/_pytest/debugging.py new file mode 100644 index 0000000..452fb18 --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/debugging.py @@ -0,0 +1,388 @@ +"""Interactive debugging with PDB, the Python Debugger.""" +import argparse +import functools +import sys +import types +from typing import Any +from typing import Callable +from typing import Generator +from typing import List +from typing import Optional +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import Union + +from _pytest import outcomes +from _pytest._code import ExceptionInfo +from _pytest.config import Config +from _pytest.config import ConftestImportFailure +from _pytest.config import hookimpl +from _pytest.config import PytestPluginManager +from _pytest.config.argparsing import Parser +from _pytest.config.exceptions import UsageError +from _pytest.nodes import Node +from _pytest.reports import BaseReport + +if TYPE_CHECKING: + from _pytest.capture import CaptureManager + from _pytest.runner import CallInfo + + +def _validate_usepdb_cls(value: str) -> Tuple[str, str]: + """Validate syntax of --pdbcls option.""" + try: + modname, classname = value.split(":") + except ValueError as e: + raise argparse.ArgumentTypeError( + f"{value!r} is not in the format 'modname:classname'" + ) from e + return (modname, classname) + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("general") + group._addoption( + "--pdb", + dest="usepdb", + action="store_true", + help="start the interactive Python debugger on errors or KeyboardInterrupt.", + ) + group._addoption( + "--pdbcls", + dest="usepdb_cls", + metavar="modulename:classname", + type=_validate_usepdb_cls, + help="specify a custom interactive Python debugger for use with --pdb." 
+ "For example: --pdbcls=IPython.terminal.debugger:TerminalPdb", + ) + group._addoption( + "--trace", + dest="trace", + action="store_true", + help="Immediately break when running each test.", + ) + + +def pytest_configure(config: Config) -> None: + import pdb + + if config.getvalue("trace"): + config.pluginmanager.register(PdbTrace(), "pdbtrace") + if config.getvalue("usepdb"): + config.pluginmanager.register(PdbInvoke(), "pdbinvoke") + + pytestPDB._saved.append( + (pdb.set_trace, pytestPDB._pluginmanager, pytestPDB._config) + ) + pdb.set_trace = pytestPDB.set_trace + pytestPDB._pluginmanager = config.pluginmanager + pytestPDB._config = config + + # NOTE: not using pytest_unconfigure, since it might get called although + # pytest_configure was not (if another plugin raises UsageError). + def fin() -> None: + ( + pdb.set_trace, + pytestPDB._pluginmanager, + pytestPDB._config, + ) = pytestPDB._saved.pop() + + config.add_cleanup(fin) + + +class pytestPDB: + """Pseudo PDB that defers to the real pdb.""" + + _pluginmanager: Optional[PytestPluginManager] = None + _config: Optional[Config] = None + _saved: List[ + Tuple[Callable[..., None], Optional[PytestPluginManager], Optional[Config]] + ] = [] + _recursive_debug = 0 + _wrapped_pdb_cls: Optional[Tuple[Type[Any], Type[Any]]] = None + + @classmethod + def _is_capturing(cls, capman: Optional["CaptureManager"]) -> Union[str, bool]: + if capman: + return capman.is_capturing() + return False + + @classmethod + def _import_pdb_cls(cls, capman: Optional["CaptureManager"]): + if not cls._config: + import pdb + + # Happens when using pytest.set_trace outside of a test. + return pdb.Pdb + + usepdb_cls = cls._config.getvalue("usepdb_cls") + + if cls._wrapped_pdb_cls and cls._wrapped_pdb_cls[0] == usepdb_cls: + return cls._wrapped_pdb_cls[1] + + if usepdb_cls: + modname, classname = usepdb_cls + + try: + __import__(modname) + mod = sys.modules[modname] + + # Handle --pdbcls=pdb:pdb.Pdb (useful e.g. with pdbpp). + parts = classname.split(".") + pdb_cls = getattr(mod, parts[0]) + for part in parts[1:]: + pdb_cls = getattr(pdb_cls, part) + except Exception as exc: + value = ":".join((modname, classname)) + raise UsageError( + f"--pdbcls: could not import {value!r}: {exc}" + ) from exc + else: + import pdb + + pdb_cls = pdb.Pdb + + wrapped_cls = cls._get_pdb_wrapper_class(pdb_cls, capman) + cls._wrapped_pdb_cls = (usepdb_cls, wrapped_cls) + return wrapped_cls + + @classmethod + def _get_pdb_wrapper_class(cls, pdb_cls, capman: Optional["CaptureManager"]): + import _pytest.config + + # Type ignored because mypy doesn't support "dynamic" + # inheritance like this. 
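+        # (Editorial note: any class exposing the stdlib Pdb interface can be
+        # passed via --pdbcls, e.g. IPython.terminal.debugger:TerminalPdb as
+        # in the option help above; the subclass below wraps it so pytest can
+        # hook continue/quit and resume IO capturing.)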
+ class PytestPdbWrapper(pdb_cls): # type: ignore[valid-type,misc] + _pytest_capman = capman + _continued = False + + def do_debug(self, arg): + cls._recursive_debug += 1 + ret = super().do_debug(arg) + cls._recursive_debug -= 1 + return ret + + def do_continue(self, arg): + ret = super().do_continue(arg) + if cls._recursive_debug == 0: + assert cls._config is not None + tw = _pytest.config.create_terminal_writer(cls._config) + tw.line() + + capman = self._pytest_capman + capturing = pytestPDB._is_capturing(capman) + if capturing: + if capturing == "global": + tw.sep(">", "PDB continue (IO-capturing resumed)") + else: + tw.sep( + ">", + "PDB continue (IO-capturing resumed for %s)" + % capturing, + ) + assert capman is not None + capman.resume() + else: + tw.sep(">", "PDB continue") + assert cls._pluginmanager is not None + cls._pluginmanager.hook.pytest_leave_pdb(config=cls._config, pdb=self) + self._continued = True + return ret + + do_c = do_cont = do_continue + + def do_quit(self, arg): + """Raise Exit outcome when quit command is used in pdb. + + This is a bit of a hack - it would be better if BdbQuit + could be handled, but this would require to wrap the + whole pytest run, and adjust the report etc. + """ + ret = super().do_quit(arg) + + if cls._recursive_debug == 0: + outcomes.exit("Quitting debugger") + + return ret + + do_q = do_quit + do_exit = do_quit + + def setup(self, f, tb): + """Suspend on setup(). + + Needed after do_continue resumed, and entering another + breakpoint again. + """ + ret = super().setup(f, tb) + if not ret and self._continued: + # pdb.setup() returns True if the command wants to exit + # from the interaction: do not suspend capturing then. + if self._pytest_capman: + self._pytest_capman.suspend_global_capture(in_=True) + return ret + + def get_stack(self, f, t): + stack, i = super().get_stack(f, t) + if f is None: + # Find last non-hidden frame. + i = max(0, len(stack) - 1) + while i and stack[i][0].f_locals.get("__tracebackhide__", False): + i -= 1 + return stack, i + + return PytestPdbWrapper + + @classmethod + def _init_pdb(cls, method, *args, **kwargs): + """Initialize PDB debugging, dropping any IO capturing.""" + import _pytest.config + + if cls._pluginmanager is None: + capman: Optional[CaptureManager] = None + else: + capman = cls._pluginmanager.getplugin("capturemanager") + if capman: + capman.suspend(in_=True) + + if cls._config: + tw = _pytest.config.create_terminal_writer(cls._config) + tw.line() + + if cls._recursive_debug == 0: + # Handle header similar to pdb.set_trace in py37+. 
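+            # (Editorial note: a test calling e.g.
+            # `pdb.set_trace(header="checking invariants")` lands here,
+            # because pytest_configure above rebinds pdb.set_trace to
+            # pytestPDB.set_trace.)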
+ header = kwargs.pop("header", None) + if header is not None: + tw.sep(">", header) + else: + capturing = cls._is_capturing(capman) + if capturing == "global": + tw.sep(">", f"PDB {method} (IO-capturing turned off)") + elif capturing: + tw.sep( + ">", + "PDB %s (IO-capturing turned off for %s)" + % (method, capturing), + ) + else: + tw.sep(">", f"PDB {method}") + + _pdb = cls._import_pdb_cls(capman)(**kwargs) + + if cls._pluginmanager: + cls._pluginmanager.hook.pytest_enter_pdb(config=cls._config, pdb=_pdb) + return _pdb + + @classmethod + def set_trace(cls, *args, **kwargs) -> None: + """Invoke debugging via ``Pdb.set_trace``, dropping any IO capturing.""" + frame = sys._getframe().f_back + _pdb = cls._init_pdb("set_trace", *args, **kwargs) + _pdb.set_trace(frame) + + +class PdbInvoke: + def pytest_exception_interact( + self, node: Node, call: "CallInfo[Any]", report: BaseReport + ) -> None: + capman = node.config.pluginmanager.getplugin("capturemanager") + if capman: + capman.suspend_global_capture(in_=True) + out, err = capman.read_global_capture() + sys.stdout.write(out) + sys.stdout.write(err) + assert call.excinfo is not None + _enter_pdb(node, call.excinfo, report) + + def pytest_internalerror(self, excinfo: ExceptionInfo[BaseException]) -> None: + tb = _postmortem_traceback(excinfo) + post_mortem(tb) + + +class PdbTrace: + @hookimpl(hookwrapper=True) + def pytest_pyfunc_call(self, pyfuncitem) -> Generator[None, None, None]: + wrap_pytest_function_for_tracing(pyfuncitem) + yield + + +def wrap_pytest_function_for_tracing(pyfuncitem): + """Change the Python function object of the given Function item by a + wrapper which actually enters pdb before calling the python function + itself, effectively leaving the user in the pdb prompt in the first + statement of the function.""" + _pdb = pytestPDB._init_pdb("runcall") + testfunction = pyfuncitem.obj + + # we can't just return `partial(pdb.runcall, testfunction)` because (on + # python < 3.7.4) runcall's first param is `func`, which means we'd get + # an exception if one of the kwargs to testfunction was called `func`. + @functools.wraps(testfunction) + def wrapper(*args, **kwargs): + func = functools.partial(testfunction, *args, **kwargs) + _pdb.runcall(func) + + pyfuncitem.obj = wrapper + + +def maybe_wrap_pytest_function_for_tracing(pyfuncitem): + """Wrap the given pytestfunct item for tracing support if --trace was given in + the command line.""" + if pyfuncitem.config.getvalue("trace"): + wrap_pytest_function_for_tracing(pyfuncitem) + + +def _enter_pdb( + node: Node, excinfo: ExceptionInfo[BaseException], rep: BaseReport +) -> BaseReport: + # XXX we re-use the TerminalReporter's terminalwriter + # because this seems to avoid some encoding related troubles + # for not completely clear reasons. 
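+    # (Editorial note: the sections echoed below honor --show-capture, whose
+    # accepted values are no/stdout/stderr/log/all.)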
+ tw = node.config.pluginmanager.getplugin("terminalreporter")._tw + tw.line() + + showcapture = node.config.option.showcapture + + for sectionname, content in ( + ("stdout", rep.capstdout), + ("stderr", rep.capstderr), + ("log", rep.caplog), + ): + if showcapture in (sectionname, "all") and content: + tw.sep(">", "captured " + sectionname) + if content[-1:] == "\n": + content = content[:-1] + tw.line(content) + + tw.sep(">", "traceback") + rep.toterminal(tw) + tw.sep(">", "entering PDB") + tb = _postmortem_traceback(excinfo) + rep._pdbshown = True # type: ignore[attr-defined] + post_mortem(tb) + return rep + + +def _postmortem_traceback(excinfo: ExceptionInfo[BaseException]) -> types.TracebackType: + from doctest import UnexpectedException + + if isinstance(excinfo.value, UnexpectedException): + # A doctest.UnexpectedException is not useful for post_mortem. + # Use the underlying exception instead: + return excinfo.value.exc_info[2] + elif isinstance(excinfo.value, ConftestImportFailure): + # A config.ConftestImportFailure is not useful for post_mortem. + # Use the underlying exception instead: + return excinfo.value.excinfo[2] + else: + assert excinfo._excinfo is not None + return excinfo._excinfo[2] + + +def post_mortem(t: types.TracebackType) -> None: + p = pytestPDB._init_pdb("post_mortem") + p.reset() + p.interaction(None, t) + if p.quitting: + outcomes.exit("Quitting debugger") diff --git a/venv/lib/python3.10/site-packages/_pytest/deprecated.py b/venv/lib/python3.10/site-packages/_pytest/deprecated.py new file mode 100644 index 0000000..f2d7976 --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/deprecated.py @@ -0,0 +1,123 @@ +"""Deprecation messages and bits of code used elsewhere in the codebase that +is planned to be removed in the next pytest release. + +Keeping it in a central location makes it easy to track what is deprecated and should +be removed when the time comes. + +All constants defined in this module should be either instances of +:class:`PytestWarning`, or :class:`UnformattedWarning` +in case of warnings which need to format their messages. +""" +from warnings import warn + +from _pytest.warning_types import PytestDeprecationWarning +from _pytest.warning_types import PytestRemovedIn8Warning +from _pytest.warning_types import UnformattedWarning + +# set of plugins which have been integrated into the core; we use this list to ignore +# them during registration to avoid conflicts +DEPRECATED_EXTERNAL_PLUGINS = { + "pytest_catchlog", + "pytest_capturelog", + "pytest_faulthandler", +} + + +# This can be* removed pytest 8, but it's harmless and common, so no rush to remove. +# * If you're in the future: "could have been". +YIELD_FIXTURE = PytestDeprecationWarning( + "@pytest.yield_fixture is deprecated.\n" + "Use @pytest.fixture instead; they are the same." +) + +WARNING_CMDLINE_PREPARSE_HOOK = PytestRemovedIn8Warning( + "The pytest_cmdline_preparse hook is deprecated and will be removed in a future release. \n" + "Please use pytest_load_initial_conftests hook instead." +) + +FSCOLLECTOR_GETHOOKPROXY_ISINITPATH = PytestRemovedIn8Warning( + "The gethookproxy() and isinitpath() methods of FSCollector and Package are deprecated; " + "use self.session.gethookproxy() and self.session.isinitpath() instead. " +) + +STRICT_OPTION = PytestRemovedIn8Warning( + "The --strict option is deprecated, use --strict-markers instead." +) + +# This deprecation is never really meant to be removed. 
+PRIVATE = PytestDeprecationWarning("A private pytest class or function was used.") + +ARGUMENT_PERCENT_DEFAULT = PytestRemovedIn8Warning( + 'pytest now uses argparse. "%default" should be changed to "%(default)s"', +) + +ARGUMENT_TYPE_STR_CHOICE = UnformattedWarning( + PytestRemovedIn8Warning, + "`type` argument to addoption() is the string {typ!r}." + " For choices this is optional and can be omitted, " + " but when supplied should be a type (for example `str` or `int`)." + " (options: {names})", +) + +ARGUMENT_TYPE_STR = UnformattedWarning( + PytestRemovedIn8Warning, + "`type` argument to addoption() is the string {typ!r}, " + " but when supplied should be a type (for example `str` or `int`)." + " (options: {names})", +) + + +HOOK_LEGACY_PATH_ARG = UnformattedWarning( + PytestRemovedIn8Warning, + "The ({pylib_path_arg}: py.path.local) argument is deprecated, please use ({pathlib_path_arg}: pathlib.Path)\n" + "see https://docs.pytest.org/en/latest/deprecations.html" + "#py-path-local-arguments-for-hooks-replaced-with-pathlib-path", +) + +NODE_CTOR_FSPATH_ARG = UnformattedWarning( + PytestRemovedIn8Warning, + "The (fspath: py.path.local) argument to {node_type_name} is deprecated. " + "Please use the (path: pathlib.Path) argument instead.\n" + "See https://docs.pytest.org/en/latest/deprecations.html" + "#fspath-argument-for-node-constructors-replaced-with-pathlib-path", +) + +WARNS_NONE_ARG = PytestRemovedIn8Warning( + "Passing None has been deprecated.\n" + "See https://docs.pytest.org/en/latest/how-to/capture-warnings.html" + "#additional-use-cases-of-warnings-in-tests" + " for alternatives in common use cases." +) + +KEYWORD_MSG_ARG = UnformattedWarning( + PytestRemovedIn8Warning, + "pytest.{func}(msg=...) is now deprecated, use pytest.{func}(reason=...) instead", +) + +INSTANCE_COLLECTOR = PytestRemovedIn8Warning( + "The pytest.Instance collector type is deprecated and is no longer used. " + "See https://docs.pytest.org/en/latest/deprecations.html#the-pytest-instance-collector", +) + +# You want to make some `__init__` or function "private". +# +# def my_private_function(some, args): +# ... +# +# Do this: +# +# def my_private_function(some, args, *, _ispytest: bool = False): +# check_ispytest(_ispytest) +# ... +# +# Change all internal/allowed calls to +# +# my_private_function(some, args, _ispytest=True) +# +# All other calls will get the default _ispytest=False and trigger +# the warning (possibly error in the future). 
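+#
+# Illustrative (editorial) -- an outside caller then sees:
+#
+#     my_private_function(1, 2)
+#     # PytestDeprecationWarning: A private pytest class or function was used.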
+ + +def check_ispytest(ispytest: bool) -> None: + if not ispytest: + warn(PRIVATE, stacklevel=3) diff --git a/venv/lib/python3.10/site-packages/_pytest/doctest.py b/venv/lib/python3.10/site-packages/_pytest/doctest.py new file mode 100644 index 0000000..7d37be2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/doctest.py @@ -0,0 +1,734 @@ +"""Discover and run doctests in modules and test files.""" +import bdb +import inspect +import os +import platform +import sys +import traceback +import types +import warnings +from contextlib import contextmanager +from pathlib import Path +from typing import Any +from typing import Callable +from typing import Dict +from typing import Generator +from typing import Iterable +from typing import List +from typing import Optional +from typing import Pattern +from typing import Sequence +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import Union + +import pytest +from _pytest import outcomes +from _pytest._code.code import ExceptionInfo +from _pytest._code.code import ReprFileLocation +from _pytest._code.code import TerminalRepr +from _pytest._io import TerminalWriter +from _pytest.compat import safe_getattr +from _pytest.config import Config +from _pytest.config.argparsing import Parser +from _pytest.fixtures import FixtureRequest +from _pytest.nodes import Collector +from _pytest.outcomes import OutcomeException +from _pytest.pathlib import fnmatch_ex +from _pytest.pathlib import import_path +from _pytest.python_api import approx +from _pytest.warning_types import PytestWarning + +if TYPE_CHECKING: + import doctest + +DOCTEST_REPORT_CHOICE_NONE = "none" +DOCTEST_REPORT_CHOICE_CDIFF = "cdiff" +DOCTEST_REPORT_CHOICE_NDIFF = "ndiff" +DOCTEST_REPORT_CHOICE_UDIFF = "udiff" +DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE = "only_first_failure" + +DOCTEST_REPORT_CHOICES = ( + DOCTEST_REPORT_CHOICE_NONE, + DOCTEST_REPORT_CHOICE_CDIFF, + DOCTEST_REPORT_CHOICE_NDIFF, + DOCTEST_REPORT_CHOICE_UDIFF, + DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE, +) + +# Lazy definition of runner class +RUNNER_CLASS = None +# Lazy definition of output checker class +CHECKER_CLASS: Optional[Type["doctest.OutputChecker"]] = None + + +def pytest_addoption(parser: Parser) -> None: + parser.addini( + "doctest_optionflags", + "option flags for doctests", + type="args", + default=["ELLIPSIS"], + ) + parser.addini( + "doctest_encoding", "encoding used for doctest files", default="utf-8" + ) + group = parser.getgroup("collect") + group.addoption( + "--doctest-modules", + action="store_true", + default=False, + help="run doctests in all .py modules", + dest="doctestmodules", + ) + group.addoption( + "--doctest-report", + type=str.lower, + default="udiff", + help="choose another output format for diffs on doctest failure", + choices=DOCTEST_REPORT_CHOICES, + dest="doctestreport", + ) + group.addoption( + "--doctest-glob", + action="append", + default=[], + metavar="pat", + help="doctests file matching pattern, default: test*.txt", + dest="doctestglob", + ) + group.addoption( + "--doctest-ignore-import-errors", + action="store_true", + default=False, + help="ignore doctest ImportErrors", + dest="doctest_ignore_import_errors", + ) + group.addoption( + "--doctest-continue-on-failure", + action="store_true", + default=False, + help="for a given doctest, continue to run after the first failure", + dest="doctest_continue_on_failure", + ) + + +def pytest_unconfigure() -> None: + global RUNNER_CLASS + + RUNNER_CLASS = None + + +def 
pytest_collect_file( + file_path: Path, + parent: Collector, +) -> Optional[Union["DoctestModule", "DoctestTextfile"]]: + config = parent.config + if file_path.suffix == ".py": + if config.option.doctestmodules and not any( + (_is_setup_py(file_path), _is_main_py(file_path)) + ): + mod: DoctestModule = DoctestModule.from_parent(parent, path=file_path) + return mod + elif _is_doctest(config, file_path, parent): + txt: DoctestTextfile = DoctestTextfile.from_parent(parent, path=file_path) + return txt + return None + + +def _is_setup_py(path: Path) -> bool: + if path.name != "setup.py": + return False + contents = path.read_bytes() + return b"setuptools" in contents or b"distutils" in contents + + +def _is_doctest(config: Config, path: Path, parent: Collector) -> bool: + if path.suffix in (".txt", ".rst") and parent.session.isinitpath(path): + return True + globs = config.getoption("doctestglob") or ["test*.txt"] + return any(fnmatch_ex(glob, path) for glob in globs) + + +def _is_main_py(path: Path) -> bool: + return path.name == "__main__.py" + + +class ReprFailDoctest(TerminalRepr): + def __init__( + self, reprlocation_lines: Sequence[Tuple[ReprFileLocation, Sequence[str]]] + ) -> None: + self.reprlocation_lines = reprlocation_lines + + def toterminal(self, tw: TerminalWriter) -> None: + for reprlocation, lines in self.reprlocation_lines: + for line in lines: + tw.line(line) + reprlocation.toterminal(tw) + + +class MultipleDoctestFailures(Exception): + def __init__(self, failures: Sequence["doctest.DocTestFailure"]) -> None: + super().__init__() + self.failures = failures + + +def _init_runner_class() -> Type["doctest.DocTestRunner"]: + import doctest + + class PytestDoctestRunner(doctest.DebugRunner): + """Runner to collect failures. + + Note that the out variable in this case is a list instead of a + stdout-like object. + """ + + def __init__( + self, + checker: Optional["doctest.OutputChecker"] = None, + verbose: Optional[bool] = None, + optionflags: int = 0, + continue_on_failure: bool = True, + ) -> None: + super().__init__(checker=checker, verbose=verbose, optionflags=optionflags) + self.continue_on_failure = continue_on_failure + + def report_failure( + self, + out, + test: "doctest.DocTest", + example: "doctest.Example", + got: str, + ) -> None: + failure = doctest.DocTestFailure(test, example, got) + if self.continue_on_failure: + out.append(failure) + else: + raise failure + + def report_unexpected_exception( + self, + out, + test: "doctest.DocTest", + example: "doctest.Example", + exc_info: Tuple[Type[BaseException], BaseException, types.TracebackType], + ) -> None: + if isinstance(exc_info[1], OutcomeException): + raise exc_info[1] + if isinstance(exc_info[1], bdb.BdbQuit): + outcomes.exit("Quitting debugger") + failure = doctest.UnexpectedException(test, example, exc_info) + if self.continue_on_failure: + out.append(failure) + else: + raise failure + + return PytestDoctestRunner + + +def _get_runner( + checker: Optional["doctest.OutputChecker"] = None, + verbose: Optional[bool] = None, + optionflags: int = 0, + continue_on_failure: bool = True, +) -> "doctest.DocTestRunner": + # We need this in order to do a lazy import on doctest + global RUNNER_CLASS + if RUNNER_CLASS is None: + RUNNER_CLASS = _init_runner_class() + # Type ignored because the continue_on_failure argument is only defined on + # PytestDoctestRunner, which is lazily defined so can't be used as a type. 
+ return RUNNER_CLASS( # type: ignore + checker=checker, + verbose=verbose, + optionflags=optionflags, + continue_on_failure=continue_on_failure, + ) + + +class DoctestItem(pytest.Item): + def __init__( + self, + name: str, + parent: "Union[DoctestTextfile, DoctestModule]", + runner: Optional["doctest.DocTestRunner"] = None, + dtest: Optional["doctest.DocTest"] = None, + ) -> None: + super().__init__(name, parent) + self.runner = runner + self.dtest = dtest + self.obj = None + self.fixture_request: Optional[FixtureRequest] = None + + @classmethod + def from_parent( # type: ignore + cls, + parent: "Union[DoctestTextfile, DoctestModule]", + *, + name: str, + runner: "doctest.DocTestRunner", + dtest: "doctest.DocTest", + ): + # incompatible signature due to imposed limits on subclass + """The public named constructor.""" + return super().from_parent(name=name, parent=parent, runner=runner, dtest=dtest) + + def setup(self) -> None: + if self.dtest is not None: + self.fixture_request = _setup_fixtures(self) + globs = dict(getfixture=self.fixture_request.getfixturevalue) + for name, value in self.fixture_request.getfixturevalue( + "doctest_namespace" + ).items(): + globs[name] = value + self.dtest.globs.update(globs) + + def runtest(self) -> None: + assert self.dtest is not None + assert self.runner is not None + _check_all_skipped(self.dtest) + self._disable_output_capturing_for_darwin() + failures: List["doctest.DocTestFailure"] = [] + # Type ignored because we change the type of `out` from what + # doctest expects. + self.runner.run(self.dtest, out=failures) # type: ignore[arg-type] + if failures: + raise MultipleDoctestFailures(failures) + + def _disable_output_capturing_for_darwin(self) -> None: + """Disable output capturing. Otherwise, stdout is lost to doctest (#985).""" + if platform.system() != "Darwin": + return + capman = self.config.pluginmanager.getplugin("capturemanager") + if capman: + capman.suspend_global_capture(in_=True) + out, err = capman.read_global_capture() + sys.stdout.write(out) + sys.stderr.write(err) + + # TODO: Type ignored -- breaks Liskov Substitution. + def repr_failure( # type: ignore[override] + self, + excinfo: ExceptionInfo[BaseException], + ) -> Union[str, TerminalRepr]: + import doctest + + failures: Optional[ + Sequence[Union[doctest.DocTestFailure, doctest.UnexpectedException]] + ] = None + if isinstance( + excinfo.value, (doctest.DocTestFailure, doctest.UnexpectedException) + ): + failures = [excinfo.value] + elif isinstance(excinfo.value, MultipleDoctestFailures): + failures = excinfo.value.failures + + if failures is None: + return super().repr_failure(excinfo) + + reprlocation_lines = [] + for failure in failures: + example = failure.example + test = failure.test + filename = test.filename + if test.lineno is None: + lineno = None + else: + lineno = test.lineno + example.lineno + 1 + message = type(failure).__name__ + # TODO: ReprFileLocation doesn't expect a None lineno. 
+ reprlocation = ReprFileLocation(filename, lineno, message) # type: ignore[arg-type] + checker = _get_checker() + report_choice = _get_report_choice(self.config.getoption("doctestreport")) + if lineno is not None: + assert failure.test.docstring is not None + lines = failure.test.docstring.splitlines(False) + # add line numbers to the left of the error message + assert test.lineno is not None + lines = [ + "%03d %s" % (i + test.lineno + 1, x) for (i, x) in enumerate(lines) + ] + # trim docstring error lines to 10 + lines = lines[max(example.lineno - 9, 0) : example.lineno + 1] + else: + lines = [ + "EXAMPLE LOCATION UNKNOWN, not showing all tests of that example" + ] + indent = ">>>" + for line in example.source.splitlines(): + lines.append(f"??? {indent} {line}") + indent = "..." + if isinstance(failure, doctest.DocTestFailure): + lines += checker.output_difference( + example, failure.got, report_choice + ).split("\n") + else: + inner_excinfo = ExceptionInfo.from_exc_info(failure.exc_info) + lines += ["UNEXPECTED EXCEPTION: %s" % repr(inner_excinfo.value)] + lines += [ + x.strip("\n") for x in traceback.format_exception(*failure.exc_info) + ] + reprlocation_lines.append((reprlocation, lines)) + return ReprFailDoctest(reprlocation_lines) + + def reportinfo(self) -> Tuple[Union["os.PathLike[str]", str], Optional[int], str]: + assert self.dtest is not None + return self.path, self.dtest.lineno, "[doctest] %s" % self.name + + +def _get_flag_lookup() -> Dict[str, int]: + import doctest + + return dict( + DONT_ACCEPT_TRUE_FOR_1=doctest.DONT_ACCEPT_TRUE_FOR_1, + DONT_ACCEPT_BLANKLINE=doctest.DONT_ACCEPT_BLANKLINE, + NORMALIZE_WHITESPACE=doctest.NORMALIZE_WHITESPACE, + ELLIPSIS=doctest.ELLIPSIS, + IGNORE_EXCEPTION_DETAIL=doctest.IGNORE_EXCEPTION_DETAIL, + COMPARISON_FLAGS=doctest.COMPARISON_FLAGS, + ALLOW_UNICODE=_get_allow_unicode_flag(), + ALLOW_BYTES=_get_allow_bytes_flag(), + NUMBER=_get_number_flag(), + ) + + +def get_optionflags(parent): + optionflags_str = parent.config.getini("doctest_optionflags") + flag_lookup_table = _get_flag_lookup() + flag_acc = 0 + for flag in optionflags_str: + flag_acc |= flag_lookup_table[flag] + return flag_acc + + +def _get_continue_on_failure(config): + continue_on_failure = config.getvalue("doctest_continue_on_failure") + if continue_on_failure: + # We need to turn off this if we use pdb since we should stop at + # the first failure. + if config.getvalue("usepdb"): + continue_on_failure = False + return continue_on_failure + + +class DoctestTextfile(pytest.Module): + obj = None + + def collect(self) -> Iterable[DoctestItem]: + import doctest + + # Inspired by doctest.testfile; ideally we would use it directly, + # but it doesn't support passing a custom checker. 
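+        # (Editorial note: with the default --doctest-glob pattern
+        # "test*.txt", a matching file containing e.g.
+        #
+        #     >>> 1 + 1
+        #     2
+        #
+        # is collected as a single DoctestItem.)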
+ encoding = self.config.getini("doctest_encoding") + text = self.path.read_text(encoding) + filename = str(self.path) + name = self.path.name + globs = {"__name__": "__main__"} + + optionflags = get_optionflags(self) + + runner = _get_runner( + verbose=False, + optionflags=optionflags, + checker=_get_checker(), + continue_on_failure=_get_continue_on_failure(self.config), + ) + + parser = doctest.DocTestParser() + test = parser.get_doctest(text, globs, name, filename, 0) + if test.examples: + yield DoctestItem.from_parent( + self, name=test.name, runner=runner, dtest=test + ) + + +def _check_all_skipped(test: "doctest.DocTest") -> None: + """Raise pytest.skip() if all examples in the given DocTest have the SKIP + option set.""" + import doctest + + all_skipped = all(x.options.get(doctest.SKIP, False) for x in test.examples) + if all_skipped: + pytest.skip("all tests skipped by +SKIP option") + + +def _is_mocked(obj: object) -> bool: + """Return if an object is possibly a mock object by checking the + existence of a highly improbable attribute.""" + return ( + safe_getattr(obj, "pytest_mock_example_attribute_that_shouldnt_exist", None) + is not None + ) + + +@contextmanager +def _patch_unwrap_mock_aware() -> Generator[None, None, None]: + """Context manager which replaces ``inspect.unwrap`` with a version + that's aware of mock objects and doesn't recurse into them.""" + real_unwrap = inspect.unwrap + + def _mock_aware_unwrap( + func: Callable[..., Any], *, stop: Optional[Callable[[Any], Any]] = None + ) -> Any: + try: + if stop is None or stop is _is_mocked: + return real_unwrap(func, stop=_is_mocked) + _stop = stop + return real_unwrap(func, stop=lambda obj: _is_mocked(obj) or _stop(func)) + except Exception as e: + warnings.warn( + "Got %r when unwrapping %r. This is usually caused " + "by a violation of Python's object protocol; see e.g. " + "https://github.com/pytest-dev/pytest/issues/5080" % (e, func), + PytestWarning, + ) + raise + + inspect.unwrap = _mock_aware_unwrap + try: + yield + finally: + inspect.unwrap = real_unwrap + + +class DoctestModule(pytest.Module): + def collect(self) -> Iterable[DoctestItem]: + import doctest + + class MockAwareDocTestFinder(doctest.DocTestFinder): + """A hackish doctest finder that overrides stdlib internals to fix a stdlib bug. + + https://github.com/pytest-dev/pytest/issues/3456 + https://bugs.python.org/issue25532 + """ + + def _find_lineno(self, obj, source_lines): + """Doctest code does not take into account `@property`, this + is a hackish way to fix it. https://bugs.python.org/issue17446 + + Wrapped Doctests will need to be unwrapped so the correct + line number is returned. This will be reported upstream. #8796 + """ + if isinstance(obj, property): + obj = getattr(obj, "fget", obj) + + if hasattr(obj, "__wrapped__"): + # Get the main obj in case of it being wrapped + obj = inspect.unwrap(obj) + + # Type ignored because this is a private function. + return super()._find_lineno( # type:ignore[misc] + obj, + source_lines, + ) + + def _find( + self, tests, obj, name, module, source_lines, globs, seen + ) -> None: + if _is_mocked(obj): + return + with _patch_unwrap_mock_aware(): + + # Type ignored because this is a private function. 
+                super()._find(  # type:ignore[misc]
+                    tests, obj, name, module, source_lines, globs, seen
+                )
+
+        if self.path.name == "conftest.py":
+            module = self.config.pluginmanager._importconftest(
+                self.path,
+                self.config.getoption("importmode"),
+                rootpath=self.config.rootpath,
+            )
+        else:
+            try:
+                module = import_path(self.path, root=self.config.rootpath)
+            except ImportError:
+                if self.config.getvalue("doctest_ignore_import_errors"):
+                    pytest.skip("unable to import module %r" % self.path)
+                else:
+                    raise
+        # Uses internal doctest module parsing mechanism.
+        finder = MockAwareDocTestFinder()
+        optionflags = get_optionflags(self)
+        runner = _get_runner(
+            verbose=False,
+            optionflags=optionflags,
+            checker=_get_checker(),
+            continue_on_failure=_get_continue_on_failure(self.config),
+        )
+
+        for test in finder.find(module, module.__name__):
+            if test.examples:  # skip empty doctests
+                yield DoctestItem.from_parent(
+                    self, name=test.name, runner=runner, dtest=test
+                )
+
+
+def _setup_fixtures(doctest_item: DoctestItem) -> FixtureRequest:
+    """Used by DoctestTextfile and DoctestItem to setup fixture information."""
+
+    def func() -> None:
+        pass
+
+    doctest_item.funcargs = {}  # type: ignore[attr-defined]
+    fm = doctest_item.session._fixturemanager
+    doctest_item._fixtureinfo = fm.getfixtureinfo(  # type: ignore[attr-defined]
+        node=doctest_item, func=func, cls=None, funcargs=False
+    )
+    fixture_request = FixtureRequest(doctest_item, _ispytest=True)
+    fixture_request._fillfixtures()
+    return fixture_request
+
+
+def _init_checker_class() -> Type["doctest.OutputChecker"]:
+    import doctest
+    import re
+
+    class LiteralsOutputChecker(doctest.OutputChecker):
+        # Based on doctest_nose_plugin.py from the nltk project
+        # (https://github.com/nltk/nltk) and on the "numtest" doctest extension
+        # by Sebastien Boisgerault (https://github.com/boisgera/numtest).
+
+        _unicode_literal_re = re.compile(r"(\W|^)[uU]([rR]?[\'\"])", re.UNICODE)
+        _bytes_literal_re = re.compile(r"(\W|^)[bB]([rR]?[\'\"])", re.UNICODE)
+        _number_re = re.compile(
+            r"""
+            (?P<number>
+              (?P<mantissa>
+                (?P<integer1> [+-]?\d*)\.(?P<fraction>\d+)
+                |
+                (?P<integer2> [+-]?\d+)\.
+              )
+              (?:
+                [Ee]
+                (?P<exponent1> [+-]?\d+)
+              )?
+              |
+              (?P<integer3> [+-]?\d+)
+              (?:
+                [Ee]
+                (?P<exponent2> [+-]?\d+)
+              )
+            )
+            """,
+            re.VERBOSE,
+        )
+
+        def check_output(self, want: str, got: str, optionflags: int) -> bool:
+            if super().check_output(want, got, optionflags):
+                return True
+
+            allow_unicode = optionflags & _get_allow_unicode_flag()
+            allow_bytes = optionflags & _get_allow_bytes_flag()
+            allow_number = optionflags & _get_number_flag()
+
+            if not allow_unicode and not allow_bytes and not allow_number:
+                return False
+
+            def remove_prefixes(regex: Pattern[str], txt: str) -> str:
+                return re.sub(regex, r"\1\2", txt)
+
+            if allow_unicode:
+                want = remove_prefixes(self._unicode_literal_re, want)
+                got = remove_prefixes(self._unicode_literal_re, got)
+
+            if allow_bytes:
+                want = remove_prefixes(self._bytes_literal_re, want)
+                got = remove_prefixes(self._bytes_literal_re, got)
+
+            if allow_number:
+                got = self._remove_unwanted_precision(want, got)
+
+            return super().check_output(want, got, optionflags)
+
+        def _remove_unwanted_precision(self, want: str, got: str) -> str:
+            wants = list(self._number_re.finditer(want))
+            gots = list(self._number_re.finditer(got))
+            if len(wants) != len(gots):
+                return got
+            offset = 0
+            for w, g in zip(wants, gots):
+                fraction: Optional[str] = w.group("fraction")
+                exponent: Optional[str] = w.group("exponent1")
+                if exponent is None:
+                    exponent = w.group("exponent2")
+                precision = 0 if fraction is None else len(fraction)
+                if exponent is not None:
+                    precision -= int(exponent)
+                if float(w.group()) == approx(float(g.group()), abs=10**-precision):
+                    # They're close enough. Replace the text we actually
+                    # got with the text we want, so that it will match when we
+                    # check the string literally.
+                    got = (
+                        got[: g.start() + offset] + w.group() + got[g.end() + offset :]
+                    )
+                    offset += w.end() - w.start() - (g.end() - g.start())
+            return got
+
+    return LiteralsOutputChecker
+
+
+def _get_checker() -> "doctest.OutputChecker":
+    """Return a doctest.OutputChecker subclass that supports some
+    additional options:
+
+    * ALLOW_UNICODE and ALLOW_BYTES options to ignore u'' and b''
+      prefixes (respectively) in string literals. Useful when the same
+      doctest should run in Python 2 and Python 3.
+
+    * NUMBER to ignore floating-point differences smaller than the
+      precision of the literal number in the doctest.
+
+    An inner class is used to avoid importing "doctest" at the module
+    level.
+    """
+    global CHECKER_CLASS
+    if CHECKER_CLASS is None:
+        CHECKER_CLASS = _init_checker_class()
+    return CHECKER_CLASS()
+
+
+def _get_allow_unicode_flag() -> int:
+    """Register and return the ALLOW_UNICODE flag."""
+    import doctest
+
+    return doctest.register_optionflag("ALLOW_UNICODE")
+
+
+def _get_allow_bytes_flag() -> int:
+    """Register and return the ALLOW_BYTES flag."""
+    import doctest
+
+    return doctest.register_optionflag("ALLOW_BYTES")
+
+
+def _get_number_flag() -> int:
+    """Register and return the NUMBER flag."""
+    import doctest
+
+    return doctest.register_optionflag("NUMBER")
+
+
+def _get_report_choice(key: str) -> int:
+    """Return the actual `doctest` module flag value.
+
+    We want to do it as late as possible to avoid importing `doctest` and all
+    its dependencies when parsing options, as it adds overhead and breaks tests.
+ """ + import doctest + + return { + DOCTEST_REPORT_CHOICE_UDIFF: doctest.REPORT_UDIFF, + DOCTEST_REPORT_CHOICE_CDIFF: doctest.REPORT_CDIFF, + DOCTEST_REPORT_CHOICE_NDIFF: doctest.REPORT_NDIFF, + DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE: doctest.REPORT_ONLY_FIRST_FAILURE, + DOCTEST_REPORT_CHOICE_NONE: 0, + }[key] + + +@pytest.fixture(scope="session") +def doctest_namespace() -> Dict[str, Any]: + """Fixture that returns a :py:class:`dict` that will be injected into the + namespace of doctests.""" + return dict() diff --git a/venv/lib/python3.10/site-packages/_pytest/faulthandler.py b/venv/lib/python3.10/site-packages/_pytest/faulthandler.py new file mode 100644 index 0000000..aaee307 --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/faulthandler.py @@ -0,0 +1,97 @@ +import io +import os +import sys +from typing import Generator +from typing import TextIO + +import pytest +from _pytest.config import Config +from _pytest.config.argparsing import Parser +from _pytest.nodes import Item +from _pytest.stash import StashKey + + +fault_handler_stderr_key = StashKey[TextIO]() +fault_handler_originally_enabled_key = StashKey[bool]() + + +def pytest_addoption(parser: Parser) -> None: + help = ( + "Dump the traceback of all threads if a test takes " + "more than TIMEOUT seconds to finish." + ) + parser.addini("faulthandler_timeout", help, default=0.0) + + +def pytest_configure(config: Config) -> None: + import faulthandler + + stderr_fd_copy = os.dup(get_stderr_fileno()) + config.stash[fault_handler_stderr_key] = open(stderr_fd_copy, "w") + config.stash[fault_handler_originally_enabled_key] = faulthandler.is_enabled() + faulthandler.enable(file=config.stash[fault_handler_stderr_key]) + + +def pytest_unconfigure(config: Config) -> None: + import faulthandler + + faulthandler.disable() + # Close the dup file installed during pytest_configure. + if fault_handler_stderr_key in config.stash: + config.stash[fault_handler_stderr_key].close() + del config.stash[fault_handler_stderr_key] + if config.stash.get(fault_handler_originally_enabled_key, False): + # Re-enable the faulthandler if it was originally enabled. + faulthandler.enable(file=get_stderr_fileno()) + + +def get_stderr_fileno() -> int: + try: + fileno = sys.stderr.fileno() + # The Twisted Logger will return an invalid file descriptor since it is not backed + # by an FD. So, let's also forward this to the same code path as with pytest-xdist. + if fileno == -1: + raise AttributeError() + return fileno + except (AttributeError, io.UnsupportedOperation): + # pytest-xdist monkeypatches sys.stderr with an object that is not an actual file. + # https://docs.python.org/3/library/faulthandler.html#issue-with-file-descriptors + # This is potentially dangerous, but the best we can do. 
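+        # (Editorial note: faulthandler needs a real OS-level file
+        # descriptor, hence the fallback to the original process stderr.)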
+ return sys.__stderr__.fileno() + + +def get_timeout_config_value(config: Config) -> float: + return float(config.getini("faulthandler_timeout") or 0.0) + + +@pytest.hookimpl(hookwrapper=True, trylast=True) +def pytest_runtest_protocol(item: Item) -> Generator[None, None, None]: + timeout = get_timeout_config_value(item.config) + stderr = item.config.stash[fault_handler_stderr_key] + if timeout > 0 and stderr is not None: + import faulthandler + + faulthandler.dump_traceback_later(timeout, file=stderr) + try: + yield + finally: + faulthandler.cancel_dump_traceback_later() + else: + yield + + +@pytest.hookimpl(tryfirst=True) +def pytest_enter_pdb() -> None: + """Cancel any traceback dumping due to timeout before entering pdb.""" + import faulthandler + + faulthandler.cancel_dump_traceback_later() + + +@pytest.hookimpl(tryfirst=True) +def pytest_exception_interact() -> None: + """Cancel any traceback dumping due to an interactive exception being + raised.""" + import faulthandler + + faulthandler.cancel_dump_traceback_later() diff --git a/venv/lib/python3.10/site-packages/_pytest/fixtures.py b/venv/lib/python3.10/site-packages/_pytest/fixtures.py new file mode 100644 index 0000000..ee3e93f --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/fixtures.py @@ -0,0 +1,1655 @@ +import functools +import inspect +import os +import sys +import warnings +from collections import defaultdict +from collections import deque +from contextlib import suppress +from pathlib import Path +from types import TracebackType +from typing import Any +from typing import Callable +from typing import cast +from typing import Dict +from typing import Generator +from typing import Generic +from typing import Iterable +from typing import Iterator +from typing import List +from typing import MutableMapping +from typing import Optional +from typing import overload +from typing import Sequence +from typing import Set +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +import attr + +import _pytest +from _pytest import nodes +from _pytest._code import getfslineno +from _pytest._code.code import FormattedExcinfo +from _pytest._code.code import TerminalRepr +from _pytest._io import TerminalWriter +from _pytest.compat import _format_args +from _pytest.compat import _PytestWrapper +from _pytest.compat import assert_never +from _pytest.compat import final +from _pytest.compat import get_real_func +from _pytest.compat import get_real_method +from _pytest.compat import getfuncargnames +from _pytest.compat import getimfunc +from _pytest.compat import getlocation +from _pytest.compat import is_generator +from _pytest.compat import NOTSET +from _pytest.compat import safe_getattr +from _pytest.config import _PluggyPlugin +from _pytest.config import Config +from _pytest.config.argparsing import Parser +from _pytest.deprecated import check_ispytest +from _pytest.deprecated import YIELD_FIXTURE +from _pytest.mark import Mark +from _pytest.mark import ParameterSet +from _pytest.mark.structures import MarkDecorator +from _pytest.outcomes import fail +from _pytest.outcomes import TEST_OUTCOME +from _pytest.pathlib import absolutepath +from _pytest.pathlib import bestrelpath +from _pytest.scope import HIGH_SCOPES +from _pytest.scope import Scope +from _pytest.stash import StashKey + + +if TYPE_CHECKING: + from typing import Deque + from typing import NoReturn + + from _pytest.scope import _ScopeName + from _pytest.main import Session + from 
_pytest.python import CallSpec2 + from _pytest.python import Metafunc + + +# The value of the fixture -- return/yield of the fixture function (type variable). +FixtureValue = TypeVar("FixtureValue") +# The type of the fixture function (type variable). +FixtureFunction = TypeVar("FixtureFunction", bound=Callable[..., object]) +# The type of a fixture function (type alias generic in fixture value). +_FixtureFunc = Union[ + Callable[..., FixtureValue], Callable[..., Generator[FixtureValue, None, None]] +] +# The type of FixtureDef.cached_result (type alias generic in fixture value). +_FixtureCachedResult = Union[ + Tuple[ + # The result. + FixtureValue, + # Cache key. + object, + None, + ], + Tuple[ + None, + # Cache key. + object, + # Exc info if raised. + Tuple[Type[BaseException], BaseException, TracebackType], + ], +] + + +@attr.s(frozen=True, auto_attribs=True) +class PseudoFixtureDef(Generic[FixtureValue]): + cached_result: "_FixtureCachedResult[FixtureValue]" + _scope: Scope + + +def pytest_sessionstart(session: "Session") -> None: + session._fixturemanager = FixtureManager(session) + + +def get_scope_package(node, fixturedef: "FixtureDef[object]"): + import pytest + + cls = pytest.Package + current = node + fixture_package_name = "{}/{}".format(fixturedef.baseid, "__init__.py") + while current and ( + type(current) is not cls or fixture_package_name != current.nodeid + ): + current = current.parent + if current is None: + return node.session + return current + + +def get_scope_node( + node: nodes.Node, scope: Scope +) -> Optional[Union[nodes.Item, nodes.Collector]]: + import _pytest.python + + if scope is Scope.Function: + return node.getparent(nodes.Item) + elif scope is Scope.Class: + return node.getparent(_pytest.python.Class) + elif scope is Scope.Module: + return node.getparent(_pytest.python.Module) + elif scope is Scope.Package: + return node.getparent(_pytest.python.Package) + elif scope is Scope.Session: + return node.getparent(_pytest.main.Session) + else: + assert_never(scope) + + +# Used for storing artificial fixturedefs for direct parametrization. +name2pseudofixturedef_key = StashKey[Dict[str, "FixtureDef[Any]"]]() + + +def add_funcarg_pseudo_fixture_def( + collector: nodes.Collector, metafunc: "Metafunc", fixturemanager: "FixtureManager" +) -> None: + # This function will transform all collected calls to functions + # if they use direct funcargs (i.e. direct parametrization) + # because we want later test execution to be able to rely on + # an existing FixtureDef structure for all arguments. + # XXX we can probably avoid this algorithm if we modify CallSpec2 + # to directly care for creating the fixturedefs within its methods. + if not metafunc._calls[0].funcargs: + # This function call does not have direct parametrization. + return + # Collect funcargs of all callspecs into a list of values. + arg2params: Dict[str, List[object]] = {} + arg2scope: Dict[str, Scope] = {} + for callspec in metafunc._calls: + for argname, argvalue in callspec.funcargs.items(): + assert argname not in callspec.params + callspec.params[argname] = argvalue + arg2params_list = arg2params.setdefault(argname, []) + callspec.indices[argname] = len(arg2params_list) + arg2params_list.append(argvalue) + if argname not in arg2scope: + scope = callspec._arg2scope.get(argname, Scope.Function) + arg2scope[argname] = scope + callspec.funcargs.clear() + + # Register artificial FixtureDef's so that later at test execution + # time we can rely on a proper FixtureDef to exist for fixture setup. 
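+    # (Editorial note: this is the path taken by direct parametrization such
+    # as `@pytest.mark.parametrize("x", [0, 1])` when "x" is not backed by a
+    # fixture -- each value is then served through a pseudo FixtureDef.)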
+ arg2fixturedefs = metafunc._arg2fixturedefs + for argname, valuelist in arg2params.items(): + # If we have a scope that is higher than function, we need + # to make sure we only ever create an according fixturedef on + # a per-scope basis. We thus store and cache the fixturedef on the + # node related to the scope. + scope = arg2scope[argname] + node = None + if scope is not Scope.Function: + node = get_scope_node(collector, scope) + if node is None: + assert scope is Scope.Class and isinstance( + collector, _pytest.python.Module + ) + # Use module-level collector for class-scope (for now). + node = collector + if node is None: + name2pseudofixturedef = None + else: + default: Dict[str, FixtureDef[Any]] = {} + name2pseudofixturedef = node.stash.setdefault( + name2pseudofixturedef_key, default + ) + if name2pseudofixturedef is not None and argname in name2pseudofixturedef: + arg2fixturedefs[argname] = [name2pseudofixturedef[argname]] + else: + fixturedef = FixtureDef( + fixturemanager=fixturemanager, + baseid="", + argname=argname, + func=get_direct_param_fixture_func, + scope=arg2scope[argname], + params=valuelist, + unittest=False, + ids=None, + ) + arg2fixturedefs[argname] = [fixturedef] + if name2pseudofixturedef is not None: + name2pseudofixturedef[argname] = fixturedef + + +def getfixturemarker(obj: object) -> Optional["FixtureFunctionMarker"]: + """Return fixturemarker or None if it doesn't exist or raised + exceptions.""" + try: + fixturemarker: Optional[FixtureFunctionMarker] = getattr( + obj, "_pytestfixturefunction", None + ) + except TEST_OUTCOME: + # some objects raise errors like request (from flask import request) + # we don't expect them to be fixture functions + return None + return fixturemarker + + +# Parametrized fixture key, helper alias for code below. +_Key = Tuple[object, ...] + + +def get_parametrized_fixture_keys(item: nodes.Item, scope: Scope) -> Iterator[_Key]: + """Return list of keys for all parametrized arguments which match + the specified scope.""" + assert scope is not Scope.Function + try: + callspec = item.callspec # type: ignore[attr-defined] + except AttributeError: + pass + else: + cs: CallSpec2 = callspec + # cs.indices.items() is random order of argnames. Need to + # sort this so that different calls to + # get_parametrized_fixture_keys will be deterministic. + for argname, param_index in sorted(cs.indices.items()): + if cs._arg2scope[argname] != scope: + continue + if scope is Scope.Session: + key: _Key = (argname, param_index) + elif scope is Scope.Package: + key = (argname, param_index, item.path.parent) + elif scope is Scope.Module: + key = (argname, param_index, item.path) + elif scope is Scope.Class: + item_cls = item.cls # type: ignore[attr-defined] + key = (argname, param_index, item.path, item_cls) + else: + assert_never(scope) + yield key + + +# Algorithm for sorting on a per-parametrized resource setup basis. +# It is called for Session scope first and performs sorting +# down to the lower scopes such as to minimize number of "high scope" +# setups and teardowns. 
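+#
+# Illustrative (editorial): with a session-scoped fixture parametrized over
+# ["a", "b"], reorder_items groups tests by parameter value (all "a" items
+# together, then all "b" items) so each value is set up and torn down only
+# once where possible.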
+ + +def reorder_items(items: Sequence[nodes.Item]) -> List[nodes.Item]: + argkeys_cache: Dict[Scope, Dict[nodes.Item, Dict[_Key, None]]] = {} + items_by_argkey: Dict[Scope, Dict[_Key, Deque[nodes.Item]]] = {} + for scope in HIGH_SCOPES: + d: Dict[nodes.Item, Dict[_Key, None]] = {} + argkeys_cache[scope] = d + item_d: Dict[_Key, Deque[nodes.Item]] = defaultdict(deque) + items_by_argkey[scope] = item_d + for item in items: + keys = dict.fromkeys(get_parametrized_fixture_keys(item, scope), None) + if keys: + d[item] = keys + for key in keys: + item_d[key].append(item) + items_dict = dict.fromkeys(items, None) + return list( + reorder_items_atscope(items_dict, argkeys_cache, items_by_argkey, Scope.Session) + ) + + +def fix_cache_order( + item: nodes.Item, + argkeys_cache: Dict[Scope, Dict[nodes.Item, Dict[_Key, None]]], + items_by_argkey: Dict[Scope, Dict[_Key, "Deque[nodes.Item]"]], +) -> None: + for scope in HIGH_SCOPES: + for key in argkeys_cache[scope].get(item, []): + items_by_argkey[scope][key].appendleft(item) + + +def reorder_items_atscope( + items: Dict[nodes.Item, None], + argkeys_cache: Dict[Scope, Dict[nodes.Item, Dict[_Key, None]]], + items_by_argkey: Dict[Scope, Dict[_Key, "Deque[nodes.Item]"]], + scope: Scope, +) -> Dict[nodes.Item, None]: + if scope is Scope.Function or len(items) < 3: + return items + ignore: Set[Optional[_Key]] = set() + items_deque = deque(items) + items_done: Dict[nodes.Item, None] = {} + scoped_items_by_argkey = items_by_argkey[scope] + scoped_argkeys_cache = argkeys_cache[scope] + while items_deque: + no_argkey_group: Dict[nodes.Item, None] = {} + slicing_argkey = None + while items_deque: + item = items_deque.popleft() + if item in items_done or item in no_argkey_group: + continue + argkeys = dict.fromkeys( + (k for k in scoped_argkeys_cache.get(item, []) if k not in ignore), None + ) + if not argkeys: + no_argkey_group[item] = None + else: + slicing_argkey, _ = argkeys.popitem() + # We don't have to remove relevant items from later in the + # deque because they'll just be ignored. + matching_items = [ + i for i in scoped_items_by_argkey[slicing_argkey] if i in items + ] + for i in reversed(matching_items): + fix_cache_order(i, argkeys_cache, items_by_argkey) + items_deque.appendleft(i) + break + if no_argkey_group: + no_argkey_group = reorder_items_atscope( + no_argkey_group, argkeys_cache, items_by_argkey, scope.next_lower() + ) + for item in no_argkey_group: + items_done[item] = None + ignore.add(slicing_argkey) + return items_done + + +def get_direct_param_fixture_func(request): + return request.param + + +@attr.s(slots=True, auto_attribs=True) +class FuncFixtureInfo: + # Original function argument names. + argnames: Tuple[str, ...] + # Argnames that function immediately requires. These include argnames + + # fixture names specified via usefixtures and via autouse=True in fixture + # definitions. + initialnames: Tuple[str, ...] + names_closure: List[str] + name2fixturedefs: Dict[str, Sequence["FixtureDef[Any]"]] + + def prune_dependency_tree(self) -> None: + """Recompute names_closure from initialnames and name2fixturedefs. + + Can only reduce names_closure, which means that the new closure will + always be a subset of the old one. The order is preserved. + + This method is needed because direct parametrization may shadow some + of the fixtures that were included in the originally built dependency + tree. In this way the dependency tree can get pruned, and the closure + of argnames may get reduced. 
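+
+        For example (illustrative): if a test directly parametrizes ``x``,
+        shadowing an ``x`` fixture that itself required ``tmp_path``, the
+        recomputed closure no longer needs to include ``tmp_path``.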
+ """ + closure: Set[str] = set() + working_set = set(self.initialnames) + while working_set: + argname = working_set.pop() + # Argname may be smth not included in the original names_closure, + # in which case we ignore it. This currently happens with pseudo + # FixtureDefs which wrap 'get_direct_param_fixture_func(request)'. + # So they introduce the new dependency 'request' which might have + # been missing in the original tree (closure). + if argname not in closure and argname in self.names_closure: + closure.add(argname) + if argname in self.name2fixturedefs: + working_set.update(self.name2fixturedefs[argname][-1].argnames) + + self.names_closure[:] = sorted(closure, key=self.names_closure.index) + + +class FixtureRequest: + """A request for a fixture from a test or fixture function. + + A request object gives access to the requesting test context and has + an optional ``param`` attribute in case the fixture is parametrized + indirectly. + """ + + def __init__(self, pyfuncitem, *, _ispytest: bool = False) -> None: + check_ispytest(_ispytest) + self._pyfuncitem = pyfuncitem + #: Fixture for which this request is being performed. + self.fixturename: Optional[str] = None + self._scope = Scope.Function + self._fixture_defs: Dict[str, FixtureDef[Any]] = {} + fixtureinfo: FuncFixtureInfo = pyfuncitem._fixtureinfo + self._arg2fixturedefs = fixtureinfo.name2fixturedefs.copy() + self._arg2index: Dict[str, int] = {} + self._fixturemanager: FixtureManager = pyfuncitem.session._fixturemanager + + @property + def scope(self) -> "_ScopeName": + """Scope string, one of "function", "class", "module", "package", "session".""" + return self._scope.value + + @property + def fixturenames(self) -> List[str]: + """Names of all active fixtures in this request.""" + result = list(self._pyfuncitem._fixtureinfo.names_closure) + result.extend(set(self._fixture_defs).difference(result)) + return result + + @property + def node(self): + """Underlying collection node (depends on current request scope).""" + return self._getscopeitem(self._scope) + + def _getnextfixturedef(self, argname: str) -> "FixtureDef[Any]": + fixturedefs = self._arg2fixturedefs.get(argname, None) + if fixturedefs is None: + # We arrive here because of a dynamic call to + # getfixturevalue(argname) usage which was naturally + # not known at parsing/collection time. + assert self._pyfuncitem.parent is not None + parentid = self._pyfuncitem.parent.nodeid + fixturedefs = self._fixturemanager.getfixturedefs(argname, parentid) + # TODO: Fix this type ignore. Either add assert or adjust types. + # Can this be None here? + self._arg2fixturedefs[argname] = fixturedefs # type: ignore[assignment] + # fixturedefs list is immutable so we maintain a decreasing index. 
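+        # Index -1 selects the innermost matching definition; each further
+        # request for the same argname steps one definition outwards, which is
+        # how an overriding fixture can still request the fixture it shadows.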
+ index = self._arg2index.get(argname, 0) - 1 + if fixturedefs is None or (-index > len(fixturedefs)): + raise FixtureLookupError(argname, self) + self._arg2index[argname] = index + return fixturedefs[index] + + @property + def config(self) -> Config: + """The pytest config object associated with this request.""" + return self._pyfuncitem.config # type: ignore[no-any-return] + + @property + def function(self): + """Test function object if the request has a per-function scope.""" + if self.scope != "function": + raise AttributeError( + f"function not available in {self.scope}-scoped context" + ) + return self._pyfuncitem.obj + + @property + def cls(self): + """Class (can be None) where the test function was collected.""" + if self.scope not in ("class", "function"): + raise AttributeError(f"cls not available in {self.scope}-scoped context") + clscol = self._pyfuncitem.getparent(_pytest.python.Class) + if clscol: + return clscol.obj + + @property + def instance(self): + """Instance (can be None) on which test function was collected.""" + # unittest support hack, see _pytest.unittest.TestCaseFunction. + try: + return self._pyfuncitem._testcase + except AttributeError: + function = getattr(self, "function", None) + return getattr(function, "__self__", None) + + @property + def module(self): + """Python module object where the test function was collected.""" + if self.scope not in ("function", "class", "module"): + raise AttributeError(f"module not available in {self.scope}-scoped context") + return self._pyfuncitem.getparent(_pytest.python.Module).obj + + @property + def path(self) -> Path: + if self.scope not in ("function", "class", "module", "package"): + raise AttributeError(f"path not available in {self.scope}-scoped context") + # TODO: Remove ignore once _pyfuncitem is properly typed. + return self._pyfuncitem.path # type: ignore + + @property + def keywords(self) -> MutableMapping[str, Any]: + """Keywords/markers dictionary for the underlying node.""" + node: nodes.Node = self.node + return node.keywords + + @property + def session(self) -> "Session": + """Pytest session object.""" + return self._pyfuncitem.session # type: ignore[no-any-return] + + def addfinalizer(self, finalizer: Callable[[], object]) -> None: + """Add finalizer/teardown function to be called after the last test + within the requesting test context finished execution.""" + # XXX usually this method is shadowed by fixturedef specific ones. + self._addfinalizer(finalizer, scope=self.scope) + + def _addfinalizer(self, finalizer: Callable[[], object], scope) -> None: + node = self._getscopeitem(scope) + node.addfinalizer(finalizer) + + def applymarker(self, marker: Union[str, MarkDecorator]) -> None: + """Apply a marker to a single test function invocation. + + This method is useful if you don't want to have a keyword/marker + on all function invocations. + + :param marker: + A :class:`pytest.MarkDecorator` object created by a call + to ``pytest.mark.NAME(...)``. 
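+
+        For example (an illustrative sketch)::
+
+            @pytest.fixture
+            def unreliable_backend(request):
+                request.applymarker(pytest.mark.xfail(reason="backend is unreliable"))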
+ """ + self.node.add_marker(marker) + + def raiseerror(self, msg: Optional[str]) -> "NoReturn": + """Raise a FixtureLookupError with the given message.""" + raise self._fixturemanager.FixtureLookupError(None, self, msg) + + def _fillfixtures(self) -> None: + item = self._pyfuncitem + fixturenames = getattr(item, "fixturenames", self.fixturenames) + for argname in fixturenames: + if argname not in item.funcargs: + item.funcargs[argname] = self.getfixturevalue(argname) + + def getfixturevalue(self, argname: str) -> Any: + """Dynamically run a named fixture function. + + Declaring fixtures via function argument is recommended where possible. + But if you can only decide whether to use another fixture at test + setup time, you may use this function to retrieve it inside a fixture + or test function body. + + :raises pytest.FixtureLookupError: + If the given fixture could not be found. + """ + fixturedef = self._get_active_fixturedef(argname) + assert fixturedef.cached_result is not None + return fixturedef.cached_result[0] + + def _get_active_fixturedef( + self, argname: str + ) -> Union["FixtureDef[object]", PseudoFixtureDef[object]]: + try: + return self._fixture_defs[argname] + except KeyError: + try: + fixturedef = self._getnextfixturedef(argname) + except FixtureLookupError: + if argname == "request": + cached_result = (self, [0], None) + return PseudoFixtureDef(cached_result, Scope.Function) + raise + # Remove indent to prevent the python3 exception + # from leaking into the call. + self._compute_fixture_value(fixturedef) + self._fixture_defs[argname] = fixturedef + return fixturedef + + def _get_fixturestack(self) -> List["FixtureDef[Any]"]: + current = self + values: List[FixtureDef[Any]] = [] + while isinstance(current, SubRequest): + values.append(current._fixturedef) # type: ignore[has-type] + current = current._parent_request + values.reverse() + return values + + def _compute_fixture_value(self, fixturedef: "FixtureDef[object]") -> None: + """Create a SubRequest based on "self" and call the execute method + of the given FixtureDef object. + + This will force the FixtureDef object to throw away any previous + results and compute a new fixture value, which will be stored into + the FixtureDef object itself. + """ + # prepare a subrequest object before calling fixture function + # (latter managed by fixturedef) + argname = fixturedef.argname + funcitem = self._pyfuncitem + scope = fixturedef._scope + try: + callspec = funcitem.callspec + except AttributeError: + callspec = None + if callspec is not None and argname in callspec.params: + param = callspec.params[argname] + param_index = callspec.indices[argname] + # If a parametrize invocation set a scope it will override + # the static scope defined with the fixture function. 
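+            # (e.g. ``pytest.mark.parametrize(..., indirect=True, scope="module")``
+            # promotes a function-scoped fixture to module scope for these calls.)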
+ with suppress(KeyError): + scope = callspec._arg2scope[argname] + else: + param = NOTSET + param_index = 0 + has_params = fixturedef.params is not None + fixtures_not_supported = getattr(funcitem, "nofuncargs", False) + if has_params and fixtures_not_supported: + msg = ( + "{name} does not support fixtures, maybe unittest.TestCase subclass?\n" + "Node id: {nodeid}\n" + "Function type: {typename}" + ).format( + name=funcitem.name, + nodeid=funcitem.nodeid, + typename=type(funcitem).__name__, + ) + fail(msg, pytrace=False) + if has_params: + frame = inspect.stack()[3] + frameinfo = inspect.getframeinfo(frame[0]) + source_path = absolutepath(frameinfo.filename) + source_lineno = frameinfo.lineno + try: + source_path_str = str( + source_path.relative_to(funcitem.config.rootpath) + ) + except ValueError: + source_path_str = str(source_path) + msg = ( + "The requested fixture has no parameter defined for test:\n" + " {}\n\n" + "Requested fixture '{}' defined in:\n{}" + "\n\nRequested here:\n{}:{}".format( + funcitem.nodeid, + fixturedef.argname, + getlocation(fixturedef.func, funcitem.config.rootpath), + source_path_str, + source_lineno, + ) + ) + fail(msg, pytrace=False) + + subrequest = SubRequest( + self, scope, param, param_index, fixturedef, _ispytest=True + ) + + # Check if a higher-level scoped fixture accesses a lower level one. + subrequest._check_scope(argname, self._scope, scope) + try: + # Call the fixture function. + fixturedef.execute(request=subrequest) + finally: + self._schedule_finalizers(fixturedef, subrequest) + + def _schedule_finalizers( + self, fixturedef: "FixtureDef[object]", subrequest: "SubRequest" + ) -> None: + # If fixture function failed it might have registered finalizers. + subrequest.node.addfinalizer(lambda: fixturedef.finish(request=subrequest)) + + def _check_scope( + self, + argname: str, + invoking_scope: Scope, + requested_scope: Scope, + ) -> None: + if argname == "request": + return + if invoking_scope > requested_scope: + # Try to report something helpful. + text = "\n".join(self._factorytraceback()) + fail( + f"ScopeMismatch: You tried to access the {requested_scope.value} scoped " + f"fixture {argname} with a {invoking_scope.value} scoped request object, " + f"involved factories:\n{text}", + pytrace=False, + ) + + def _factorytraceback(self) -> List[str]: + lines = [] + for fixturedef in self._get_fixturestack(): + factory = fixturedef.func + fs, lineno = getfslineno(factory) + if isinstance(fs, Path): + session: Session = self._pyfuncitem.session + p = bestrelpath(session.path, fs) + else: + p = fs + args = _format_args(factory) + lines.append("%s:%d: def %s%s" % (p, lineno + 1, factory.__name__, args)) + return lines + + def _getscopeitem( + self, scope: Union[Scope, "_ScopeName"] + ) -> Union[nodes.Item, nodes.Collector]: + if isinstance(scope, str): + scope = Scope(scope) + if scope is Scope.Function: + # This might also be a non-function Item despite its attribute name. + node: Optional[Union[nodes.Item, nodes.Collector]] = self._pyfuncitem + elif scope is Scope.Package: + # FIXME: _fixturedef is not defined on FixtureRequest (this class), + # but on FixtureRequest (a subclass). + node = get_scope_package(self._pyfuncitem, self._fixturedef) # type: ignore[attr-defined] + else: + node = get_scope_node(self._pyfuncitem, scope) + if node is None and scope is Scope.Class: + # Fallback to function item itself. 
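+            # (A class-scoped fixture may be requested by a module-level test;
+            # no Class node exists there, so the function item hosts the scope.)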
+            node = self._pyfuncitem
+        assert node, 'Could not obtain a node for scope "{}" for function {!r}'.format(
+            scope, self._pyfuncitem
+        )
+        return node
+
+    def __repr__(self) -> str:
+        return "<FixtureRequest for %r>" % (self.node)
+
+
+@final
+class SubRequest(FixtureRequest):
+    """A sub request for handling getting a fixture from a test function/fixture."""
+
+    def __init__(
+        self,
+        request: "FixtureRequest",
+        scope: Scope,
+        param: Any,
+        param_index: int,
+        fixturedef: "FixtureDef[object]",
+        *,
+        _ispytest: bool = False,
+    ) -> None:
+        check_ispytest(_ispytest)
+        self._parent_request = request
+        self.fixturename = fixturedef.argname
+        if param is not NOTSET:
+            self.param = param
+        self.param_index = param_index
+        self._scope = scope
+        self._fixturedef = fixturedef
+        self._pyfuncitem = request._pyfuncitem
+        self._fixture_defs = request._fixture_defs
+        self._arg2fixturedefs = request._arg2fixturedefs
+        self._arg2index = request._arg2index
+        self._fixturemanager = request._fixturemanager
+
+    def __repr__(self) -> str:
+        return f"<SubRequest {self.fixturename!r} for {self._pyfuncitem!r}>"
+
+    def addfinalizer(self, finalizer: Callable[[], object]) -> None:
+        """Add finalizer/teardown function to be called after the last test
+        within the requesting test context finished execution."""
+        self._fixturedef.addfinalizer(finalizer)
+
+    def _schedule_finalizers(
+        self, fixturedef: "FixtureDef[object]", subrequest: "SubRequest"
+    ) -> None:
+        # If the executing fixturedef was not explicitly requested in the argument list (via
+        # getfixturevalue inside the fixture call) then ensure this fixture def will be finished
+        # first.
+        if fixturedef.argname not in self.fixturenames:
+            fixturedef.addfinalizer(
+                functools.partial(self._fixturedef.finish, request=self)
+            )
+        super()._schedule_finalizers(fixturedef, subrequest)
+
+
+@final
+class FixtureLookupError(LookupError):
+    """Could not return a requested fixture (missing or invalid)."""
+
+    def __init__(
+        self, argname: Optional[str], request: FixtureRequest, msg: Optional[str] = None
+    ) -> None:
+        self.argname = argname
+        self.request = request
+        self.fixturestack = request._get_fixturestack()
+        self.msg = msg
+
+    def formatrepr(self) -> "FixtureLookupErrorRepr":
+        tblines: List[str] = []
+        addline = tblines.append
+        stack = [self.request._pyfuncitem.obj]
+        stack.extend(map(lambda x: x.func, self.fixturestack))
+        msg = self.msg
+        if msg is not None:
+            # The last fixture raised an error, let's present
+            # it at the requesting side.
+            stack = stack[:-1]
+        for function in stack:
+            fspath, lineno = getfslineno(function)
+            try:
+                lines, _ = inspect.getsourcelines(get_real_func(function))
+            except (OSError, IndexError, TypeError):
+                error_msg = "file %s, line %s: source code not available"
+                addline(error_msg % (fspath, lineno + 1))
+            else:
+                addline(f"file {fspath}, line {lineno + 1}")
+                for i, line in enumerate(lines):
+                    line = line.rstrip()
+                    addline("  " + line)
+                    if line.lstrip().startswith("def"):
+                        break
+
+        if msg is None:
+            fm = self.request._fixturemanager
+            available = set()
+            parentid = self.request._pyfuncitem.parent.nodeid
+            for name, fixturedefs in fm._arg2fixturedefs.items():
+                faclist = list(fm._matchfactories(fixturedefs, parentid))
+                if faclist:
+                    available.add(name)
+            if self.argname in available:
+                msg = " recursive dependency involving fixture '{}' detected".format(
+                    self.argname
+                )
+            else:
+                msg = f"fixture '{self.argname}' not found"
+            msg += "\n available fixtures: {}".format(", ".join(sorted(available)))
+            msg += "\n use 'pytest --fixtures [testpath]' for help on them."
+ + return FixtureLookupErrorRepr(fspath, lineno, tblines, msg, self.argname) + + +class FixtureLookupErrorRepr(TerminalRepr): + def __init__( + self, + filename: Union[str, "os.PathLike[str]"], + firstlineno: int, + tblines: Sequence[str], + errorstring: str, + argname: Optional[str], + ) -> None: + self.tblines = tblines + self.errorstring = errorstring + self.filename = filename + self.firstlineno = firstlineno + self.argname = argname + + def toterminal(self, tw: TerminalWriter) -> None: + # tw.line("FixtureLookupError: %s" %(self.argname), red=True) + for tbline in self.tblines: + tw.line(tbline.rstrip()) + lines = self.errorstring.split("\n") + if lines: + tw.line( + f"{FormattedExcinfo.fail_marker} {lines[0].strip()}", + red=True, + ) + for line in lines[1:]: + tw.line( + f"{FormattedExcinfo.flow_marker} {line.strip()}", + red=True, + ) + tw.line() + tw.line("%s:%d" % (os.fspath(self.filename), self.firstlineno + 1)) + + +def fail_fixturefunc(fixturefunc, msg: str) -> "NoReturn": + fs, lineno = getfslineno(fixturefunc) + location = f"{fs}:{lineno + 1}" + source = _pytest._code.Source(fixturefunc) + fail(msg + ":\n\n" + str(source.indent()) + "\n" + location, pytrace=False) + + +def call_fixture_func( + fixturefunc: "_FixtureFunc[FixtureValue]", request: FixtureRequest, kwargs +) -> FixtureValue: + if is_generator(fixturefunc): + fixturefunc = cast( + Callable[..., Generator[FixtureValue, None, None]], fixturefunc + ) + generator = fixturefunc(**kwargs) + try: + fixture_result = next(generator) + except StopIteration: + raise ValueError(f"{request.fixturename} did not yield a value") from None + finalizer = functools.partial(_teardown_yield_fixture, fixturefunc, generator) + request.addfinalizer(finalizer) + else: + fixturefunc = cast(Callable[..., FixtureValue], fixturefunc) + fixture_result = fixturefunc(**kwargs) + return fixture_result + + +def _teardown_yield_fixture(fixturefunc, it) -> None: + """Execute the teardown of a fixture function by advancing the iterator + after the yield and ensure the iteration ends (if not it means there is + more than one yield in the function).""" + try: + next(it) + except StopIteration: + pass + else: + fail_fixturefunc(fixturefunc, "fixture function has more than one 'yield'") + + +def _eval_scope_callable( + scope_callable: "Callable[[str, Config], _ScopeName]", + fixture_name: str, + config: Config, +) -> "_ScopeName": + try: + # Type ignored because there is no typing mechanism to specify + # keyword arguments, currently. 
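+        # An illustrative dynamic-scope callable (the "--keep-containers"
+        # option name is hypothetical):
+        #     def fixture_scope(*, fixture_name, config):
+        #         if config.getoption("--keep-containers", default=False):
+        #             return "session"
+        #         return "function"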
+ result = scope_callable(fixture_name=fixture_name, config=config) # type: ignore[call-arg] + except Exception as e: + raise TypeError( + "Error evaluating {} while defining fixture '{}'.\n" + "Expected a function with the signature (*, fixture_name, config)".format( + scope_callable, fixture_name + ) + ) from e + if not isinstance(result, str): + fail( + "Expected {} to return a 'str' while defining fixture '{}', but it returned:\n" + "{!r}".format(scope_callable, fixture_name, result), + pytrace=False, + ) + return result + + +@final +class FixtureDef(Generic[FixtureValue]): + """A container for a fixture definition.""" + + def __init__( + self, + fixturemanager: "FixtureManager", + baseid: Optional[str], + argname: str, + func: "_FixtureFunc[FixtureValue]", + scope: Union[Scope, "_ScopeName", Callable[[str, Config], "_ScopeName"], None], + params: Optional[Sequence[object]], + unittest: bool = False, + ids: Optional[ + Union[Tuple[Optional[object], ...], Callable[[Any], Optional[object]]] + ] = None, + ) -> None: + self._fixturemanager = fixturemanager + # The "base" node ID for the fixture. + # + # This is a node ID prefix. A fixture is only available to a node (e.g. + # a `Function` item) if the fixture's baseid is a parent of the node's + # nodeid (see the `iterparentnodeids` function for what constitutes a + # "parent" and a "prefix" in this context). + # + # For a fixture found in a Collector's object (e.g. a `Module`s module, + # a `Class`'s class), the baseid is the Collector's nodeid. + # + # For a fixture found in a conftest plugin, the baseid is the conftest's + # directory path relative to the rootdir. + # + # For other plugins, the baseid is the empty string (always matches). + self.baseid = baseid or "" + # Whether the fixture was found from a node or a conftest in the + # collection tree. Will be false for fixtures defined in non-conftest + # plugins. + self.has_location = baseid is not None + # The fixture factory function. + self.func = func + # The name by which the fixture may be requested. + self.argname = argname + if scope is None: + scope = Scope.Function + elif callable(scope): + scope = _eval_scope_callable(scope, argname, fixturemanager.config) + if isinstance(scope, str): + scope = Scope.from_user( + scope, descr=f"Fixture '{func.__name__}'", where=baseid + ) + self._scope = scope + # If the fixture is directly parametrized, the parameter values. + self.params: Optional[Sequence[object]] = params + # If the fixture is directly parametrized, a tuple of explicit IDs to + # assign to the parameter values, or a callable to generate an ID given + # a parameter value. + self.ids = ids + # The names requested by the fixtures. + self.argnames = getfuncargnames(func, name=argname, is_method=unittest) + # Whether the fixture was collected from a unittest TestCase class. + # Note that it really only makes sense to define autouse fixtures in + # unittest TestCases. + self.unittest = unittest + # If the fixture was executed, the current value of the fixture. + # Can change if the fixture is executed with different parameters. 
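+        # (A 3-tuple: (value, cache_key, None) on success, or
+        # (None, cache_key, exc_info) if the fixture function raised; see
+        # _FixtureCachedResult above.)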
+        self.cached_result: Optional[_FixtureCachedResult[FixtureValue]] = None
+        self._finalizers: List[Callable[[], object]] = []
+
+    @property
+    def scope(self) -> "_ScopeName":
+        """Scope string, one of "function", "class", "module", "package", "session"."""
+        return self._scope.value
+
+    def addfinalizer(self, finalizer: Callable[[], object]) -> None:
+        self._finalizers.append(finalizer)
+
+    def finish(self, request: SubRequest) -> None:
+        exc = None
+        try:
+            while self._finalizers:
+                try:
+                    func = self._finalizers.pop()
+                    func()
+                except BaseException as e:
+                    # XXX Only first exception will be seen by user,
+                    #     ideally all should be reported.
+                    if exc is None:
+                        exc = e
+            if exc:
+                raise exc
+        finally:
+            ihook = request.node.ihook
+            ihook.pytest_fixture_post_finalizer(fixturedef=self, request=request)
+            # Even if finalization fails, we invalidate the cached fixture
+            # value and remove all finalizers because they may be bound methods
+            # which will keep instances alive.
+            self.cached_result = None
+            self._finalizers = []
+
+    def execute(self, request: SubRequest) -> FixtureValue:
+        # Get required arguments and register our own finish()
+        # with their finalization.
+        for argname in self.argnames:
+            fixturedef = request._get_active_fixturedef(argname)
+            if argname != "request":
+                # PseudoFixtureDef is only for "request".
+                assert isinstance(fixturedef, FixtureDef)
+                fixturedef.addfinalizer(functools.partial(self.finish, request=request))
+
+        my_cache_key = self.cache_key(request)
+        if self.cached_result is not None:
+            # note: comparison with `==` can fail (or be expensive) for e.g.
+            # numpy arrays (#6497).
+            cache_key = self.cached_result[1]
+            if my_cache_key is cache_key:
+                if self.cached_result[2] is not None:
+                    _, val, tb = self.cached_result[2]
+                    raise val.with_traceback(tb)
+                else:
+                    result = self.cached_result[0]
+                    return result
+            # We have a previous but differently parametrized fixture instance
+            # so we need to tear it down before creating a new one.
+            self.finish(request)
+            assert self.cached_result is None
+
+        ihook = request.node.ihook
+        result = ihook.pytest_fixture_setup(fixturedef=self, request=request)
+        return result
+
+    def cache_key(self, request: SubRequest) -> object:
+        return request.param_index if not hasattr(request, "param") else request.param
+
+    def __repr__(self) -> str:
+        return "<FixtureDef argname={!r} scope={!r} baseid={!r}>".format(
+            self.argname, self.scope, self.baseid
+        )
+
+
+def resolve_fixture_function(
+    fixturedef: FixtureDef[FixtureValue], request: FixtureRequest
+) -> "_FixtureFunc[FixtureValue]":
+    """Get the actual callable that can be called to obtain the fixture
+    value, dealing with unittest-specific instances and bound methods."""
+    fixturefunc = fixturedef.func
+    if fixturedef.unittest:
+        if request.instance is not None:
+            # Bind the unbound method to the TestCase instance.
+            fixturefunc = fixturedef.func.__get__(request.instance)  # type: ignore[union-attr]
+    else:
+        # The fixture function needs to be bound to the actual
+        # request.instance so that code working with "fixturedef" behaves
+        # as expected.
+        if request.instance is not None:
+            # Handle the case where fixture is defined not in a test class, but some other class
+            # (for example a plugin class with a fixture), see #2270.
+ if hasattr(fixturefunc, "__self__") and not isinstance( + request.instance, fixturefunc.__self__.__class__ # type: ignore[union-attr] + ): + return fixturefunc + fixturefunc = getimfunc(fixturedef.func) + if fixturefunc != fixturedef.func: + fixturefunc = fixturefunc.__get__(request.instance) # type: ignore[union-attr] + return fixturefunc + + +def pytest_fixture_setup( + fixturedef: FixtureDef[FixtureValue], request: SubRequest +) -> FixtureValue: + """Execution of fixture setup.""" + kwargs = {} + for argname in fixturedef.argnames: + fixdef = request._get_active_fixturedef(argname) + assert fixdef.cached_result is not None + result, arg_cache_key, exc = fixdef.cached_result + request._check_scope(argname, request._scope, fixdef._scope) + kwargs[argname] = result + + fixturefunc = resolve_fixture_function(fixturedef, request) + my_cache_key = fixturedef.cache_key(request) + try: + result = call_fixture_func(fixturefunc, request, kwargs) + except TEST_OUTCOME: + exc_info = sys.exc_info() + assert exc_info[0] is not None + fixturedef.cached_result = (None, my_cache_key, exc_info) + raise + fixturedef.cached_result = (result, my_cache_key, None) + return result + + +def _ensure_immutable_ids( + ids: Optional[Union[Sequence[Optional[object]], Callable[[Any], Optional[object]]]] +) -> Optional[Union[Tuple[Optional[object], ...], Callable[[Any], Optional[object]]]]: + if ids is None: + return None + if callable(ids): + return ids + return tuple(ids) + + +def _params_converter( + params: Optional[Iterable[object]], +) -> Optional[Tuple[object, ...]]: + return tuple(params) if params is not None else None + + +def wrap_function_to_error_out_if_called_directly( + function: FixtureFunction, + fixture_marker: "FixtureFunctionMarker", +) -> FixtureFunction: + """Wrap the given fixture function so we can raise an error about it being called directly, + instead of used as an argument in a test function.""" + message = ( + 'Fixture "{name}" called directly. Fixtures are not meant to be called directly,\n' + "but are created automatically when test functions request them as parameters.\n" + "See https://docs.pytest.org/en/stable/explanation/fixtures.html for more information about fixtures, and\n" + "https://docs.pytest.org/en/stable/deprecations.html#calling-fixtures-directly about how to update your code." + ).format(name=fixture_marker.name or function.__name__) + + @functools.wraps(function) + def result(*args, **kwargs): + fail(message, pytrace=False) + + # Keep reference to the original function in our own custom attribute so we don't unwrap + # further than this point and lose useful wrappings like @mock.patch (#3774). 
+ result.__pytest_wrapped__ = _PytestWrapper(function) # type: ignore[attr-defined] + + return cast(FixtureFunction, result) + + +@final +@attr.s(frozen=True, auto_attribs=True) +class FixtureFunctionMarker: + scope: "Union[_ScopeName, Callable[[str, Config], _ScopeName]]" + params: Optional[Tuple[object, ...]] = attr.ib(converter=_params_converter) + autouse: bool = False + ids: Optional[ + Union[Tuple[Optional[object], ...], Callable[[Any], Optional[object]]] + ] = attr.ib( + default=None, + converter=_ensure_immutable_ids, + ) + name: Optional[str] = None + + def __call__(self, function: FixtureFunction) -> FixtureFunction: + if inspect.isclass(function): + raise ValueError("class fixtures not supported (maybe in the future)") + + if getattr(function, "_pytestfixturefunction", False): + raise ValueError( + "fixture is being applied more than once to the same function" + ) + + function = wrap_function_to_error_out_if_called_directly(function, self) + + name = self.name or function.__name__ + if name == "request": + location = getlocation(function) + fail( + "'request' is a reserved word for fixtures, use another name:\n {}".format( + location + ), + pytrace=False, + ) + + # Type ignored because https://github.com/python/mypy/issues/2087. + function._pytestfixturefunction = self # type: ignore[attr-defined] + return function + + +@overload +def fixture( + fixture_function: FixtureFunction, + *, + scope: "Union[_ScopeName, Callable[[str, Config], _ScopeName]]" = ..., + params: Optional[Iterable[object]] = ..., + autouse: bool = ..., + ids: Optional[ + Union[Sequence[Optional[object]], Callable[[Any], Optional[object]]] + ] = ..., + name: Optional[str] = ..., +) -> FixtureFunction: + ... + + +@overload +def fixture( + fixture_function: None = ..., + *, + scope: "Union[_ScopeName, Callable[[str, Config], _ScopeName]]" = ..., + params: Optional[Iterable[object]] = ..., + autouse: bool = ..., + ids: Optional[ + Union[Sequence[Optional[object]], Callable[[Any], Optional[object]]] + ] = ..., + name: Optional[str] = None, +) -> FixtureFunctionMarker: + ... + + +def fixture( + fixture_function: Optional[FixtureFunction] = None, + *, + scope: "Union[_ScopeName, Callable[[str, Config], _ScopeName]]" = "function", + params: Optional[Iterable[object]] = None, + autouse: bool = False, + ids: Optional[ + Union[Sequence[Optional[object]], Callable[[Any], Optional[object]]] + ] = None, + name: Optional[str] = None, +) -> Union[FixtureFunctionMarker, FixtureFunction]: + """Decorator to mark a fixture factory function. + + This decorator can be used, with or without parameters, to define a + fixture function. + + The name of the fixture function can later be referenced to cause its + invocation ahead of running tests: test modules or classes can use the + ``pytest.mark.usefixtures(fixturename)`` marker. + + Test functions can directly use fixture names as input arguments in which + case the fixture instance returned from the fixture function will be + injected. + + Fixtures can provide their values to test functions using ``return`` or + ``yield`` statements. When using ``yield`` the code block after the + ``yield`` statement is executed as teardown code regardless of the test + outcome, and must yield exactly once. + + :param scope: + The scope for which this fixture is shared; one of ``"function"`` + (default), ``"class"``, ``"module"``, ``"package"`` or ``"session"``. 
+ + This parameter may also be a callable which receives ``(fixture_name, config)`` + as parameters, and must return a ``str`` with one of the values mentioned above. + + See :ref:`dynamic scope` in the docs for more information. + + :param params: + An optional list of parameters which will cause multiple invocations + of the fixture function and all of the tests using it. The current + parameter is available in ``request.param``. + + :param autouse: + If True, the fixture func is activated for all tests that can see it. + If False (the default), an explicit reference is needed to activate + the fixture. + + :param ids: + Sequence of ids each corresponding to the params so that they are + part of the test id. If no ids are provided they will be generated + automatically from the params. + + :param name: + The name of the fixture. This defaults to the name of the decorated + function. If a fixture is used in the same module in which it is + defined, the function name of the fixture will be shadowed by the + function arg that requests the fixture; one way to resolve this is to + name the decorated function ``fixture_`` and then use + ``@pytest.fixture(name='')``. + """ + fixture_marker = FixtureFunctionMarker( + scope=scope, + params=params, + autouse=autouse, + ids=ids, + name=name, + ) + + # Direct decoration. + if fixture_function: + return fixture_marker(fixture_function) + + return fixture_marker + + +def yield_fixture( + fixture_function=None, + *args, + scope="function", + params=None, + autouse=False, + ids=None, + name=None, +): + """(Return a) decorator to mark a yield-fixture factory function. + + .. deprecated:: 3.0 + Use :py:func:`pytest.fixture` directly instead. + """ + warnings.warn(YIELD_FIXTURE, stacklevel=2) + return fixture( + fixture_function, + *args, + scope=scope, + params=params, + autouse=autouse, + ids=ids, + name=name, + ) + + +@fixture(scope="session") +def pytestconfig(request: FixtureRequest) -> Config: + """Session-scoped fixture that returns the session's :class:`pytest.Config` + object. + + Example:: + + def test_foo(pytestconfig): + if pytestconfig.getoption("verbose") > 0: + ... + + """ + return request.config + + +def pytest_addoption(parser: Parser) -> None: + parser.addini( + "usefixtures", + type="args", + default=[], + help="list of default fixtures to be used with this project", + ) + + +class FixtureManager: + """pytest fixture definitions and information is stored and managed + from this class. + + During collection fm.parsefactories() is called multiple times to parse + fixture function definitions into FixtureDef objects and internal + data structures. + + During collection of test functions, metafunc-mechanics instantiate + a FuncFixtureInfo object which is cached per node/func-name. + This FuncFixtureInfo object is later retrieved by Function nodes + which themselves offer a fixturenames attribute. + + The FuncFixtureInfo object holds information about fixtures and FixtureDefs + relevant for a particular function. An initial list of fixtures is + assembled like this: + + - ini-defined usefixtures + - autouse-marked fixtures along the collection chain up from the function + - usefixtures markers at module/class/function level + - test function funcargs + + Subsequently the funcfixtureinfo.fixturenames attribute is computed + as the closure of the fixtures needed to setup the initial fixtures, + i.e. fixtures needed by fixture functions themselves are appended + to the fixturenames list. 
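+
+    For example (illustrative): a test requesting ``db``, where the ``db``
+    fixture itself requests ``tmp_path``, gets a closure containing both
+    ``db`` and ``tmp_path`` even though the test only names ``db``.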
+ + Upon the test-setup phases all fixturenames are instantiated, retrieved + by a lookup of their FuncFixtureInfo. + """ + + FixtureLookupError = FixtureLookupError + FixtureLookupErrorRepr = FixtureLookupErrorRepr + + def __init__(self, session: "Session") -> None: + self.session = session + self.config: Config = session.config + self._arg2fixturedefs: Dict[str, List[FixtureDef[Any]]] = {} + self._holderobjseen: Set[object] = set() + # A mapping from a nodeid to a list of autouse fixtures it defines. + self._nodeid_autousenames: Dict[str, List[str]] = { + "": self.config.getini("usefixtures"), + } + session.config.pluginmanager.register(self, "funcmanage") + + def _get_direct_parametrize_args(self, node: nodes.Node) -> List[str]: + """Return all direct parametrization arguments of a node, so we don't + mistake them for fixtures. + + Check https://github.com/pytest-dev/pytest/issues/5036. + + These things are done later as well when dealing with parametrization + so this could be improved. + """ + parametrize_argnames: List[str] = [] + for marker in node.iter_markers(name="parametrize"): + if not marker.kwargs.get("indirect", False): + p_argnames, _ = ParameterSet._parse_parametrize_args( + *marker.args, **marker.kwargs + ) + parametrize_argnames.extend(p_argnames) + + return parametrize_argnames + + def getfixtureinfo( + self, node: nodes.Node, func, cls, funcargs: bool = True + ) -> FuncFixtureInfo: + if funcargs and not getattr(node, "nofuncargs", False): + argnames = getfuncargnames(func, name=node.name, cls=cls) + else: + argnames = () + + usefixtures = tuple( + arg for mark in node.iter_markers(name="usefixtures") for arg in mark.args + ) + initialnames = usefixtures + argnames + fm = node.session._fixturemanager + initialnames, names_closure, arg2fixturedefs = fm.getfixtureclosure( + initialnames, node, ignore_args=self._get_direct_parametrize_args(node) + ) + return FuncFixtureInfo(argnames, initialnames, names_closure, arg2fixturedefs) + + def pytest_plugin_registered(self, plugin: _PluggyPlugin) -> None: + nodeid = None + try: + p = absolutepath(plugin.__file__) # type: ignore[attr-defined] + except AttributeError: + pass + else: + # Construct the base nodeid which is later used to check + # what fixtures are visible for particular tests (as denoted + # by their test id). + if p.name.startswith("conftest.py"): + try: + nodeid = str(p.parent.relative_to(self.config.rootpath)) + except ValueError: + nodeid = "" + if nodeid == ".": + nodeid = "" + if os.sep != nodes.SEP: + nodeid = nodeid.replace(os.sep, nodes.SEP) + + self.parsefactories(plugin, nodeid) + + def _getautousenames(self, nodeid: str) -> Iterator[str]: + """Return the names of autouse fixtures applicable to nodeid.""" + for parentnodeid in nodes.iterparentnodeids(nodeid): + basenames = self._nodeid_autousenames.get(parentnodeid) + if basenames: + yield from basenames + + def getfixtureclosure( + self, + fixturenames: Tuple[str, ...], + parentnode: nodes.Node, + ignore_args: Sequence[str] = (), + ) -> Tuple[Tuple[str, ...], List[str], Dict[str, Sequence[FixtureDef[Any]]]]: + # Collect the closure of all fixtures, starting with the given + # fixturenames as the initial set. As we have to visit all + # factory definitions anyway, we also return an arg2fixturedefs + # mapping so that the caller can reuse it and does not have + # to re-discover fixturedefs again for each fixturename + # (discovering matching fixtures for a given name/node is expensive). 
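+        # The closure is computed as a fixed point below: keep merging the
+        # argnames of newly discovered fixturedefs until a full pass adds
+        # nothing new.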
+ + parentid = parentnode.nodeid + fixturenames_closure = list(self._getautousenames(parentid)) + + def merge(otherlist: Iterable[str]) -> None: + for arg in otherlist: + if arg not in fixturenames_closure: + fixturenames_closure.append(arg) + + merge(fixturenames) + + # At this point, fixturenames_closure contains what we call "initialnames", + # which is a set of fixturenames the function immediately requests. We + # need to return it as well, so save this. + initialnames = tuple(fixturenames_closure) + + arg2fixturedefs: Dict[str, Sequence[FixtureDef[Any]]] = {} + lastlen = -1 + while lastlen != len(fixturenames_closure): + lastlen = len(fixturenames_closure) + for argname in fixturenames_closure: + if argname in ignore_args: + continue + if argname in arg2fixturedefs: + continue + fixturedefs = self.getfixturedefs(argname, parentid) + if fixturedefs: + arg2fixturedefs[argname] = fixturedefs + merge(fixturedefs[-1].argnames) + + def sort_by_scope(arg_name: str) -> Scope: + try: + fixturedefs = arg2fixturedefs[arg_name] + except KeyError: + return Scope.Function + else: + return fixturedefs[-1]._scope + + fixturenames_closure.sort(key=sort_by_scope, reverse=True) + return initialnames, fixturenames_closure, arg2fixturedefs + + def pytest_generate_tests(self, metafunc: "Metafunc") -> None: + """Generate new tests based on parametrized fixtures used by the given metafunc""" + + def get_parametrize_mark_argnames(mark: Mark) -> Sequence[str]: + args, _ = ParameterSet._parse_parametrize_args(*mark.args, **mark.kwargs) + return args + + for argname in metafunc.fixturenames: + # Get the FixtureDefs for the argname. + fixture_defs = metafunc._arg2fixturedefs.get(argname) + if not fixture_defs: + # Will raise FixtureLookupError at setup time if not parametrized somewhere + # else (e.g @pytest.mark.parametrize) + continue + + # If the test itself parametrizes using this argname, give it + # precedence. + if any( + argname in get_parametrize_mark_argnames(mark) + for mark in metafunc.definition.iter_markers("parametrize") + ): + continue + + # In the common case we only look at the fixture def with the + # closest scope (last in the list). But if the fixture overrides + # another fixture, while requesting the super fixture, keep going + # in case the super fixture is parametrized (#1953). + for fixturedef in reversed(fixture_defs): + # Fixture is parametrized, apply it and stop. + if fixturedef.params is not None: + metafunc.parametrize( + argname, + fixturedef.params, + indirect=True, + scope=fixturedef.scope, + ids=fixturedef.ids, + ) + break + + # Not requesting the overridden super fixture, stop. + if argname not in fixturedef.argnames: + break + + # Try next super fixture, if any. + + def pytest_collection_modifyitems(self, items: List[nodes.Item]) -> None: + # Separate parametrized setups. + items[:] = reorder_items(items) + + def parsefactories( + self, node_or_obj, nodeid=NOTSET, unittest: bool = False + ) -> None: + if nodeid is not NOTSET: + holderobj = node_or_obj + else: + holderobj = node_or_obj.obj + nodeid = node_or_obj.nodeid + if holderobj in self._holderobjseen: + return + + self._holderobjseen.add(holderobj) + autousenames = [] + for name in dir(holderobj): + # ugly workaround for one of the fspath deprecated property of node + # todo: safely generalize + if isinstance(holderobj, nodes.Node) and name == "fspath": + continue + + # The attribute can be an arbitrary descriptor, so the attribute + # access below can raise. safe_getatt() ignores such exceptions. 
+ obj = safe_getattr(holderobj, name, None) + marker = getfixturemarker(obj) + if not isinstance(marker, FixtureFunctionMarker): + # Magic globals with __getattr__ might have got us a wrong + # fixture attribute. + continue + + if marker.name: + name = marker.name + + # During fixture definition we wrap the original fixture function + # to issue a warning if called directly, so here we unwrap it in + # order to not emit the warning when pytest itself calls the + # fixture function. + obj = get_real_method(obj, holderobj) + + fixture_def = FixtureDef( + fixturemanager=self, + baseid=nodeid, + argname=name, + func=obj, + scope=marker.scope, + params=marker.params, + unittest=unittest, + ids=marker.ids, + ) + + faclist = self._arg2fixturedefs.setdefault(name, []) + if fixture_def.has_location: + faclist.append(fixture_def) + else: + # fixturedefs with no location are at the front + # so this inserts the current fixturedef after the + # existing fixturedefs from external plugins but + # before the fixturedefs provided in conftests. + i = len([f for f in faclist if not f.has_location]) + faclist.insert(i, fixture_def) + if marker.autouse: + autousenames.append(name) + + if autousenames: + self._nodeid_autousenames.setdefault(nodeid or "", []).extend(autousenames) + + def getfixturedefs( + self, argname: str, nodeid: str + ) -> Optional[Sequence[FixtureDef[Any]]]: + """Get a list of fixtures which are applicable to the given node id. + + :param str argname: Name of the fixture to search for. + :param str nodeid: Full node id of the requesting test. + :rtype: Sequence[FixtureDef] + """ + try: + fixturedefs = self._arg2fixturedefs[argname] + except KeyError: + return None + return tuple(self._matchfactories(fixturedefs, nodeid)) + + def _matchfactories( + self, fixturedefs: Iterable[FixtureDef[Any]], nodeid: str + ) -> Iterator[FixtureDef[Any]]: + parentnodeids = set(nodes.iterparentnodeids(nodeid)) + for fixturedef in fixturedefs: + if fixturedef.baseid in parentnodeids: + yield fixturedef diff --git a/venv/lib/python3.10/site-packages/_pytest/freeze_support.py b/venv/lib/python3.10/site-packages/_pytest/freeze_support.py new file mode 100644 index 0000000..9f8ea23 --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/freeze_support.py @@ -0,0 +1,44 @@ +"""Provides a function to report all internal modules for using freezing +tools.""" +import types +from typing import Iterator +from typing import List +from typing import Union + + +def freeze_includes() -> List[str]: + """Return a list of module names used by pytest that should be + included by cx_freeze.""" + import _pytest + + result = list(_iter_all_modules(_pytest)) + return result + + +def _iter_all_modules( + package: Union[str, types.ModuleType], + prefix: str = "", +) -> Iterator[str]: + """Iterate over the names of all modules that can be found in the given + package, recursively. + + >>> import _pytest + >>> list(_iter_all_modules(_pytest)) + ['_pytest._argcomplete', '_pytest._code.code', ...] + """ + import os + import pkgutil + + if isinstance(package, str): + path = package + else: + # Type ignored because typeshed doesn't define ModuleType.__path__ + # (only defined on packages). + package_path = package.__path__ # type: ignore[attr-defined] + path, prefix = package_path[0], package.__name__ + "." 
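+    # Recurse into subpackages so nested modules are yielded with their fully
+    # qualified dotted names, as in the doctest above.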
+ for _, name, is_package in pkgutil.iter_modules([path]): + if is_package: + for m in _iter_all_modules(os.path.join(path, name), prefix=name + "."): + yield prefix + m + else: + yield prefix + name diff --git a/venv/lib/python3.10/site-packages/_pytest/helpconfig.py b/venv/lib/python3.10/site-packages/_pytest/helpconfig.py new file mode 100644 index 0000000..aca2cd3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/helpconfig.py @@ -0,0 +1,264 @@ +"""Version info, help messages, tracing configuration.""" +import os +import sys +from argparse import Action +from typing import List +from typing import Optional +from typing import Union + +import pytest +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config import PrintHelp +from _pytest.config.argparsing import Parser + + +class HelpAction(Action): + """An argparse Action that will raise an exception in order to skip the + rest of the argument parsing when --help is passed. + + This prevents argparse from quitting due to missing required arguments + when any are defined, for example by ``pytest_addoption``. + This is similar to the way that the builtin argparse --help option is + implemented by raising SystemExit. + """ + + def __init__(self, option_strings, dest=None, default=False, help=None): + super().__init__( + option_strings=option_strings, + dest=dest, + const=True, + default=default, + nargs=0, + help=help, + ) + + def __call__(self, parser, namespace, values, option_string=None): + setattr(namespace, self.dest, self.const) + + # We should only skip the rest of the parsing after preparse is done. + if getattr(parser._parser, "after_preparse", False): + raise PrintHelp + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("debugconfig") + group.addoption( + "--version", + "-V", + action="count", + default=0, + dest="version", + help="display pytest version and information about plugins. " + "When given twice, also display information about plugins.", + ) + group._addoption( + "-h", + "--help", + action=HelpAction, + dest="help", + help="show help message and configuration info", + ) + group._addoption( + "-p", + action="append", + dest="plugins", + default=[], + metavar="name", + help="early-load given plugin module name or entry point (multi-allowed).\n" + "To avoid loading of plugins, use the `no:` prefix, e.g. " + "`no:doctest`.", + ) + group.addoption( + "--traceconfig", + "--trace-config", + action="store_true", + default=False, + help="trace considerations of conftest.py files.", + ) + group.addoption( + "--debug", + action="store", + nargs="?", + const="pytestdebug.log", + dest="debug", + metavar="DEBUG_FILE_NAME", + help="store internal tracing debug information in this log file.\n" + "This file is opened with 'w' and truncated as a result, care advised.\n" + "Defaults to 'pytestdebug.log'.", + ) + group._addoption( + "-o", + "--override-ini", + dest="override_ini", + action="append", + help='override ini option with "option=value" style, e.g. `-o xfail_strict=True -o cache_dir=cache`.', + ) + + +@pytest.hookimpl(hookwrapper=True) +def pytest_cmdline_parse(): + outcome = yield + config: Config = outcome.get_result() + + if config.option.debug: + # --debug | --debug was provided. 
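+        # (--debug may be given bare or with a file name; bare --debug falls
+        # back to the "pytestdebug.log" default declared in pytest_addoption.)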
+ path = config.option.debug + debugfile = open(path, "w") + debugfile.write( + "versions pytest-%s, " + "python-%s\ncwd=%s\nargs=%s\n\n" + % ( + pytest.__version__, + ".".join(map(str, sys.version_info)), + os.getcwd(), + config.invocation_params.args, + ) + ) + config.trace.root.setwriter(debugfile.write) + undo_tracing = config.pluginmanager.enable_tracing() + sys.stderr.write("writing pytest debug information to %s\n" % path) + + def unset_tracing() -> None: + debugfile.close() + sys.stderr.write("wrote pytest debug information to %s\n" % debugfile.name) + config.trace.root.setwriter(None) + undo_tracing() + + config.add_cleanup(unset_tracing) + + +def showversion(config: Config) -> None: + if config.option.version > 1: + sys.stdout.write( + "This is pytest version {}, imported from {}\n".format( + pytest.__version__, pytest.__file__ + ) + ) + plugininfo = getpluginversioninfo(config) + if plugininfo: + for line in plugininfo: + sys.stdout.write(line + "\n") + else: + sys.stdout.write(f"pytest {pytest.__version__}\n") + + +def pytest_cmdline_main(config: Config) -> Optional[Union[int, ExitCode]]: + if config.option.version > 0: + showversion(config) + return 0 + elif config.option.help: + config._do_configure() + showhelp(config) + config._ensure_unconfigure() + return 0 + return None + + +def showhelp(config: Config) -> None: + import textwrap + + reporter = config.pluginmanager.get_plugin("terminalreporter") + tw = reporter._tw + tw.write(config._parser.optparser.format_help()) + tw.line() + tw.line( + "[pytest] ini-options in the first pytest.ini|tox.ini|setup.cfg file found:" + ) + tw.line() + + columns = tw.fullwidth # costly call + indent_len = 24 # based on argparse's max_help_position=24 + indent = " " * indent_len + for name in config._parser._ininames: + help, type, default = config._parser._inidict[name] + if type is None: + type = "string" + if help is None: + raise TypeError(f"help argument cannot be None for {name}") + spec = f"{name} ({type}):" + tw.write(" %s" % spec) + spec_len = len(spec) + if spec_len > (indent_len - 3): + # Display help starting at a new line. + tw.line() + helplines = textwrap.wrap( + help, + columns, + initial_indent=indent, + subsequent_indent=indent, + break_on_hyphens=False, + ) + + for line in helplines: + tw.line(line) + else: + # Display help starting after the spec, following lines indented. 
+ tw.write(" " * (indent_len - spec_len - 2)) + wrapped = textwrap.wrap(help, columns - indent_len, break_on_hyphens=False) + + if wrapped: + tw.line(wrapped[0]) + for line in wrapped[1:]: + tw.line(indent + line) + + tw.line() + tw.line("environment variables:") + vars = [ + ("PYTEST_ADDOPTS", "extra command line options"), + ("PYTEST_PLUGINS", "comma-separated plugins to load during startup"), + ("PYTEST_DISABLE_PLUGIN_AUTOLOAD", "set to disable plugin auto-loading"), + ("PYTEST_DEBUG", "set to enable debug tracing of pytest's internals"), + ] + for name, help in vars: + tw.line(f" {name:<24} {help}") + tw.line() + tw.line() + + tw.line("to see available markers type: pytest --markers") + tw.line("to see available fixtures type: pytest --fixtures") + tw.line( + "(shown according to specified file_or_dir or current dir " + "if not specified; fixtures with leading '_' are only shown " + "with the '-v' option" + ) + + for warningreport in reporter.stats.get("warnings", []): + tw.line("warning : " + warningreport.message, red=True) + return + + +conftest_options = [("pytest_plugins", "list of plugin names to load")] + + +def getpluginversioninfo(config: Config) -> List[str]: + lines = [] + plugininfo = config.pluginmanager.list_plugin_distinfo() + if plugininfo: + lines.append("setuptools registered plugins:") + for plugin, dist in plugininfo: + loc = getattr(plugin, "__file__", repr(plugin)) + content = f"{dist.project_name}-{dist.version} at {loc}" + lines.append(" " + content) + return lines + + +def pytest_report_header(config: Config) -> List[str]: + lines = [] + if config.option.debug or config.option.traceconfig: + lines.append(f"using: pytest-{pytest.__version__}") + + verinfo = getpluginversioninfo(config) + if verinfo: + lines.extend(verinfo) + + if config.option.traceconfig: + lines.append("active plugins:") + items = config.pluginmanager.list_name_plugin() + for name, plugin in items: + if hasattr(plugin, "__file__"): + r = plugin.__file__ + else: + r = repr(plugin) + lines.append(f" {name:<20}: {r}") + return lines diff --git a/venv/lib/python3.10/site-packages/_pytest/hookspec.py b/venv/lib/python3.10/site-packages/_pytest/hookspec.py new file mode 100644 index 0000000..a03c0e9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/hookspec.py @@ -0,0 +1,892 @@ +"""Hook specifications for pytest plugins which are invoked by pytest itself +and by builtin plugins.""" +from pathlib import Path +from typing import Any +from typing import Dict +from typing import List +from typing import Mapping +from typing import Optional +from typing import Sequence +from typing import Tuple +from typing import TYPE_CHECKING +from typing import Union + +from pluggy import HookspecMarker + +from _pytest.deprecated import WARNING_CMDLINE_PREPARSE_HOOK + +if TYPE_CHECKING: + import pdb + import warnings + from typing_extensions import Literal + + from _pytest._code.code import ExceptionRepr + from _pytest.code import ExceptionInfo + from _pytest.config import Config + from _pytest.config import ExitCode + from _pytest.config import PytestPluginManager + from _pytest.config import _PluggyPlugin + from _pytest.config.argparsing import Parser + from _pytest.fixtures import FixtureDef + from _pytest.fixtures import SubRequest + from _pytest.main import Session + from _pytest.nodes import Collector + from _pytest.nodes import Item + from _pytest.outcomes import Exit + from _pytest.python import Class + from _pytest.python import Function + from _pytest.python import Metafunc + from _pytest.python 
import Module + from _pytest.reports import CollectReport + from _pytest.reports import TestReport + from _pytest.runner import CallInfo + from _pytest.terminal import TerminalReporter + from _pytest.compat import LEGACY_PATH + + +hookspec = HookspecMarker("pytest") + +# ------------------------------------------------------------------------- +# Initialization hooks called for every plugin +# ------------------------------------------------------------------------- + + +@hookspec(historic=True) +def pytest_addhooks(pluginmanager: "PytestPluginManager") -> None: + """Called at plugin registration time to allow adding new hooks via a call to + ``pluginmanager.add_hookspecs(module_or_class, prefix)``. + + :param pytest.PytestPluginManager pluginmanager: The pytest plugin manager. + + .. note:: + This hook is incompatible with ``hookwrapper=True``. + """ + + +@hookspec(historic=True) +def pytest_plugin_registered( + plugin: "_PluggyPlugin", manager: "PytestPluginManager" +) -> None: + """A new pytest plugin got registered. + + :param plugin: The plugin module or instance. + :param pytest.PytestPluginManager manager: pytest plugin manager. + + .. note:: + This hook is incompatible with ``hookwrapper=True``. + """ + + +@hookspec(historic=True) +def pytest_addoption(parser: "Parser", pluginmanager: "PytestPluginManager") -> None: + """Register argparse-style options and ini-style config values, + called once at the beginning of a test run. + + .. note:: + + This function should be implemented only in plugins or ``conftest.py`` + files situated at the tests root directory due to how pytest + :ref:`discovers plugins during startup `. + + :param pytest.Parser parser: + To add command line options, call + :py:func:`parser.addoption(...) `. + To add ini-file values call :py:func:`parser.addini(...) + `. + + :param pytest.PytestPluginManager pluginmanager: + The pytest plugin manager, which can be used to install :py:func:`hookspec`'s + or :py:func:`hookimpl`'s and allow one plugin to call another plugin's hooks + to change how command line options are added. + + Options can later be accessed through the + :py:class:`config ` object, respectively: + + - :py:func:`config.getoption(name) ` to + retrieve the value of a command line option. + + - :py:func:`config.getini(name) ` to retrieve + a value read from an ini-style file. + + The config object is passed around on many internal objects via the ``.config`` + attribute or can be retrieved as the ``pytestconfig`` fixture. + + .. note:: + This hook is incompatible with ``hookwrapper=True``. + """ + + +@hookspec(historic=True) +def pytest_configure(config: "Config") -> None: + """Allow plugins and conftest files to perform initial configuration. + + This hook is called for every plugin and initial conftest file + after command line options have been parsed. + + After that, the hook is called for other conftest files as they are + imported. + + .. note:: + This hook is incompatible with ``hookwrapper=True``. + + :param pytest.Config config: The pytest config object. + """ + + +# ------------------------------------------------------------------------- +# Bootstrapping hooks called for plugins registered early enough: +# internal and 3rd party plugins. +# ------------------------------------------------------------------------- + + +@hookspec(firstresult=True) +def pytest_cmdline_parse( + pluginmanager: "PytestPluginManager", args: List[str] +) -> Optional["Config"]: + """Return an initialized config object, parsing the specified args. 
+ + Stops at first non-None result, see :ref:`firstresult`. + + .. note:: + This hook will only be called for plugin classes passed to the + ``plugins`` arg when using `pytest.main`_ to perform an in-process + test run. + + :param pytest.PytestPluginManager pluginmanager: The pytest plugin manager. + :param List[str] args: List of arguments passed on the command line. + """ + + +@hookspec(warn_on_impl=WARNING_CMDLINE_PREPARSE_HOOK) +def pytest_cmdline_preparse(config: "Config", args: List[str]) -> None: + """(**Deprecated**) modify command line arguments before option parsing. + + This hook is considered deprecated and will be removed in a future pytest version. Consider + using :hook:`pytest_load_initial_conftests` instead. + + .. note:: + This hook will not be called for ``conftest.py`` files, only for setuptools plugins. + + :param pytest.Config config: The pytest config object. + :param List[str] args: Arguments passed on the command line. + """ + + +@hookspec(firstresult=True) +def pytest_cmdline_main(config: "Config") -> Optional[Union["ExitCode", int]]: + """Called for performing the main command line action. The default + implementation will invoke the configure hooks and runtest_mainloop. + + Stops at first non-None result, see :ref:`firstresult`. + + :param pytest.Config config: The pytest config object. + """ + + +def pytest_load_initial_conftests( + early_config: "Config", parser: "Parser", args: List[str] +) -> None: + """Called to implement the loading of initial conftest files ahead + of command line option parsing. + + .. note:: + This hook will not be called for ``conftest.py`` files, only for setuptools plugins. + + :param pytest.Config early_config: The pytest config object. + :param List[str] args: Arguments passed on the command line. + :param pytest.Parser parser: To add command line options. + """ + + +# ------------------------------------------------------------------------- +# collection hooks +# ------------------------------------------------------------------------- + + +@hookspec(firstresult=True) +def pytest_collection(session: "Session") -> Optional[object]: + """Perform the collection phase for the given session. + + Stops at first non-None result, see :ref:`firstresult`. + The return value is not used, but only stops further processing. + + The default collection phase is this (see individual hooks for full details): + + 1. Starting from ``session`` as the initial collector: + + 1. ``pytest_collectstart(collector)`` + 2. ``report = pytest_make_collect_report(collector)`` + 3. ``pytest_exception_interact(collector, call, report)`` if an interactive exception occurred + 4. For each collected node: + + 1. If an item, ``pytest_itemcollected(item)`` + 2. If a collector, recurse into it. + + 5. ``pytest_collectreport(report)`` + + 2. ``pytest_collection_modifyitems(session, config, items)`` + + 1. ``pytest_deselected(items)`` for any deselected items (may be called multiple times) + + 3. ``pytest_collection_finish(session)`` + 4. Set ``session.items`` to the list of collected items + 5. Set ``session.testscollected`` to the number of collected items + + You can implement this hook to only perform some action before collection, + for example the terminal plugin uses it to start displaying the collection + counter (and returns `None`). + + :param pytest.Session session: The pytest session object. + """ + + +def pytest_collection_modifyitems( + session: "Session", config: "Config", items: List["Item"] +) -> None: + """Called after collection has been performed. 
May filter or re-order + the items in-place. + + :param pytest.Session session: The pytest session object. + :param pytest.Config config: The pytest config object. + :param List[pytest.Item] items: List of item objects. + """ + + +def pytest_collection_finish(session: "Session") -> None: + """Called after collection has been performed and modified. + + :param pytest.Session session: The pytest session object. + """ + + +@hookspec(firstresult=True) +def pytest_ignore_collect( + collection_path: Path, path: "LEGACY_PATH", config: "Config" +) -> Optional[bool]: + """Return True to prevent considering this path for collection. + + This hook is consulted for all files and directories prior to calling + more specific hooks. + + Stops at first non-None result, see :ref:`firstresult`. + + :param pathlib.Path collection_path : The path to analyze. + :param LEGACY_PATH path: The path to analyze (deprecated). + :param pytest.Config config: The pytest config object. + + .. versionchanged:: 7.0.0 + The ``collection_path`` parameter was added as a :class:`pathlib.Path` + equivalent of the ``path`` parameter. The ``path`` parameter + has been deprecated. + """ + + +def pytest_collect_file( + file_path: Path, path: "LEGACY_PATH", parent: "Collector" +) -> "Optional[Collector]": + """Create a Collector for the given path, or None if not relevant. + + The new node needs to have the specified ``parent`` as a parent. + + :param pathlib.Path file_path: The path to analyze. + :param LEGACY_PATH path: The path to collect (deprecated). + + .. versionchanged:: 7.0.0 + The ``file_path`` parameter was added as a :class:`pathlib.Path` + equivalent of the ``path`` parameter. The ``path`` parameter + has been deprecated. + """ + + +# logging hooks for collection + + +def pytest_collectstart(collector: "Collector") -> None: + """Collector starts collecting.""" + + +def pytest_itemcollected(item: "Item") -> None: + """We just collected a test item.""" + + +def pytest_collectreport(report: "CollectReport") -> None: + """Collector finished collecting.""" + + +def pytest_deselected(items: Sequence["Item"]) -> None: + """Called for deselected test items, e.g. by keyword. + + May be called multiple times. + """ + + +@hookspec(firstresult=True) +def pytest_make_collect_report(collector: "Collector") -> "Optional[CollectReport]": + """Perform :func:`collector.collect() ` and return + a :class:`~pytest.CollectReport`. + + Stops at first non-None result, see :ref:`firstresult`. + """ + + +# ------------------------------------------------------------------------- +# Python test function related hooks +# ------------------------------------------------------------------------- + + +@hookspec(firstresult=True) +def pytest_pycollect_makemodule( + module_path: Path, path: "LEGACY_PATH", parent +) -> Optional["Module"]: + """Return a Module collector or None for the given path. + + This hook will be called for each matching test module path. + The pytest_collect_file hook needs to be used if you want to + create test modules for files that do not match as a test module. + + Stops at first non-None result, see :ref:`firstresult`. + + :param pathlib.Path module_path: The path of the module to collect. + :param LEGACY_PATH path: The path of the module to collect (deprecated). + + .. versionchanged:: 7.0.0 + The ``module_path`` parameter was added as a :class:`pathlib.Path` + equivalent of the ``path`` parameter. + + The ``path`` parameter has been deprecated in favor of ``fspath``. 
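+
+    For example, a ``conftest.py`` could route certain files to a custom
+    collector (a minimal sketch; ``MyModule`` stands in for a
+    ``pytest.Module`` subclass defined elsewhere in the project)::
+
+        def pytest_pycollect_makemodule(module_path, parent):
+            if module_path.name == "test_special.py":
+                return MyModule.from_parent(parent, path=module_path)
+            return None  # fall back to the default Module collector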
+ """ + + +@hookspec(firstresult=True) +def pytest_pycollect_makeitem( + collector: Union["Module", "Class"], name: str, obj: object +) -> Union[None, "Item", "Collector", List[Union["Item", "Collector"]]]: + """Return a custom item/collector for a Python object in a module, or None. + + Stops at first non-None result, see :ref:`firstresult`. + """ + + +@hookspec(firstresult=True) +def pytest_pyfunc_call(pyfuncitem: "Function") -> Optional[object]: + """Call underlying test function. + + Stops at first non-None result, see :ref:`firstresult`. + """ + + +def pytest_generate_tests(metafunc: "Metafunc") -> None: + """Generate (multiple) parametrized calls to a test function.""" + + +@hookspec(firstresult=True) +def pytest_make_parametrize_id( + config: "Config", val: object, argname: str +) -> Optional[str]: + """Return a user-friendly string representation of the given ``val`` + that will be used by @pytest.mark.parametrize calls, or None if the hook + doesn't know about ``val``. + + The parameter name is available as ``argname``, if required. + + Stops at first non-None result, see :ref:`firstresult`. + + :param pytest.Config config: The pytest config object. + :param val: The parametrized value. + :param str argname: The automatic parameter name produced by pytest. + """ + + +# ------------------------------------------------------------------------- +# runtest related hooks +# ------------------------------------------------------------------------- + + +@hookspec(firstresult=True) +def pytest_runtestloop(session: "Session") -> Optional[object]: + """Perform the main runtest loop (after collection finished). + + The default hook implementation performs the runtest protocol for all items + collected in the session (``session.items``), unless the collection failed + or the ``collectonly`` pytest option is set. + + If at any point :py:func:`pytest.exit` is called, the loop is + terminated immediately. + + If at any point ``session.shouldfail`` or ``session.shouldstop`` are set, the + loop is terminated after the runtest protocol for the current item is finished. + + :param pytest.Session session: The pytest session object. + + Stops at first non-None result, see :ref:`firstresult`. + The return value is not used, but only stops further processing. + """ + + +@hookspec(firstresult=True) +def pytest_runtest_protocol( + item: "Item", nextitem: "Optional[Item]" +) -> Optional[object]: + """Perform the runtest protocol for a single test item. 
+ + The default runtest protocol is this (see individual hooks for full details): + + - ``pytest_runtest_logstart(nodeid, location)`` + + - Setup phase: + - ``call = pytest_runtest_setup(item)`` (wrapped in ``CallInfo(when="setup")``) + - ``report = pytest_runtest_makereport(item, call)`` + - ``pytest_runtest_logreport(report)`` + - ``pytest_exception_interact(call, report)`` if an interactive exception occurred + + - Call phase, if the setup passed and the ``setuponly`` pytest option is not set: + - ``call = pytest_runtest_call(item)`` (wrapped in ``CallInfo(when="call")``) + - ``report = pytest_runtest_makereport(item, call)`` + - ``pytest_runtest_logreport(report)`` + - ``pytest_exception_interact(call, report)`` if an interactive exception occurred + + - Teardown phase: + - ``call = pytest_runtest_teardown(item, nextitem)`` (wrapped in ``CallInfo(when="teardown")``) + - ``report = pytest_runtest_makereport(item, call)`` + - ``pytest_runtest_logreport(report)`` + - ``pytest_exception_interact(call, report)`` if an interactive exception occurred + + - ``pytest_runtest_logfinish(nodeid, location)`` + + :param item: Test item for which the runtest protocol is performed. + :param nextitem: The scheduled-to-be-next test item (or None if this is the end my friend). + + Stops at first non-None result, see :ref:`firstresult`. + The return value is not used, but only stops further processing. + """ + + +def pytest_runtest_logstart( + nodeid: str, location: Tuple[str, Optional[int], str] +) -> None: + """Called at the start of running the runtest protocol for a single item. + + See :hook:`pytest_runtest_protocol` for a description of the runtest protocol. + + :param str nodeid: Full node ID of the item. + :param location: A tuple of ``(filename, lineno, testname)``. + """ + + +def pytest_runtest_logfinish( + nodeid: str, location: Tuple[str, Optional[int], str] +) -> None: + """Called at the end of running the runtest protocol for a single item. + + See :hook:`pytest_runtest_protocol` for a description of the runtest protocol. + + :param str nodeid: Full node ID of the item. + :param location: A tuple of ``(filename, lineno, testname)``. + """ + + +def pytest_runtest_setup(item: "Item") -> None: + """Called to perform the setup phase for a test item. + + The default implementation runs ``setup()`` on ``item`` and all of its + parents (which haven't been setup yet). This includes obtaining the + values of fixtures required by the item (which haven't been obtained + yet). + """ + + +def pytest_runtest_call(item: "Item") -> None: + """Called to run the test for test item (the call phase). + + The default implementation calls ``item.runtest()``. + """ + + +def pytest_runtest_teardown(item: "Item", nextitem: Optional["Item"]) -> None: + """Called to perform the teardown phase for a test item. + + The default implementation runs the finalizers and calls ``teardown()`` + on ``item`` and all of its parents (which need to be torn down). This + includes running the teardown phase of fixtures required by the item (if + they go out of scope). + + :param nextitem: + The scheduled-to-be-next test item (None if no further test item is + scheduled). This argument is used to perform exact teardowns, i.e. + calling just enough finalizers so that nextitem only needs to call + setup functions.
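+
+    For example, a ``conftest.py`` could log every teardown phase (a
+    minimal sketch)::
+
+        def pytest_runtest_teardown(item, nextitem):
+            print(f"tearing down {item.nodeid}, last={nextitem is None}")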
+ """ + + +@hookspec(firstresult=True) +def pytest_runtest_makereport( + item: "Item", call: "CallInfo[None]" +) -> Optional["TestReport"]: + """Called to create a :class:`~pytest.TestReport` for each of + the setup, call and teardown runtest phases of a test item. + + See :hook:`pytest_runtest_protocol` for a description of the runtest protocol. + + :param call: The :class:`~pytest.CallInfo` for the phase. + + Stops at first non-None result, see :ref:`firstresult`. + """ + + +def pytest_runtest_logreport(report: "TestReport") -> None: + """Process the :class:`~pytest.TestReport` produced for each + of the setup, call and teardown runtest phases of an item. + + See :hook:`pytest_runtest_protocol` for a description of the runtest protocol. + """ + + +@hookspec(firstresult=True) +def pytest_report_to_serializable( + config: "Config", + report: Union["CollectReport", "TestReport"], +) -> Optional[Dict[str, Any]]: + """Serialize the given report object into a data structure suitable for + sending over the wire, e.g. converted to JSON.""" + + +@hookspec(firstresult=True) +def pytest_report_from_serializable( + config: "Config", + data: Dict[str, Any], +) -> Optional[Union["CollectReport", "TestReport"]]: + """Restore a report object previously serialized with + :hook:`pytest_report_to_serializable`.""" + + +# ------------------------------------------------------------------------- +# Fixture related hooks +# ------------------------------------------------------------------------- + + +@hookspec(firstresult=True) +def pytest_fixture_setup( + fixturedef: "FixtureDef[Any]", request: "SubRequest" +) -> Optional[object]: + """Perform fixture setup execution. + + :returns: The return value of the call to the fixture function. + + Stops at first non-None result, see :ref:`firstresult`. + + .. note:: + If the fixture function returns None, other implementations of + this hook function will continue to be called, according to the + behavior of the :ref:`firstresult` option. + """ + + +def pytest_fixture_post_finalizer( + fixturedef: "FixtureDef[Any]", request: "SubRequest" +) -> None: + """Called after fixture teardown, but before the cache is cleared, so + the fixture result ``fixturedef.cached_result`` is still available (not + ``None``).""" + + +# ------------------------------------------------------------------------- +# test session related hooks +# ------------------------------------------------------------------------- + + +def pytest_sessionstart(session: "Session") -> None: + """Called after the ``Session`` object has been created and before performing collection + and entering the run test loop. + + :param pytest.Session session: The pytest session object. + """ + + +def pytest_sessionfinish( + session: "Session", + exitstatus: Union[int, "ExitCode"], +) -> None: + """Called after whole test run finished, right before returning the exit status to the system. + + :param pytest.Session session: The pytest session object. + :param int exitstatus: The status which pytest will return to the system. + """ + + +def pytest_unconfigure(config: "Config") -> None: + """Called before test process is exited. + + :param pytest.Config config: The pytest config object. 
+ """ + + +# ------------------------------------------------------------------------- +# hooks for customizing the assert methods +# ------------------------------------------------------------------------- + + +def pytest_assertrepr_compare( + config: "Config", op: str, left: object, right: object +) -> Optional[List[str]]: + """Return explanation for comparisons in failing assert expressions. + + Return None for no custom explanation, otherwise return a list + of strings. The strings will be joined by newlines but any newlines + *in* a string will be escaped. Note that all but the first line will + be indented slightly, the intention is for the first line to be a summary. + + :param pytest.Config config: The pytest config object. + """ + + +def pytest_assertion_pass(item: "Item", lineno: int, orig: str, expl: str) -> None: + """Called whenever an assertion passes. + + .. versionadded:: 5.0 + + Use this hook to do some processing after a passing assertion. + The original assertion information is available in the `orig` string + and the pytest introspected assertion information is available in the + `expl` string. + + This hook must be explicitly enabled by the ``enable_assertion_pass_hook`` + ini-file option: + + .. code-block:: ini + + [pytest] + enable_assertion_pass_hook=true + + You need to **clean the .pyc** files in your project directory and interpreter libraries + when enabling this option, as assertions will require to be re-written. + + :param pytest.Item item: pytest item object of current test. + :param int lineno: Line number of the assert statement. + :param str orig: String with the original assertion. + :param str expl: String with the assert explanation. + """ + + +# ------------------------------------------------------------------------- +# Hooks for influencing reporting (invoked from _pytest_terminal). +# ------------------------------------------------------------------------- + + +def pytest_report_header( + config: "Config", start_path: Path, startdir: "LEGACY_PATH" +) -> Union[str, List[str]]: + """Return a string or list of strings to be displayed as header info for terminal reporting. + + :param pytest.Config config: The pytest config object. + :param Path start_path: The starting dir. + :param LEGACY_PATH startdir: The starting dir (deprecated). + + .. note:: + + Lines returned by a plugin are displayed before those of plugins which + ran before it. + If you want to have your line(s) displayed first, use + :ref:`trylast=True `. + + .. note:: + + This function should be implemented only in plugins or ``conftest.py`` + files situated at the tests root directory due to how pytest + :ref:`discovers plugins during startup `. + + .. versionchanged:: 7.0.0 + The ``start_path`` parameter was added as a :class:`pathlib.Path` + equivalent of the ``startdir`` parameter. The ``startdir`` parameter + has been deprecated. + """ + + +def pytest_report_collectionfinish( + config: "Config", + start_path: Path, + startdir: "LEGACY_PATH", + items: Sequence["Item"], +) -> Union[str, List[str]]: + """Return a string or list of strings to be displayed after collection + has finished successfully. + + These strings will be displayed after the standard "collected X items" message. + + .. versionadded:: 3.2 + + :param pytest.Config config: The pytest config object. + :param Path start_path: The starting dir. + :param LEGACY_PATH startdir: The starting dir (deprecated). + :param items: List of pytest items that are going to be executed; this list should not be modified. + + .. 
note:: + + Lines returned by a plugin are displayed before those of plugins which + ran before it. + If you want to have your line(s) displayed first, use + :ref:`trylast=True `. + + .. versionchanged:: 7.0.0 + The ``start_path`` parameter was added as a :class:`pathlib.Path` + equivalent of the ``startdir`` parameter. The ``startdir`` parameter + has been deprecated. + """ + + +@hookspec(firstresult=True) +def pytest_report_teststatus( + report: Union["CollectReport", "TestReport"], config: "Config" +) -> Tuple[str, str, Union[str, Mapping[str, bool]]]: + """Return result-category, shortletter and verbose word for status + reporting. + + The result-category is a category in which to count the result, for + example "passed", "skipped", "error" or the empty string. + + The shortletter is shown as testing progresses, for example ".", "s", + "E" or the empty string. + + The verbose word is shown as testing progresses in verbose mode, for + example "PASSED", "SKIPPED", "ERROR" or the empty string. + + pytest may style these implicitly according to the report outcome. + To provide explicit styling, return a tuple for the verbose word, + for example ``"rerun", "R", ("RERUN", {"yellow": True})``. + + :param report: The report object whose status is to be returned. + :param config: The pytest config object. + + Stops at first non-None result, see :ref:`firstresult`. + """ + + +def pytest_terminal_summary( + terminalreporter: "TerminalReporter", + exitstatus: "ExitCode", + config: "Config", +) -> None: + """Add a section to terminal summary reporting. + + :param _pytest.terminal.TerminalReporter terminalreporter: The internal terminal reporter object. + :param int exitstatus: The exit status that will be reported back to the OS. + :param pytest.Config config: The pytest config object. + + .. versionadded:: 4.2 + The ``config`` parameter. + """ + + +@hookspec(historic=True) +def pytest_warning_recorded( + warning_message: "warnings.WarningMessage", + when: "Literal['config', 'collect', 'runtest']", + nodeid: str, + location: Optional[Tuple[str, int, str]], +) -> None: + """Process a warning captured by the internal pytest warnings plugin. + + :param warnings.WarningMessage warning_message: + The captured warning. This is the same object produced by :py:func:`warnings.catch_warnings`, and contains + the same attributes as the parameters of :py:func:`warnings.showwarning`. + + :param str when: + Indicates when the warning was captured. Possible values: + + * ``"config"``: during pytest configuration/initialization stage. + * ``"collect"``: during test collection. + * ``"runtest"``: during test execution. + + :param str nodeid: + Full id of the item. + + :param tuple|None location: + When available, holds information about the execution context of the captured + warning (filename, linenumber, function). ``function`` evaluates to <module> + when the execution context is at the module level. + + .. versionadded:: 6.0 + """ + + +# ------------------------------------------------------------------------- +# Hooks for influencing skipping +# ------------------------------------------------------------------------- + + +def pytest_markeval_namespace(config: "Config") -> Dict[str, Any]: + """Called when constructing the globals dictionary used for + evaluating string conditions in xfail/skipif markers. + + This is useful when the condition for a marker requires + objects that are expensive or impossible to obtain during + collection time, which is required by normal boolean + conditions. + + ..
versionadded:: 6.2 + + :param pytest.Config config: The pytest config object. + :returns: A dictionary of additional globals to add. + """ + + +# ------------------------------------------------------------------------- +# error handling and internal debugging hooks +# ------------------------------------------------------------------------- + + +def pytest_internalerror( + excrepr: "ExceptionRepr", + excinfo: "ExceptionInfo[BaseException]", +) -> Optional[bool]: + """Called for internal errors. + + Return True to suppress the fallback handling of printing an + INTERNALERROR message directly to sys.stderr. + """ + + +def pytest_keyboard_interrupt( + excinfo: "ExceptionInfo[Union[KeyboardInterrupt, Exit]]", +) -> None: + """Called for keyboard interrupt.""" + + +def pytest_exception_interact( + node: Union["Item", "Collector"], + call: "CallInfo[Any]", + report: Union["CollectReport", "TestReport"], +) -> None: + """Called when an exception was raised which can potentially be + interactively handled. + + May be called during collection (see :hook:`pytest_make_collect_report`), + in which case ``report`` is a :class:`CollectReport`. + + May be called during runtest of an item (see :hook:`pytest_runtest_protocol`), + in which case ``report`` is a :class:`TestReport`. + + This hook is not called if the exception that was raised is an internal + exception like ``skip.Exception``. + """ + + +def pytest_enter_pdb(config: "Config", pdb: "pdb.Pdb") -> None: + """Called upon pdb.set_trace(). + + Can be used by plugins to take special action just before the python + debugger enters interactive mode. + + :param pytest.Config config: The pytest config object. + :param pdb.Pdb pdb: The Pdb instance. + """ + + +def pytest_leave_pdb(config: "Config", pdb: "pdb.Pdb") -> None: + """Called when leaving pdb (e.g. with continue after pdb.set_trace()). + + Can be used by plugins to take special action just after the python + debugger leaves interactive mode. + + :param pytest.Config config: The pytest config object. + :param pdb.Pdb pdb: The Pdb instance. + """ diff --git a/venv/lib/python3.10/site-packages/_pytest/junitxml.py b/venv/lib/python3.10/site-packages/_pytest/junitxml.py new file mode 100644 index 0000000..1b9e3bf --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/junitxml.py @@ -0,0 +1,696 @@ +"""Report test results in JUnit-XML format, for use with Jenkins and build +integration servers. + +Based on initial code from Ross Lawley. + +Output conforms to +https://github.com/jenkinsci/xunit-plugin/blob/master/src/main/resources/org/jenkinsci/plugins/xunit/types/model/xsd/junit-10.xsd +""" +import functools +import os +import platform +import re +import xml.etree.ElementTree as ET +from datetime import datetime +from typing import Callable +from typing import Dict +from typing import List +from typing import Match +from typing import Optional +from typing import Tuple +from typing import Union + +import pytest +from _pytest import nodes +from _pytest import timing +from _pytest._code.code import ExceptionRepr +from _pytest._code.code import ReprFileLocation +from _pytest.config import Config +from _pytest.config import filename_arg +from _pytest.config.argparsing import Parser +from _pytest.fixtures import FixtureRequest +from _pytest.reports import TestReport +from _pytest.stash import StashKey +from _pytest.terminal import TerminalReporter + + +xml_key = StashKey["LogXML"]() + + +def bin_xml_escape(arg: object) -> str: + r"""Visually escape invalid XML characters. 
+ + For example, transforms + 'hello\aworld\b' + into + 'hello#x07world#x08' + Note that the #xABs are *not* XML escapes - missing the ampersand &#. + The idea is to escape visually for the user rather than for XML itself. + """ + + def repl(matchobj: Match[str]) -> str: + i = ord(matchobj.group()) + if i <= 0xFF: + return "#x%02X" % i + else: + return "#x%04X" % i + + # The spec range of valid chars is: + # Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF] + # For an unknown(?) reason, we disallow #x7F (DEL) as well. + illegal_xml_re = ( + "[^\u0009\u000A\u000D\u0020-\u007E\u0080-\uD7FF\uE000-\uFFFD\u10000-\u10FFFF]" + ) + return re.sub(illegal_xml_re, repl, str(arg)) + + +def merge_family(left, right) -> None: + result = {} + for kl, vl in left.items(): + for kr, vr in right.items(): + if not isinstance(vl, list): + raise TypeError(type(vl)) + result[kl] = vl + vr + left.update(result) + + +families = {} +families["_base"] = {"testcase": ["classname", "name"]} +families["_base_legacy"] = {"testcase": ["file", "line", "url"]} + +# xUnit 1.x inherits legacy attributes. +families["xunit1"] = families["_base"].copy() +merge_family(families["xunit1"], families["_base_legacy"]) + +# xUnit 2.x uses strict base attributes. +families["xunit2"] = families["_base"] + + +class _NodeReporter: + def __init__(self, nodeid: Union[str, TestReport], xml: "LogXML") -> None: + self.id = nodeid + self.xml = xml + self.add_stats = self.xml.add_stats + self.family = self.xml.family + self.duration = 0.0 + self.properties: List[Tuple[str, str]] = [] + self.nodes: List[ET.Element] = [] + self.attrs: Dict[str, str] = {} + + def append(self, node: ET.Element) -> None: + self.xml.add_stats(node.tag) + self.nodes.append(node) + + def add_property(self, name: str, value: object) -> None: + self.properties.append((str(name), bin_xml_escape(value))) + + def add_attribute(self, name: str, value: object) -> None: + self.attrs[str(name)] = bin_xml_escape(value) + + def make_properties_node(self) -> Optional[ET.Element]: + """Return a Junit node containing custom properties, if any.""" + if self.properties: + properties = ET.Element("properties") + for name, value in self.properties: + properties.append(ET.Element("property", name=name, value=value)) + return properties + return None + + def record_testreport(self, testreport: TestReport) -> None: + names = mangle_test_address(testreport.nodeid) + existing_attrs = self.attrs + classnames = names[:-1] + if self.xml.prefix: + classnames.insert(0, self.xml.prefix) + attrs: Dict[str, str] = { + "classname": ".".join(classnames), + "name": bin_xml_escape(names[-1]), + "file": testreport.location[0], + } + if testreport.location[1] is not None: + attrs["line"] = str(testreport.location[1]) + if hasattr(testreport, "url"): + attrs["url"] = testreport.url + self.attrs = attrs + self.attrs.update(existing_attrs) # Restore any user-defined attributes. + + # Preserve legacy testcase behavior. + if self.family == "xunit1": + return + + # Filter out attributes not permitted by this test family. + # Including custom attributes because they are not valid here.
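+        # For example, with family "xunit2" only "classname" and "name"
+        # survive; "file", "line" and "url" are xunit1-only attributes.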
+ temp_attrs = {} + for key in self.attrs.keys(): + if key in families[self.family]["testcase"]: + temp_attrs[key] = self.attrs[key] + self.attrs = temp_attrs + + def to_xml(self) -> ET.Element: + testcase = ET.Element("testcase", self.attrs, time="%.3f" % self.duration) + properties = self.make_properties_node() + if properties is not None: + testcase.append(properties) + testcase.extend(self.nodes) + return testcase + + def _add_simple(self, tag: str, message: str, data: Optional[str] = None) -> None: + node = ET.Element(tag, message=message) + node.text = bin_xml_escape(data) + self.append(node) + + def write_captured_output(self, report: TestReport) -> None: + if not self.xml.log_passing_tests and report.passed: + return + + content_out = report.capstdout + content_log = report.caplog + content_err = report.capstderr + if self.xml.logging == "no": + return + content_all = "" + if self.xml.logging in ["log", "all"]: + content_all = self._prepare_content(content_log, " Captured Log ") + if self.xml.logging in ["system-out", "out-err", "all"]: + content_all += self._prepare_content(content_out, " Captured Out ") + self._write_content(report, content_all, "system-out") + content_all = "" + if self.xml.logging in ["system-err", "out-err", "all"]: + content_all += self._prepare_content(content_err, " Captured Err ") + self._write_content(report, content_all, "system-err") + content_all = "" + if content_all: + self._write_content(report, content_all, "system-out") + + def _prepare_content(self, content: str, header: str) -> str: + return "\n".join([header.center(80, "-"), content, ""]) + + def _write_content(self, report: TestReport, content: str, jheader: str) -> None: + tag = ET.Element(jheader) + tag.text = bin_xml_escape(content) + self.append(tag) + + def append_pass(self, report: TestReport) -> None: + self.add_stats("passed") + + def append_failure(self, report: TestReport) -> None: + # msg = str(report.longrepr.reprtraceback.extraline) + if hasattr(report, "wasxfail"): + self._add_simple("skipped", "xfail-marked test passes unexpectedly") + else: + assert report.longrepr is not None + reprcrash: Optional[ReprFileLocation] = getattr( + report.longrepr, "reprcrash", None + ) + if reprcrash is not None: + message = reprcrash.message + else: + message = str(report.longrepr) + message = bin_xml_escape(message) + self._add_simple("failure", message, str(report.longrepr)) + + def append_collect_error(self, report: TestReport) -> None: + # msg = str(report.longrepr.reprtraceback.extraline) + assert report.longrepr is not None + self._add_simple("error", "collection failure", str(report.longrepr)) + + def append_collect_skipped(self, report: TestReport) -> None: + self._add_simple("skipped", "collection skipped", str(report.longrepr)) + + def append_error(self, report: TestReport) -> None: + assert report.longrepr is not None + reprcrash: Optional[ReprFileLocation] = getattr( + report.longrepr, "reprcrash", None + ) + if reprcrash is not None: + reason = reprcrash.message + else: + reason = str(report.longrepr) + + if report.when == "teardown": + msg = f'failed on teardown with "{reason}"' + else: + msg = f'failed on setup with "{reason}"' + self._add_simple("error", msg, str(report.longrepr)) + + def append_skipped(self, report: TestReport) -> None: + if hasattr(report, "wasxfail"): + xfailreason = report.wasxfail + if xfailreason.startswith("reason: "): + xfailreason = xfailreason[8:] + xfailreason = bin_xml_escape(xfailreason) + skipped = ET.Element("skipped", type="pytest.xfail", 
message=xfailreason) + self.append(skipped) + else: + assert isinstance(report.longrepr, tuple) + filename, lineno, skipreason = report.longrepr + if skipreason.startswith("Skipped: "): + skipreason = skipreason[9:] + details = f"{filename}:{lineno}: {skipreason}" + + skipped = ET.Element("skipped", type="pytest.skip", message=skipreason) + skipped.text = bin_xml_escape(details) + self.append(skipped) + self.write_captured_output(report) + + def finalize(self) -> None: + data = self.to_xml() + self.__dict__.clear() + # Type ignored because mypy doesn't like overriding a method. + # Also the return value doesn't match... + self.to_xml = lambda: data # type: ignore[assignment] + + +def _warn_incompatibility_with_xunit2( + request: FixtureRequest, fixture_name: str +) -> None: + """Emit a PytestWarning about the given fixture being incompatible with newer xunit revisions.""" + from _pytest.warning_types import PytestWarning + + xml = request.config.stash.get(xml_key, None) + if xml is not None and xml.family not in ("xunit1", "legacy"): + request.node.warn( + PytestWarning( + "{fixture_name} is incompatible with junit_family '{family}' (use 'legacy' or 'xunit1')".format( + fixture_name=fixture_name, family=xml.family + ) + ) + ) + + +@pytest.fixture +def record_property(request: FixtureRequest) -> Callable[[str, object], None]: + """Add extra properties to the calling test. + + User properties become part of the test report and are available to the + configured reporters, like JUnit XML. + + The fixture is callable with ``name, value``. The value is automatically + XML-encoded. + + Example:: + + def test_function(record_property): + record_property("example_key", 1) + """ + _warn_incompatibility_with_xunit2(request, "record_property") + + def append_property(name: str, value: object) -> None: + request.node.user_properties.append((name, value)) + + return append_property + + +@pytest.fixture +def record_xml_attribute(request: FixtureRequest) -> Callable[[str, object], None]: + """Add extra xml attributes to the tag for the calling test. + + The fixture is callable with ``name, value``. The value is + automatically XML-encoded. + """ + from _pytest.warning_types import PytestExperimentalApiWarning + + request.node.warn( + PytestExperimentalApiWarning("record_xml_attribute is an experimental feature") + ) + + _warn_incompatibility_with_xunit2(request, "record_xml_attribute") + + # Declare noop + def add_attr_noop(name: str, value: object) -> None: + pass + + attr_func = add_attr_noop + + xml = request.config.stash.get(xml_key, None) + if xml is not None: + node_reporter = xml.node_reporter(request.node.nodeid) + attr_func = node_reporter.add_attribute + + return attr_func + + +def _check_record_param_type(param: str, v: str) -> None: + """Used by record_testsuite_property to check that the given parameter name is of the proper + type.""" + __tracebackhide__ = True + if not isinstance(v, str): + msg = "{param} parameter needs to be a string, but {g} given" # type: ignore[unreachable] + raise TypeError(msg.format(param=param, g=type(v).__name__)) + + +@pytest.fixture(scope="session") +def record_testsuite_property(request: FixtureRequest) -> Callable[[str, object], None]: + """Record a new ``<property>`` tag as child of the root ``<testsuite>``. + + This is suitable for writing global information regarding the entire test + suite, and is compatible with ``xunit2`` JUnit family. + + This is a ``session``-scoped fixture which is called with ``(name, value)``. Example: + + ..
code-block:: python + + def test_foo(record_testsuite_property): + record_testsuite_property("ARCH", "PPC") + record_testsuite_property("STORAGE_TYPE", "CEPH") + + ``name`` must be a string, ``value`` will be converted to a string and properly xml-escaped. + + .. warning:: + + Currently this fixture **does not work** with the + `pytest-xdist `__ plugin. See + :issue:`7767` for details. + """ + + __tracebackhide__ = True + + def record_func(name: str, value: object) -> None: + """No-op function in case --junitxml was not passed in the command-line.""" + __tracebackhide__ = True + _check_record_param_type("name", name) + + xml = request.config.stash.get(xml_key, None) + if xml is not None: + record_func = xml.add_global_property # noqa + return record_func + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("terminal reporting") + group.addoption( + "--junitxml", + "--junit-xml", + action="store", + dest="xmlpath", + metavar="path", + type=functools.partial(filename_arg, optname="--junitxml"), + default=None, + help="create junit-xml style report file at given path.", + ) + group.addoption( + "--junitprefix", + "--junit-prefix", + action="store", + metavar="str", + default=None, + help="prepend prefix to classnames in junit-xml output", + ) + parser.addini( + "junit_suite_name", "Test suite name for JUnit report", default="pytest" + ) + parser.addini( + "junit_logging", + "Write captured log messages to JUnit report: " + "one of no|log|system-out|system-err|out-err|all", + default="no", + ) + parser.addini( + "junit_log_passing_tests", + "Capture log information for passing tests to JUnit report: ", + type="bool", + default=True, + ) + parser.addini( + "junit_duration_report", + "Duration time to report: one of total|call", + default="total", + ) # choices=['total', 'call']) + parser.addini( + "junit_family", + "Emit XML for schema: one of legacy|xunit1|xunit2", + default="xunit2", + ) + + +def pytest_configure(config: Config) -> None: + xmlpath = config.option.xmlpath + # Prevent opening xmllog on worker nodes (xdist). + if xmlpath and not hasattr(config, "workerinput"): + junit_family = config.getini("junit_family") + config.stash[xml_key] = LogXML( + xmlpath, + config.option.junitprefix, + config.getini("junit_suite_name"), + config.getini("junit_logging"), + config.getini("junit_duration_report"), + junit_family, + config.getini("junit_log_passing_tests"), + ) + config.pluginmanager.register(config.stash[xml_key]) + + +def pytest_unconfigure(config: Config) -> None: + xml = config.stash.get(xml_key, None) + if xml: + del config.stash[xml_key] + config.pluginmanager.unregister(xml) + + +def mangle_test_address(address: str) -> List[str]: + path, possible_open_bracket, params = address.partition("[") + names = path.split("::") + # Convert file path to dotted path. + names[0] = names[0].replace(nodes.SEP, ".") + names[0] = re.sub(r"\.py$", "", names[0]) + # Put any params back. 
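+    # e.g. (illustrative) "tests/test_demo.py::TestX::test_y[case0]"
+    # mangles to ["tests.test_demo", "TestX", "test_y[case0]"].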
+ names[-1] += possible_open_bracket + params + return names + + +class LogXML: + def __init__( + self, + logfile, + prefix: Optional[str], + suite_name: str = "pytest", + logging: str = "no", + report_duration: str = "total", + family="xunit1", + log_passing_tests: bool = True, + ) -> None: + logfile = os.path.expanduser(os.path.expandvars(logfile)) + self.logfile = os.path.normpath(os.path.abspath(logfile)) + self.prefix = prefix + self.suite_name = suite_name + self.logging = logging + self.log_passing_tests = log_passing_tests + self.report_duration = report_duration + self.family = family + self.stats: Dict[str, int] = dict.fromkeys( + ["error", "passed", "failure", "skipped"], 0 + ) + self.node_reporters: Dict[ + Tuple[Union[str, TestReport], object], _NodeReporter + ] = {} + self.node_reporters_ordered: List[_NodeReporter] = [] + self.global_properties: List[Tuple[str, str]] = [] + + # List of reports that failed on call but teardown is pending. + self.open_reports: List[TestReport] = [] + self.cnt_double_fail_tests = 0 + + # Replaces convenience family with real family. + if self.family == "legacy": + self.family = "xunit1" + + def finalize(self, report: TestReport) -> None: + nodeid = getattr(report, "nodeid", report) + # Local hack to handle xdist report order. + workernode = getattr(report, "node", None) + reporter = self.node_reporters.pop((nodeid, workernode)) + if reporter is not None: + reporter.finalize() + + def node_reporter(self, report: Union[TestReport, str]) -> _NodeReporter: + nodeid: Union[str, TestReport] = getattr(report, "nodeid", report) + # Local hack to handle xdist report order. + workernode = getattr(report, "node", None) + + key = nodeid, workernode + + if key in self.node_reporters: + # TODO: breaks for --dist=each + return self.node_reporters[key] + + reporter = _NodeReporter(nodeid, self) + + self.node_reporters[key] = reporter + self.node_reporters_ordered.append(reporter) + + return reporter + + def add_stats(self, key: str) -> None: + if key in self.stats: + self.stats[key] += 1 + + def _opentestcase(self, report: TestReport) -> _NodeReporter: + reporter = self.node_reporter(report) + reporter.record_testreport(report) + return reporter + + def pytest_runtest_logreport(self, report: TestReport) -> None: + """Handle a setup/call/teardown report, generating the appropriate + XML tags as necessary. + + Note: due to plugins like xdist, this hook may be called in interlaced + order with reports from other nodes. For example: + + Usual call order: + -> setup node1 + -> call node1 + -> teardown node1 + -> setup node2 + -> call node2 + -> teardown node2 + + Possible call order in xdist: + -> setup node1 + -> call node1 + -> setup node2 + -> call node2 + -> teardown node2 + -> teardown node1 + """ + close_report = None + if report.passed: + if report.when == "call": # ignore setup/teardown + reporter = self._opentestcase(report) + reporter.append_pass(report) + elif report.failed: + if report.when == "teardown": + # The following vars are needed when xdist plugin is used. + report_wid = getattr(report, "worker_id", None) + report_ii = getattr(report, "item_index", None) + close_report = next( + ( + rep + for rep in self.open_reports + if ( + rep.nodeid == report.nodeid + and getattr(rep, "item_index", None) == report_ii + and getattr(rep, "worker_id", None) == report_wid + ) + ), + None, + ) + if close_report: + # We need to open new testcase in case we have failure in + # call and error in teardown in order to follow junit + # schema. 
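+                    # (the double-failed item is counted in
+                    # cnt_double_fail_tests and subtracted from the suite's
+                    # "tests" total in pytest_sessionfinish)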
+ self.finalize(close_report) + self.cnt_double_fail_tests += 1 + reporter = self._opentestcase(report) + if report.when == "call": + reporter.append_failure(report) + self.open_reports.append(report) + if not self.log_passing_tests: + reporter.write_captured_output(report) + else: + reporter.append_error(report) + elif report.skipped: + reporter = self._opentestcase(report) + reporter.append_skipped(report) + self.update_testcase_duration(report) + if report.when == "teardown": + reporter = self._opentestcase(report) + reporter.write_captured_output(report) + + for propname, propvalue in report.user_properties: + reporter.add_property(propname, str(propvalue)) + + self.finalize(report) + report_wid = getattr(report, "worker_id", None) + report_ii = getattr(report, "item_index", None) + close_report = next( + ( + rep + for rep in self.open_reports + if ( + rep.nodeid == report.nodeid + and getattr(rep, "item_index", None) == report_ii + and getattr(rep, "worker_id", None) == report_wid + ) + ), + None, + ) + if close_report: + self.open_reports.remove(close_report) + + def update_testcase_duration(self, report: TestReport) -> None: + """Accumulate total duration for nodeid from given report and update + the Junit.testcase with the new total if already created.""" + if self.report_duration == "total" or report.when == self.report_duration: + reporter = self.node_reporter(report) + reporter.duration += getattr(report, "duration", 0.0) + + def pytest_collectreport(self, report: TestReport) -> None: + if not report.passed: + reporter = self._opentestcase(report) + if report.failed: + reporter.append_collect_error(report) + else: + reporter.append_collect_skipped(report) + + def pytest_internalerror(self, excrepr: ExceptionRepr) -> None: + reporter = self.node_reporter("internal") + reporter.attrs.update(classname="pytest", name="internal") + reporter._add_simple("error", "internal error", str(excrepr)) + + def pytest_sessionstart(self) -> None: + self.suite_start_time = timing.time() + + def pytest_sessionfinish(self) -> None: + dirname = os.path.dirname(os.path.abspath(self.logfile)) + if not os.path.isdir(dirname): + os.makedirs(dirname) + + with open(self.logfile, "w", encoding="utf-8") as logfile: + suite_stop_time = timing.time() + suite_time_delta = suite_stop_time - self.suite_start_time + + numtests = ( + self.stats["passed"] + + self.stats["failure"] + + self.stats["skipped"] + + self.stats["error"] + - self.cnt_double_fail_tests + ) + logfile.write('<?xml version="1.0" encoding="utf-8"?>') + + suite_node = ET.Element( + "testsuite", + name=self.suite_name, + errors=str(self.stats["error"]), + failures=str(self.stats["failure"]), + skipped=str(self.stats["skipped"]), + tests=str(numtests), + time="%.3f" % suite_time_delta, + timestamp=datetime.fromtimestamp(self.suite_start_time).isoformat(), + hostname=platform.node(), + ) + global_properties = self._get_global_properties_node() + if global_properties is not None: + suite_node.append(global_properties) + for node_reporter in self.node_reporters_ordered: + suite_node.append(node_reporter.to_xml()) + testsuites = ET.Element("testsuites") + testsuites.append(suite_node) + logfile.write(ET.tostring(testsuites, encoding="unicode")) + + def pytest_terminal_summary(self, terminalreporter: TerminalReporter) -> None: + terminalreporter.write_sep("-", f"generated xml file: {self.logfile}") + + def add_global_property(self, name: str, value: object) -> None: + __tracebackhide__ = True + _check_record_param_type("name", name) + self.global_properties.append((name,
bin_xml_escape(value))) + + def _get_global_properties_node(self) -> Optional[ET.Element]: + """Return a Junit node containing custom properties, if any.""" + if self.global_properties: + properties = ET.Element("properties") + for name, value in self.global_properties: + properties.append(ET.Element("property", name=name, value=value)) + return properties + return None diff --git a/venv/lib/python3.10/site-packages/_pytest/legacypath.py b/venv/lib/python3.10/site-packages/_pytest/legacypath.py new file mode 100644 index 0000000..37e8c24 --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/legacypath.py @@ -0,0 +1,467 @@ +"""Add backward compatibility support for the legacy py path type.""" +import shlex +import subprocess +from pathlib import Path +from typing import List +from typing import Optional +from typing import TYPE_CHECKING +from typing import Union + +import attr +from iniconfig import SectionWrapper + +from _pytest.cacheprovider import Cache +from _pytest.compat import final +from _pytest.compat import LEGACY_PATH +from _pytest.compat import legacy_path +from _pytest.config import Config +from _pytest.config import hookimpl +from _pytest.config import PytestPluginManager +from _pytest.deprecated import check_ispytest +from _pytest.fixtures import fixture +from _pytest.fixtures import FixtureRequest +from _pytest.main import Session +from _pytest.monkeypatch import MonkeyPatch +from _pytest.nodes import Collector +from _pytest.nodes import Item +from _pytest.nodes import Node +from _pytest.pytester import HookRecorder +from _pytest.pytester import Pytester +from _pytest.pytester import RunResult +from _pytest.terminal import TerminalReporter +from _pytest.tmpdir import TempPathFactory + +if TYPE_CHECKING: + from typing_extensions import Final + + import pexpect + + +@final +class Testdir: + """ + Similar to :class:`Pytester`, but this class works with legacy legacy_path objects instead. + + All methods just forward to an internal :class:`Pytester` instance, converting results + to `legacy_path` objects as necessary. 
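+
+    Example (a minimal sketch of a plugin self-test)::
+
+        def test_passes(testdir):
+            testdir.makepyfile("def test_ok(): assert True")
+            result = testdir.runpytest()
+            result.assert_outcomes(passed=1)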
+ """ + + __test__ = False + + CLOSE_STDIN: "Final" = Pytester.CLOSE_STDIN + TimeoutExpired: "Final" = Pytester.TimeoutExpired + + def __init__(self, pytester: Pytester, *, _ispytest: bool = False) -> None: + check_ispytest(_ispytest) + self._pytester = pytester + + @property + def tmpdir(self) -> LEGACY_PATH: + """Temporary directory where tests are executed.""" + return legacy_path(self._pytester.path) + + @property + def test_tmproot(self) -> LEGACY_PATH: + return legacy_path(self._pytester._test_tmproot) + + @property + def request(self): + return self._pytester._request + + @property + def plugins(self): + return self._pytester.plugins + + @plugins.setter + def plugins(self, plugins): + self._pytester.plugins = plugins + + @property + def monkeypatch(self) -> MonkeyPatch: + return self._pytester._monkeypatch + + def make_hook_recorder(self, pluginmanager) -> HookRecorder: + """See :meth:`Pytester.make_hook_recorder`.""" + return self._pytester.make_hook_recorder(pluginmanager) + + def chdir(self) -> None: + """See :meth:`Pytester.chdir`.""" + return self._pytester.chdir() + + def finalize(self) -> None: + """See :meth:`Pytester._finalize`.""" + return self._pytester._finalize() + + def makefile(self, ext, *args, **kwargs) -> LEGACY_PATH: + """See :meth:`Pytester.makefile`.""" + if ext and not ext.startswith("."): + # pytester.makefile is going to throw a ValueError in a way that + # testdir.makefile did not, because + # pathlib.Path is stricter suffixes than py.path + # This ext arguments is likely user error, but since testdir has + # allowed this, we will prepend "." as a workaround to avoid breaking + # testdir usage that worked before + ext = "." + ext + return legacy_path(self._pytester.makefile(ext, *args, **kwargs)) + + def makeconftest(self, source) -> LEGACY_PATH: + """See :meth:`Pytester.makeconftest`.""" + return legacy_path(self._pytester.makeconftest(source)) + + def makeini(self, source) -> LEGACY_PATH: + """See :meth:`Pytester.makeini`.""" + return legacy_path(self._pytester.makeini(source)) + + def getinicfg(self, source: str) -> SectionWrapper: + """See :meth:`Pytester.getinicfg`.""" + return self._pytester.getinicfg(source) + + def makepyprojecttoml(self, source) -> LEGACY_PATH: + """See :meth:`Pytester.makepyprojecttoml`.""" + return legacy_path(self._pytester.makepyprojecttoml(source)) + + def makepyfile(self, *args, **kwargs) -> LEGACY_PATH: + """See :meth:`Pytester.makepyfile`.""" + return legacy_path(self._pytester.makepyfile(*args, **kwargs)) + + def maketxtfile(self, *args, **kwargs) -> LEGACY_PATH: + """See :meth:`Pytester.maketxtfile`.""" + return legacy_path(self._pytester.maketxtfile(*args, **kwargs)) + + def syspathinsert(self, path=None) -> None: + """See :meth:`Pytester.syspathinsert`.""" + return self._pytester.syspathinsert(path) + + def mkdir(self, name) -> LEGACY_PATH: + """See :meth:`Pytester.mkdir`.""" + return legacy_path(self._pytester.mkdir(name)) + + def mkpydir(self, name) -> LEGACY_PATH: + """See :meth:`Pytester.mkpydir`.""" + return legacy_path(self._pytester.mkpydir(name)) + + def copy_example(self, name=None) -> LEGACY_PATH: + """See :meth:`Pytester.copy_example`.""" + return legacy_path(self._pytester.copy_example(name)) + + def getnode(self, config: Config, arg) -> Optional[Union[Item, Collector]]: + """See :meth:`Pytester.getnode`.""" + return self._pytester.getnode(config, arg) + + def getpathnode(self, path): + """See :meth:`Pytester.getpathnode`.""" + return self._pytester.getpathnode(path) + + def genitems(self, colitems: 
List[Union[Item, Collector]]) -> List[Item]: + """See :meth:`Pytester.genitems`.""" + return self._pytester.genitems(colitems) + + def runitem(self, source): + """See :meth:`Pytester.runitem`.""" + return self._pytester.runitem(source) + + def inline_runsource(self, source, *cmdlineargs): + """See :meth:`Pytester.inline_runsource`.""" + return self._pytester.inline_runsource(source, *cmdlineargs) + + def inline_genitems(self, *args): + """See :meth:`Pytester.inline_genitems`.""" + return self._pytester.inline_genitems(*args) + + def inline_run(self, *args, plugins=(), no_reraise_ctrlc: bool = False): + """See :meth:`Pytester.inline_run`.""" + return self._pytester.inline_run( + *args, plugins=plugins, no_reraise_ctrlc=no_reraise_ctrlc + ) + + def runpytest_inprocess(self, *args, **kwargs) -> RunResult: + """See :meth:`Pytester.runpytest_inprocess`.""" + return self._pytester.runpytest_inprocess(*args, **kwargs) + + def runpytest(self, *args, **kwargs) -> RunResult: + """See :meth:`Pytester.runpytest`.""" + return self._pytester.runpytest(*args, **kwargs) + + def parseconfig(self, *args) -> Config: + """See :meth:`Pytester.parseconfig`.""" + return self._pytester.parseconfig(*args) + + def parseconfigure(self, *args) -> Config: + """See :meth:`Pytester.parseconfigure`.""" + return self._pytester.parseconfigure(*args) + + def getitem(self, source, funcname="test_func"): + """See :meth:`Pytester.getitem`.""" + return self._pytester.getitem(source, funcname) + + def getitems(self, source): + """See :meth:`Pytester.getitems`.""" + return self._pytester.getitems(source) + + def getmodulecol(self, source, configargs=(), withinit=False): + """See :meth:`Pytester.getmodulecol`.""" + return self._pytester.getmodulecol( + source, configargs=configargs, withinit=withinit + ) + + def collect_by_name( + self, modcol: Collector, name: str + ) -> Optional[Union[Item, Collector]]: + """See :meth:`Pytester.collect_by_name`.""" + return self._pytester.collect_by_name(modcol, name) + + def popen( + self, + cmdargs, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + stdin=CLOSE_STDIN, + **kw, + ): + """See :meth:`Pytester.popen`.""" + return self._pytester.popen(cmdargs, stdout, stderr, stdin, **kw) + + def run(self, *cmdargs, timeout=None, stdin=CLOSE_STDIN) -> RunResult: + """See :meth:`Pytester.run`.""" + return self._pytester.run(*cmdargs, timeout=timeout, stdin=stdin) + + def runpython(self, script) -> RunResult: + """See :meth:`Pytester.runpython`.""" + return self._pytester.runpython(script) + + def runpython_c(self, command): + """See :meth:`Pytester.runpython_c`.""" + return self._pytester.runpython_c(command) + + def runpytest_subprocess(self, *args, timeout=None) -> RunResult: + """See :meth:`Pytester.runpytest_subprocess`.""" + return self._pytester.runpytest_subprocess(*args, timeout=timeout) + + def spawn_pytest( + self, string: str, expect_timeout: float = 10.0 + ) -> "pexpect.spawn": + """See :meth:`Pytester.spawn_pytest`.""" + return self._pytester.spawn_pytest(string, expect_timeout=expect_timeout) + + def spawn(self, cmd: str, expect_timeout: float = 10.0) -> "pexpect.spawn": + """See :meth:`Pytester.spawn`.""" + return self._pytester.spawn(cmd, expect_timeout=expect_timeout) + + def __repr__(self) -> str: + return f"<Testdir {self.tmpdir!r}>" + + def __str__(self) -> str: + return str(self.tmpdir) + + +class LegacyTestdirPlugin: + @staticmethod + @fixture + def testdir(pytester: Pytester) -> Testdir: + """ + Identical to :fixture:`pytester`, and provides an instance whose methods return + legacy ``LEGACY_PATH``
objects instead when applicable. + + New code should avoid using :fixture:`testdir` in favor of :fixture:`pytester`. + """ + return Testdir(pytester, _ispytest=True) + + +@final +@attr.s(init=False, auto_attribs=True) +class TempdirFactory: + """Backward compatibility wrapper that implements :class:`_pytest.compat.LEGACY_PATH` + for :class:`TempPathFactory`.""" + + _tmppath_factory: TempPathFactory + + def __init__( + self, tmppath_factory: TempPathFactory, *, _ispytest: bool = False + ) -> None: + check_ispytest(_ispytest) + self._tmppath_factory = tmppath_factory + + def mktemp(self, basename: str, numbered: bool = True) -> LEGACY_PATH: + """Same as :meth:`TempPathFactory.mktemp`, but returns a ``_pytest.compat.LEGACY_PATH`` object.""" + return legacy_path(self._tmppath_factory.mktemp(basename, numbered).resolve()) + + def getbasetemp(self) -> LEGACY_PATH: + """Backward compat wrapper for ``_tmppath_factory.getbasetemp``.""" + return legacy_path(self._tmppath_factory.getbasetemp().resolve()) + + +class LegacyTmpdirPlugin: + @staticmethod + @fixture(scope="session") + def tmpdir_factory(request: FixtureRequest) -> TempdirFactory: + """Return a :class:`pytest.TempdirFactory` instance for the test session.""" + # Set dynamically by pytest_configure(). + return request.config._tmpdirhandler # type: ignore + + @staticmethod + @fixture + def tmpdir(tmp_path: Path) -> LEGACY_PATH: + """Return a temporary directory path object which is unique to each test + function invocation, created as a sub directory of the base temporary + directory. + + By default, a new base temporary directory is created each test session, + and old bases are removed after 3 sessions, to aid in debugging. If + ``--basetemp`` is used then it is cleared each session. See :ref:`base + temporary directory`. + + The returned object is a `legacy_path`_ object. + + .. _legacy_path: https://py.readthedocs.io/en/latest/path.html + """ + return legacy_path(tmp_path) + + +def Cache_makedir(self: Cache, name: str) -> LEGACY_PATH: + """Return a directory path object with the given name. + + Same as :func:`mkdir`, but returns a legacy py path instance. + """ + return legacy_path(self.mkdir(name)) + + +def FixtureRequest_fspath(self: FixtureRequest) -> LEGACY_PATH: + """(deprecated) The file system path of the test module which collected this test.""" + return legacy_path(self.path) + + +def TerminalReporter_startdir(self: TerminalReporter) -> LEGACY_PATH: + """The directory from which pytest was invoked. + + Prefer to use ``startpath`` which is a :class:`pathlib.Path`. + + :type: LEGACY_PATH + """ + return legacy_path(self.startpath) + + +def Config_invocation_dir(self: Config) -> LEGACY_PATH: + """The directory from which pytest was invoked. + + Prefer to use :attr:`invocation_params.dir`, + which is a :class:`pathlib.Path`. + + :type: LEGACY_PATH + """ + return legacy_path(str(self.invocation_params.dir)) + + +def Config_rootdir(self: Config) -> LEGACY_PATH: + """The path to the :ref:`rootdir <rootdir>`. + + Prefer to use :attr:`rootpath`, which is a :class:`pathlib.Path`. + + :type: LEGACY_PATH + """ + return legacy_path(str(self.rootpath)) + + +def Config_inifile(self: Config) -> Optional[LEGACY_PATH]: + """The path to the :ref:`configfile <configfile>`. + + Prefer to use :attr:`inipath`, which is a :class:`pathlib.Path`. + + :type: Optional[LEGACY_PATH] + """ + return legacy_path(str(self.inipath)) if self.inipath else None + + +def Session_stardir(self: Session) -> LEGACY_PATH: + """The path from which pytest was invoked.
+ + Prefer to use ``startpath`` which is a :class:`pathlib.Path`. + + :type: LEGACY_PATH + """ + return legacy_path(self.startpath) + + +def Config__getini_unknown_type( + self, name: str, type: str, value: Union[str, List[str]] +): + if type == "pathlist": + # TODO: This assert is probably not valid in all cases. + assert self.inipath is not None + dp = self.inipath.parent + input_values = shlex.split(value) if isinstance(value, str) else value + return [legacy_path(str(dp / x)) for x in input_values] + else: + raise ValueError(f"unknown configuration type: {type}", value) + + +def Node_fspath(self: Node) -> LEGACY_PATH: + """(deprecated) returns a legacy_path copy of self.path""" + return legacy_path(self.path) + + +def Node_fspath_set(self: Node, value: LEGACY_PATH) -> None: + self.path = Path(value) + + +@hookimpl(tryfirst=True) +def pytest_load_initial_conftests(early_config: Config) -> None: + """Monkeypatch legacy path attributes in several classes, as early as possible.""" + mp = MonkeyPatch() + early_config.add_cleanup(mp.undo) + + # Add Cache.makedir(). + mp.setattr(Cache, "makedir", Cache_makedir, raising=False) + + # Add FixtureRequest.fspath property. + mp.setattr(FixtureRequest, "fspath", property(FixtureRequest_fspath), raising=False) + + # Add TerminalReporter.startdir property. + mp.setattr( + TerminalReporter, "startdir", property(TerminalReporter_startdir), raising=False + ) + + # Add Config.{invocation_dir,rootdir,inifile} properties. + mp.setattr(Config, "invocation_dir", property(Config_invocation_dir), raising=False) + mp.setattr(Config, "rootdir", property(Config_rootdir), raising=False) + mp.setattr(Config, "inifile", property(Config_inifile), raising=False) + + # Add Session.startdir property. + mp.setattr(Session, "startdir", property(Session_stardir), raising=False) + + # Add pathlist configuration type. + mp.setattr(Config, "_getini_unknown_type", Config__getini_unknown_type) + + # Add Node.fspath property. + mp.setattr(Node, "fspath", property(Node_fspath, Node_fspath_set), raising=False) + + +@hookimpl +def pytest_configure(config: Config) -> None: + """Installs the LegacyTmpdirPlugin if the ``tmpdir`` plugin is also installed.""" + if config.pluginmanager.has_plugin("tmpdir"): + mp = MonkeyPatch() + config.add_cleanup(mp.undo) + # Create TmpdirFactory and attach it to the config object. + # + # This is to comply with existing plugins which expect the handler to be + # available at pytest_configure time, but ideally should be moved entirely + # to the tmpdir_factory session fixture. + try: + tmp_path_factory = config._tmp_path_factory # type: ignore[attr-defined] + except AttributeError: + # tmpdir plugin is blocked. + pass + else: + _tmpdirhandler = TempdirFactory(tmp_path_factory, _ispytest=True) + mp.setattr(config, "_tmpdirhandler", _tmpdirhandler, raising=False) + + config.pluginmanager.register(LegacyTmpdirPlugin, "legacypath-tmpdir") + + +@hookimpl +def pytest_plugin_registered(plugin: object, manager: PytestPluginManager) -> None: + # pytester is not loaded by default and is commonly loaded from a conftest, + # so checking for it in `pytest_configure` is not enough. 
+ is_pytester = plugin is manager.get_plugin("pytester") + if is_pytester and not manager.is_registered(LegacyTestdirPlugin): + manager.register(LegacyTestdirPlugin, "legacypath-pytester") diff --git a/venv/lib/python3.10/site-packages/_pytest/logging.py b/venv/lib/python3.10/site-packages/_pytest/logging.py new file mode 100644 index 0000000..0163554 --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/logging.py @@ -0,0 +1,826 @@ +"""Access and control log capturing.""" +import io +import logging +import os +import re +from contextlib import contextmanager +from contextlib import nullcontext +from io import StringIO +from pathlib import Path +from typing import AbstractSet +from typing import Dict +from typing import Generator +from typing import List +from typing import Mapping +from typing import Optional +from typing import Tuple +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +from _pytest import nodes +from _pytest._io import TerminalWriter +from _pytest.capture import CaptureManager +from _pytest.compat import final +from _pytest.config import _strtobool +from _pytest.config import Config +from _pytest.config import create_terminal_writer +from _pytest.config import hookimpl +from _pytest.config import UsageError +from _pytest.config.argparsing import Parser +from _pytest.deprecated import check_ispytest +from _pytest.fixtures import fixture +from _pytest.fixtures import FixtureRequest +from _pytest.main import Session +from _pytest.stash import StashKey +from _pytest.terminal import TerminalReporter + +if TYPE_CHECKING: + logging_StreamHandler = logging.StreamHandler[StringIO] +else: + logging_StreamHandler = logging.StreamHandler + + +DEFAULT_LOG_FORMAT = "%(levelname)-8s %(name)s:%(filename)s:%(lineno)d %(message)s" +DEFAULT_LOG_DATE_FORMAT = "%H:%M:%S" +_ANSI_ESCAPE_SEQ = re.compile(r"\x1b\[[\d;]+m") +caplog_handler_key = StashKey["LogCaptureHandler"]() +caplog_records_key = StashKey[Dict[str, List[logging.LogRecord]]]() + + +def _remove_ansi_escape_sequences(text: str) -> str: + return _ANSI_ESCAPE_SEQ.sub("", text) + + +class ColoredLevelFormatter(logging.Formatter): + """A logging formatter which colorizes the %(levelname)..s part of the + log format passed to __init__.""" + + LOGLEVEL_COLOROPTS: Mapping[int, AbstractSet[str]] = { + logging.CRITICAL: {"red"}, + logging.ERROR: {"red", "bold"}, + logging.WARNING: {"yellow"}, + logging.WARN: {"yellow"}, + logging.INFO: {"green"}, + logging.DEBUG: {"purple"}, + logging.NOTSET: set(), + } + LEVELNAME_FMT_REGEX = re.compile(r"%\(levelname\)([+-.]?\d*(?:\.\d+)?s)") + + def __init__(self, terminalwriter: TerminalWriter, *args, **kwargs) -> None: + super().__init__(*args, **kwargs) + self._terminalwriter = terminalwriter + self._original_fmt = self._style._fmt + self._level_to_fmt_mapping: Dict[int, str] = {} + + for level, color_opts in self.LOGLEVEL_COLOROPTS.items(): + self.add_color_level(level, *color_opts) + + def add_color_level(self, level: int, *color_opts: str) -> None: + """Add or update color opts for a log level. + + :param level: + Log level to apply a style to, e.g. ``logging.INFO``. + :param color_opts: + ANSI escape sequence color options. Capitalized colors indicates + background color, i.e. ``'green', 'Yellow', 'bold'`` will give bold + green text on yellow background. + + .. warning:: + This is an experimental API. 
+ """ + + assert self._fmt is not None + levelname_fmt_match = self.LEVELNAME_FMT_REGEX.search(self._fmt) + if not levelname_fmt_match: + return + levelname_fmt = levelname_fmt_match.group() + + formatted_levelname = levelname_fmt % {"levelname": logging.getLevelName(level)} + + # add ANSI escape sequences around the formatted levelname + color_kwargs = {name: True for name in color_opts} + colorized_formatted_levelname = self._terminalwriter.markup( + formatted_levelname, **color_kwargs + ) + self._level_to_fmt_mapping[level] = self.LEVELNAME_FMT_REGEX.sub( + colorized_formatted_levelname, self._fmt + ) + + def format(self, record: logging.LogRecord) -> str: + fmt = self._level_to_fmt_mapping.get(record.levelno, self._original_fmt) + self._style._fmt = fmt + return super().format(record) + + +class PercentStyleMultiline(logging.PercentStyle): + """A logging style with special support for multiline messages. + + If the message of a record consists of multiple lines, this style + formats the message as if each line were logged separately. + """ + + def __init__(self, fmt: str, auto_indent: Union[int, str, bool, None]) -> None: + super().__init__(fmt) + self._auto_indent = self._get_auto_indent(auto_indent) + + @staticmethod + def _get_auto_indent(auto_indent_option: Union[int, str, bool, None]) -> int: + """Determine the current auto indentation setting. + + Specify auto indent behavior (on/off/fixed) by passing in + extra={"auto_indent": [value]} to the call to logging.log() or + using a --log-auto-indent [value] command line or the + log_auto_indent [value] config option. + + Default behavior is auto-indent off. + + Using the string "True" or "on" or the boolean True as the value + turns auto indent on, using the string "False" or "off" or the + boolean False or the int 0 turns it off, and specifying a + positive integer fixes the indentation position to the value + specified. + + Any other values for the option are invalid, and will silently be + converted to the default. + + :param None|bool|int|str auto_indent_option: + User specified option for indentation from command line, config + or extra kwarg. Accepts int, bool or str. str option accepts the + same range of values as boolean config options, as well as + positive integers represented in str form. + + :returns: + Indentation value, which can be + -1 (automatically determine indentation) or + 0 (auto-indent turned off) or + >0 (explicitly set indentation position). + """ + + if auto_indent_option is None: + return 0 + elif isinstance(auto_indent_option, bool): + if auto_indent_option: + return -1 + else: + return 0 + elif isinstance(auto_indent_option, int): + return int(auto_indent_option) + elif isinstance(auto_indent_option, str): + try: + return int(auto_indent_option) + except ValueError: + pass + try: + if _strtobool(auto_indent_option): + return -1 + except ValueError: + return 0 + + return 0 + + def format(self, record: logging.LogRecord) -> str: + if "\n" in record.message: + if hasattr(record, "auto_indent"): + # Passed in from the "extra={}" kwarg on the call to logging.log(). + auto_indent = self._get_auto_indent(record.auto_indent) # type: ignore[attr-defined] + else: + auto_indent = self._auto_indent + + if auto_indent: + lines = record.message.splitlines() + formatted = self._fmt % {**record.__dict__, "message": lines[0]} + + if auto_indent < 0: + indentation = _remove_ansi_escape_sequences(formatted).find( + lines[0] + ) + else: + # Optimizes logging by allowing a fixed indentation. 
+ indentation = auto_indent + lines[0] = formatted + return ("\n" + " " * indentation).join(lines) + return self._fmt % record.__dict__ + + +def get_option_ini(config: Config, *names: str): + for name in names: + ret = config.getoption(name) # 'default' arg won't work as expected + if ret is None: + ret = config.getini(name) + if ret: + return ret + + +def pytest_addoption(parser: Parser) -> None: + """Add options to control log capturing.""" + group = parser.getgroup("logging") + + def add_option_ini(option, dest, default=None, type=None, **kwargs): + parser.addini( + dest, default=default, type=type, help="default value for " + option + ) + group.addoption(option, dest=dest, **kwargs) + + add_option_ini( + "--log-level", + dest="log_level", + default=None, + metavar="LEVEL", + help=( + "level of messages to catch/display.\n" + "Not set by default, so it depends on the root/parent log handler's" + ' effective level, where it is "WARNING" by default.' + ), + ) + add_option_ini( + "--log-format", + dest="log_format", + default=DEFAULT_LOG_FORMAT, + help="log format as used by the logging module.", + ) + add_option_ini( + "--log-date-format", + dest="log_date_format", + default=DEFAULT_LOG_DATE_FORMAT, + help="log date format as used by the logging module.", + ) + parser.addini( + "log_cli", + default=False, + type="bool", + help='enable log display during test run (also known as "live logging").', + ) + add_option_ini( + "--log-cli-level", dest="log_cli_level", default=None, help="cli logging level." + ) + add_option_ini( + "--log-cli-format", + dest="log_cli_format", + default=None, + help="log format as used by the logging module.", + ) + add_option_ini( + "--log-cli-date-format", + dest="log_cli_date_format", + default=None, + help="log date format as used by the logging module.", + ) + add_option_ini( + "--log-file", + dest="log_file", + default=None, + help="path to a file when logging will be written to.", + ) + add_option_ini( + "--log-file-level", + dest="log_file_level", + default=None, + help="log file logging level.", + ) + add_option_ini( + "--log-file-format", + dest="log_file_format", + default=DEFAULT_LOG_FORMAT, + help="log format as used by the logging module.", + ) + add_option_ini( + "--log-file-date-format", + dest="log_file_date_format", + default=DEFAULT_LOG_DATE_FORMAT, + help="log date format as used by the logging module.", + ) + add_option_ini( + "--log-auto-indent", + dest="log_auto_indent", + default=None, + help="Auto-indent multiline messages passed to the logging module. Accepts true|on, false|off or an integer.", + ) + + +_HandlerType = TypeVar("_HandlerType", bound=logging.Handler) + + +# Not using @contextmanager for performance reasons. 
+class catching_logs: + """Context manager that prepares the whole logging machinery properly.""" + + __slots__ = ("handler", "level", "orig_level") + + def __init__(self, handler: _HandlerType, level: Optional[int] = None) -> None: + self.handler = handler + self.level = level + + def __enter__(self): + root_logger = logging.getLogger() + if self.level is not None: + self.handler.setLevel(self.level) + root_logger.addHandler(self.handler) + if self.level is not None: + self.orig_level = root_logger.level + root_logger.setLevel(min(self.orig_level, self.level)) + return self.handler + + def __exit__(self, type, value, traceback): + root_logger = logging.getLogger() + if self.level is not None: + root_logger.setLevel(self.orig_level) + root_logger.removeHandler(self.handler) + + +class LogCaptureHandler(logging_StreamHandler): + """A logging handler that stores log records and the log text.""" + + def __init__(self) -> None: + """Create a new log handler.""" + super().__init__(StringIO()) + self.records: List[logging.LogRecord] = [] + + def emit(self, record: logging.LogRecord) -> None: + """Keep the log records in a list in addition to the log text.""" + self.records.append(record) + super().emit(record) + + def reset(self) -> None: + self.records = [] + self.stream = StringIO() + + def handleError(self, record: logging.LogRecord) -> None: + if logging.raiseExceptions: + # Fail the test if the log message is bad (emit failed). + # The default behavior of logging is to print "Logging error" + # to stderr with the call stack and some extra details. + # pytest wants to make such mistakes visible during testing. + raise + + +@final +class LogCaptureFixture: + """Provides access and control of log capturing.""" + + def __init__(self, item: nodes.Node, *, _ispytest: bool = False) -> None: + check_ispytest(_ispytest) + self._item = item + self._initial_handler_level: Optional[int] = None + # Dict of log name -> log level. + self._initial_logger_levels: Dict[Optional[str], int] = {} + + def _finalize(self) -> None: + """Finalize the fixture. + + This restores the log levels changed by :meth:`set_level`. + """ + # Restore log levels. + if self._initial_handler_level is not None: + self.handler.setLevel(self._initial_handler_level) + for logger_name, level in self._initial_logger_levels.items(): + logger = logging.getLogger(logger_name) + logger.setLevel(level) + + @property + def handler(self) -> LogCaptureHandler: + """Get the logging handler used by the fixture. + + :rtype: LogCaptureHandler + """ + return self._item.stash[caplog_handler_key] + + def get_records(self, when: str) -> List[logging.LogRecord]: + """Get the logging records for one of the possible test phases. + + :param str when: + Which test phase to obtain the records from. Valid values are: "setup", "call" and "teardown". + + :returns: The list of captured records at the given stage. + :rtype: List[logging.LogRecord] + + .. versionadded:: 3.4 + """ + return self._item.stash[caplog_records_key].get(when, []) + + @property + def text(self) -> str: + """The formatted log text.""" + return _remove_ansi_escape_sequences(self.handler.stream.getvalue()) + + @property + def records(self) -> List[logging.LogRecord]: + """The list of log records.""" + return self.handler.records + + @property + def record_tuples(self) -> List[Tuple[str, int, str]]: + """A list of a stripped down version of log records intended + for use in assertion comparison. 
+ + The format of the tuple is: + + (logger_name, log_level, message) + """ + return [(r.name, r.levelno, r.getMessage()) for r in self.records] + + @property + def messages(self) -> List[str]: + """A list of format-interpolated log messages. + + Unlike 'records', which contains the format string and parameters for + interpolation, log messages in this list are all interpolated. + + Unlike 'text', which contains the output from the handler, log + messages in this list are unadorned with levels, timestamps, etc, + making exact comparisons more reliable. + + Note that traceback or stack info (from :func:`logging.exception` or + the `exc_info` or `stack_info` arguments to the logging functions) is + not included, as this is added by the formatter in the handler. + + .. versionadded:: 3.7 + """ + return [r.getMessage() for r in self.records] + + def clear(self) -> None: + """Reset the list of log records and the captured log text.""" + self.handler.reset() + + def set_level(self, level: Union[int, str], logger: Optional[str] = None) -> None: + """Set the level of a logger for the duration of a test. + + .. versionchanged:: 3.4 + The levels of the loggers changed by this function will be + restored to their initial values at the end of the test. + + :param int level: The level. + :param str logger: The logger to update. If not given, the root logger. + """ + logger_obj = logging.getLogger(logger) + # Save the original log-level to restore it during teardown. + self._initial_logger_levels.setdefault(logger, logger_obj.level) + logger_obj.setLevel(level) + if self._initial_handler_level is None: + self._initial_handler_level = self.handler.level + self.handler.setLevel(level) + + @contextmanager + def at_level( + self, level: Union[int, str], logger: Optional[str] = None + ) -> Generator[None, None, None]: + """Context manager that sets the level for capturing of logs. After + the end of the 'with' statement the level is restored to its original + value. + + :param int level: The level. + :param str logger: The logger to update. If not given, the root logger. + """ + logger_obj = logging.getLogger(logger) + orig_level = logger_obj.level + logger_obj.setLevel(level) + handler_orig_level = self.handler.level + self.handler.setLevel(level) + try: + yield + finally: + logger_obj.setLevel(orig_level) + self.handler.setLevel(handler_orig_level) + + +@fixture +def caplog(request: FixtureRequest) -> Generator[LogCaptureFixture, None, None]: + """Access and control log capturing. 
+ + Captured logs are available through the following properties/methods:: + + * caplog.messages -> list of format-interpolated log messages + * caplog.text -> string containing formatted log output + * caplog.records -> list of logging.LogRecord instances + * caplog.record_tuples -> list of (logger_name, level, message) tuples + * caplog.clear() -> clear captured records and formatted log output string + """ + result = LogCaptureFixture(request.node, _ispytest=True) + yield result + result._finalize() + + +def get_log_level_for_setting(config: Config, *setting_names: str) -> Optional[int]: + for setting_name in setting_names: + log_level = config.getoption(setting_name) + if log_level is None: + log_level = config.getini(setting_name) + if log_level: + break + else: + return None + + if isinstance(log_level, str): + log_level = log_level.upper() + try: + return int(getattr(logging, log_level, log_level)) + except ValueError as e: + # Python logging does not recognise this as a logging level + raise UsageError( + "'{}' is not recognized as a logging level name for " + "'{}'. Please consider passing the " + "logging level num instead.".format(log_level, setting_name) + ) from e + + +# run after terminalreporter/capturemanager are configured +@hookimpl(trylast=True) +def pytest_configure(config: Config) -> None: + config.pluginmanager.register(LoggingPlugin(config), "logging-plugin") + + +class LoggingPlugin: + """Attaches to the logging module and captures log messages for each test.""" + + def __init__(self, config: Config) -> None: + """Create a new plugin to capture log messages. + + The formatter can be safely shared across all handlers so + create a single one for the entire test session here. + """ + self._config = config + + # Report logging. + self.formatter = self._create_formatter( + get_option_ini(config, "log_format"), + get_option_ini(config, "log_date_format"), + get_option_ini(config, "log_auto_indent"), + ) + self.log_level = get_log_level_for_setting(config, "log_level") + self.caplog_handler = LogCaptureHandler() + self.caplog_handler.setFormatter(self.formatter) + self.report_handler = LogCaptureHandler() + self.report_handler.setFormatter(self.formatter) + + # File logging. + self.log_file_level = get_log_level_for_setting(config, "log_file_level") + log_file = get_option_ini(config, "log_file") or os.devnull + if log_file != os.devnull: + directory = os.path.dirname(os.path.abspath(log_file)) + if not os.path.isdir(directory): + os.makedirs(directory) + + self.log_file_handler = _FileHandler(log_file, mode="w", encoding="UTF-8") + log_file_format = get_option_ini(config, "log_file_format", "log_format") + log_file_date_format = get_option_ini( + config, "log_file_date_format", "log_date_format" + ) + + log_file_formatter = logging.Formatter( + log_file_format, datefmt=log_file_date_format + ) + self.log_file_handler.setFormatter(log_file_formatter) + + # CLI/live logging. + self.log_cli_level = get_log_level_for_setting( + config, "log_cli_level", "log_level" + ) + if self._log_cli_enabled(): + terminal_reporter = config.pluginmanager.get_plugin("terminalreporter") + capture_manager = config.pluginmanager.get_plugin("capturemanager") + # if capturemanager plugin is disabled, live logging still works. 
+ self.log_cli_handler: Union[ + _LiveLoggingStreamHandler, _LiveLoggingNullHandler + ] = _LiveLoggingStreamHandler(terminal_reporter, capture_manager) + else: + self.log_cli_handler = _LiveLoggingNullHandler() + log_cli_formatter = self._create_formatter( + get_option_ini(config, "log_cli_format", "log_format"), + get_option_ini(config, "log_cli_date_format", "log_date_format"), + get_option_ini(config, "log_auto_indent"), + ) + self.log_cli_handler.setFormatter(log_cli_formatter) + + def _create_formatter(self, log_format, log_date_format, auto_indent): + # Color option doesn't exist if terminal plugin is disabled. + color = getattr(self._config.option, "color", "no") + if color != "no" and ColoredLevelFormatter.LEVELNAME_FMT_REGEX.search( + log_format + ): + formatter: logging.Formatter = ColoredLevelFormatter( + create_terminal_writer(self._config), log_format, log_date_format + ) + else: + formatter = logging.Formatter(log_format, log_date_format) + + formatter._style = PercentStyleMultiline( + formatter._style._fmt, auto_indent=auto_indent + ) + + return formatter + + def set_log_path(self, fname: str) -> None: + """Set the filename parameter for Logging.FileHandler(). + + Creates parent directory if it does not exist. + + .. warning:: + This is an experimental API. + """ + fpath = Path(fname) + + if not fpath.is_absolute(): + fpath = self._config.rootpath / fpath + + if not fpath.parent.exists(): + fpath.parent.mkdir(exist_ok=True, parents=True) + + # https://github.com/python/mypy/issues/11193 + stream: io.TextIOWrapper = fpath.open(mode="w", encoding="UTF-8") # type: ignore[assignment] + old_stream = self.log_file_handler.setStream(stream) + if old_stream: + old_stream.close() + + def _log_cli_enabled(self): + """Return whether live logging is enabled.""" + enabled = self._config.getoption( + "--log-cli-level" + ) is not None or self._config.getini("log_cli") + if not enabled: + return False + + terminal_reporter = self._config.pluginmanager.get_plugin("terminalreporter") + if terminal_reporter is None: + # terminal reporter is disabled e.g. by pytest-xdist. + return False + + return True + + @hookimpl(hookwrapper=True, tryfirst=True) + def pytest_sessionstart(self) -> Generator[None, None, None]: + self.log_cli_handler.set_when("sessionstart") + + with catching_logs(self.log_cli_handler, level=self.log_cli_level): + with catching_logs(self.log_file_handler, level=self.log_file_level): + yield + + @hookimpl(hookwrapper=True, tryfirst=True) + def pytest_collection(self) -> Generator[None, None, None]: + self.log_cli_handler.set_when("collection") + + with catching_logs(self.log_cli_handler, level=self.log_cli_level): + with catching_logs(self.log_file_handler, level=self.log_file_level): + yield + + @hookimpl(hookwrapper=True) + def pytest_runtestloop(self, session: Session) -> Generator[None, None, None]: + if session.config.option.collectonly: + yield + return + + if self._log_cli_enabled() and self._config.getoption("verbose") < 1: + # The verbose flag is needed to avoid messy test progress output. + self._config.option.verbose = 1 + + with catching_logs(self.log_cli_handler, level=self.log_cli_level): + with catching_logs(self.log_file_handler, level=self.log_file_level): + yield # Run all the tests. 
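The hook wrappers above bracket each phase of the run with ``catching_logs``, which is what ultimately feeds both the ``caplog`` fixture and live logging. As a minimal sketch of the behavior this enables (the test module and the "demo" logger name below are illustrative assumptions, not part of this diff):

    import logging

    logger = logging.getLogger("demo")

    def test_logs_are_captured(caplog):
        # The handler behind caplog is the LogCaptureHandler that the
        # LoggingPlugin shown here attaches to the root logger.
        caplog.set_level(logging.INFO, logger="demo")
        logger.info("hello %s", "world")
        assert caplog.record_tuples == [("demo", logging.INFO, "hello world")]

Running with --log-cli-level=INFO makes _log_cli_enabled() return True, so the same records are also echoed to the terminal through the live-logging stream handler instead of the _LiveLoggingNullHandler stub.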
+ + @hookimpl + def pytest_runtest_logstart(self) -> None: + self.log_cli_handler.reset() + self.log_cli_handler.set_when("start") + + @hookimpl + def pytest_runtest_logreport(self) -> None: + self.log_cli_handler.set_when("logreport") + + def _runtest_for(self, item: nodes.Item, when: str) -> Generator[None, None, None]: + """Implement the internals of the pytest_runtest_xxx() hooks.""" + with catching_logs( + self.caplog_handler, + level=self.log_level, + ) as caplog_handler, catching_logs( + self.report_handler, + level=self.log_level, + ) as report_handler: + caplog_handler.reset() + report_handler.reset() + item.stash[caplog_records_key][when] = caplog_handler.records + item.stash[caplog_handler_key] = caplog_handler + + yield + + log = report_handler.stream.getvalue().strip() + item.add_report_section(when, "log", log) + + @hookimpl(hookwrapper=True) + def pytest_runtest_setup(self, item: nodes.Item) -> Generator[None, None, None]: + self.log_cli_handler.set_when("setup") + + empty: Dict[str, List[logging.LogRecord]] = {} + item.stash[caplog_records_key] = empty + yield from self._runtest_for(item, "setup") + + @hookimpl(hookwrapper=True) + def pytest_runtest_call(self, item: nodes.Item) -> Generator[None, None, None]: + self.log_cli_handler.set_when("call") + + yield from self._runtest_for(item, "call") + + @hookimpl(hookwrapper=True) + def pytest_runtest_teardown(self, item: nodes.Item) -> Generator[None, None, None]: + self.log_cli_handler.set_when("teardown") + + yield from self._runtest_for(item, "teardown") + del item.stash[caplog_records_key] + del item.stash[caplog_handler_key] + + @hookimpl + def pytest_runtest_logfinish(self) -> None: + self.log_cli_handler.set_when("finish") + + @hookimpl(hookwrapper=True, tryfirst=True) + def pytest_sessionfinish(self) -> Generator[None, None, None]: + self.log_cli_handler.set_when("sessionfinish") + + with catching_logs(self.log_cli_handler, level=self.log_cli_level): + with catching_logs(self.log_file_handler, level=self.log_file_level): + yield + + @hookimpl + def pytest_unconfigure(self) -> None: + # Close the FileHandler explicitly. + # (logging.shutdown might have lost the weakref?!) + self.log_file_handler.close() + + +class _FileHandler(logging.FileHandler): + """A logging FileHandler with pytest tweaks.""" + + def handleError(self, record: logging.LogRecord) -> None: + # Handled by LogCaptureHandler. + pass + + +class _LiveLoggingStreamHandler(logging_StreamHandler): + """A logging StreamHandler used by the live logging feature: it will + write a newline before the first log message in each test. + + During live logging we must also explicitly disable stdout/stderr + capturing otherwise it will get captured and won't appear in the + terminal. + """ + + # Officially stream needs to be a IO[str], but TerminalReporter + # isn't. So force it. 
+ stream: TerminalReporter = None # type: ignore + + def __init__( + self, + terminal_reporter: TerminalReporter, + capture_manager: Optional[CaptureManager], + ) -> None: + super().__init__(stream=terminal_reporter) # type: ignore[arg-type] + self.capture_manager = capture_manager + self.reset() + self.set_when(None) + self._test_outcome_written = False + + def reset(self) -> None: + """Reset the handler; should be called before the start of each test.""" + self._first_record_emitted = False + + def set_when(self, when: Optional[str]) -> None: + """Prepare for the given test phase (setup/call/teardown).""" + self._when = when + self._section_name_shown = False + if when == "start": + self._test_outcome_written = False + + def emit(self, record: logging.LogRecord) -> None: + ctx_manager = ( + self.capture_manager.global_and_fixture_disabled() + if self.capture_manager + else nullcontext() + ) + with ctx_manager: + if not self._first_record_emitted: + self.stream.write("\n") + self._first_record_emitted = True + elif self._when in ("teardown", "finish"): + if not self._test_outcome_written: + self._test_outcome_written = True + self.stream.write("\n") + if not self._section_name_shown and self._when: + self.stream.section("live log " + self._when, sep="-", bold=True) + self._section_name_shown = True + super().emit(record) + + def handleError(self, record: logging.LogRecord) -> None: + # Handled by LogCaptureHandler. + pass + + +class _LiveLoggingNullHandler(logging.NullHandler): + """A logging handler used when live logging is disabled.""" + + def reset(self) -> None: + pass + + def set_when(self, when: str) -> None: + pass + + def handleError(self, record: logging.LogRecord) -> None: + # Handled by LogCaptureHandler. + pass diff --git a/venv/lib/python3.10/site-packages/_pytest/main.py b/venv/lib/python3.10/site-packages/_pytest/main.py new file mode 100644 index 0000000..8f59075 --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/main.py @@ -0,0 +1,895 @@ +"""Core implementation of the testing process: init, session, runtest loop.""" +import argparse +import fnmatch +import functools +import importlib +import os +import sys +from pathlib import Path +from typing import Callable +from typing import Dict +from typing import FrozenSet +from typing import Iterator +from typing import List +from typing import Optional +from typing import overload +from typing import Sequence +from typing import Set +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import Union + +import attr + +import _pytest._code +from _pytest import nodes +from _pytest.compat import final +from _pytest.config import Config +from _pytest.config import directory_arg +from _pytest.config import ExitCode +from _pytest.config import hookimpl +from _pytest.config import PytestPluginManager +from _pytest.config import UsageError +from _pytest.config.argparsing import Parser +from _pytest.fixtures import FixtureManager +from _pytest.outcomes import exit +from _pytest.pathlib import absolutepath +from _pytest.pathlib import bestrelpath +from _pytest.pathlib import fnmatch_ex +from _pytest.pathlib import visit +from _pytest.reports import CollectReport +from _pytest.reports import TestReport +from _pytest.runner import collect_one_node +from _pytest.runner import SetupState + + +if TYPE_CHECKING: + from typing_extensions import Literal + + +def pytest_addoption(parser: Parser) -> None: + parser.addini( + "norecursedirs", + "directory patterns to avoid for recursion", + 
type="args", + default=[ + "*.egg", + ".*", + "_darcs", + "build", + "CVS", + "dist", + "node_modules", + "venv", + "{arch}", + ], + ) + parser.addini( + "testpaths", + "directories to search for tests when no files or directories are given in the " + "command line.", + type="args", + default=[], + ) + group = parser.getgroup("general", "running and selection options") + group._addoption( + "-x", + "--exitfirst", + action="store_const", + dest="maxfail", + const=1, + help="exit instantly on first error or failed test.", + ) + group = parser.getgroup("pytest-warnings") + group.addoption( + "-W", + "--pythonwarnings", + action="append", + help="set which warnings to report, see -W option of python itself.", + ) + parser.addini( + "filterwarnings", + type="linelist", + help="Each line specifies a pattern for " + "warnings.filterwarnings. " + "Processed after -W/--pythonwarnings.", + ) + group._addoption( + "--maxfail", + metavar="num", + action="store", + type=int, + dest="maxfail", + default=0, + help="exit after first num failures or errors.", + ) + group._addoption( + "--strict-config", + action="store_true", + help="any warnings encountered while parsing the `pytest` section of the configuration file raise errors.", + ) + group._addoption( + "--strict-markers", + action="store_true", + help="markers not registered in the `markers` section of the configuration file raise errors.", + ) + group._addoption( + "--strict", + action="store_true", + help="(deprecated) alias to --strict-markers.", + ) + group._addoption( + "-c", + metavar="file", + type=str, + dest="inifilename", + help="load configuration from `file` instead of trying to locate one of the implicit " + "configuration files.", + ) + group._addoption( + "--continue-on-collection-errors", + action="store_true", + default=False, + dest="continue_on_collection_errors", + help="Force test execution even if collection errors occur.", + ) + group._addoption( + "--rootdir", + action="store", + dest="rootdir", + help="Define root directory for tests. 
Can be relative path: 'root_dir', './root_dir', " + "'root_dir/another_dir/'; absolute path: '/home/user/root_dir'; path with variables: " + "'$HOME/root_dir'.", + ) + + group = parser.getgroup("collect", "collection") + group.addoption( + "--collectonly", + "--collect-only", + "--co", + action="store_true", + help="only collect tests, don't execute them.", + ) + group.addoption( + "--pyargs", + action="store_true", + help="try to interpret all arguments as python packages.", + ) + group.addoption( + "--ignore", + action="append", + metavar="path", + help="ignore path during collection (multi-allowed).", + ) + group.addoption( + "--ignore-glob", + action="append", + metavar="path", + help="ignore path pattern during collection (multi-allowed).", + ) + group.addoption( + "--deselect", + action="append", + metavar="nodeid_prefix", + help="deselect item (via node id prefix) during collection (multi-allowed).", + ) + group.addoption( + "--confcutdir", + dest="confcutdir", + default=None, + metavar="dir", + type=functools.partial(directory_arg, optname="--confcutdir"), + help="only load conftest.py's relative to specified dir.", + ) + group.addoption( + "--noconftest", + action="store_true", + dest="noconftest", + default=False, + help="Don't load any conftest.py files.", + ) + group.addoption( + "--keepduplicates", + "--keep-duplicates", + action="store_true", + dest="keepduplicates", + default=False, + help="Keep duplicate tests.", + ) + group.addoption( + "--collect-in-virtualenv", + action="store_true", + dest="collect_in_virtualenv", + default=False, + help="Don't ignore tests in a local virtualenv directory", + ) + group.addoption( + "--import-mode", + default="prepend", + choices=["prepend", "append", "importlib"], + dest="importmode", + help="prepend/append to sys.path when importing test modules and conftest files, " + "default is to prepend.", + ) + + group = parser.getgroup("debugconfig", "test session debugging and configuration") + group.addoption( + "--basetemp", + dest="basetemp", + default=None, + type=validate_basetemp, + metavar="dir", + help=( + "base temporary directory for this test run." 
+ "(warning: this directory is removed if it exists)" + ), + ) + + +def validate_basetemp(path: str) -> str: + # GH 7119 + msg = "basetemp must not be empty, the current working directory or any parent directory of it" + + # empty path + if not path: + raise argparse.ArgumentTypeError(msg) + + def is_ancestor(base: Path, query: Path) -> bool: + """Return whether query is an ancestor of base.""" + if base == query: + return True + return query in base.parents + + # check if path is an ancestor of cwd + if is_ancestor(Path.cwd(), Path(path).absolute()): + raise argparse.ArgumentTypeError(msg) + + # check symlinks for ancestors + if is_ancestor(Path.cwd().resolve(), Path(path).resolve()): + raise argparse.ArgumentTypeError(msg) + + return path + + +def wrap_session( + config: Config, doit: Callable[[Config, "Session"], Optional[Union[int, ExitCode]]] +) -> Union[int, ExitCode]: + """Skeleton command line program.""" + session = Session.from_config(config) + session.exitstatus = ExitCode.OK + initstate = 0 + try: + try: + config._do_configure() + initstate = 1 + config.hook.pytest_sessionstart(session=session) + initstate = 2 + session.exitstatus = doit(config, session) or 0 + except UsageError: + session.exitstatus = ExitCode.USAGE_ERROR + raise + except Failed: + session.exitstatus = ExitCode.TESTS_FAILED + except (KeyboardInterrupt, exit.Exception): + excinfo = _pytest._code.ExceptionInfo.from_current() + exitstatus: Union[int, ExitCode] = ExitCode.INTERRUPTED + if isinstance(excinfo.value, exit.Exception): + if excinfo.value.returncode is not None: + exitstatus = excinfo.value.returncode + if initstate < 2: + sys.stderr.write(f"{excinfo.typename}: {excinfo.value.msg}\n") + config.hook.pytest_keyboard_interrupt(excinfo=excinfo) + session.exitstatus = exitstatus + except BaseException: + session.exitstatus = ExitCode.INTERNAL_ERROR + excinfo = _pytest._code.ExceptionInfo.from_current() + try: + config.notify_exception(excinfo, config.option) + except exit.Exception as exc: + if exc.returncode is not None: + session.exitstatus = exc.returncode + sys.stderr.write(f"{type(exc).__name__}: {exc}\n") + else: + if isinstance(excinfo.value, SystemExit): + sys.stderr.write("mainloop: caught unexpected SystemExit!\n") + + finally: + # Explicitly break reference cycle. 
+ excinfo = None # type: ignore + os.chdir(session.startpath) + if initstate >= 2: + try: + config.hook.pytest_sessionfinish( + session=session, exitstatus=session.exitstatus + ) + except exit.Exception as exc: + if exc.returncode is not None: + session.exitstatus = exc.returncode + sys.stderr.write(f"{type(exc).__name__}: {exc}\n") + config._ensure_unconfigure() + return session.exitstatus + + +def pytest_cmdline_main(config: Config) -> Union[int, ExitCode]: + return wrap_session(config, _main) + + +def _main(config: Config, session: "Session") -> Optional[Union[int, ExitCode]]: + """Default command line protocol for initialization, session, + running tests and reporting.""" + config.hook.pytest_collection(session=session) + config.hook.pytest_runtestloop(session=session) + + if session.testsfailed: + return ExitCode.TESTS_FAILED + elif session.testscollected == 0: + return ExitCode.NO_TESTS_COLLECTED + return None + + +def pytest_collection(session: "Session") -> None: + session.perform_collect() + + +def pytest_runtestloop(session: "Session") -> bool: + if session.testsfailed and not session.config.option.continue_on_collection_errors: + raise session.Interrupted( + "%d error%s during collection" + % (session.testsfailed, "s" if session.testsfailed != 1 else "") + ) + + if session.config.option.collectonly: + return True + + for i, item in enumerate(session.items): + nextitem = session.items[i + 1] if i + 1 < len(session.items) else None + item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem) + if session.shouldfail: + raise session.Failed(session.shouldfail) + if session.shouldstop: + raise session.Interrupted(session.shouldstop) + return True + + +def _in_venv(path: Path) -> bool: + """Attempt to detect if ``path`` is the root of a Virtual Environment by + checking for the existence of the appropriate activate script.""" + bindir = path.joinpath("Scripts" if sys.platform.startswith("win") else "bin") + try: + if not bindir.is_dir(): + return False + except OSError: + return False + activates = ( + "activate", + "activate.csh", + "activate.fish", + "Activate", + "Activate.bat", + "Activate.ps1", + ) + return any(fname.name in activates for fname in bindir.iterdir()) + + +def pytest_ignore_collect(collection_path: Path, config: Config) -> Optional[bool]: + ignore_paths = config._getconftest_pathlist( + "collect_ignore", path=collection_path.parent, rootpath=config.rootpath + ) + ignore_paths = ignore_paths or [] + excludeopt = config.getoption("ignore") + if excludeopt: + ignore_paths.extend(absolutepath(x) for x in excludeopt) + + if collection_path in ignore_paths: + return True + + ignore_globs = config._getconftest_pathlist( + "collect_ignore_glob", path=collection_path.parent, rootpath=config.rootpath + ) + ignore_globs = ignore_globs or [] + excludeglobopt = config.getoption("ignore_glob") + if excludeglobopt: + ignore_globs.extend(absolutepath(x) for x in excludeglobopt) + + if any(fnmatch.fnmatch(str(collection_path), str(glob)) for glob in ignore_globs): + return True + + allow_in_venv = config.getoption("collect_in_virtualenv") + if not allow_in_venv and _in_venv(collection_path): + return True + return None + + +def pytest_collection_modifyitems(items: List[nodes.Item], config: Config) -> None: + deselect_prefixes = tuple(config.getoption("deselect") or []) + if not deselect_prefixes: + return + + remaining = [] + deselected = [] + for colitem in items: + if colitem.nodeid.startswith(deselect_prefixes): + deselected.append(colitem) + else: + 
remaining.append(colitem) + + if deselected: + config.hook.pytest_deselected(items=deselected) + items[:] = remaining + + +class FSHookProxy: + def __init__(self, pm: PytestPluginManager, remove_mods) -> None: + self.pm = pm + self.remove_mods = remove_mods + + def __getattr__(self, name: str): + x = self.pm.subset_hook_caller(name, remove_plugins=self.remove_mods) + self.__dict__[name] = x + return x + + +class Interrupted(KeyboardInterrupt): + """Signals that the test run was interrupted.""" + + __module__ = "builtins" # For py3. + + +class Failed(Exception): + """Signals a stop as failed test run.""" + + +@attr.s(slots=True, auto_attribs=True) +class _bestrelpath_cache(Dict[Path, str]): + path: Path + + def __missing__(self, path: Path) -> str: + r = bestrelpath(self.path, path) + self[path] = r + return r + + +@final +class Session(nodes.FSCollector): + Interrupted = Interrupted + Failed = Failed + # Set on the session by runner.pytest_sessionstart. + _setupstate: SetupState + # Set on the session by fixtures.pytest_sessionstart. + _fixturemanager: FixtureManager + exitstatus: Union[int, ExitCode] + + def __init__(self, config: Config) -> None: + super().__init__( + path=config.rootpath, + fspath=None, + parent=None, + config=config, + session=self, + nodeid="", + ) + self.testsfailed = 0 + self.testscollected = 0 + self.shouldstop: Union[bool, str] = False + self.shouldfail: Union[bool, str] = False + self.trace = config.trace.root.get("collection") + self._initialpaths: FrozenSet[Path] = frozenset() + + self._bestrelpathcache: Dict[Path, str] = _bestrelpath_cache(config.rootpath) + + self.config.pluginmanager.register(self, name="session") + + @classmethod + def from_config(cls, config: Config) -> "Session": + session: Session = cls._create(config=config) + return session + + def __repr__(self) -> str: + return "<%s %s exitstatus=%r testsfailed=%d testscollected=%d>" % ( + self.__class__.__name__, + self.name, + getattr(self, "exitstatus", ""), + self.testsfailed, + self.testscollected, + ) + + @property + def startpath(self) -> Path: + """The path from which pytest was invoked. + + .. versionadded:: 7.0.0 + """ + return self.config.invocation_params.dir + + def _node_location_to_relpath(self, node_path: Path) -> str: + # bestrelpath is a quite slow function. + return self._bestrelpathcache[node_path] + + @hookimpl(tryfirst=True) + def pytest_collectstart(self) -> None: + if self.shouldfail: + raise self.Failed(self.shouldfail) + if self.shouldstop: + raise self.Interrupted(self.shouldstop) + + @hookimpl(tryfirst=True) + def pytest_runtest_logreport( + self, report: Union[TestReport, CollectReport] + ) -> None: + if report.failed and not hasattr(report, "wasxfail"): + self.testsfailed += 1 + maxfail = self.config.getvalue("maxfail") + if maxfail and self.testsfailed >= maxfail: + self.shouldfail = "stopping after %d failures" % (self.testsfailed) + + pytest_collectreport = pytest_runtest_logreport + + def isinitpath(self, path: Union[str, "os.PathLike[str]"]) -> bool: + # Optimization: Path(Path(...)) is much slower than isinstance. + path_ = path if isinstance(path, Path) else Path(path) + return path_ in self._initialpaths + + def gethookproxy(self, fspath: "os.PathLike[str]"): + # Optimization: Path(Path(...)) is much slower than isinstance. + path = fspath if isinstance(fspath, Path) else Path(fspath) + pm = self.config.pluginmanager + # Check if we have the common case of running + # hooks with all conftest.py files. 
+ my_conftestmodules = pm._getconftestmodules( + path, + self.config.getoption("importmode"), + rootpath=self.config.rootpath, + ) + remove_mods = pm._conftest_plugins.difference(my_conftestmodules) + if remove_mods: + # One or more conftests are not in use at this fspath. + from .config.compat import PathAwareHookProxy + + proxy = PathAwareHookProxy(FSHookProxy(pm, remove_mods)) + else: + # All plugins are active for this fspath. + proxy = self.config.hook + return proxy + + def _recurse(self, direntry: "os.DirEntry[str]") -> bool: + if direntry.name == "__pycache__": + return False + fspath = Path(direntry.path) + ihook = self.gethookproxy(fspath.parent) + if ihook.pytest_ignore_collect(collection_path=fspath, config=self.config): + return False + norecursepatterns = self.config.getini("norecursedirs") + if any(fnmatch_ex(pat, fspath) for pat in norecursepatterns): + return False + return True + + def _collectfile( + self, fspath: Path, handle_dupes: bool = True + ) -> Sequence[nodes.Collector]: + assert ( + fspath.is_file() + ), "{!r} is not a file (isdir={!r}, exists={!r}, islink={!r})".format( + fspath, fspath.is_dir(), fspath.exists(), fspath.is_symlink() + ) + ihook = self.gethookproxy(fspath) + if not self.isinitpath(fspath): + if ihook.pytest_ignore_collect(collection_path=fspath, config=self.config): + return () + + if handle_dupes: + keepduplicates = self.config.getoption("keepduplicates") + if not keepduplicates: + duplicate_paths = self.config.pluginmanager._duplicatepaths + if fspath in duplicate_paths: + return () + else: + duplicate_paths.add(fspath) + + return ihook.pytest_collect_file(file_path=fspath, parent=self) # type: ignore[no-any-return] + + @overload + def perform_collect( + self, args: Optional[Sequence[str]] = ..., genitems: "Literal[True]" = ... + ) -> Sequence[nodes.Item]: + ... + + @overload + def perform_collect( + self, args: Optional[Sequence[str]] = ..., genitems: bool = ... + ) -> Sequence[Union[nodes.Item, nodes.Collector]]: + ... + + def perform_collect( + self, args: Optional[Sequence[str]] = None, genitems: bool = True + ) -> Sequence[Union[nodes.Item, nodes.Collector]]: + """Perform the collection phase for this session. + + This is called by the default :hook:`pytest_collection` hook + implementation; see the documentation of this hook for more details. + For testing purposes, it may also be called directly on a fresh + ``Session``. + + This function normally recursively expands any collectors collected + from the session to their items, and only items are returned. For + testing purposes, this may be suppressed by passing ``genitems=False``, + in which case the return value contains these collectors unexpanded, + and ``session.items`` is empty. 
+ """ + if args is None: + args = self.config.args + + self.trace("perform_collect", self, args) + self.trace.root.indent += 1 + + self._notfound: List[Tuple[str, Sequence[nodes.Collector]]] = [] + self._initial_parts: List[Tuple[Path, List[str]]] = [] + self.items: List[nodes.Item] = [] + + hook = self.config.hook + + items: Sequence[Union[nodes.Item, nodes.Collector]] = self.items + try: + initialpaths: List[Path] = [] + for arg in args: + fspath, parts = resolve_collection_argument( + self.config.invocation_params.dir, + arg, + as_pypath=self.config.option.pyargs, + ) + self._initial_parts.append((fspath, parts)) + initialpaths.append(fspath) + self._initialpaths = frozenset(initialpaths) + rep = collect_one_node(self) + self.ihook.pytest_collectreport(report=rep) + self.trace.root.indent -= 1 + if self._notfound: + errors = [] + for arg, cols in self._notfound: + line = f"(no name {arg!r} in any of {cols!r})" + errors.append(f"not found: {arg}\n{line}") + raise UsageError(*errors) + if not genitems: + items = rep.result + else: + if rep.passed: + for node in rep.result: + self.items.extend(self.genitems(node)) + + self.config.pluginmanager.check_pending() + hook.pytest_collection_modifyitems( + session=self, config=self.config, items=items + ) + finally: + hook.pytest_collection_finish(session=self) + + self.testscollected = len(items) + return items + + def collect(self) -> Iterator[Union[nodes.Item, nodes.Collector]]: + from _pytest.python import Package + + # Keep track of any collected nodes in here, so we don't duplicate fixtures. + node_cache1: Dict[Path, Sequence[nodes.Collector]] = {} + node_cache2: Dict[Tuple[Type[nodes.Collector], Path], nodes.Collector] = {} + + # Keep track of any collected collectors in matchnodes paths, so they + # are not collected more than once. + matchnodes_cache: Dict[Tuple[Type[nodes.Collector], str], CollectReport] = {} + + # Dirnames of pkgs with dunder-init files. + pkg_roots: Dict[str, Package] = {} + + for argpath, names in self._initial_parts: + self.trace("processing argument", (argpath, names)) + self.trace.root.indent += 1 + + # Start with a Session root, and delve to argpath item (dir or file) + # and stack all Packages found on the way. + # No point in finding packages when collecting doctests. + if not self.config.getoption("doctestmodules", False): + pm = self.config.pluginmanager + for parent in (argpath, *argpath.parents): + if not pm._is_in_confcutdir(argpath): + break + + if parent.is_dir(): + pkginit = parent / "__init__.py" + if pkginit.is_file() and pkginit not in node_cache1: + col = self._collectfile(pkginit, handle_dupes=False) + if col: + if isinstance(col[0], Package): + pkg_roots[str(parent)] = col[0] + node_cache1[col[0].path] = [col[0]] + + # If it's a directory argument, recurse and look for any Subpackages. + # Let the Package collector deal with subnodes, don't collect here. + if argpath.is_dir(): + assert not names, f"invalid arg {(argpath, names)!r}" + + seen_dirs: Set[Path] = set() + for direntry in visit(str(argpath), self._recurse): + if not direntry.is_file(): + continue + + path = Path(direntry.path) + dirpath = path.parent + + if dirpath not in seen_dirs: + # Collect packages first. + seen_dirs.add(dirpath) + pkginit = dirpath / "__init__.py" + if pkginit.exists(): + for x in self._collectfile(pkginit): + yield x + if isinstance(x, Package): + pkg_roots[str(dirpath)] = x + if str(dirpath) in pkg_roots: + # Do not collect packages here. 
+ continue + + for x in self._collectfile(path): + key2 = (type(x), x.path) + if key2 in node_cache2: + yield node_cache2[key2] + else: + node_cache2[key2] = x + yield x + else: + assert argpath.is_file() + + if argpath in node_cache1: + col = node_cache1[argpath] + else: + collect_root = pkg_roots.get(str(argpath.parent), self) + col = collect_root._collectfile(argpath, handle_dupes=False) + if col: + node_cache1[argpath] = col + + matching = [] + work: List[ + Tuple[Sequence[Union[nodes.Item, nodes.Collector]], Sequence[str]] + ] = [(col, names)] + while work: + self.trace("matchnodes", col, names) + self.trace.root.indent += 1 + + matchnodes, matchnames = work.pop() + for node in matchnodes: + if not matchnames: + matching.append(node) + continue + if not isinstance(node, nodes.Collector): + continue + key = (type(node), node.nodeid) + if key in matchnodes_cache: + rep = matchnodes_cache[key] + else: + rep = collect_one_node(node) + matchnodes_cache[key] = rep + if rep.passed: + submatchnodes = [] + for r in rep.result: + # TODO: Remove parametrized workaround once collection structure contains + # parametrization. + if ( + r.name == matchnames[0] + or r.name.split("[")[0] == matchnames[0] + ): + submatchnodes.append(r) + if submatchnodes: + work.append((submatchnodes, matchnames[1:])) + else: + # Report collection failures here to avoid failing to run some test + # specified in the command line because the module could not be + # imported (#134). + node.ihook.pytest_collectreport(report=rep) + + self.trace("matchnodes finished -> ", len(matching), "nodes") + self.trace.root.indent -= 1 + + if not matching: + report_arg = "::".join((str(argpath), *names)) + self._notfound.append((report_arg, col)) + continue + + # If __init__.py was the only file requested, then the matched + # node will be the corresponding Package (by default), and the + # first yielded item will be the __init__ Module itself, so + # just use that. If this special case isn't taken, then all the + # files in the package will be yielded. + if argpath.name == "__init__.py" and isinstance(matching[0], Package): + try: + yield next(iter(matching[0].collect())) + except StopIteration: + # The package collects nothing with only an __init__.py + # file in it, which gets ignored by the default + # "python_files" option. 
+                    pass
+                continue
+
+            yield from matching
+
+            self.trace.root.indent -= 1
+
+    def genitems(
+        self, node: Union[nodes.Item, nodes.Collector]
+    ) -> Iterator[nodes.Item]:
+        self.trace("genitems", node)
+        if isinstance(node, nodes.Item):
+            node.ihook.pytest_itemcollected(item=node)
+            yield node
+        else:
+            assert isinstance(node, nodes.Collector)
+            rep = collect_one_node(node)
+            if rep.passed:
+                for subnode in rep.result:
+                    yield from self.genitems(subnode)
+            node.ihook.pytest_collectreport(report=rep)
+
+
+def search_pypath(module_name: str) -> str:
+    """Search sys.path for the given dotted module name, and return its file system path."""
+    try:
+        spec = importlib.util.find_spec(module_name)
+    # AttributeError: looks like package module, but actually filename
+    # ImportError: module does not exist
+    # ValueError: not a module name
+    except (AttributeError, ImportError, ValueError):
+        return module_name
+    if spec is None or spec.origin is None or spec.origin == "namespace":
+        return module_name
+    elif spec.submodule_search_locations:
+        return os.path.dirname(spec.origin)
+    else:
+        return spec.origin
+
+
+def resolve_collection_argument(
+    invocation_path: Path, arg: str, *, as_pypath: bool = False
+) -> Tuple[Path, List[str]]:
+    """Parse path arguments optionally containing selection parts and return (fspath, names).
+
+    Command-line arguments can point to files and/or directories, and optionally contain
+    parts for specific test selection, for example:
+
+        "pkg/tests/test_foo.py::TestClass::test_foo"
+
+    This function ensures the path exists, and returns a tuple:
+
+        (Path("/full/path/to/pkg/tests/test_foo.py"), ["TestClass", "test_foo"])
+
+    When as_pypath is True, the command-line argument is expected to contain
+    module paths instead of file-system paths:
+
+        "pkg.tests.test_foo::TestClass::test_foo"
+
+    In that case we search sys.path for a matching module, and then return the *path* to the
+    found module.
+
+    If the path doesn't exist, raise UsageError.
+    If the path is a directory and selection parts are present, raise UsageError.
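+
+    A note on parametrized ids (the argument below is hypothetical): the
+    "[...]" suffix is split off before the "::" parts are parsed and then
+    re-attached to the last selection part, so bracketed ids containing
+    "::" cannot break the parse:
+
+        "tests/test_foo.py::test_bar[1-2]"
+
+    yields
+
+        (Path("/full/path/to/tests/test_foo.py"), ["test_bar[1-2]"])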
+ """ + base, squacket, rest = str(arg).partition("[") + strpath, *parts = base.split("::") + if parts: + parts[-1] = f"{parts[-1]}{squacket}{rest}" + if as_pypath: + strpath = search_pypath(strpath) + fspath = invocation_path / strpath + fspath = absolutepath(fspath) + if not fspath.exists(): + msg = ( + "module or package not found: {arg} (missing __init__.py?)" + if as_pypath + else "file or directory not found: {arg}" + ) + raise UsageError(msg.format(arg=arg)) + if parts and fspath.is_dir(): + msg = ( + "package argument cannot contain :: selection parts: {arg}" + if as_pypath + else "directory argument cannot contain :: selection parts: {arg}" + ) + raise UsageError(msg.format(arg=arg)) + return fspath, parts diff --git a/venv/lib/python3.10/site-packages/_pytest/mark/__init__.py b/venv/lib/python3.10/site-packages/_pytest/mark/__init__.py new file mode 100644 index 0000000..11e6e34 --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/mark/__init__.py @@ -0,0 +1,266 @@ +"""Generic mechanism for marking and selecting python functions.""" +from typing import AbstractSet +from typing import Collection +from typing import List +from typing import Optional +from typing import TYPE_CHECKING +from typing import Union + +import attr + +from .expression import Expression +from .expression import ParseError +from .structures import EMPTY_PARAMETERSET_OPTION +from .structures import get_empty_parameterset_mark +from .structures import Mark +from .structures import MARK_GEN +from .structures import MarkDecorator +from .structures import MarkGenerator +from .structures import ParameterSet +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config import hookimpl +from _pytest.config import UsageError +from _pytest.config.argparsing import Parser +from _pytest.stash import StashKey + +if TYPE_CHECKING: + from _pytest.nodes import Item + + +__all__ = [ + "MARK_GEN", + "Mark", + "MarkDecorator", + "MarkGenerator", + "ParameterSet", + "get_empty_parameterset_mark", +] + + +old_mark_config_key = StashKey[Optional[Config]]() + + +def param( + *values: object, + marks: Union[MarkDecorator, Collection[Union[MarkDecorator, Mark]]] = (), + id: Optional[str] = None, +) -> ParameterSet: + """Specify a parameter in `pytest.mark.parametrize`_ calls or + :ref:`parametrized fixtures `. + + .. code-block:: python + + @pytest.mark.parametrize( + "test_input,expected", + [ + ("3+5", 8), + pytest.param("6*9", 42, marks=pytest.mark.xfail), + ], + ) + def test_eval(test_input, expected): + assert eval(test_input) == expected + + :param values: Variable args of the values of the parameter set, in order. + :keyword marks: A single mark or a list of marks to be applied to this parameter set. + :keyword str id: The id to attribute to this parameter set. + """ + return ParameterSet.param(*values, marks=marks, id=id) + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("general") + group._addoption( + "-k", + action="store", + dest="keyword", + default="", + metavar="EXPRESSION", + help="only run tests which match the given substring expression. " + "An expression is a python evaluatable expression " + "where all names are substring-matched against test names " + "and their parent classes. Example: -k 'test_method or test_" + "other' matches all test functions and classes whose name " + "contains 'test_method' or 'test_other', while -k 'not test_method' " + "matches those that don't contain 'test_method' in their names. 
" + "-k 'not test_method and not test_other' will eliminate the matches. " + "Additionally keywords are matched to classes and functions " + "containing extra names in their 'extra_keyword_matches' set, " + "as well as functions which have names assigned directly to them. " + "The matching is case-insensitive.", + ) + + group._addoption( + "-m", + action="store", + dest="markexpr", + default="", + metavar="MARKEXPR", + help="only run tests matching given mark expression.\n" + "For example: -m 'mark1 and not mark2'.", + ) + + group.addoption( + "--markers", + action="store_true", + help="show markers (builtin, plugin and per-project ones).", + ) + + parser.addini("markers", "markers for test functions", "linelist") + parser.addini(EMPTY_PARAMETERSET_OPTION, "default marker for empty parametersets") + + +@hookimpl(tryfirst=True) +def pytest_cmdline_main(config: Config) -> Optional[Union[int, ExitCode]]: + import _pytest.config + + if config.option.markers: + config._do_configure() + tw = _pytest.config.create_terminal_writer(config) + for line in config.getini("markers"): + parts = line.split(":", 1) + name = parts[0] + rest = parts[1] if len(parts) == 2 else "" + tw.write("@pytest.mark.%s:" % name, bold=True) + tw.line(rest) + tw.line() + config._ensure_unconfigure() + return 0 + + return None + + +@attr.s(slots=True, auto_attribs=True) +class KeywordMatcher: + """A matcher for keywords. + + Given a list of names, matches any substring of one of these names. The + string inclusion check is case-insensitive. + + Will match on the name of colitem, including the names of its parents. + Only matches names of items which are either a :class:`Class` or a + :class:`Function`. + + Additionally, matches on names in the 'extra_keyword_matches' set of + any item, as well as names directly assigned to test functions. + """ + + _names: AbstractSet[str] + + @classmethod + def from_item(cls, item: "Item") -> "KeywordMatcher": + mapped_names = set() + + # Add the names of the current item and any parent items. + import pytest + + for node in item.listchain(): + if not isinstance(node, pytest.Session): + mapped_names.add(node.name) + + # Add the names added as extra keywords to current or parent items. + mapped_names.update(item.listextrakeywords()) + + # Add the names attached to the current function through direct assignment. + function_obj = getattr(item, "function", None) + if function_obj: + mapped_names.update(function_obj.__dict__) + + # Add the markers to the keywords as we no longer handle them correctly. + mapped_names.update(mark.name for mark in item.iter_markers()) + + return cls(mapped_names) + + def __call__(self, subname: str) -> bool: + subname = subname.lower() + names = (name.lower() for name in self._names) + + for name in names: + if subname in name: + return True + return False + + +def deselect_by_keyword(items: "List[Item]", config: Config) -> None: + keywordexpr = config.option.keyword.lstrip() + if not keywordexpr: + return + + expr = _parse_expression(keywordexpr, "Wrong expression passed to '-k'") + + remaining = [] + deselected = [] + for colitem in items: + if not expr.evaluate(KeywordMatcher.from_item(colitem)): + deselected.append(colitem) + else: + remaining.append(colitem) + + if deselected: + config.hook.pytest_deselected(items=deselected) + items[:] = remaining + + +@attr.s(slots=True, auto_attribs=True) +class MarkMatcher: + """A matcher for markers which are present. + + Tries to match on any marker names, attached to the given colitem. 
+    """
+
+    own_mark_names: AbstractSet[str]
+
+    @classmethod
+    def from_item(cls, item: "Item") -> "MarkMatcher":
+        mark_names = {mark.name for mark in item.iter_markers()}
+        return cls(mark_names)
+
+    def __call__(self, name: str) -> bool:
+        return name in self.own_mark_names
+
+
+def deselect_by_mark(items: "List[Item]", config: Config) -> None:
+    matchexpr = config.option.markexpr
+    if not matchexpr:
+        return
+
+    expr = _parse_expression(matchexpr, "Wrong expression passed to '-m'")
+    remaining: List[Item] = []
+    deselected: List[Item] = []
+    for item in items:
+        if expr.evaluate(MarkMatcher.from_item(item)):
+            remaining.append(item)
+        else:
+            deselected.append(item)
+    if deselected:
+        config.hook.pytest_deselected(items=deselected)
+        items[:] = remaining
+
+
+def _parse_expression(expr: str, exc_message: str) -> Expression:
+    try:
+        return Expression.compile(expr)
+    except ParseError as e:
+        raise UsageError(f"{exc_message}: {expr}: {e}") from None
+
+
+def pytest_collection_modifyitems(items: "List[Item]", config: Config) -> None:
+    deselect_by_keyword(items, config)
+    deselect_by_mark(items, config)
+
+
+def pytest_configure(config: Config) -> None:
+    config.stash[old_mark_config_key] = MARK_GEN._config
+    MARK_GEN._config = config
+
+    empty_parameterset = config.getini(EMPTY_PARAMETERSET_OPTION)
+
+    if empty_parameterset not in ("skip", "xfail", "fail_at_collect", None, ""):
+        raise UsageError(
+            "{!s} must be one of skip, xfail or fail_at_collect"
+            " but it is {!r}".format(EMPTY_PARAMETERSET_OPTION, empty_parameterset)
+        )
+
+
+def pytest_unconfigure(config: Config) -> None:
+    MARK_GEN._config = config.stash.get(old_mark_config_key, None)
diff --git a/venv/lib/python3.10/site-packages/_pytest/mark/expression.py b/venv/lib/python3.10/site-packages/_pytest/mark/expression.py
new file mode 100644
index 0000000..92220d7
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/_pytest/mark/expression.py
@@ -0,0 +1,225 @@
+r"""Evaluate match expressions, as used by `-k` and `-m`.
+
+The grammar is:
+
+expression: expr? EOF
+expr: and_expr ('or' and_expr)*
+and_expr: not_expr ('and' not_expr)*
+not_expr: 'not' not_expr | '(' expr ')' | ident
+ident: (\w|:|\+|-|\.|\[|\]|\\|/)+
+
+The semantics are:
+
+- Empty expression evaluates to False.
+- ident evaluates to True or False according to a provided matcher function.
+- or/and/not evaluate according to the usual boolean semantics.
+"""
+import ast
+import enum
+import re
+import types
+from typing import Callable
+from typing import Iterator
+from typing import Mapping
+from typing import Optional
+from typing import Sequence
+from typing import TYPE_CHECKING
+
+import attr
+
+if TYPE_CHECKING:
+    from typing import NoReturn
+
+
+__all__ = [
+    "Expression",
+    "ParseError",
+]
+
+
+class TokenType(enum.Enum):
+    LPAREN = "left parenthesis"
+    RPAREN = "right parenthesis"
+    OR = "or"
+    AND = "and"
+    NOT = "not"
+    IDENT = "identifier"
+    EOF = "end of input"
+
+
+@attr.s(frozen=True, slots=True, auto_attribs=True)
+class Token:
+    type: TokenType
+    value: str
+    pos: int
+
+
+class ParseError(Exception):
+    """The expression contains invalid syntax.
+
+    :param column: The column in the line where the error occurred (1-based).
+    :param message: A description of the error.
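+
+    For example (an illustrative sketch), ``Expression.compile("foo or")``
+    raises a ``ParseError`` whose ``str()`` reads::
+
+        at column 7: expected not OR left parenthesis OR identifier; got end of input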
+ """ + + def __init__(self, column: int, message: str) -> None: + self.column = column + self.message = message + + def __str__(self) -> str: + return f"at column {self.column}: {self.message}" + + +class Scanner: + __slots__ = ("tokens", "current") + + def __init__(self, input: str) -> None: + self.tokens = self.lex(input) + self.current = next(self.tokens) + + def lex(self, input: str) -> Iterator[Token]: + pos = 0 + while pos < len(input): + if input[pos] in (" ", "\t"): + pos += 1 + elif input[pos] == "(": + yield Token(TokenType.LPAREN, "(", pos) + pos += 1 + elif input[pos] == ")": + yield Token(TokenType.RPAREN, ")", pos) + pos += 1 + else: + match = re.match(r"(:?\w|:|\+|-|\.|\[|\]|\\|/)+", input[pos:]) + if match: + value = match.group(0) + if value == "or": + yield Token(TokenType.OR, value, pos) + elif value == "and": + yield Token(TokenType.AND, value, pos) + elif value == "not": + yield Token(TokenType.NOT, value, pos) + else: + yield Token(TokenType.IDENT, value, pos) + pos += len(value) + else: + raise ParseError( + pos + 1, + f'unexpected character "{input[pos]}"', + ) + yield Token(TokenType.EOF, "", pos) + + def accept(self, type: TokenType, *, reject: bool = False) -> Optional[Token]: + if self.current.type is type: + token = self.current + if token.type is not TokenType.EOF: + self.current = next(self.tokens) + return token + if reject: + self.reject((type,)) + return None + + def reject(self, expected: Sequence[TokenType]) -> "NoReturn": + raise ParseError( + self.current.pos + 1, + "expected {}; got {}".format( + " OR ".join(type.value for type in expected), + self.current.type.value, + ), + ) + + +# True, False and None are legal match expression identifiers, +# but illegal as Python identifiers. To fix this, this prefix +# is added to identifiers in the conversion to Python AST. +IDENT_PREFIX = "$" + + +def expression(s: Scanner) -> ast.Expression: + if s.accept(TokenType.EOF): + ret: ast.expr = ast.NameConstant(False) + else: + ret = expr(s) + s.accept(TokenType.EOF, reject=True) + return ast.fix_missing_locations(ast.Expression(ret)) + + +def expr(s: Scanner) -> ast.expr: + ret = and_expr(s) + while s.accept(TokenType.OR): + rhs = and_expr(s) + ret = ast.BoolOp(ast.Or(), [ret, rhs]) + return ret + + +def and_expr(s: Scanner) -> ast.expr: + ret = not_expr(s) + while s.accept(TokenType.AND): + rhs = not_expr(s) + ret = ast.BoolOp(ast.And(), [ret, rhs]) + return ret + + +def not_expr(s: Scanner) -> ast.expr: + if s.accept(TokenType.NOT): + return ast.UnaryOp(ast.Not(), not_expr(s)) + if s.accept(TokenType.LPAREN): + ret = expr(s) + s.accept(TokenType.RPAREN, reject=True) + return ret + ident = s.accept(TokenType.IDENT) + if ident: + return ast.Name(IDENT_PREFIX + ident.value, ast.Load()) + s.reject((TokenType.NOT, TokenType.LPAREN, TokenType.IDENT)) + + +class MatcherAdapter(Mapping[str, bool]): + """Adapts a matcher function to a locals mapping as required by eval().""" + + def __init__(self, matcher: Callable[[str], bool]) -> None: + self.matcher = matcher + + def __getitem__(self, key: str) -> bool: + return self.matcher(key[len(IDENT_PREFIX) :]) + + def __iter__(self) -> Iterator[str]: + raise NotImplementedError() + + def __len__(self) -> int: + raise NotImplementedError() + + +class Expression: + """A compiled match expression as used by -k and -m. + + The expression can be evaluated against different matchers. 
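+
+    A minimal usage sketch::
+
+        expr = Expression.compile("foo and not bar")
+        expr.evaluate(lambda name: name == "foo")  # True
+        expr.evaluate(lambda name: name == "bar")  # False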
+ """ + + __slots__ = ("code",) + + def __init__(self, code: types.CodeType) -> None: + self.code = code + + @classmethod + def compile(self, input: str) -> "Expression": + """Compile a match expression. + + :param input: The input expression - one line. + """ + astexpr = expression(Scanner(input)) + code: types.CodeType = compile( + astexpr, + filename="", + mode="eval", + ) + return Expression(code) + + def evaluate(self, matcher: Callable[[str], bool]) -> bool: + """Evaluate the match expression. + + :param matcher: + Given an identifier, should return whether it matches or not. + Should be prepared to handle arbitrary strings as input. + + :returns: Whether the expression matches or not. + """ + ret: bool = eval(self.code, {"__builtins__": {}}, MatcherAdapter(matcher)) + return ret diff --git a/venv/lib/python3.10/site-packages/_pytest/mark/structures.py b/venv/lib/python3.10/site-packages/_pytest/mark/structures.py new file mode 100644 index 0000000..ec41b3f --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/mark/structures.py @@ -0,0 +1,590 @@ +import collections.abc +import inspect +import warnings +from typing import Any +from typing import Callable +from typing import Collection +from typing import Iterable +from typing import Iterator +from typing import List +from typing import Mapping +from typing import MutableMapping +from typing import NamedTuple +from typing import Optional +from typing import overload +from typing import Sequence +from typing import Set +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +import attr + +from .._code import getfslineno +from ..compat import ascii_escaped +from ..compat import final +from ..compat import NOTSET +from ..compat import NotSetType +from _pytest.config import Config +from _pytest.deprecated import check_ispytest +from _pytest.outcomes import fail +from _pytest.warning_types import PytestUnknownMarkWarning + +if TYPE_CHECKING: + from ..nodes import Node + + +EMPTY_PARAMETERSET_OPTION = "empty_parameter_set_mark" + + +def istestfunc(func) -> bool: + return callable(func) and getattr(func, "__name__", "") != "" + + +def get_empty_parameterset_mark( + config: Config, argnames: Sequence[str], func +) -> "MarkDecorator": + from ..nodes import Collector + + fs, lineno = getfslineno(func) + reason = "got empty parameter set %r, function %s at %s:%d" % ( + argnames, + func.__name__, + fs, + lineno, + ) + + requested_mark = config.getini(EMPTY_PARAMETERSET_OPTION) + if requested_mark in ("", None, "skip"): + mark = MARK_GEN.skip(reason=reason) + elif requested_mark == "xfail": + mark = MARK_GEN.xfail(reason=reason, run=False) + elif requested_mark == "fail_at_collect": + f_name = func.__name__ + _, lineno = getfslineno(func) + raise Collector.CollectError( + "Empty parameter set in '%s' at line %d" % (f_name, lineno + 1) + ) + else: + raise LookupError(requested_mark) + return mark + + +class ParameterSet(NamedTuple): + values: Sequence[Union[object, NotSetType]] + marks: Collection[Union["MarkDecorator", "Mark"]] + id: Optional[str] + + @classmethod + def param( + cls, + *values: object, + marks: Union["MarkDecorator", Collection[Union["MarkDecorator", "Mark"]]] = (), + id: Optional[str] = None, + ) -> "ParameterSet": + if isinstance(marks, MarkDecorator): + marks = (marks,) + else: + assert isinstance(marks, collections.abc.Collection) + + if id is not None: + if not isinstance(id, str): + raise TypeError(f"Expected id to be a string, got 
{type(id)}: {id!r}") + id = ascii_escaped(id) + return cls(values, marks, id) + + @classmethod + def extract_from( + cls, + parameterset: Union["ParameterSet", Sequence[object], object], + force_tuple: bool = False, + ) -> "ParameterSet": + """Extract from an object or objects. + + :param parameterset: + A legacy style parameterset that may or may not be a tuple, + and may or may not be wrapped into a mess of mark objects. + + :param force_tuple: + Enforce tuple wrapping so single argument tuple values + don't get decomposed and break tests. + """ + + if isinstance(parameterset, cls): + return parameterset + if force_tuple: + return cls.param(parameterset) + else: + # TODO: Refactor to fix this type-ignore. Currently the following + # passes type-checking but crashes: + # + # @pytest.mark.parametrize(('x', 'y'), [1, 2]) + # def test_foo(x, y): pass + return cls(parameterset, marks=[], id=None) # type: ignore[arg-type] + + @staticmethod + def _parse_parametrize_args( + argnames: Union[str, List[str], Tuple[str, ...]], + argvalues: Iterable[Union["ParameterSet", Sequence[object], object]], + *args, + **kwargs, + ) -> Tuple[Union[List[str], Tuple[str, ...]], bool]: + if not isinstance(argnames, (tuple, list)): + argnames = [x.strip() for x in argnames.split(",") if x.strip()] + force_tuple = len(argnames) == 1 + else: + force_tuple = False + return argnames, force_tuple + + @staticmethod + def _parse_parametrize_parameters( + argvalues: Iterable[Union["ParameterSet", Sequence[object], object]], + force_tuple: bool, + ) -> List["ParameterSet"]: + return [ + ParameterSet.extract_from(x, force_tuple=force_tuple) for x in argvalues + ] + + @classmethod + def _for_parametrize( + cls, + argnames: Union[str, List[str], Tuple[str, ...]], + argvalues: Iterable[Union["ParameterSet", Sequence[object], object]], + func, + config: Config, + nodeid: str, + ) -> Tuple[Union[List[str], Tuple[str, ...]], List["ParameterSet"]]: + argnames, force_tuple = cls._parse_parametrize_args(argnames, argvalues) + parameters = cls._parse_parametrize_parameters(argvalues, force_tuple) + del argvalues + + if parameters: + # Check all parameter sets have the correct number of values. + for param in parameters: + if len(param.values) != len(argnames): + msg = ( + '{nodeid}: in "parametrize" the number of names ({names_len}):\n' + " {names}\n" + "must be equal to the number of values ({values_len}):\n" + " {values}" + ) + fail( + msg.format( + nodeid=nodeid, + values=param.values, + names=argnames, + names_len=len(argnames), + values_len=len(param.values), + ), + pytrace=False, + ) + else: + # Empty parameter set (likely computed at runtime): create a single + # parameter set with NOTSET values, with the "empty parameter set" mark applied to it. + mark = get_empty_parameterset_mark(config, argnames, func) + parameters.append( + ParameterSet(values=(NOTSET,) * len(argnames), marks=[mark], id=None) + ) + return argnames, parameters + + +@final +@attr.s(frozen=True, init=False, auto_attribs=True) +class Mark: + #: Name of the mark. + name: str + #: Positional arguments of the mark decorator. + args: Tuple[Any, ...] + #: Keyword arguments of the mark decorator. + kwargs: Mapping[str, Any] + + #: Source Mark for ids with parametrize Marks. + _param_ids_from: Optional["Mark"] = attr.ib(default=None, repr=False) + #: Resolved/generated ids with parametrize Marks. 
+ _param_ids_generated: Optional[Sequence[str]] = attr.ib(default=None, repr=False) + + def __init__( + self, + name: str, + args: Tuple[Any, ...], + kwargs: Mapping[str, Any], + param_ids_from: Optional["Mark"] = None, + param_ids_generated: Optional[Sequence[str]] = None, + *, + _ispytest: bool = False, + ) -> None: + """:meta private:""" + check_ispytest(_ispytest) + # Weirdness to bypass frozen=True. + object.__setattr__(self, "name", name) + object.__setattr__(self, "args", args) + object.__setattr__(self, "kwargs", kwargs) + object.__setattr__(self, "_param_ids_from", param_ids_from) + object.__setattr__(self, "_param_ids_generated", param_ids_generated) + + def _has_param_ids(self) -> bool: + return "ids" in self.kwargs or len(self.args) >= 4 + + def combined_with(self, other: "Mark") -> "Mark": + """Return a new Mark which is a combination of this + Mark and another Mark. + + Combines by appending args and merging kwargs. + + :param Mark other: The mark to combine with. + :rtype: Mark + """ + assert self.name == other.name + + # Remember source of ids with parametrize Marks. + param_ids_from: Optional[Mark] = None + if self.name == "parametrize": + if other._has_param_ids(): + param_ids_from = other + elif self._has_param_ids(): + param_ids_from = self + + return Mark( + self.name, + self.args + other.args, + dict(self.kwargs, **other.kwargs), + param_ids_from=param_ids_from, + _ispytest=True, + ) + + +# A generic parameter designating an object to which a Mark may +# be applied -- a test function (callable) or class. +# Note: a lambda is not allowed, but this can't be represented. +Markable = TypeVar("Markable", bound=Union[Callable[..., object], type]) + + +@attr.s(init=False, auto_attribs=True) +class MarkDecorator: + """A decorator for applying a mark on test functions and classes. + + ``MarkDecorators`` are created with ``pytest.mark``:: + + mark1 = pytest.mark.NAME # Simple MarkDecorator + mark2 = pytest.mark.NAME(name1=value) # Parametrized MarkDecorator + + and can then be applied as decorators to test functions:: + + @mark2 + def test_function(): + pass + + When a ``MarkDecorator`` is called, it does the following: + + 1. If called with a single class as its only positional argument and no + additional keyword arguments, it attaches the mark to the class so it + gets applied automatically to all test cases found in that class. + + 2. If called with a single function as its only positional argument and + no additional keyword arguments, it attaches the mark to the function, + containing all the arguments already stored internally in the + ``MarkDecorator``. + + 3. When called in any other case, it returns a new ``MarkDecorator`` + instance with the original ``MarkDecorator``'s content updated with + the arguments passed to this call. + + Note: The rules above prevent a ``MarkDecorator`` from storing only a + single function or class reference as its positional argument with no + additional keyword or positional arguments. You can work around this by + using `with_args()`. 
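+
+    For example (an illustrative sketch; ``my_callable`` is a hypothetical
+    user function)::
+
+        pytest.mark.NAME(my_callable)            # decorates my_callable (case 2)
+        pytest.mark.NAME.with_args(my_callable)  # stores it as a mark argument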
+ """ + + mark: Mark + + def __init__(self, mark: Mark, *, _ispytest: bool = False) -> None: + """:meta private:""" + check_ispytest(_ispytest) + self.mark = mark + + @property + def name(self) -> str: + """Alias for mark.name.""" + return self.mark.name + + @property + def args(self) -> Tuple[Any, ...]: + """Alias for mark.args.""" + return self.mark.args + + @property + def kwargs(self) -> Mapping[str, Any]: + """Alias for mark.kwargs.""" + return self.mark.kwargs + + @property + def markname(self) -> str: + """:meta private:""" + return self.name # for backward-compat (2.4.1 had this attr) + + def with_args(self, *args: object, **kwargs: object) -> "MarkDecorator": + """Return a MarkDecorator with extra arguments added. + + Unlike calling the MarkDecorator, with_args() can be used even + if the sole argument is a callable/class. + """ + mark = Mark(self.name, args, kwargs, _ispytest=True) + return MarkDecorator(self.mark.combined_with(mark), _ispytest=True) + + # Type ignored because the overloads overlap with an incompatible + # return type. Not much we can do about that. Thankfully mypy picks + # the first match so it works out even if we break the rules. + @overload + def __call__(self, arg: Markable) -> Markable: # type: ignore[misc] + pass + + @overload + def __call__(self, *args: object, **kwargs: object) -> "MarkDecorator": + pass + + def __call__(self, *args: object, **kwargs: object): + """Call the MarkDecorator.""" + if args and not kwargs: + func = args[0] + is_class = inspect.isclass(func) + if len(args) == 1 and (istestfunc(func) or is_class): + store_mark(func, self.mark) + return func + return self.with_args(*args, **kwargs) + + +def get_unpacked_marks(obj: object) -> Iterable[Mark]: + """Obtain the unpacked marks that are stored on an object.""" + mark_list = getattr(obj, "pytestmark", []) + if not isinstance(mark_list, list): + mark_list = [mark_list] + return normalize_mark_list(mark_list) + + +def normalize_mark_list( + mark_list: Iterable[Union[Mark, MarkDecorator]] +) -> Iterable[Mark]: + """ + Normalize an iterable of Mark or MarkDecorator objects into a list of marks + by retrieving the `mark` attribute on MarkDecorator instances. + + :param mark_list: marks to normalize + :returns: A new list of the extracted Mark objects + """ + for mark in mark_list: + mark_obj = getattr(mark, "mark", mark) + if not isinstance(mark_obj, Mark): + raise TypeError(f"got {repr(mark_obj)} instead of Mark") + yield mark_obj + + +def store_mark(obj, mark: Mark) -> None: + """Store a Mark on an object. + + This is used to implement the Mark declarations/decorators correctly. + """ + assert isinstance(mark, Mark), mark + # Always reassign name to avoid updating pytestmark in a reference that + # was only borrowed. + obj.pytestmark = [*get_unpacked_marks(obj), mark] + + +# Typing for builtin pytest marks. This is cheating; it gives builtin marks +# special privilege, and breaks modularity. But practicality beats purity... +if TYPE_CHECKING: + from _pytest.scope import _ScopeName + + class _SkipMarkDecorator(MarkDecorator): + @overload # type: ignore[override,misc] + def __call__(self, arg: Markable) -> Markable: + ... + + @overload + def __call__(self, reason: str = ...) -> "MarkDecorator": + ... + + class _SkipifMarkDecorator(MarkDecorator): + def __call__( # type: ignore[override] + self, + condition: Union[str, bool] = ..., + *conditions: Union[str, bool], + reason: str = ..., + ) -> MarkDecorator: + ... 
+ + class _XfailMarkDecorator(MarkDecorator): + @overload # type: ignore[override,misc] + def __call__(self, arg: Markable) -> Markable: + ... + + @overload + def __call__( + self, + condition: Union[str, bool] = ..., + *conditions: Union[str, bool], + reason: str = ..., + run: bool = ..., + raises: Union[Type[BaseException], Tuple[Type[BaseException], ...]] = ..., + strict: bool = ..., + ) -> MarkDecorator: + ... + + class _ParametrizeMarkDecorator(MarkDecorator): + def __call__( # type: ignore[override] + self, + argnames: Union[str, List[str], Tuple[str, ...]], + argvalues: Iterable[Union[ParameterSet, Sequence[object], object]], + *, + indirect: Union[bool, Sequence[str]] = ..., + ids: Optional[ + Union[ + Iterable[Union[None, str, float, int, bool]], + Callable[[Any], Optional[object]], + ] + ] = ..., + scope: Optional[_ScopeName] = ..., + ) -> MarkDecorator: + ... + + class _UsefixturesMarkDecorator(MarkDecorator): + def __call__(self, *fixtures: str) -> MarkDecorator: # type: ignore[override] + ... + + class _FilterwarningsMarkDecorator(MarkDecorator): + def __call__(self, *filters: str) -> MarkDecorator: # type: ignore[override] + ... + + +@final +class MarkGenerator: + """Factory for :class:`MarkDecorator` objects - exposed as + a ``pytest.mark`` singleton instance. + + Example:: + + import pytest + + @pytest.mark.slowtest + def test_function(): + pass + + applies a 'slowtest' :class:`Mark` on ``test_function``. + """ + + # See TYPE_CHECKING above. + if TYPE_CHECKING: + skip: _SkipMarkDecorator + skipif: _SkipifMarkDecorator + xfail: _XfailMarkDecorator + parametrize: _ParametrizeMarkDecorator + usefixtures: _UsefixturesMarkDecorator + filterwarnings: _FilterwarningsMarkDecorator + + def __init__(self, *, _ispytest: bool = False) -> None: + check_ispytest(_ispytest) + self._config: Optional[Config] = None + self._markers: Set[str] = set() + + def __getattr__(self, name: str) -> MarkDecorator: + """Generate a new :class:`MarkDecorator` with the given name.""" + if name[0] == "_": + raise AttributeError("Marker name must NOT start with underscore") + + if self._config is not None: + # We store a set of markers as a performance optimisation - if a mark + # name is in the set we definitely know it, but a mark may be known and + # not in the set. We therefore start by updating the set! + if name not in self._markers: + for line in self._config.getini("markers"): + # example lines: "skipif(condition): skip the given test if..." + # or "hypothesis: tests which use Hypothesis", so to get the + # marker name we split on both `:` and `(`. + marker = line.split(":")[0].split("(")[0].strip() + self._markers.add(marker) + + # If the name is not in the set of known marks after updating, + # then it really is time to issue a warning or an error. + if name not in self._markers: + if self._config.option.strict_markers or self._config.option.strict: + fail( + f"{name!r} not found in `markers` configuration option", + pytrace=False, + ) + + # Raise a specific error for common misspellings of "parametrize". + if name in ["parameterize", "parametrise", "parameterise"]: + __tracebackhide__ = True + fail(f"Unknown '{name}' mark, did you mean 'parametrize'?") + + warnings.warn( + "Unknown pytest.mark.%s - is this a typo? 
You can register " + "custom marks to avoid this warning - for details, see " + "https://docs.pytest.org/en/stable/how-to/mark.html" % name, + PytestUnknownMarkWarning, + 2, + ) + + return MarkDecorator(Mark(name, (), {}, _ispytest=True), _ispytest=True) + + +MARK_GEN = MarkGenerator(_ispytest=True) + + +@final +class NodeKeywords(MutableMapping[str, Any]): + __slots__ = ("node", "parent", "_markers") + + def __init__(self, node: "Node") -> None: + self.node = node + self.parent = node.parent + self._markers = {node.name: True} + + def __getitem__(self, key: str) -> Any: + try: + return self._markers[key] + except KeyError: + if self.parent is None: + raise + return self.parent.keywords[key] + + def __setitem__(self, key: str, value: Any) -> None: + self._markers[key] = value + + # Note: we could've avoided explicitly implementing some of the methods + # below and use the collections.abc fallback, but that would be slow. + + def __contains__(self, key: object) -> bool: + return ( + key in self._markers + or self.parent is not None + and key in self.parent.keywords + ) + + def update( # type: ignore[override] + self, + other: Union[Mapping[str, Any], Iterable[Tuple[str, Any]]] = (), + **kwds: Any, + ) -> None: + self._markers.update(other) + self._markers.update(kwds) + + def __delitem__(self, key: str) -> None: + raise ValueError("cannot delete key in keywords dict") + + def __iter__(self) -> Iterator[str]: + # Doesn't need to be fast. + yield from self._markers + if self.parent is not None: + for keyword in self.parent.keywords: + # self._marks and self.parent.keywords can have duplicates. + if keyword not in self._markers: + yield keyword + + def __len__(self) -> int: + # Doesn't need to be fast. + return sum(1 for keyword in self) + + def __repr__(self) -> str: + return f"" diff --git a/venv/lib/python3.10/site-packages/_pytest/monkeypatch.py b/venv/lib/python3.10/site-packages/_pytest/monkeypatch.py new file mode 100644 index 0000000..91d590f --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/monkeypatch.py @@ -0,0 +1,383 @@ +"""Monkeypatching and mocking functionality.""" +import os +import re +import sys +import warnings +from contextlib import contextmanager +from typing import Any +from typing import Generator +from typing import List +from typing import MutableMapping +from typing import Optional +from typing import overload +from typing import Tuple +from typing import TypeVar +from typing import Union + +from _pytest.compat import final +from _pytest.fixtures import fixture +from _pytest.warning_types import PytestWarning + +RE_IMPORT_ERROR_NAME = re.compile(r"^No module named (.*)$") + + +K = TypeVar("K") +V = TypeVar("V") + + +@fixture +def monkeypatch() -> Generator["MonkeyPatch", None, None]: + """A convenient fixture for monkey-patching. + + The fixture provides these methods to modify objects, dictionaries or + os.environ:: + + monkeypatch.setattr(obj, name, value, raising=True) + monkeypatch.delattr(obj, name, raising=True) + monkeypatch.setitem(mapping, name, value) + monkeypatch.delitem(obj, name, raising=True) + monkeypatch.setenv(name, value, prepend=None) + monkeypatch.delenv(name, raising=True) + monkeypatch.syspath_prepend(path) + monkeypatch.chdir(path) + + All modifications will be undone after the requesting test function or + fixture has finished. The ``raising`` parameter determines if a KeyError + or AttributeError will be raised if the set/deletion operation has no target. 
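+
+    A minimal usage sketch (``APP_ENV`` is a hypothetical environment
+    variable chosen only for illustration):
+
+    .. code-block:: python
+
+        import os
+
+
+        def test_config(monkeypatch):
+            monkeypatch.setenv("APP_ENV", "test")
+            monkeypatch.setattr("os.getcwd", lambda: "/")
+            assert os.environ["APP_ENV"] == "test"
+            assert os.getcwd() == "/"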
+    """
+    mpatch = MonkeyPatch()
+    yield mpatch
+    mpatch.undo()
+
+
+def resolve(name: str) -> object:
+    # Simplified from zope.dottedname.
+    parts = name.split(".")
+
+    used = parts.pop(0)
+    found: object = __import__(used)
+    for part in parts:
+        used += "." + part
+        try:
+            found = getattr(found, part)
+        except AttributeError:
+            pass
+        else:
+            continue
+        # We use explicit un-nesting of the handling block in order
+        # to avoid nested exceptions.
+        try:
+            __import__(used)
+        except ImportError as ex:
+            expected = str(ex).split()[-1]
+            if expected == used:
+                raise
+            else:
+                raise ImportError(f"import error in {used}: {ex}") from ex
+        found = annotated_getattr(found, part, used)
+    return found
+
+
+def annotated_getattr(obj: object, name: str, ann: str) -> object:
+    try:
+        obj = getattr(obj, name)
+    except AttributeError as e:
+        raise AttributeError(
+            "{!r} object at {} has no attribute {!r}".format(
+                type(obj).__name__, ann, name
+            )
+        ) from e
+    return obj
+
+
+def derive_importpath(import_path: str, raising: bool) -> Tuple[str, object]:
+    if not isinstance(import_path, str) or "." not in import_path:
+        raise TypeError(f"must be absolute import path string, not {import_path!r}")
+    module, attr = import_path.rsplit(".", 1)
+    target = resolve(module)
+    if raising:
+        annotated_getattr(target, attr, ann=module)
+    return attr, target
+
+
+class Notset:
+    def __repr__(self) -> str:
+        return "<notset>"
+
+
+notset = Notset()
+
+
+@final
+class MonkeyPatch:
+    """Helper to conveniently monkeypatch attributes/items/environment
+    variables/syspath.
+
+    Returned by the :fixture:`monkeypatch` fixture.
+
+    :versionchanged:: 6.2
+        Can now also be used directly as `pytest.MonkeyPatch()`, for when
+        the fixture is not available. In this case, use
+        :meth:`with MonkeyPatch.context() as mp: <context>` or remember to call
+        :meth:`undo` explicitly.
+    """
+
+    def __init__(self) -> None:
+        self._setattr: List[Tuple[object, str, object]] = []
+        self._setitem: List[Tuple[MutableMapping[Any, Any], object, object]] = []
+        self._cwd: Optional[str] = None
+        self._savesyspath: Optional[List[str]] = None
+
+    @classmethod
+    @contextmanager
+    def context(cls) -> Generator["MonkeyPatch", None, None]:
+        """Context manager that returns a new :class:`MonkeyPatch` object
+        which undoes any patching done inside the ``with`` block upon exit.
+
+        Example:
+
+        .. code-block:: python
+
+            import functools
+
+
+            def test_partial(monkeypatch):
+                with monkeypatch.context() as m:
+                    m.setattr(functools, "partial", 3)
+
+        Useful in situations where it is desired to undo some patches before the test ends,
+        such as mocking ``stdlib`` functions that might break pytest itself if mocked (for examples
+        of this see :issue:`3290`).
+        """
+        m = cls()
+        try:
+            yield m
+        finally:
+            m.undo()
+
+    @overload
+    def setattr(
+        self,
+        target: str,
+        name: object,
+        value: Notset = ...,
+        raising: bool = ...,
+    ) -> None:
+        ...
+
+    @overload
+    def setattr(
+        self,
+        target: object,
+        name: str,
+        value: object,
+        raising: bool = ...,
+    ) -> None:
+        ...
+
+    def setattr(
+        self,
+        target: Union[str, object],
+        name: Union[object, str],
+        value: object = notset,
+        raising: bool = True,
+    ) -> None:
+        """Set attribute value on target, memorizing the old value.
+
+        For convenience you can specify a string as ``target`` which
+        will be interpreted as a dotted import path, with the last part
+        being the attribute name. For example,
+        ``monkeypatch.setattr("os.getcwd", lambda: "/")``
+        would set the ``getcwd`` function of the ``os`` module.
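+
+        Both call forms are equivalent here (a minimal sketch)::
+
+            import os
+
+            monkeypatch.setattr(os, "getcwd", lambda: "/")
+            monkeypatch.setattr("os.getcwd", lambda: "/")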
+
+        Raises AttributeError if the attribute does not exist, unless
+        ``raising`` is set to False.
+        """
+        __tracebackhide__ = True
+        import inspect
+
+        if isinstance(value, Notset):
+            if not isinstance(target, str):
+                raise TypeError(
+                    "use setattr(target, name, value) or "
+                    "setattr(target, value) with target being a dotted "
+                    "import string"
+                )
+            value = name
+            name, target = derive_importpath(target, raising)
+        else:
+            if not isinstance(name, str):
+                raise TypeError(
+                    "use setattr(target, name, value) with name being a string or "
+                    "setattr(target, value) with target being a dotted "
+                    "import string"
+                )
+
+        oldval = getattr(target, name, notset)
+        if raising and oldval is notset:
+            raise AttributeError(f"{target!r} has no attribute {name!r}")
+
+        # avoid class descriptors like staticmethod/classmethod
+        if inspect.isclass(target):
+            oldval = target.__dict__.get(name, notset)
+        self._setattr.append((target, name, oldval))
+        setattr(target, name, value)
+
+    def delattr(
+        self,
+        target: Union[object, str],
+        name: Union[str, Notset] = notset,
+        raising: bool = True,
+    ) -> None:
+        """Delete attribute ``name`` from ``target``.
+
+        If no ``name`` is specified and ``target`` is a string,
+        it will be interpreted as a dotted import path with the
+        last part being the attribute name.
+
+        Raises AttributeError if the attribute does not exist, unless
+        ``raising`` is set to False.
+        """
+        __tracebackhide__ = True
+        import inspect
+
+        if isinstance(name, Notset):
+            if not isinstance(target, str):
+                raise TypeError(
+                    "use delattr(target, name) or "
+                    "delattr(target) with target being a dotted "
+                    "import string"
+                )
+            name, target = derive_importpath(target, raising)
+
+        if not hasattr(target, name):
+            if raising:
+                raise AttributeError(name)
+        else:
+            oldval = getattr(target, name, notset)
+            # Avoid class descriptors like staticmethod/classmethod.
+            if inspect.isclass(target):
+                oldval = target.__dict__.get(name, notset)
+            self._setattr.append((target, name, oldval))
+            delattr(target, name)
+
+    def setitem(self, dic: MutableMapping[K, V], name: K, value: V) -> None:
+        """Set dictionary entry ``name`` to value."""
+        self._setitem.append((dic, name, dic.get(name, notset)))
+        dic[name] = value
+
+    def delitem(self, dic: MutableMapping[K, V], name: K, raising: bool = True) -> None:
+        """Delete ``name`` from dict.
+
+        Raises ``KeyError`` if it doesn't exist, unless ``raising`` is set to
+        False.
+        """
+        if name not in dic:
+            if raising:
+                raise KeyError(name)
+        else:
+            self._setitem.append((dic, name, dic.get(name, notset)))
+            del dic[name]
+
+    def setenv(self, name: str, value: str, prepend: Optional[str] = None) -> None:
+        """Set environment variable ``name`` to ``value``.
+
+        If ``prepend`` is a character, read the current environment variable
+        value and prepend the ``value`` adjoined with the ``prepend``
+        character.
+        """
+        if not isinstance(value, str):
+            warnings.warn(  # type: ignore[unreachable]
+                PytestWarning(
+                    "Value of environment variable {name} type should be str, but got "
+                    "{value!r} (type: {type}); converted to str implicitly".format(
+                        name=name, value=value, type=type(value).__name__
+                    )
+                ),
+                stacklevel=2,
+            )
+            value = str(value)
+        if prepend and name in os.environ:
+            value = value + prepend + os.environ[name]
+        self.setitem(os.environ, name, value)
+
+    def delenv(self, name: str, raising: bool = True) -> None:
+        """Delete ``name`` from the environment.
+
+        Raises ``KeyError`` if it does not exist, unless ``raising`` is set to
+        False.
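+
+        For example (``MY_VAR`` is a hypothetical variable name)::
+
+            monkeypatch.delenv("MY_VAR", raising=False)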
+        """
+        environ: MutableMapping[str, str] = os.environ
+        self.delitem(environ, name, raising=raising)
+
+    def syspath_prepend(self, path) -> None:
+        """Prepend ``path`` to ``sys.path`` list of import locations."""
+
+        if self._savesyspath is None:
+            self._savesyspath = sys.path[:]
+        sys.path.insert(0, str(path))
+
+        # https://github.com/pypa/setuptools/blob/d8b901bc/docs/pkg_resources.txt#L162-L171
+        # this is only needed when pkg_resources was already loaded by the namespace package
+        if "pkg_resources" in sys.modules:
+            from pkg_resources import fixup_namespace_packages
+
+            fixup_namespace_packages(str(path))
+
+        # A call to syspath_prepend() usually means that the caller wants to
+        # import some dynamically created files, so we invalidate Python's
+        # import caches.
+        # This is especially important when any namespace package is in use,
+        # since then the mtime-based FileFinder cache (which already gets
+        # created in this case) is not invalidated when the new files are
+        # written quickly afterwards.
+        from importlib import invalidate_caches
+
+        invalidate_caches()
+
+    def chdir(self, path: Union[str, "os.PathLike[str]"]) -> None:
+        """Change the current working directory to the specified path.
+
+        Path can be a string or a path object.
+        """
+        if self._cwd is None:
+            self._cwd = os.getcwd()
+        os.chdir(path)
+
+    def undo(self) -> None:
+        """Undo previous changes.
+
+        This call consumes the undo stack. Calling it a second time has no
+        effect unless you do more monkeypatching after the undo call.
+
+        There is generally no need to call `undo()`, since it is
+        called automatically during tear-down.
+
+        Note that the same `monkeypatch` fixture is used across a
+        single test function invocation. If `monkeypatch` is used both by
+        the test function itself and one of the test fixtures,
+        calling `undo()` will undo all of the changes made in
+        both functions.
+        """
+        for obj, name, value in reversed(self._setattr):
+            if value is not notset:
+                setattr(obj, name, value)
+            else:
+                delattr(obj, name)
+        self._setattr[:] = []
+        for dictionary, key, value in reversed(self._setitem):
+            if value is notset:
+                try:
+                    del dictionary[key]
+                except KeyError:
+                    pass  # Was already deleted, so we have the desired state.
+ else: + dictionary[key] = value + self._setitem[:] = [] + if self._savesyspath is not None: + sys.path[:] = self._savesyspath + self._savesyspath = None + + if self._cwd is not None: + os.chdir(self._cwd) + self._cwd = None diff --git a/venv/lib/python3.10/site-packages/_pytest/nodes.py b/venv/lib/python3.10/site-packages/_pytest/nodes.py new file mode 100644 index 0000000..1a16804 --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/nodes.py @@ -0,0 +1,762 @@ +import os +import warnings +from inspect import signature +from pathlib import Path +from typing import Any +from typing import Callable +from typing import cast +from typing import Iterable +from typing import Iterator +from typing import List +from typing import MutableMapping +from typing import Optional +from typing import overload +from typing import Set +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +import _pytest._code +from _pytest._code import getfslineno +from _pytest._code.code import ExceptionInfo +from _pytest._code.code import TerminalRepr +from _pytest.compat import cached_property +from _pytest.compat import LEGACY_PATH +from _pytest.config import Config +from _pytest.config import ConftestImportFailure +from _pytest.deprecated import FSCOLLECTOR_GETHOOKPROXY_ISINITPATH +from _pytest.deprecated import NODE_CTOR_FSPATH_ARG +from _pytest.mark.structures import Mark +from _pytest.mark.structures import MarkDecorator +from _pytest.mark.structures import NodeKeywords +from _pytest.outcomes import fail +from _pytest.pathlib import absolutepath +from _pytest.pathlib import commonpath +from _pytest.stash import Stash +from _pytest.warning_types import PytestWarning + +if TYPE_CHECKING: + # Imported here due to circular import. + from _pytest.main import Session + from _pytest._code.code import _TracebackStyle + + +SEP = "/" + +tracebackcutdir = Path(_pytest.__file__).parent + + +def iterparentnodeids(nodeid: str) -> Iterator[str]: + """Return the parent node IDs of a given node ID, inclusive. + + For the node ID + + "testing/code/test_excinfo.py::TestFormattedExcinfo::test_repr_source" + + the result would be + + "" + "testing" + "testing/code" + "testing/code/test_excinfo.py" + "testing/code/test_excinfo.py::TestFormattedExcinfo" + "testing/code/test_excinfo.py::TestFormattedExcinfo::test_repr_source" + + Note that / components are only considered until the first ::. + """ + pos = 0 + first_colons: Optional[int] = nodeid.find("::") + if first_colons == -1: + first_colons = None + # The root Session node - always present. + yield "" + # Eagerly consume SEP parts until first colons. + while True: + at = nodeid.find(SEP, pos, first_colons) + if at == -1: + break + if at > 0: + yield nodeid[:at] + pos = at + len(SEP) + # Eagerly consume :: parts. + while True: + at = nodeid.find("::", pos) + if at == -1: + break + if at > 0: + yield nodeid[:at] + pos = at + len("::") + # The node ID itself. 
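+    # (An empty nodeid was already yielded above as the root Session node.)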
+ if nodeid: + yield nodeid + + +def _check_path(path: Path, fspath: LEGACY_PATH) -> None: + if Path(fspath) != path: + raise ValueError( + f"Path({fspath!r}) != {path!r}\n" + "if both path and fspath are given they need to be equal" + ) + + +def _imply_path( + node_type: Type["Node"], + path: Optional[Path], + fspath: Optional[LEGACY_PATH], +) -> Path: + if fspath is not None: + warnings.warn( + NODE_CTOR_FSPATH_ARG.format( + node_type_name=node_type.__name__, + ), + stacklevel=6, + ) + if path is not None: + if fspath is not None: + _check_path(path, fspath) + return path + else: + assert fspath is not None + return Path(fspath) + + +_NodeType = TypeVar("_NodeType", bound="Node") + + +class NodeMeta(type): + def __call__(self, *k, **kw): + msg = ( + "Direct construction of {name} has been deprecated, please use {name}.from_parent.\n" + "See " + "https://docs.pytest.org/en/stable/deprecations.html#node-construction-changed-to-node-from-parent" + " for more details." + ).format(name=f"{self.__module__}.{self.__name__}") + fail(msg, pytrace=False) + + def _create(self, *k, **kw): + try: + return super().__call__(*k, **kw) + except TypeError: + sig = signature(getattr(self, "__init__")) + known_kw = {k: v for k, v in kw.items() if k in sig.parameters} + from .warning_types import PytestDeprecationWarning + + warnings.warn( + PytestDeprecationWarning( + f"{self} is not using a cooperative constructor and only takes {set(known_kw)}.\n" + "See https://docs.pytest.org/en/stable/deprecations.html" + "#constructors-of-custom-pytest-node-subclasses-should-take-kwargs " + "for more details." + ) + ) + + return super().__call__(*k, **known_kw) + + +class Node(metaclass=NodeMeta): + """Base class for Collector and Item, the components of the test + collection tree. + + Collector subclasses have children; Items are leaf nodes. + """ + + # Implemented in the legacypath plugin. + #: A ``LEGACY_PATH`` copy of the :attr:`path` attribute. Intended for usage + #: for methods not migrated to ``pathlib.Path`` yet, such as + #: :meth:`Item.reportinfo`. Will be deprecated in a future release, prefer + #: using :attr:`path` instead. + fspath: LEGACY_PATH + + # Use __slots__ to make attribute access faster. + # Note that __dict__ is still available. + __slots__ = ( + "name", + "parent", + "config", + "session", + "path", + "_nodeid", + "_store", + "__dict__", + ) + + def __init__( + self, + name: str, + parent: "Optional[Node]" = None, + config: Optional[Config] = None, + session: "Optional[Session]" = None, + fspath: Optional[LEGACY_PATH] = None, + path: Optional[Path] = None, + nodeid: Optional[str] = None, + ) -> None: + #: A unique name within the scope of the parent node. + self.name = name + + #: The parent collector node. + self.parent = parent + + if config: + #: The pytest config object. + self.config: Config = config + else: + if not parent: + raise TypeError("config or parent must be provided") + self.config = parent.config + + if session: + #: The pytest session this node is part of. + self.session = session + else: + if not parent: + raise TypeError("session or parent must be provided") + self.session = parent.session + + if path is None and fspath is None: + path = getattr(parent, "path", None) + #: Filesystem path where this node was collected from (can be None). + self.path: Path = _imply_path(type(self), path, fspath=fspath) + + # The explicit annotation is to avoid publicly exposing NodeKeywords. + #: Keywords/markers collected from all scopes. 
+ self.keywords: MutableMapping[str, Any] = NodeKeywords(self) + + #: The marker objects belonging to this node. + self.own_markers: List[Mark] = [] + + #: Allow adding of extra keywords to use for matching. + self.extra_keyword_matches: Set[str] = set() + + if nodeid is not None: + assert "::()" not in nodeid + self._nodeid = nodeid + else: + if not self.parent: + raise TypeError("nodeid or parent must be provided") + self._nodeid = self.parent.nodeid + "::" + self.name + + #: A place where plugins can store information on the node for their + #: own use. + #: + #: :type: Stash + self.stash = Stash() + # Deprecated alias. Was never public. Can be removed in a few releases. + self._store = self.stash + + @classmethod + def from_parent(cls, parent: "Node", **kw): + """Public constructor for Nodes. + + This indirection got introduced in order to enable removing + the fragile logic from the node constructors. + + Subclasses can use ``super().from_parent(...)`` when overriding the + construction. + + :param parent: The parent node of this Node. + """ + if "config" in kw: + raise TypeError("config is not a valid argument for from_parent") + if "session" in kw: + raise TypeError("session is not a valid argument for from_parent") + return cls._create(parent=parent, **kw) + + @property + def ihook(self): + """fspath-sensitive hook proxy used to call pytest hooks.""" + return self.session.gethookproxy(self.path) + + def __repr__(self) -> str: + return "<{} {}>".format(self.__class__.__name__, getattr(self, "name", None)) + + def warn(self, warning: Warning) -> None: + """Issue a warning for this Node. + + Warnings will be displayed after the test session, unless explicitly suppressed. + + :param Warning warning: + The warning instance to issue. + + :raises ValueError: If ``warning`` instance is not a subclass of Warning. + + Example usage: + + .. code-block:: python + + node.warn(PytestWarning("some message")) + node.warn(UserWarning("some message")) + + .. versionchanged:: 6.2 + Any subclass of :class:`Warning` is now accepted, rather than only + :class:`PytestWarning ` subclasses. + """ + # enforce type checks here to avoid getting a generic type error later otherwise. + if not isinstance(warning, Warning): + raise ValueError( + "warning must be an instance of Warning or subclass, got {!r}".format( + warning + ) + ) + path, lineno = get_fslocation_from_item(self) + assert lineno is not None + warnings.warn_explicit( + warning, + category=None, + filename=str(path), + lineno=lineno + 1, + ) + + # Methods for ordering nodes. + + @property + def nodeid(self) -> str: + """A ::-separated string denoting its collection tree address.""" + return self._nodeid + + def __hash__(self) -> int: + return hash(self._nodeid) + + def setup(self) -> None: + pass + + def teardown(self) -> None: + pass + + def listchain(self) -> List["Node"]: + """Return list of all parent collectors up to self, starting from + the root of collection tree.""" + chain = [] + item: Optional[Node] = self + while item is not None: + chain.append(item) + item = item.parent + chain.reverse() + return chain + + def add_marker( + self, marker: Union[str, MarkDecorator], append: bool = True + ) -> None: + """Dynamically add a marker object to the node. + + :param append: + Whether to append the marker, or prepend it. 
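+
+        For example (``slow`` is an arbitrary marker name used for
+        illustration)::
+
+            item.add_marker("slow")
+            item.add_marker(pytest.mark.skip, append=False)  # prepend instead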
+ """ + from _pytest.mark import MARK_GEN + + if isinstance(marker, MarkDecorator): + marker_ = marker + elif isinstance(marker, str): + marker_ = getattr(MARK_GEN, marker) + else: + raise ValueError("is not a string or pytest.mark.* Marker") + self.keywords[marker_.name] = marker_ + if append: + self.own_markers.append(marker_.mark) + else: + self.own_markers.insert(0, marker_.mark) + + def iter_markers(self, name: Optional[str] = None) -> Iterator[Mark]: + """Iterate over all markers of the node. + + :param name: If given, filter the results by the name attribute. + """ + return (x[1] for x in self.iter_markers_with_node(name=name)) + + def iter_markers_with_node( + self, name: Optional[str] = None + ) -> Iterator[Tuple["Node", Mark]]: + """Iterate over all markers of the node. + + :param name: If given, filter the results by the name attribute. + :returns: An iterator of (node, mark) tuples. + """ + for node in reversed(self.listchain()): + for mark in node.own_markers: + if name is None or getattr(mark, "name", None) == name: + yield node, mark + + @overload + def get_closest_marker(self, name: str) -> Optional[Mark]: + ... + + @overload + def get_closest_marker(self, name: str, default: Mark) -> Mark: + ... + + def get_closest_marker( + self, name: str, default: Optional[Mark] = None + ) -> Optional[Mark]: + """Return the first marker matching the name, from closest (for + example function) to farther level (for example module level). + + :param default: Fallback return value if no marker was found. + :param name: Name to filter by. + """ + return next(self.iter_markers(name=name), default) + + def listextrakeywords(self) -> Set[str]: + """Return a set of all extra keywords in self and any parents.""" + extra_keywords: Set[str] = set() + for item in self.listchain(): + extra_keywords.update(item.extra_keyword_matches) + return extra_keywords + + def listnames(self) -> List[str]: + return [x.name for x in self.listchain()] + + def addfinalizer(self, fin: Callable[[], object]) -> None: + """Register a function to be called when this node is finalized. + + This method can only be called when this node is active + in a setup chain, for example during self.setup(). + """ + self.session._setupstate.addfinalizer(fin, self) + + def getparent(self, cls: Type[_NodeType]) -> Optional[_NodeType]: + """Get the next parent node (including self) which is an instance of + the given class.""" + current: Optional[Node] = self + while current and not isinstance(current, cls): + current = current.parent + assert current is None or isinstance(current, cls) + return current + + def _prunetraceback(self, excinfo: ExceptionInfo[BaseException]) -> None: + pass + + def _repr_failure_py( + self, + excinfo: ExceptionInfo[BaseException], + style: "Optional[_TracebackStyle]" = None, + ) -> TerminalRepr: + from _pytest.fixtures import FixtureLookupError + + if isinstance(excinfo.value, ConftestImportFailure): + excinfo = ExceptionInfo.from_exc_info(excinfo.value.excinfo) + if isinstance(excinfo.value, fail.Exception): + if not excinfo.value.pytrace: + style = "value" + if isinstance(excinfo.value, FixtureLookupError): + return excinfo.value.formatrepr() + if self.config.getoption("fulltrace", False): + style = "long" + else: + tb = _pytest._code.Traceback([excinfo.traceback[-1]]) + self._prunetraceback(excinfo) + if len(excinfo.traceback) == 0: + excinfo.traceback = tb + if style == "auto": + style = "long" + # XXX should excinfo.getrepr record all data and toterminal() process it? 
+ if style is None: + if self.config.getoption("tbstyle", "auto") == "short": + style = "short" + else: + style = "long" + + if self.config.getoption("verbose", 0) > 1: + truncate_locals = False + else: + truncate_locals = True + + # excinfo.getrepr() formats paths relative to the CWD if `abspath` is False. + # It is possible for a fixture/test to change the CWD while this code runs, which + # would then result in the user seeing confusing paths in the failure message. + # To fix this, if the CWD changed, always display the full absolute path. + # It will be better to just always display paths relative to invocation_dir, but + # this requires a lot of plumbing (#6428). + try: + abspath = Path(os.getcwd()) != self.config.invocation_params.dir + except OSError: + abspath = True + + return excinfo.getrepr( + funcargs=True, + abspath=abspath, + showlocals=self.config.getoption("showlocals", False), + style=style, + tbfilter=False, # pruned already, or in --fulltrace mode. + truncate_locals=truncate_locals, + ) + + def repr_failure( + self, + excinfo: ExceptionInfo[BaseException], + style: "Optional[_TracebackStyle]" = None, + ) -> Union[str, TerminalRepr]: + """Return a representation of a collection or test failure. + + .. seealso:: :ref:`non-python tests` + + :param excinfo: Exception information for the failure. + """ + return self._repr_failure_py(excinfo, style) + + +def get_fslocation_from_item(node: "Node") -> Tuple[Union[str, Path], Optional[int]]: + """Try to extract the actual location from a node, depending on available attributes: + + * "location": a pair (path, lineno) + * "obj": a Python object that the node wraps. + * "fspath": just a path + + :rtype: A tuple of (str|Path, int) with filename and line number. + """ + # See Item.location. + location: Optional[Tuple[str, Optional[int], str]] = getattr(node, "location", None) + if location is not None: + return location[:2] + obj = getattr(node, "obj", None) + if obj is not None: + return getfslineno(obj) + return getattr(node, "fspath", "unknown location"), -1 + + +class Collector(Node): + """Collector instances create children through collect() and thus + iteratively build a tree.""" + + class CollectError(Exception): + """An error during collection, contains a custom message.""" + + def collect(self) -> Iterable[Union["Item", "Collector"]]: + """Return a list of children (items and collectors) for this + collection node.""" + raise NotImplementedError("abstract") + + # TODO: This omits the style= parameter which breaks Liskov Substitution. + def repr_failure( # type: ignore[override] + self, excinfo: ExceptionInfo[BaseException] + ) -> Union[str, TerminalRepr]: + """Return a representation of a collection failure. + + :param excinfo: Exception information for the failure. + """ + if isinstance(excinfo.value, self.CollectError) and not self.config.getoption( + "fulltrace", False + ): + exc = excinfo.value + return str(exc.args[0]) + + # Respect explicit tbstyle option, but default to "short" + # (_repr_failure_py uses "long" with "fulltrace" option always). 
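+ # For example, an explicit "--tb=long" on the command line is kept as-is here.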
+ tbstyle = self.config.getoption("tbstyle", "auto") + if tbstyle == "auto": + tbstyle = "short" + + return self._repr_failure_py(excinfo, style=tbstyle) + + def _prunetraceback(self, excinfo: ExceptionInfo[BaseException]) -> None: + if hasattr(self, "path"): + traceback = excinfo.traceback + ntraceback = traceback.cut(path=self.path) + if ntraceback == traceback: + ntraceback = ntraceback.cut(excludepath=tracebackcutdir) + excinfo.traceback = ntraceback.filter() + + +def _check_initialpaths_for_relpath(session: "Session", path: Path) -> Optional[str]: + for initial_path in session._initialpaths: + if commonpath(path, initial_path) == initial_path: + rel = str(path.relative_to(initial_path)) + return "" if rel == "." else rel + return None + + +class FSCollector(Collector): + def __init__( + self, + fspath: Optional[LEGACY_PATH] = None, + path_or_parent: Optional[Union[Path, Node]] = None, + path: Optional[Path] = None, + name: Optional[str] = None, + parent: Optional[Node] = None, + config: Optional[Config] = None, + session: Optional["Session"] = None, + nodeid: Optional[str] = None, + ) -> None: + if path_or_parent: + if isinstance(path_or_parent, Node): + assert parent is None + parent = cast(FSCollector, path_or_parent) + elif isinstance(path_or_parent, Path): + assert path is None + path = path_or_parent + + path = _imply_path(type(self), path, fspath=fspath) + if name is None: + name = path.name + if parent is not None and parent.path != path: + try: + rel = path.relative_to(parent.path) + except ValueError: + pass + else: + name = str(rel) + name = name.replace(os.sep, SEP) + self.path = path + + if session is None: + assert parent is not None + session = parent.session + + if nodeid is None: + try: + nodeid = str(self.path.relative_to(session.config.rootpath)) + except ValueError: + nodeid = _check_initialpaths_for_relpath(session, path) + + if nodeid and os.sep != SEP: + nodeid = nodeid.replace(os.sep, SEP) + + super().__init__( + name=name, + parent=parent, + config=config, + session=session, + nodeid=nodeid, + path=path, + ) + + @classmethod + def from_parent( + cls, + parent, + *, + fspath: Optional[LEGACY_PATH] = None, + path: Optional[Path] = None, + **kw, + ): + """The public constructor.""" + return super().from_parent(parent=parent, fspath=fspath, path=path, **kw) + + def gethookproxy(self, fspath: "os.PathLike[str]"): + warnings.warn(FSCOLLECTOR_GETHOOKPROXY_ISINITPATH, stacklevel=2) + return self.session.gethookproxy(fspath) + + def isinitpath(self, path: Union[str, "os.PathLike[str]"]) -> bool: + warnings.warn(FSCOLLECTOR_GETHOOKPROXY_ISINITPATH, stacklevel=2) + return self.session.isinitpath(path) + + +class File(FSCollector): + """Base class for collecting tests from a file. + + :ref:`non-python tests`. + """ + + +class Item(Node): + """A basic test invocation item. + + Note that for a single function there might be multiple test invocation items. + """ + + nextitem = None + + def __init__( + self, + name, + parent=None, + config: Optional[Config] = None, + session: Optional["Session"] = None, + nodeid: Optional[str] = None, + **kw, + ) -> None: + # The first two arguments are intentionally passed positionally, + # to keep plugins who define a node type which inherits from + # (pytest.Item, pytest.File) working (see issue #8435). + # They can be made kwargs when the deprecation above is done. 
+ super().__init__( + name, + parent, + config=config, + session=session, + nodeid=nodeid, + **kw, + ) + self._report_sections: List[Tuple[str, str, str]] = [] + + #: A list of tuples (name, value) that holds user defined properties + #: for this test. + self.user_properties: List[Tuple[str, object]] = [] + + self._check_item_and_collector_diamond_inheritance() + + def _check_item_and_collector_diamond_inheritance(self) -> None: + """ + Check if the current type inherits from both File and Collector + at the same time, emitting a warning accordingly (#8447). + """ + cls = type(self) + + # We inject an attribute in the type to avoid issuing this warning + # for the same class more than once, which is not helpful. + # It is a hack, but was deemed acceptable in order to avoid + # flooding the user in the common case. + attr_name = "_pytest_diamond_inheritance_warning_shown" + if getattr(cls, attr_name, False): + return + setattr(cls, attr_name, True) + + problems = ", ".join( + base.__name__ for base in cls.__bases__ if issubclass(base, Collector) + ) + if problems: + warnings.warn( + f"{cls.__name__} is an Item subclass and should not be a collector, " + f"however its bases {problems} are collectors.\n" + "Please split the Collectors and the Item into separate node types.\n" + "Pytest Doc example: https://docs.pytest.org/en/latest/example/nonpython.html\n" + "example pull request on a plugin: https://github.com/asmeurer/pytest-flakes/pull/40/", + PytestWarning, + ) + + def runtest(self) -> None: + """Run the test case for this item. + + Must be implemented by subclasses. + + .. seealso:: :ref:`non-python tests` + """ + raise NotImplementedError("runtest must be implemented by Item subclass") + + def add_report_section(self, when: str, key: str, content: str) -> None: + """Add a new report section, similar to what's done internally to add + stdout and stderr captured output:: + + item.add_report_section("call", "stdout", "report section contents") + + :param str when: + One of the possible capture states, ``"setup"``, ``"call"``, ``"teardown"``. + :param str key: + Name of the section, can be customized at will. Pytest uses ``"stdout"`` and + ``"stderr"`` internally. + :param str content: + The full contents as a string. + """ + if content: + self._report_sections.append((when, key, content)) + + def reportinfo(self) -> Tuple[Union["os.PathLike[str]", str], Optional[int], str]: + """Get location information for this item for test reports. + + Returns a tuple with three elements: + + - The path of the test (default ``self.path``) + - The line number of the test (default ``None``) + - A name of the test to be shown (default ``""``) + + .. 
seealso:: :ref:`non-python tests` + """ + return self.path, None, "" + + @cached_property + def location(self) -> Tuple[str, Optional[int], str]: + location = self.reportinfo() + path = absolutepath(os.fspath(location[0])) + relfspath = self.session._node_location_to_relpath(path) + assert type(location[2]) is str + return (relfspath, location[1], location[2]) diff --git a/venv/lib/python3.10/site-packages/_pytest/nose.py b/venv/lib/python3.10/site-packages/_pytest/nose.py new file mode 100644 index 0000000..b0699d2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/nose.py @@ -0,0 +1,42 @@ +"""Run testsuites written for nose.""" +from _pytest.config import hookimpl +from _pytest.fixtures import getfixturemarker +from _pytest.nodes import Item +from _pytest.python import Function +from _pytest.unittest import TestCaseFunction + + +@hookimpl(trylast=True) +def pytest_runtest_setup(item: Item) -> None: + if not isinstance(item, Function): + return + # Don't do nose style setup/teardown on direct unittest style classes. + if isinstance(item, TestCaseFunction): + return + + # Capture the narrowed type of item for the teardown closure, + # see https://github.com/python/mypy/issues/2608 + func = item + + call_optional(func.obj, "setup") + func.addfinalizer(lambda: call_optional(func.obj, "teardown")) + + # NOTE: Module- and class-level fixtures are handled in python.py + # with `pluginmanager.has_plugin("nose")` checks. + # It would have been nicer to implement them outside of core, but + # it's not straightforward. + + +def call_optional(obj: object, name: str) -> bool: + method = getattr(obj, name, None) + if method is None: + return False + is_fixture = getfixturemarker(method) is not None + if is_fixture: + return False + if not callable(method): + return False + # If there are any problems allow the exception to raise rather than + # silently ignoring it. + method() + return True diff --git a/venv/lib/python3.10/site-packages/_pytest/outcomes.py b/venv/lib/python3.10/site-packages/_pytest/outcomes.py new file mode 100644 index 0000000..25206fe --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/outcomes.py @@ -0,0 +1,307 @@ +"""Exception classes and constants handling test outcomes as well as +functions creating them.""" +import sys +import warnings +from typing import Any +from typing import Callable +from typing import cast +from typing import Optional +from typing import Type +from typing import TypeVar + +from _pytest.deprecated import KEYWORD_MSG_ARG + +TYPE_CHECKING = False # Avoid circular import through compat. + +if TYPE_CHECKING: + from typing import NoReturn + from typing_extensions import Protocol +else: + # typing.Protocol is only available starting from Python 3.8. It is also + # available from typing_extensions, but we don't want a runtime dependency + # on that. So use a dummy runtime implementation. + from typing import Generic + + Protocol = Generic + + +class OutcomeException(BaseException): + """OutcomeException and its subclass instances indicate and contain info + about test and collection outcomes.""" + + def __init__(self, msg: Optional[str] = None, pytrace: bool = True) -> None: + if msg is not None and not isinstance(msg, str): + error_msg = ( # type: ignore[unreachable] + "{} expected string as 'msg' parameter, got '{}' instead.\n" + "Perhaps you meant to use a mark?" 
+ ) + raise TypeError(error_msg.format(type(self).__name__, type(msg).__name__)) + super().__init__(msg) + self.msg = msg + self.pytrace = pytrace + + def __repr__(self) -> str: + if self.msg is not None: + return self.msg + return f"<{self.__class__.__name__} instance>" + + __str__ = __repr__ + + +TEST_OUTCOME = (OutcomeException, Exception) + + +class Skipped(OutcomeException): + # XXX hackish: on 3k we fake to live in the builtins + # in order to have Skipped exception printing shorter/nicer + __module__ = "builtins" + + def __init__( + self, + msg: Optional[str] = None, + pytrace: bool = True, + allow_module_level: bool = False, + *, + _use_item_location: bool = False, + ) -> None: + super().__init__(msg=msg, pytrace=pytrace) + self.allow_module_level = allow_module_level + # If true, the skip location is reported as the item's location, + # instead of the place that raises the exception/calls skip(). + self._use_item_location = _use_item_location + + +class Failed(OutcomeException): + """Raised from an explicit call to pytest.fail().""" + + __module__ = "builtins" + + +class Exit(Exception): + """Raised for immediate program exits (no tracebacks/summaries).""" + + def __init__( + self, msg: str = "unknown reason", returncode: Optional[int] = None + ) -> None: + self.msg = msg + self.returncode = returncode + super().__init__(msg) + + +# Elaborate hack to work around https://github.com/python/mypy/issues/2087. +# Ideally would just be `exit.Exception = Exit` etc. + +_F = TypeVar("_F", bound=Callable[..., object]) +_ET = TypeVar("_ET", bound=Type[BaseException]) + + +class _WithException(Protocol[_F, _ET]): + Exception: _ET + __call__: _F + + +def _with_exception(exception_type: _ET) -> Callable[[_F], _WithException[_F, _ET]]: + def decorate(func: _F) -> _WithException[_F, _ET]: + func_with_exception = cast(_WithException[_F, _ET], func) + func_with_exception.Exception = exception_type + return func_with_exception + + return decorate + + +# Exposed helper methods. + + +@_with_exception(Exit) +def exit( + reason: str = "", returncode: Optional[int] = None, *, msg: Optional[str] = None +) -> "NoReturn": + """Exit testing process. + + :param reason: + The message to show as the reason for exiting pytest. reason has a default value + only because `msg` is deprecated. + + :param returncode: + Return code to be used when exiting pytest. + + :param msg: + Same as ``reason``, but deprecated. Will be removed in a future version, use ``reason`` instead. + """ + __tracebackhide__ = True + from _pytest.config import UsageError + + if reason and msg: + raise UsageError( + "cannot pass reason and msg to exit(), `msg` is deprecated, use `reason`." + ) + if not reason: + if msg is None: + raise UsageError("exit() requires a reason argument") + warnings.warn(KEYWORD_MSG_ARG.format(func="exit"), stacklevel=2) + reason = msg + raise Exit(reason, returncode) + + +@_with_exception(Skipped) +def skip( + reason: str = "", *, allow_module_level: bool = False, msg: Optional[str] = None +) -> "NoReturn": + """Skip an executing test with the given message. + + This function should be called only during testing (setup, call or teardown) or + during collection by using the ``allow_module_level`` flag. This function can + be called in doctests as well. + + :param reason: + The message to show the user as reason for the skip. + + :param allow_module_level: + Allows this function to be called at module level, skipping the rest + of the module. Defaults to False. + + :param msg: + Same as ``reason``, but deprecated. 
Will be removed in a future version, use ``reason`` instead. + + .. note:: + It is better to use the :ref:`pytest.mark.skipif ref` marker when + possible to declare a test to be skipped under certain conditions + like mismatching platforms or dependencies. + Similarly, use the ``# doctest: +SKIP`` directive (see :py:data:`doctest.SKIP`) + to skip a doctest statically. + """ + __tracebackhide__ = True + reason = _resolve_msg_to_reason("skip", reason, msg) + raise Skipped(msg=reason, allow_module_level=allow_module_level) + + +@_with_exception(Failed) +def fail( + reason: str = "", pytrace: bool = True, msg: Optional[str] = None +) -> "NoReturn": + """Explicitly fail an executing test with the given message. + + :param reason: + The message to show the user as reason for the failure. + + :param pytrace: + If False, msg represents the full failure information and no + python traceback will be reported. + + :param msg: + Same as ``reason``, but deprecated. Will be removed in a future version, use ``reason`` instead. + """ + __tracebackhide__ = True + reason = _resolve_msg_to_reason("fail", reason, msg) + raise Failed(msg=reason, pytrace=pytrace) + + +def _resolve_msg_to_reason( + func_name: str, reason: str, msg: Optional[str] = None +) -> str: + """ + Handles converting the deprecated msg parameter if provided into + reason, raising a deprecation warning. This function will be removed + when the optional msg argument is removed from here in future. + + :param str func_name: + The name of the offending function, this is formatted into the deprecation message. + + :param str reason: + The reason= passed into either pytest.fail() or pytest.skip() + + :param str msg: + The msg= passed into either pytest.fail() or pytest.skip(). This will + be converted into reason if it is provided to allow pytest.skip(msg=) or + pytest.fail(msg=) to continue working in the interim period. + + :returns: + The value to use as reason. + + """ + __tracebackhide__ = True + if msg is not None: + + if reason: + from pytest import UsageError + + raise UsageError( + f"Passing both ``reason`` and ``msg`` to pytest.{func_name}(...) is not permitted." + ) + warnings.warn(KEYWORD_MSG_ARG.format(func=func_name), stacklevel=3) + reason = msg + return reason + + +class XFailed(Failed): + """Raised from an explicit call to pytest.xfail().""" + + +@_with_exception(XFailed) +def xfail(reason: str = "") -> "NoReturn": + """Imperatively xfail an executing test or setup function with the given reason. + + This function should be called only during testing (setup, call or teardown). + + .. note:: + It is better to use the :ref:`pytest.mark.xfail ref` marker when + possible to declare a test to be xfailed under certain conditions + like known bugs or missing features. + """ + __tracebackhide__ = True + raise XFailed(reason) + + +def importorskip( + modname: str, minversion: Optional[str] = None, reason: Optional[str] = None +) -> Any: + """Import and return the requested module ``modname``, or skip the + current test if the module cannot be imported. + + :param str modname: + The name of the module to import. + :param str minversion: + If given, the imported module's ``__version__`` attribute must be at + least this minimal version, otherwise the test is still skipped. + :param str reason: + If given, this reason is shown as the message when the module cannot + be imported. + + :returns: + The imported module. This should be assigned to its canonical name. 
+ + Example:: + + docutils = pytest.importorskip("docutils") + """ + import warnings + + __tracebackhide__ = True + compile(modname, "", "eval") # to catch syntaxerrors + + with warnings.catch_warnings(): + # Make sure to ignore ImportWarnings that might happen because + # of existing directories with the same name we're trying to + # import but without a __init__.py file. + warnings.simplefilter("ignore") + try: + __import__(modname) + except ImportError as exc: + if reason is None: + reason = f"could not import {modname!r}: {exc}" + raise Skipped(reason, allow_module_level=True) from None + mod = sys.modules[modname] + if minversion is None: + return mod + verattr = getattr(mod, "__version__", None) + if minversion is not None: + # Imported lazily to improve start-up time. + from packaging.version import Version + + if verattr is None or Version(verattr) < Version(minversion): + raise Skipped( + "module %r has __version__ %r, required is: %r" + % (modname, verattr, minversion), + allow_module_level=True, + ) + return mod diff --git a/venv/lib/python3.10/site-packages/_pytest/pastebin.py b/venv/lib/python3.10/site-packages/_pytest/pastebin.py new file mode 100644 index 0000000..385b302 --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/pastebin.py @@ -0,0 +1,110 @@ +"""Submit failure or test session information to a pastebin service.""" +import tempfile +from io import StringIO +from typing import IO +from typing import Union + +import pytest +from _pytest.config import Config +from _pytest.config import create_terminal_writer +from _pytest.config.argparsing import Parser +from _pytest.stash import StashKey +from _pytest.terminal import TerminalReporter + + +pastebinfile_key = StashKey[IO[bytes]]() + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("terminal reporting") + group._addoption( + "--pastebin", + metavar="mode", + action="store", + dest="pastebin", + default=None, + choices=["failed", "all"], + help="send failed|all info to bpaste.net pastebin service.", + ) + + +@pytest.hookimpl(trylast=True) +def pytest_configure(config: Config) -> None: + if config.option.pastebin == "all": + tr = config.pluginmanager.getplugin("terminalreporter") + # If no terminal reporter plugin is present, nothing we can do here; + # this can happen when this function executes in a worker node + # when using pytest-xdist, for example. + if tr is not None: + # pastebin file will be UTF-8 encoded binary file. + config.stash[pastebinfile_key] = tempfile.TemporaryFile("w+b") + oldwrite = tr._tw.write + + def tee_write(s, **kwargs): + oldwrite(s, **kwargs) + if isinstance(s, str): + s = s.encode("utf-8") + config.stash[pastebinfile_key].write(s) + + tr._tw.write = tee_write + + +def pytest_unconfigure(config: Config) -> None: + if pastebinfile_key in config.stash: + pastebinfile = config.stash[pastebinfile_key] + # Get terminal contents and delete file. + pastebinfile.seek(0) + sessionlog = pastebinfile.read() + pastebinfile.close() + del config.stash[pastebinfile_key] + # Undo our patching in the terminal reporter. + tr = config.pluginmanager.getplugin("terminalreporter") + del tr._tw.__dict__["write"] + # Write summary. + tr.write_sep("=", "Sending information to Paste Service") + pastebinurl = create_new_paste(sessionlog) + tr.write_line("pastebin session-log: %s\n" % pastebinurl) + + +def create_new_paste(contents: Union[str, bytes]) -> str: + """Create a new paste using the bpaste.net service. + + :contents: Paste contents string. 
+ :returns: URL to the pasted contents, or an error message. + """ + import re + from urllib.request import urlopen + from urllib.parse import urlencode + + params = {"code": contents, "lexer": "text", "expiry": "1week"} + url = "https://bpa.st" + try: + response: str = ( + urlopen(url, data=urlencode(params).encode("ascii")).read().decode("utf-8") + ) + except OSError as exc_info: # urllib errors + return "bad response: %s" % exc_info + m = re.search(r'href="/raw/(\w+)"', response) + if m: + return f"{url}/show/{m.group(1)}" + else: + return "bad response: invalid format ('" + response + "')" + + +def pytest_terminal_summary(terminalreporter: TerminalReporter) -> None: + if terminalreporter.config.option.pastebin != "failed": + return + if "failed" in terminalreporter.stats: + terminalreporter.write_sep("=", "Sending information to Paste Service") + for rep in terminalreporter.stats["failed"]: + try: + msg = rep.longrepr.reprtraceback.reprentries[-1].reprfileloc + except AttributeError: + msg = terminalreporter._getfailureheadline(rep) + file = StringIO() + tw = create_terminal_writer(terminalreporter.config, file) + rep.toterminal(tw) + s = file.getvalue() + assert len(s) + pastebinurl = create_new_paste(s) + terminalreporter.write_line(f"{msg} --> {pastebinurl}") diff --git a/venv/lib/python3.10/site-packages/_pytest/pathlib.py b/venv/lib/python3.10/site-packages/_pytest/pathlib.py new file mode 100644 index 0000000..c5a411b --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/pathlib.py @@ -0,0 +1,735 @@ +import atexit +import contextlib +import fnmatch +import importlib.util +import itertools +import os +import shutil +import sys +import uuid +import warnings +from enum import Enum +from errno import EBADF +from errno import ELOOP +from errno import ENOENT +from errno import ENOTDIR +from functools import partial +from os.path import expanduser +from os.path import expandvars +from os.path import isabs +from os.path import sep +from pathlib import Path +from pathlib import PurePath +from posixpath import sep as posix_sep +from types import ModuleType +from typing import Callable +from typing import Dict +from typing import Iterable +from typing import Iterator +from typing import Optional +from typing import Set +from typing import TypeVar +from typing import Union + +from _pytest.compat import assert_never +from _pytest.outcomes import skip +from _pytest.warning_types import PytestWarning + +LOCK_TIMEOUT = 60 * 60 * 24 * 3 + + +_AnyPurePath = TypeVar("_AnyPurePath", bound=PurePath) + +# The following function, variables and comments were +# copied from cpython 3.9 Lib/pathlib.py file. + +# EBADF - guard against macOS `stat` throwing EBADF +_IGNORED_ERRORS = (ENOENT, ENOTDIR, EBADF, ELOOP) + +_IGNORED_WINERRORS = ( + 21, # ERROR_NOT_READY - drive exists but is not accessible + 1921, # ERROR_CANT_RESOLVE_FILENAME - fix for broken symlink pointing to itself +) + + +def _ignore_error(exception): + return ( + getattr(exception, "errno", None) in _IGNORED_ERRORS + or getattr(exception, "winerror", None) in _IGNORED_WINERRORS + ) + + +def get_lock_path(path: _AnyPurePath) -> _AnyPurePath: + return path.joinpath(".lock") + + +def on_rm_rf_error(func, path: str, exc, *, start_path: Path) -> bool: + """Handle known read-only errors during rmtree. + + The returned value is used only by our own tests. + """ + exctype, excvalue = exc[:2] + + # Another process removed the file in the middle of the "rm_rf" (xdist for example). 
+ # More context: https://github.com/pytest-dev/pytest/issues/5974#issuecomment-543799018 + if isinstance(excvalue, FileNotFoundError): + return False + + if not isinstance(excvalue, PermissionError): + warnings.warn( + PytestWarning(f"(rm_rf) error removing {path}\n{exctype}: {excvalue}") + ) + return False + + if func not in (os.rmdir, os.remove, os.unlink): + if func not in (os.open,): + warnings.warn( + PytestWarning( + "(rm_rf) unknown function {} when removing {}:\n{}: {}".format( + func, path, exctype, excvalue + ) + ) + ) + return False + + # Chmod + retry. + import stat + + def chmod_rw(p: str) -> None: + mode = os.stat(p).st_mode + os.chmod(p, mode | stat.S_IRUSR | stat.S_IWUSR) + + # For files, we need to recursively go upwards in the directories to + # ensure they all are also writable. + p = Path(path) + if p.is_file(): + for parent in p.parents: + chmod_rw(str(parent)) + # Stop when we reach the original path passed to rm_rf. + if parent == start_path: + break + chmod_rw(str(path)) + + func(path) + return True + + +def ensure_extended_length_path(path: Path) -> Path: + """Get the extended-length version of a path (Windows). + + On Windows, by default, the maximum length of a path (MAX_PATH) is 260 + characters, and operations on paths longer than that fail. But it is possible + to overcome this by converting the path to "extended-length" form before + performing the operation: + https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file#maximum-path-length-limitation + + On Windows, this function returns the extended-length absolute version of path. + On other platforms it returns path unchanged. + """ + if sys.platform.startswith("win32"): + path = path.resolve() + path = Path(get_extended_length_path_str(str(path))) + return path + + +def get_extended_length_path_str(path: str) -> str: + """Convert a path to a Windows extended length path.""" + long_path_prefix = "\\\\?\\" + unc_long_path_prefix = "\\\\?\\UNC\\" + if path.startswith((long_path_prefix, unc_long_path_prefix)): + return path + # UNC + if path.startswith("\\\\"): + return unc_long_path_prefix + path[2:] + return long_path_prefix + path + + +def rm_rf(path: Path) -> None: + """Remove the path contents recursively, even if some elements + are read-only.""" + path = ensure_extended_length_path(path) + onerror = partial(on_rm_rf_error, start_path=path) + shutil.rmtree(str(path), onerror=onerror) + + +def find_prefixed(root: Path, prefix: str) -> Iterator[Path]: + """Find all elements in root that begin with the prefix, case insensitive.""" + l_prefix = prefix.lower() + for x in root.iterdir(): + if x.name.lower().startswith(l_prefix): + yield x + + +def extract_suffixes(iter: Iterable[PurePath], prefix: str) -> Iterator[str]: + """Return the parts of the paths following the prefix. + + :param iter: Iterator over path names. + :param prefix: Expected prefix of the path names. + """ + p_len = len(prefix) + for p in iter: + yield p.name[p_len:] + + +def find_suffixes(root: Path, prefix: str) -> Iterator[str]: + """Combine find_prefixes and extract_suffixes.""" + return extract_suffixes(find_prefixed(root, prefix), prefix) + + +def parse_num(maybe_num) -> int: + """Parse number path suffixes, returns -1 on error.""" + try: + return int(maybe_num) + except ValueError: + return -1 + + +def _force_symlink( + root: Path, target: Union[str, PurePath], link_to: Union[str, Path] +) -> None: + """Helper to create the current symlink. 
+ + It's full of race conditions that are reasonably OK to ignore + for the context of best effort linking to the latest test run. + + The presumption being that in case of much parallelism + the inaccuracy is going to be acceptable. + """ + current_symlink = root.joinpath(target) + try: + current_symlink.unlink() + except OSError: + pass + try: + current_symlink.symlink_to(link_to) + except Exception: + pass + + +def make_numbered_dir(root: Path, prefix: str, mode: int = 0o700) -> Path: + """Create a directory with an increased number as suffix for the given prefix.""" + for i in range(10): + # try up to 10 times to create the folder + max_existing = max(map(parse_num, find_suffixes(root, prefix)), default=-1) + new_number = max_existing + 1 + new_path = root.joinpath(f"{prefix}{new_number}") + try: + new_path.mkdir(mode=mode) + except Exception: + pass + else: + _force_symlink(root, prefix + "current", new_path) + return new_path + else: + raise OSError( + "could not create numbered dir with prefix " + "{prefix} in {root} after 10 tries".format(prefix=prefix, root=root) + ) + + +def create_cleanup_lock(p: Path) -> Path: + """Create a lock to prevent premature folder cleanup.""" + lock_path = get_lock_path(p) + try: + fd = os.open(str(lock_path), os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o644) + except FileExistsError as e: + raise OSError(f"cannot create lockfile in {p}") from e + else: + pid = os.getpid() + spid = str(pid).encode() + os.write(fd, spid) + os.close(fd) + if not lock_path.is_file(): + raise OSError("lock path got renamed after successful creation") + return lock_path + + +def register_cleanup_lock_removal(lock_path: Path, register=atexit.register): + """Register a cleanup function for removing a lock, by default on atexit.""" + pid = os.getpid() + + def cleanup_on_exit(lock_path: Path = lock_path, original_pid: int = pid) -> None: + current_pid = os.getpid() + if current_pid != original_pid: + # fork + return + try: + lock_path.unlink() + except OSError: + pass + + return register(cleanup_on_exit) + + +def maybe_delete_a_numbered_dir(path: Path) -> None: + """Remove a numbered directory if its lock can be obtained and it does + not seem to be in use.""" + path = ensure_extended_length_path(path) + lock_path = None + try: + lock_path = create_cleanup_lock(path) + parent = path.parent + + garbage = parent.joinpath(f"garbage-{uuid.uuid4()}") + path.rename(garbage) + rm_rf(garbage) + except OSError: + # known races: + # * other process did a cleanup at the same time + # * deletable folder was found + # * process cwd (Windows) + return + finally: + # If we created the lock, ensure we remove it even if we failed + # to properly remove the numbered dir. + if lock_path is not None: + try: + lock_path.unlink() + except OSError: + pass + + +def ensure_deletable(path: Path, consider_lock_dead_if_created_before: float) -> bool: + """Check if `path` is deletable based on whether the lock file is expired.""" + if path.is_symlink(): + return False + lock = get_lock_path(path) + try: + if not lock.is_file(): + return True + except OSError: + # we might not have access to the lock file at all, in this case assume + # we don't have access to the entire directory (#7491). 
+ return False + try: + lock_time = lock.stat().st_mtime + except Exception: + return False + else: + if lock_time < consider_lock_dead_if_created_before: + # We want to ignore any errors while trying to remove the lock such as: + # - PermissionDenied, like the file permissions have changed since the lock creation; + # - FileNotFoundError, in case another pytest process got here first; + # and any other cause of failure. + with contextlib.suppress(OSError): + lock.unlink() + return True + return False + + + def try_cleanup(path: Path, consider_lock_dead_if_created_before: float) -> None: + """Try to clean up a folder if we can ensure it's deletable.""" + if ensure_deletable(path, consider_lock_dead_if_created_before): + maybe_delete_a_numbered_dir(path) + + + def cleanup_candidates(root: Path, prefix: str, keep: int) -> Iterator[Path]: + """List candidates for numbered directories to be removed - follows py.path.""" + max_existing = max(map(parse_num, find_suffixes(root, prefix)), default=-1) + max_delete = max_existing - keep + paths = find_prefixed(root, prefix) + paths, paths2 = itertools.tee(paths) + numbers = map(parse_num, extract_suffixes(paths2, prefix)) + for path, number in zip(paths, numbers): + if number <= max_delete: + yield path + + + def cleanup_numbered_dir( + root: Path, prefix: str, keep: int, consider_lock_dead_if_created_before: float + ) -> None: + """Cleanup for lock-driven numbered directories.""" + for path in cleanup_candidates(root, prefix, keep): + try_cleanup(path, consider_lock_dead_if_created_before) + for path in root.glob("garbage-*"): + try_cleanup(path, consider_lock_dead_if_created_before) + + + def make_numbered_dir_with_cleanup( + root: Path, + prefix: str, + keep: int, + lock_timeout: float, + mode: int, + ) -> Path: + """Create a numbered dir with a cleanup lock and remove old ones.""" + e = None + for i in range(10): + try: + p = make_numbered_dir(root, prefix, mode) + lock_path = create_cleanup_lock(p) + register_cleanup_lock_removal(lock_path) + except Exception as exc: + e = exc + else: + consider_lock_dead_if_created_before = p.stat().st_mtime - lock_timeout + # Register a cleanup for program exit + atexit.register( + cleanup_numbered_dir, + root, + prefix, + keep, + consider_lock_dead_if_created_before, + ) + return p + assert e is not None + raise e + + + def resolve_from_str(input: str, rootpath: Path) -> Path: + input = expanduser(input) + input = expandvars(input) + if isabs(input): + return Path(input) + else: + return rootpath.joinpath(input) + + + def fnmatch_ex(pattern: str, path: Union[str, "os.PathLike[str]"]) -> bool: + """A port of FNMatcher from py.path.common which works with PurePath() instances. + + The difference between this algorithm and PurePath.match() is that the + latter matches "**" glob expressions for each part of the path, while + this algorithm uses the whole path instead. + + For example: + "tests/foo/bar/doc/test_foo.py" matches pattern "tests/**/doc/test*.py" + with this algorithm, but not with PurePath.match(). + + This algorithm was ported to keep backward-compatibility with existing + settings which assume paths match according to this logic. + + References: + * https://bugs.python.org/issue29249 + * https://bugs.python.org/issue34731 + """ + path = PurePath(path) + iswin32 = sys.platform.startswith("win") + + if iswin32 and sep not in pattern and posix_sep in pattern: + # Running on Windows, the pattern has no Windows path separators, + # and the pattern has one or more Posix path separators.
Replace + # the Posix path separators with the Windows path separator. + pattern = pattern.replace(posix_sep, sep) + + if sep not in pattern: + name = path.name + else: + name = str(path) + if path.is_absolute() and not os.path.isabs(pattern): + pattern = f"*{os.sep}{pattern}" + return fnmatch.fnmatch(name, pattern) + + + def parts(s: str) -> Set[str]: + parts = s.split(sep) + return {sep.join(parts[: i + 1]) or sep for i in range(len(parts))} + + + def symlink_or_skip(src, dst, **kwargs): + """Make a symlink, or skip the test in case symlinks are not supported.""" + try: + os.symlink(str(src), str(dst), **kwargs) + except OSError as e: + skip(f"symlinks not supported: {e}") + + + class ImportMode(Enum): + """Possible values for `mode` parameter of `import_path`.""" + + prepend = "prepend" + append = "append" + importlib = "importlib" + + + class ImportPathMismatchError(ImportError): + """Raised on import_path() if there is a mismatch of __file__'s. + + This can happen when `import_path` is called multiple times with different filenames that have + the same basename but reside in different packages + (for example "/tests1/test_foo.py" and "/tests2/test_foo.py"). + """ + + + def import_path( + p: Union[str, "os.PathLike[str]"], + *, + mode: Union[str, ImportMode] = ImportMode.prepend, + root: Path, + ) -> ModuleType: + """Import and return a module from the given path, which can be a file (a module) or + a directory (a package). + + The import mechanism used is controlled by the `mode` parameter: + + * `mode == ImportMode.prepend`: the directory containing the module (or package, taking + `__init__.py` files into account) will be put at the *start* of `sys.path` before + being imported with `__import__`. + + * `mode == ImportMode.append`: same as `prepend`, but the directory will be appended + to the end of `sys.path`, if not already in `sys.path`. + + * `mode == ImportMode.importlib`: uses more fine control mechanisms provided by `importlib` + to import the module, which avoids having to use `__import__` and muck with `sys.path` + at all. It effectively allows having same-named test modules in different places. + + :param root: + Used as an anchor when mode == ImportMode.importlib to obtain + a unique name for the module being imported so it can safely be stored + into ``sys.modules``. + + :raises ImportPathMismatchError: + If, after importing the given `path`, the module's `__file__` + does not correspond to it. Only raised in `prepend` and `append` modes.
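+ + For example (a hypothetical path, shown for illustration only):: + + mod = import_path("tests/test_foo.py", mode=ImportMode.importlib, root=Path.cwd())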
+ """ + mode = ImportMode(mode) + + path = Path(p) + + if not path.exists(): + raise ImportError(path) + + if mode is ImportMode.importlib: + module_name = module_name_from_path(path, root) + + for meta_importer in sys.meta_path: + spec = meta_importer.find_spec(module_name, [str(path.parent)]) + if spec is not None: + break + else: + spec = importlib.util.spec_from_file_location(module_name, str(path)) + + if spec is None: + raise ImportError(f"Can't find module {module_name} at location {path}") + mod = importlib.util.module_from_spec(spec) + sys.modules[module_name] = mod + spec.loader.exec_module(mod) # type: ignore[union-attr] + insert_missing_modules(sys.modules, module_name) + return mod + + pkg_path = resolve_package_path(path) + if pkg_path is not None: + pkg_root = pkg_path.parent + names = list(path.with_suffix("").relative_to(pkg_root).parts) + if names[-1] == "__init__": + names.pop() + module_name = ".".join(names) + else: + pkg_root = path.parent + module_name = path.stem + + # Change sys.path permanently: restoring it at the end of this function would cause surprising + # problems because of delayed imports: for example, a conftest.py file imported by this function + # might have local imports, which would fail at runtime if we restored sys.path. + if mode is ImportMode.append: + if str(pkg_root) not in sys.path: + sys.path.append(str(pkg_root)) + elif mode is ImportMode.prepend: + if str(pkg_root) != sys.path[0]: + sys.path.insert(0, str(pkg_root)) + else: + assert_never(mode) + + importlib.import_module(module_name) + + mod = sys.modules[module_name] + if path.name == "__init__.py": + return mod + + ignore = os.environ.get("PY_IGNORE_IMPORTMISMATCH", "") + if ignore != "1": + module_file = mod.__file__ + if module_file is None: + raise ImportPathMismatchError(module_name, module_file, path) + + if module_file.endswith((".pyc", ".pyo")): + module_file = module_file[:-1] + if module_file.endswith(os.path.sep + "__init__.py"): + module_file = module_file[: -(len(os.path.sep + "__init__.py"))] + + try: + is_same = _is_same(str(path), module_file) + except FileNotFoundError: + is_same = False + + if not is_same: + raise ImportPathMismatchError(module_name, module_file, path) + + return mod + + +# Implement a special _is_same function on Windows which returns True if the two filenames +# compare equal, to circumvent os.path.samefile returning False for mounts in UNC (#7678). +if sys.platform.startswith("win"): + + def _is_same(f1: str, f2: str) -> bool: + return Path(f1) == Path(f2) or os.path.samefile(f1, f2) + +else: + + def _is_same(f1: str, f2: str) -> bool: + return os.path.samefile(f1, f2) + + +def module_name_from_path(path: Path, root: Path) -> str: + """ + Return a dotted module name based on the given path, anchored on root. + + For example: path="projects/src/tests/test_foo.py" and root="/projects", the + resulting module name will be "src.tests.test_foo". + """ + path = path.with_suffix("") + try: + relative_path = path.relative_to(root) + except ValueError: + # If we can't get a relative path to root, use the full path, except + # for the first part ("d:\\" or "/" depending on the platform, for example). + path_parts = path.parts[1:] + else: + # Use the parts for the relative path to the root path. + path_parts = relative_path.parts + + return ".".join(path_parts) + + +def insert_missing_modules(modules: Dict[str, ModuleType], module_name: str) -> None: + """ + Used by ``import_path`` to create intermediate modules when using mode=importlib. 
+ + When we want to import a module as "src.tests.test_foo" for example, we need + to create empty modules "src" and "src.tests" after inserting "src.tests.test_foo", + otherwise "src.tests.test_foo" is not importable by ``__import__``. + """ + module_parts = module_name.split(".") + while module_name: + if module_name not in modules: + try: + # If sys.meta_path is empty, calling import_module will issue + # a warning and raise ModuleNotFoundError. To avoid the + # warning, we check sys.meta_path explicitly and raise the error + # ourselves to fall back to creating a dummy module. + if not sys.meta_path: + raise ModuleNotFoundError + importlib.import_module(module_name) + except ModuleNotFoundError: + module = ModuleType( + module_name, + doc="Empty module created by pytest's importmode=importlib.", + ) + modules[module_name] = module + module_parts.pop(-1) + module_name = ".".join(module_parts) + + +def resolve_package_path(path: Path) -> Optional[Path]: + """Return the Python package path by looking for the last + directory upwards which still contains an __init__.py. + + Returns None if it can not be determined. + """ + result = None + for parent in itertools.chain((path,), path.parents): + if parent.is_dir(): + if not parent.joinpath("__init__.py").is_file(): + break + if not parent.name.isidentifier(): + break + result = parent + return result + + +def visit( + path: Union[str, "os.PathLike[str]"], recurse: Callable[["os.DirEntry[str]"], bool] +) -> Iterator["os.DirEntry[str]"]: + """Walk a directory recursively, in breadth-first order. + + Entries at each directory level are sorted. + """ + + # Skip entries with symlink loops and other brokenness, so the caller doesn't + # have to deal with it. + entries = [] + for entry in os.scandir(path): + try: + entry.is_file() + except OSError as err: + if _ignore_error(err): + continue + raise + entries.append(entry) + + entries.sort(key=lambda entry: entry.name) + + yield from entries + + for entry in entries: + if entry.is_dir() and recurse(entry): + yield from visit(entry.path, recurse) + + +def absolutepath(path: Union[Path, str]) -> Path: + """Convert a path to an absolute path using os.path.abspath. + + Prefer this over Path.resolve() (see #6523). + Prefer this over Path.absolute() (not public, doesn't normalize). + """ + return Path(os.path.abspath(str(path))) + + +def commonpath(path1: Path, path2: Path) -> Optional[Path]: + """Return the common part shared with the other path, or None if there is + no common part. + + If one path is relative and one is absolute, returns None. + """ + try: + return Path(os.path.commonpath((str(path1), str(path2)))) + except ValueError: + return None + + +def bestrelpath(directory: Path, dest: Path) -> str: + """Return a string which is a relative path from directory to dest such + that directory/bestrelpath == dest. + + The paths must be either both absolute or both relative. + + If no such path can be determined, returns dest. + """ + assert isinstance(directory, Path) + assert isinstance(dest, Path) + if dest == directory: + return os.curdir + # Find the longest common directory. + base = commonpath(directory, dest) + # Can be the case on Windows for two absolute paths on different drives. + # Can be the case for two relative paths without common prefix. + # Can be the case for a relative path and an absolute path. + if not base: + return str(dest) + reldirectory = directory.relative_to(base) + reldest = dest.relative_to(base) + return os.path.join( + # Back from directory to base. 
+ *([os.pardir] * len(reldirectory.parts)), + # Forward from base to dest. + *reldest.parts, + ) + + + # Originates from py.path.local.copy(), with significant trims and adjustments. + # TODO(py38): Replace with shutil.copytree(..., symlinks=True, dirs_exist_ok=True) + def copytree(source: Path, target: Path) -> None: + """Recursively copy a source directory to target.""" + assert source.is_dir() + for entry in visit(source, recurse=lambda entry: not entry.is_symlink()): + x = Path(entry) + relpath = x.relative_to(source) + newx = target / relpath + newx.parent.mkdir(exist_ok=True) + if x.is_symlink(): + newx.symlink_to(os.readlink(x)) + elif x.is_file(): + shutil.copyfile(x, newx) + elif x.is_dir(): + newx.mkdir(exist_ok=True) diff --git a/venv/lib/python3.10/site-packages/pip-22.3.1.virtualenv b/venv/lib/python3.10/site-packages/_pytest/py.typed similarity index 100% rename from venv/lib/python3.10/site-packages/pip-22.3.1.virtualenv rename to venv/lib/python3.10/site-packages/_pytest/py.typed diff --git a/venv/lib/python3.10/site-packages/_pytest/pytester.py b/venv/lib/python3.10/site-packages/_pytest/pytester.py new file mode 100644 index 0000000..8368f94 --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/pytester.py @@ -0,0 +1,1750 @@ +"""(Disabled by default) support for testing pytest and pytest plugins. + +PYTEST_DONT_REWRITE +""" +import collections.abc +import contextlib +import gc +import importlib +import os +import platform +import re +import shutil +import subprocess +import sys +import traceback +from fnmatch import fnmatch +from io import StringIO +from pathlib import Path +from typing import Any +from typing import Callable +from typing import Dict +from typing import Generator +from typing import IO +from typing import Iterable +from typing import List +from typing import Optional +from typing import overload +from typing import Sequence +from typing import TextIO +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import Union +from weakref import WeakKeyDictionary + +from iniconfig import IniConfig +from iniconfig import SectionWrapper + +from _pytest import timing +from _pytest._code import Source +from _pytest.capture import _get_multicapture +from _pytest.compat import final +from _pytest.compat import NOTSET +from _pytest.compat import NotSetType +from _pytest.config import _PluggyPlugin +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config import hookimpl +from _pytest.config import main +from _pytest.config import PytestPluginManager +from _pytest.config.argparsing import Parser +from _pytest.deprecated import check_ispytest +from _pytest.fixtures import fixture +from _pytest.fixtures import FixtureRequest +from _pytest.main import Session +from _pytest.monkeypatch import MonkeyPatch +from _pytest.nodes import Collector +from _pytest.nodes import Item +from _pytest.outcomes import fail +from _pytest.outcomes import importorskip +from _pytest.outcomes import skip +from _pytest.pathlib import bestrelpath +from _pytest.pathlib import copytree +from _pytest.pathlib import make_numbered_dir +from _pytest.reports import CollectReport +from _pytest.reports import TestReport +from _pytest.tmpdir import TempPathFactory +from _pytest.warning_types import PytestWarning + + +if TYPE_CHECKING: + from typing_extensions import Final + from typing_extensions import Literal + + import pexpect + + +pytest_plugins = ["pytester_assertions"] + + +IGNORE_PAM = [ # filenames added when
obtaining details about the current user + "/var/lib/sss/mc/passwd" +] + + +def pytest_addoption(parser: Parser) -> None: + parser.addoption( + "--lsof", + action="store_true", + dest="lsof", + default=False, + help="run FD checks if lsof is available", + ) + + parser.addoption( + "--runpytest", + default="inprocess", + dest="runpytest", + choices=("inprocess", "subprocess"), + help=( + "run pytest sub runs in tests using an 'inprocess' " + "or 'subprocess' (python -m main) method" + ), + ) + + parser.addini( + "pytester_example_dir", help="directory to take the pytester example files from" + ) + + +def pytest_configure(config: Config) -> None: + if config.getvalue("lsof"): + checker = LsofFdLeakChecker() + if checker.matching_platform(): + config.pluginmanager.register(checker) + + config.addinivalue_line( + "markers", + "pytester_example_path(*path_segments): join the given path " + "segments to `pytester_example_dir` for this test.", + ) + + +class LsofFdLeakChecker: + def get_open_files(self) -> List[Tuple[str, str]]: + out = subprocess.run( + ("lsof", "-Ffn0", "-p", str(os.getpid())), + stdout=subprocess.PIPE, + stderr=subprocess.DEVNULL, + check=True, + text=True, + ).stdout + + def isopen(line: str) -> bool: + return line.startswith("f") and ( + "deleted" not in line + and "mem" not in line + and "txt" not in line + and "cwd" not in line + ) + + open_files = [] + + for line in out.split("\n"): + if isopen(line): + fields = line.split("\0") + fd = fields[0][1:] + filename = fields[1][1:] + if filename in IGNORE_PAM: + continue + if filename.startswith("/"): + open_files.append((fd, filename)) + + return open_files + + def matching_platform(self) -> bool: + try: + subprocess.run(("lsof", "-v"), check=True) + except (OSError, subprocess.CalledProcessError): + return False + else: + return True + + @hookimpl(hookwrapper=True, tryfirst=True) + def pytest_runtest_protocol(self, item: Item) -> Generator[None, None, None]: + lines1 = self.get_open_files() + yield + if hasattr(sys, "pypy_version_info"): + gc.collect() + lines2 = self.get_open_files() + + new_fds = {t[0] for t in lines2} - {t[0] for t in lines1} + leaked_files = [t for t in lines2 if t[0] in new_fds] + if leaked_files: + error = [ + "***** %s FD leakage detected" % len(leaked_files), + *(str(f) for f in leaked_files), + "*** Before:", + *(str(f) for f in lines1), + "*** After:", + *(str(f) for f in lines2), + "***** %s FD leakage detected" % len(leaked_files), + "*** function %s:%s: %s " % item.location, + "See issue #2366", + ] + item.warn(PytestWarning("\n".join(error))) + + +# used at least by pytest-xdist plugin + + +@fixture +def _pytest(request: FixtureRequest) -> "PytestArg": + """Return a helper which offers a gethookrecorder(hook) method which + returns a HookRecorder instance which helps to make assertions about called + hooks.""" + return PytestArg(request) + + +class PytestArg: + def __init__(self, request: FixtureRequest) -> None: + self._request = request + + def gethookrecorder(self, hook) -> "HookRecorder": + hookrecorder = HookRecorder(hook._pm) + self._request.addfinalizer(hookrecorder.finish_recording) + return hookrecorder + + +def get_public_names(values: Iterable[str]) -> List[str]: + """Only return names from iterator values without a leading underscore.""" + return [x for x in values if x[0] != "_"] + + +@final +class RecordedHookCall: + """A recorded call to a hook. + + The arguments to the hook call are set as attributes. + For example: + + .. 
code-block:: python + + calls = hook_recorder.getcalls("pytest_runtest_setup") + # Suppose pytest_runtest_setup was called once with `item=an_item`. + assert calls[0].item is an_item + """ + + def __init__(self, name: str, kwargs) -> None: + self.__dict__.update(kwargs) + self._name = name + + def __repr__(self) -> str: + d = self.__dict__.copy() + del d["_name"] + return f"<RecordedHookCall {self._name!r}(**{d!r})>" + + if TYPE_CHECKING: + # The class has undetermined attributes, this tells mypy about it. + def __getattr__(self, key: str): + ... + + + @final + class HookRecorder: + """Record all hooks called in a plugin manager. + + Hook recorders are created by :class:`Pytester`. + + This wraps all the hook calls in the plugin manager, recording each call + before propagating the normal calls. + """ + + def __init__( + self, pluginmanager: PytestPluginManager, *, _ispytest: bool = False + ) -> None: + check_ispytest(_ispytest) + + self._pluginmanager = pluginmanager + self.calls: List[RecordedHookCall] = [] + self.ret: Optional[Union[int, ExitCode]] = None + + def before(hook_name: str, hook_impls, kwargs) -> None: + self.calls.append(RecordedHookCall(hook_name, kwargs)) + + def after(outcome, hook_name: str, hook_impls, kwargs) -> None: + pass + + self._undo_wrapping = pluginmanager.add_hookcall_monitoring(before, after) + + def finish_recording(self) -> None: + self._undo_wrapping() + + def getcalls(self, names: Union[str, Iterable[str]]) -> List[RecordedHookCall]: + """Get all recorded calls to hooks with the given names (or name).""" + if isinstance(names, str): + names = names.split() + return [call for call in self.calls if call._name in names] + + def assert_contains(self, entries: Sequence[Tuple[str, str]]) -> None: + __tracebackhide__ = True + i = 0 + entries = list(entries) + backlocals = sys._getframe(1).f_locals + while entries: + name, check = entries.pop(0) + for ind, call in enumerate(self.calls[i:]): + if call._name == name: + print("NAMEMATCH", name, call) + if eval(check, backlocals, call.__dict__): + print("CHECKERMATCH", repr(check), "->", call) + else: + print("NOCHECKERMATCH", repr(check), "-", call) + continue + i += ind + 1 + break + print("NONAMEMATCH", name, "with", call) + else: + fail(f"could not find {name!r} check {check!r}") + + def popcall(self, name: str) -> RecordedHookCall: + __tracebackhide__ = True + for i, call in enumerate(self.calls): + if call._name == name: + del self.calls[i] + return call + lines = [f"could not find call {name!r}, in:"] + lines.extend([" %s" % x for x in self.calls]) + fail("\n".join(lines)) + + def getcall(self, name: str) -> RecordedHookCall: + values = self.getcalls(name) + assert len(values) == 1, (name, values) + return values[0] + + # functionality for test reports + + @overload + def getreports( + self, + names: "Literal['pytest_collectreport']", + ) -> Sequence[CollectReport]: + ... + + @overload + def getreports( + self, + names: "Literal['pytest_runtest_logreport']", + ) -> Sequence[TestReport]: + ... + + @overload + def getreports( + self, + names: Union[str, Iterable[str]] = ( + "pytest_collectreport", + "pytest_runtest_logreport", + ), + ) -> Sequence[Union[CollectReport, TestReport]]: + ...
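+ + # The @overload stubs above exist only to narrow return types for static + # type checkers; the single implementation below handles every case.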
+ + def getreports( + self, + names: Union[str, Iterable[str]] = ( + "pytest_collectreport", + "pytest_runtest_logreport", + ), + ) -> Sequence[Union[CollectReport, TestReport]]: + return [x.report for x in self.getcalls(names)] + + def matchreport( + self, + inamepart: str = "", + names: Union[str, Iterable[str]] = ( + "pytest_runtest_logreport", + "pytest_collectreport", + ), + when: Optional[str] = None, + ) -> Union[CollectReport, TestReport]: + """Return a testreport whose dotted import path matches.""" + values = [] + for rep in self.getreports(names=names): + if not when and rep.when != "call" and rep.passed: + # setup/teardown passing reports - let's ignore those + continue + if when and rep.when != when: + continue + if not inamepart or inamepart in rep.nodeid.split("::"): + values.append(rep) + if not values: + raise ValueError( + "could not find test report matching %r: " + "no test reports at all!" % (inamepart,) + ) + if len(values) > 1: + raise ValueError( + "found 2 or more testreports matching {!r}: {}".format( + inamepart, values + ) + ) + return values[0] + + @overload + def getfailures( + self, + names: "Literal['pytest_collectreport']", + ) -> Sequence[CollectReport]: + ... + + @overload + def getfailures( + self, + names: "Literal['pytest_runtest_logreport']", + ) -> Sequence[TestReport]: + ... + + @overload + def getfailures( + self, + names: Union[str, Iterable[str]] = ( + "pytest_collectreport", + "pytest_runtest_logreport", + ), + ) -> Sequence[Union[CollectReport, TestReport]]: + ... + + def getfailures( + self, + names: Union[str, Iterable[str]] = ( + "pytest_collectreport", + "pytest_runtest_logreport", + ), + ) -> Sequence[Union[CollectReport, TestReport]]: + return [rep for rep in self.getreports(names) if rep.failed] + + def getfailedcollections(self) -> Sequence[CollectReport]: + return self.getfailures("pytest_collectreport") + + def listoutcomes( + self, + ) -> Tuple[ + Sequence[TestReport], + Sequence[Union[CollectReport, TestReport]], + Sequence[Union[CollectReport, TestReport]], + ]: + passed = [] + skipped = [] + failed = [] + for rep in self.getreports( + ("pytest_collectreport", "pytest_runtest_logreport") + ): + if rep.passed: + if rep.when == "call": + assert isinstance(rep, TestReport) + passed.append(rep) + elif rep.skipped: + skipped.append(rep) + else: + assert rep.failed, f"Unexpected outcome: {rep!r}" + failed.append(rep) + return passed, skipped, failed + + def countoutcomes(self) -> List[int]: + return [len(x) for x in self.listoutcomes()] + + def assertoutcome(self, passed: int = 0, skipped: int = 0, failed: int = 0) -> None: + __tracebackhide__ = True + from _pytest.pytester_assertions import assertoutcome + + outcomes = self.listoutcomes() + assertoutcome( + outcomes, + passed=passed, + skipped=skipped, + failed=failed, + ) + + def clear(self) -> None: + self.calls[:] = [] + + +@fixture +def linecomp() -> "LineComp": + """A :class: `LineComp` instance for checking that an input linearly + contains a sequence of strings.""" + return LineComp() + + +@fixture(name="LineMatcher") +def LineMatcher_fixture(request: FixtureRequest) -> Type["LineMatcher"]: + """A reference to the :class: `LineMatcher`. + + This is instantiable with a list of lines (without their trailing newlines). + This is useful for testing large texts, such as the output of commands. 
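+ + For example (illustrative; ``output`` is any captured multi-line string):: + + matcher = LineMatcher(output.splitlines()) + matcher.fnmatch_lines(["*1 passed*"])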
+ """ + return LineMatcher + + +@fixture +def pytester( + request: FixtureRequest, tmp_path_factory: TempPathFactory, monkeypatch: MonkeyPatch +) -> "Pytester": + """ + Facilities to write tests/configuration files, execute pytest in isolation, and match + against expected output, perfect for black-box testing of pytest plugins. + + It attempts to isolate the test run from external factors as much as possible, modifying + the current working directory to ``path`` and environment variables during initialization. + + It is particularly useful for testing plugins. It is similar to the :fixture:`tmp_path` + fixture but provides methods which aid in testing pytest itself. + """ + return Pytester(request, tmp_path_factory, monkeypatch, _ispytest=True) + + +@fixture +def _sys_snapshot() -> Generator[None, None, None]: + snappaths = SysPathsSnapshot() + snapmods = SysModulesSnapshot() + yield + snapmods.restore() + snappaths.restore() + + +@fixture +def _config_for_test() -> Generator[Config, None, None]: + from _pytest.config import get_config + + config = get_config() + yield config + config._ensure_unconfigure() # cleanup, e.g. capman closing tmpfiles. + + +# Regex to match the session duration string in the summary: "74.34s". +rex_session_duration = re.compile(r"\d+\.\d\ds") +# Regex to match all the counts and phrases in the summary line: "34 passed, 111 skipped". +rex_outcome = re.compile(r"(\d+) (\w+)") + + +@final +class RunResult: + """The result of running a command from :class:`~pytest.Pytester`.""" + + def __init__( + self, + ret: Union[int, ExitCode], + outlines: List[str], + errlines: List[str], + duration: float, + ) -> None: + try: + self.ret: Union[int, ExitCode] = ExitCode(ret) + """The return value.""" + except ValueError: + self.ret = ret + self.outlines = outlines + """List of lines captured from stdout.""" + self.errlines = errlines + """List of lines captured from stderr.""" + self.stdout = LineMatcher(outlines) + """:class:`~pytest.LineMatcher` of stdout. + + Use e.g. :func:`str(stdout) ` to reconstruct stdout, or the commonly used + :func:`stdout.fnmatch_lines() ` method. + """ + self.stderr = LineMatcher(errlines) + """:class:`~pytest.LineMatcher` of stderr.""" + self.duration = duration + """Duration in seconds.""" + + def __repr__(self) -> str: + return ( + "" + % (self.ret, len(self.stdout.lines), len(self.stderr.lines), self.duration) + ) + + def parseoutcomes(self) -> Dict[str, int]: + """Return a dictionary of outcome noun -> count from parsing the terminal + output that the test process produced. + + The returned nouns will always be in plural form:: + + ======= 1 failed, 1 passed, 1 warning, 1 error in 0.13s ==== + + Will return ``{"failed": 1, "passed": 1, "warnings": 1, "errors": 1}``. + """ + return self.parse_summary_nouns(self.outlines) + + @classmethod + def parse_summary_nouns(cls, lines) -> Dict[str, int]: + """Extract the nouns from a pytest terminal summary line. + + It always returns the plural noun for consistency:: + + ======= 1 failed, 1 passed, 1 warning, 1 error in 0.13s ==== + + Will return ``{"failed": 1, "passed": 1, "warnings": 1, "errors": 1}``. 
+ """ + for line in reversed(lines): + if rex_session_duration.search(line): + outcomes = rex_outcome.findall(line) + ret = {noun: int(count) for (count, noun) in outcomes} + break + else: + raise ValueError("Pytest terminal summary report not found") + + to_plural = { + "warning": "warnings", + "error": "errors", + } + return {to_plural.get(k, k): v for k, v in ret.items()} + + def assert_outcomes( + self, + passed: int = 0, + skipped: int = 0, + failed: int = 0, + errors: int = 0, + xpassed: int = 0, + xfailed: int = 0, + warnings: Optional[int] = None, + deselected: Optional[int] = None, + ) -> None: + """ + Assert that the specified outcomes appear with the respective + numbers (0 means it didn't occur) in the text output from a test run. + + ``warnings`` and ``deselected`` are only checked if not None. + """ + __tracebackhide__ = True + from _pytest.pytester_assertions import assert_outcomes + + outcomes = self.parseoutcomes() + assert_outcomes( + outcomes, + passed=passed, + skipped=skipped, + failed=failed, + errors=errors, + xpassed=xpassed, + xfailed=xfailed, + warnings=warnings, + deselected=deselected, + ) + + +class CwdSnapshot: + def __init__(self) -> None: + self.__saved = os.getcwd() + + def restore(self) -> None: + os.chdir(self.__saved) + + +class SysModulesSnapshot: + def __init__(self, preserve: Optional[Callable[[str], bool]] = None) -> None: + self.__preserve = preserve + self.__saved = dict(sys.modules) + + def restore(self) -> None: + if self.__preserve: + self.__saved.update( + (k, m) for k, m in sys.modules.items() if self.__preserve(k) + ) + sys.modules.clear() + sys.modules.update(self.__saved) + + +class SysPathsSnapshot: + def __init__(self) -> None: + self.__saved = list(sys.path), list(sys.meta_path) + + def restore(self) -> None: + sys.path[:], sys.meta_path[:] = self.__saved + + +@final +class Pytester: + """ + Facilities to write tests/configuration files, execute pytest in isolation, and match + against expected output, perfect for black-box testing of pytest plugins. + + It attempts to isolate the test run from external factors as much as possible, modifying + the current working directory to ``path`` and environment variables during initialization. + + Attributes: + + :ivar Path path: temporary directory path used to create files/run tests from, etc. + + :ivar plugins: + A list of plugins to use with :py:meth:`parseconfig` and + :py:meth:`runpytest`. Initially this is an empty list but plugins can + be added to the list. The type of items to add to the list depends on + the method using them so refer to them for details. 
+ """ + + __test__ = False + + CLOSE_STDIN: "Final" = NOTSET + + class TimeoutExpired(Exception): + pass + + def __init__( + self, + request: FixtureRequest, + tmp_path_factory: TempPathFactory, + monkeypatch: MonkeyPatch, + *, + _ispytest: bool = False, + ) -> None: + check_ispytest(_ispytest) + self._request = request + self._mod_collections: WeakKeyDictionary[ + Collector, List[Union[Item, Collector]] + ] = WeakKeyDictionary() + if request.function: + name: str = request.function.__name__ + else: + name = request.node.name + self._name = name + self._path: Path = tmp_path_factory.mktemp(name, numbered=True) + self.plugins: List[Union[str, _PluggyPlugin]] = [] + self._cwd_snapshot = CwdSnapshot() + self._sys_path_snapshot = SysPathsSnapshot() + self._sys_modules_snapshot = self.__take_sys_modules_snapshot() + self.chdir() + self._request.addfinalizer(self._finalize) + self._method = self._request.config.getoption("--runpytest") + self._test_tmproot = tmp_path_factory.mktemp(f"tmp-{name}", numbered=True) + + self._monkeypatch = mp = monkeypatch + mp.setenv("PYTEST_DEBUG_TEMPROOT", str(self._test_tmproot)) + # Ensure no unexpected caching via tox. + mp.delenv("TOX_ENV_DIR", raising=False) + # Discard outer pytest options. + mp.delenv("PYTEST_ADDOPTS", raising=False) + # Ensure no user config is used. + tmphome = str(self.path) + mp.setenv("HOME", tmphome) + mp.setenv("USERPROFILE", tmphome) + # Do not use colors for inner runs by default. + mp.setenv("PY_COLORS", "0") + + @property + def path(self) -> Path: + """Temporary directory where files are created and pytest is executed.""" + return self._path + + def __repr__(self) -> str: + return f"" + + def _finalize(self) -> None: + """ + Clean up global state artifacts. + + Some methods modify the global interpreter state and this tries to + clean this up. It does not remove the temporary directory however so + it can be looked at after the test run has finished. + """ + self._sys_modules_snapshot.restore() + self._sys_path_snapshot.restore() + self._cwd_snapshot.restore() + + def __take_sys_modules_snapshot(self) -> SysModulesSnapshot: + # Some zope modules used by twisted-related tests keep internal state + # and can't be deleted; we had some trouble in the past with + # `zope.interface` for example. + # + # Preserve readline due to https://bugs.python.org/issue41033. + # pexpect issues a SIGWINCH. + def preserve_module(name): + return name.startswith(("zope", "readline")) + + return SysModulesSnapshot(preserve=preserve_module) + + def make_hook_recorder(self, pluginmanager: PytestPluginManager) -> HookRecorder: + """Create a new :py:class:`HookRecorder` for a PluginManager.""" + pluginmanager.reprec = reprec = HookRecorder(pluginmanager, _ispytest=True) + self._request.addfinalizer(reprec.finish_recording) + return reprec + + def chdir(self) -> None: + """Cd into the temporary directory. + + This is done automatically upon instantiation. 
+ """ + os.chdir(self.path) + + def _makefile( + self, + ext: str, + lines: Sequence[Union[Any, bytes]], + files: Dict[str, str], + encoding: str = "utf-8", + ) -> Path: + items = list(files.items()) + + if ext and not ext.startswith("."): + raise ValueError( + f"pytester.makefile expects a file extension, try .{ext} instead of {ext}" + ) + + def to_text(s: Union[Any, bytes]) -> str: + return s.decode(encoding) if isinstance(s, bytes) else str(s) + + if lines: + source = "\n".join(to_text(x) for x in lines) + basename = self._name + items.insert(0, (basename, source)) + + ret = None + for basename, value in items: + p = self.path.joinpath(basename).with_suffix(ext) + p.parent.mkdir(parents=True, exist_ok=True) + source_ = Source(value) + source = "\n".join(to_text(line) for line in source_.lines) + p.write_text(source.strip(), encoding=encoding) + if ret is None: + ret = p + assert ret is not None + return ret + + def makefile(self, ext: str, *args: str, **kwargs: str) -> Path: + r"""Create new text file(s) in the test directory. + + :param str ext: + The extension the file(s) should use, including the dot, e.g. `.py`. + :param args: + All args are treated as strings and joined using newlines. + The result is written as contents to the file. The name of the + file is based on the test function requesting this fixture. + :param kwargs: + Each keyword is the name of a file, while the value of it will + be written as contents of the file. + + Examples: + + .. code-block:: python + + pytester.makefile(".txt", "line1", "line2") + + pytester.makefile(".ini", pytest="[pytest]\naddopts=-rs\n") + + To create binary files, use :meth:`pathlib.Path.write_bytes` directly: + + .. code-block:: python + + filename = pytester.path.joinpath("foo.bin") + filename.write_bytes(b"...") + """ + return self._makefile(ext, args, kwargs) + + def makeconftest(self, source: str) -> Path: + """Write a contest.py file with 'source' as contents.""" + return self.makepyfile(conftest=source) + + def makeini(self, source: str) -> Path: + """Write a tox.ini file with 'source' as contents.""" + return self.makefile(".ini", tox=source) + + def getinicfg(self, source: str) -> SectionWrapper: + """Return the pytest section from the tox.ini config file.""" + p = self.makeini(source) + return IniConfig(str(p))["pytest"] + + def makepyprojecttoml(self, source: str) -> Path: + """Write a pyproject.toml file with 'source' as contents. + + .. versionadded:: 6.0 + """ + return self.makefile(".toml", pyproject=source) + + def makepyfile(self, *args, **kwargs) -> Path: + r"""Shortcut for .makefile() with a .py extension. + + Defaults to the test name with a '.py' extension, e.g test_foobar.py, overwriting + existing files. + + Examples: + + .. code-block:: python + + def test_something(pytester): + # Initial file is created test_something.py. + pytester.makepyfile("foobar") + # To create multiple files, pass kwargs accordingly. + pytester.makepyfile(custom="foobar") + # At this point, both 'test_something.py' & 'custom.py' exist in the test directory. + + """ + return self._makefile(".py", args, kwargs) + + def maketxtfile(self, *args, **kwargs) -> Path: + r"""Shortcut for .makefile() with a .txt extension. + + Defaults to the test name with a '.txt' extension, e.g test_foobar.txt, overwriting + existing files. + + Examples: + + .. code-block:: python + + def test_something(pytester): + # Initial file is created test_something.txt. + pytester.maketxtfile("foobar") + # To create multiple files, pass kwargs accordingly. 
+ pytester.maketxtfile(custom="foobar") + # At this point, both 'test_something.txt' & 'custom.txt' exist in the test directory. + + """ + return self._makefile(".txt", args, kwargs) + + def syspathinsert( + self, path: Optional[Union[str, "os.PathLike[str]"]] = None + ) -> None: + """Prepend a directory to sys.path, defaults to :attr:`path`. + + This is undone automatically when this object dies at the end of each + test. + """ + if path is None: + path = self.path + + self._monkeypatch.syspath_prepend(str(path)) + + def mkdir(self, name: str) -> Path: + """Create a new (sub)directory.""" + p = self.path / name + p.mkdir() + return p + + def mkpydir(self, name: str) -> Path: + """Create a new python package. + + This creates a (sub)directory with an empty ``__init__.py`` file so it + gets recognised as a Python package. + """ + p = self.path / name + p.mkdir() + p.joinpath("__init__.py").touch() + return p + + def copy_example(self, name: Optional[str] = None) -> Path: + """Copy file from project's directory into the testdir. + + :param str name: The name of the file to copy. + :return: path to the copied directory (inside ``self.path``). + + """ + example_dir = self._request.config.getini("pytester_example_dir") + if example_dir is None: + raise ValueError("pytester_example_dir is unset, can't copy examples") + example_dir = self._request.config.rootpath / example_dir + + for extra_element in self._request.node.iter_markers("pytester_example_path"): + assert extra_element.args + example_dir = example_dir.joinpath(*extra_element.args) + + if name is None: + func_name = self._name + maybe_dir = example_dir / func_name + maybe_file = example_dir / (func_name + ".py") + + if maybe_dir.is_dir(): + example_path = maybe_dir + elif maybe_file.is_file(): + example_path = maybe_file + else: + raise LookupError( + f"{func_name} can't be found as module or package in {example_dir}" + ) + else: + example_path = example_dir.joinpath(name) + + if example_path.is_dir() and not example_path.joinpath("__init__.py").is_file(): + copytree(example_path, self.path) + return self.path + elif example_path.is_file(): + result = self.path.joinpath(example_path.name) + shutil.copy(example_path, result) + return result + else: + raise LookupError( + f'example "{example_path}" is not found as a file or directory' + ) + + def getnode( + self, config: Config, arg: Union[str, "os.PathLike[str]"] + ) -> Optional[Union[Collector, Item]]: + """Return the collection node of a file. + + :param pytest.Config config: + A pytest config. + See :py:meth:`parseconfig` and :py:meth:`parseconfigure` for creating it. + :param os.PathLike[str] arg: + Path to the file. + """ + session = Session.from_config(config) + assert "::" not in str(arg) + p = Path(os.path.abspath(arg)) + config.hook.pytest_sessionstart(session=session) + res = session.perform_collect([str(p)], genitems=False)[0] + config.hook.pytest_sessionfinish(session=session, exitstatus=ExitCode.OK) + return res + + def getpathnode(self, path: Union[str, "os.PathLike[str]"]): + """Return the collection node of a file. + + This is like :py:meth:`getnode` but uses :py:meth:`parseconfigure` to + create the (configured) pytest Config instance. + + :param os.PathLike[str] path: Path to the file. 
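+
+        A short sketch (``test_sample`` is a hypothetical module created first
+        with :py:meth:`makepyfile`):
+
+        .. code-block:: python
+
+            p = pytester.makepyfile(test_sample="def test_x(): pass")
+            modcol = pytester.getpathnode(p)
+            assert modcol.name == "test_sample.py"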
+ """ + path = Path(path) + config = self.parseconfigure(path) + session = Session.from_config(config) + x = bestrelpath(session.path, path) + config.hook.pytest_sessionstart(session=session) + res = session.perform_collect([x], genitems=False)[0] + config.hook.pytest_sessionfinish(session=session, exitstatus=ExitCode.OK) + return res + + def genitems(self, colitems: Sequence[Union[Item, Collector]]) -> List[Item]: + """Generate all test items from a collection node. + + This recurses into the collection node and returns a list of all the + test items contained within. + """ + session = colitems[0].session + result: List[Item] = [] + for colitem in colitems: + result.extend(session.genitems(colitem)) + return result + + def runitem(self, source: str) -> Any: + """Run the "test_func" Item. + + The calling test instance (class containing the test method) must + provide a ``.getrunner()`` method which should return a runner which + can run the test protocol for a single item, e.g. + :py:func:`_pytest.runner.runtestprotocol`. + """ + # used from runner functional tests + item = self.getitem(source) + # the test class where we are called from wants to provide the runner + testclassinstance = self._request.instance + runner = testclassinstance.getrunner() + return runner(item) + + def inline_runsource(self, source: str, *cmdlineargs) -> HookRecorder: + """Run a test module in process using ``pytest.main()``. + + This run writes "source" into a temporary file and runs + ``pytest.main()`` on it, returning a :py:class:`HookRecorder` instance + for the result. + + :param source: The source code of the test module. + :param cmdlineargs: Any extra command line arguments to use. + """ + p = self.makepyfile(source) + values = list(cmdlineargs) + [p] + return self.inline_run(*values) + + def inline_genitems(self, *args) -> Tuple[List[Item], HookRecorder]: + """Run ``pytest.main(['--collectonly'])`` in-process. + + Runs the :py:func:`pytest.main` function to run all of pytest inside + the test process itself like :py:meth:`inline_run`, but returns a + tuple of the collected items and a :py:class:`HookRecorder` instance. + """ + rec = self.inline_run("--collect-only", *args) + items = [x.item for x in rec.getcalls("pytest_itemcollected")] + return items, rec + + def inline_run( + self, + *args: Union[str, "os.PathLike[str]"], + plugins=(), + no_reraise_ctrlc: bool = False, + ) -> HookRecorder: + """Run ``pytest.main()`` in-process, returning a HookRecorder. + + Runs the :py:func:`pytest.main` function to run all of pytest inside + the test process itself. This means it can return a + :py:class:`HookRecorder` instance which gives more detailed results + from that run than can be done by matching stdout/stderr from + :py:meth:`runpytest`. + + :param args: + Command line arguments to pass to :py:func:`pytest.main`. + :param plugins: + Extra plugin instances the ``pytest.main()`` instance should use. + :param no_reraise_ctrlc: + Typically we reraise keyboard interrupts from the child run. If + True, the KeyboardInterrupt exception is captured. + """ + # (maybe a cpython bug?) 
the importlib cache sometimes isn't updated + # properly between file creation and inline_run (especially if imports + # are interspersed with file creation) + importlib.invalidate_caches() + + plugins = list(plugins) + finalizers = [] + try: + # Any sys.module or sys.path changes done while running pytest + # inline should be reverted after the test run completes to avoid + # clashing with later inline tests run within the same pytest test, + # e.g. just because they use matching test module names. + finalizers.append(self.__take_sys_modules_snapshot().restore) + finalizers.append(SysPathsSnapshot().restore) + + # Important note: + # - our tests should not leave any other references/registrations + # laying around other than possibly loaded test modules + # referenced from sys.modules, as nothing will clean those up + # automatically + + rec = [] + + class Collect: + def pytest_configure(x, config: Config) -> None: + rec.append(self.make_hook_recorder(config.pluginmanager)) + + plugins.append(Collect()) + ret = main([str(x) for x in args], plugins=plugins) + if len(rec) == 1: + reprec = rec.pop() + else: + + class reprec: # type: ignore + pass + + reprec.ret = ret + + # Typically we reraise keyboard interrupts from the child run + # because it's our user requesting interruption of the testing. + if ret == ExitCode.INTERRUPTED and not no_reraise_ctrlc: + calls = reprec.getcalls("pytest_keyboard_interrupt") + if calls and calls[-1].excinfo.type == KeyboardInterrupt: + raise KeyboardInterrupt() + return reprec + finally: + for finalizer in finalizers: + finalizer() + + def runpytest_inprocess( + self, *args: Union[str, "os.PathLike[str]"], **kwargs: Any + ) -> RunResult: + """Return result of running pytest in-process, providing a similar + interface to what self.runpytest() provides.""" + syspathinsert = kwargs.pop("syspathinsert", False) + + if syspathinsert: + self.syspathinsert() + now = timing.time() + capture = _get_multicapture("sys") + capture.start_capturing() + try: + try: + reprec = self.inline_run(*args, **kwargs) + except SystemExit as e: + ret = e.args[0] + try: + ret = ExitCode(e.args[0]) + except ValueError: + pass + + class reprec: # type: ignore + ret = ret + + except Exception: + traceback.print_exc() + + class reprec: # type: ignore + ret = ExitCode(3) + + finally: + out, err = capture.readouterr() + capture.stop_capturing() + sys.stdout.write(out) + sys.stderr.write(err) + + assert reprec.ret is not None + res = RunResult( + reprec.ret, out.splitlines(), err.splitlines(), timing.time() - now + ) + res.reprec = reprec # type: ignore + return res + + def runpytest( + self, *args: Union[str, "os.PathLike[str]"], **kwargs: Any + ) -> RunResult: + """Run pytest inline or in a subprocess, depending on the command line + option "--runpytest" and return a :py:class:`~pytest.RunResult`.""" + new_args = self._ensure_basetemp(args) + if self._method == "inprocess": + return self.runpytest_inprocess(*new_args, **kwargs) + elif self._method == "subprocess": + return self.runpytest_subprocess(*new_args, **kwargs) + raise RuntimeError(f"Unrecognized runpytest option: {self._method}") + + def _ensure_basetemp( + self, args: Sequence[Union[str, "os.PathLike[str]"]] + ) -> List[Union[str, "os.PathLike[str]"]]: + new_args = list(args) + for x in new_args: + if str(x).startswith("--basetemp"): + break + else: + new_args.append("--basetemp=%s" % self.path.parent.joinpath("basetemp")) + return new_args + + def parseconfig(self, *args: Union[str, "os.PathLike[str]"]) -> Config: + """Return a 
new pytest Config instance from given commandline args. + + This invokes the pytest bootstrapping code in _pytest.config to create + a new :py:class:`_pytest.core.PluginManager` and call the + pytest_cmdline_parse hook to create a new + :py:class:`pytest.Config` instance. + + If :py:attr:`plugins` has been populated they should be plugin modules + to be registered with the PluginManager. + """ + import _pytest.config + + new_args = self._ensure_basetemp(args) + new_args = [str(x) for x in new_args] + + config = _pytest.config._prepareconfig(new_args, self.plugins) # type: ignore[arg-type] + # we don't know what the test will do with this half-setup config + # object and thus we make sure it gets unconfigured properly in any + # case (otherwise capturing could still be active, for example) + self._request.addfinalizer(config._ensure_unconfigure) + return config + + def parseconfigure(self, *args: Union[str, "os.PathLike[str]"]) -> Config: + """Return a new pytest configured Config instance. + + Returns a new :py:class:`pytest.Config` instance like + :py:meth:`parseconfig`, but also calls the pytest_configure hook. + """ + config = self.parseconfig(*args) + config._do_configure() + return config + + def getitem( + self, source: Union[str, "os.PathLike[str]"], funcname: str = "test_func" + ) -> Item: + """Return the test item for a test function. + + Writes the source to a python file and runs pytest's collection on + the resulting module, returning the test item for the requested + function name. + + :param source: + The module source. + :param funcname: + The name of the test function for which to return a test item. + """ + items = self.getitems(source) + for item in items: + if item.name == funcname: + return item + assert 0, "{!r} item not found in module:\n{}\nitems: {}".format( + funcname, source, items + ) + + def getitems(self, source: Union[str, "os.PathLike[str]"]) -> List[Item]: + """Return all test items collected from the module. + + Writes the source to a Python file and runs pytest's collection on + the resulting module, returning all test items contained within. + """ + modcol = self.getmodulecol(source) + return self.genitems([modcol]) + + def getmodulecol( + self, + source: Union[str, "os.PathLike[str]"], + configargs=(), + *, + withinit: bool = False, + ): + """Return the module collection node for ``source``. + + Writes ``source`` to a file using :py:meth:`makepyfile` and then + runs the pytest collection on it, returning the collection node for the + test module. + + :param source: + The source code of the module to collect. + + :param configargs: + Any extra arguments to pass to :py:meth:`parseconfigure`. + + :param withinit: + Whether to also write an ``__init__.py`` file to the same + directory to ensure it is a package. + """ + if isinstance(source, os.PathLike): + path = self.path.joinpath(source) + assert not withinit, "not supported for paths" + else: + kw = {self._name: str(source)} + path = self.makepyfile(**kw) + if withinit: + self.makepyfile(__init__="#") + self.config = config = self.parseconfigure(path, *configargs) + return self.getnode(config, path) + + def collect_by_name( + self, modcol: Collector, name: str + ) -> Optional[Union[Item, Collector]]: + """Return the collection node for name from the module collection. + + Searches a module collection node for a collection node matching the + given name. + + :param modcol: A module collection node; see :py:meth:`getmodulecol`. + :param name: The name of the node to return. 
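+
+        For example (a sketch; the module source is an illustrative assumption):
+
+        .. code-block:: python
+
+            modcol = pytester.getmodulecol("def test_a(): pass")
+            item = pytester.collect_by_name(modcol, "test_a")
+            assert item is not None and item.name == "test_a"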
+ """ + if modcol not in self._mod_collections: + self._mod_collections[modcol] = list(modcol.collect()) + for colitem in self._mod_collections[modcol]: + if colitem.name == name: + return colitem + return None + + def popen( + self, + cmdargs: Sequence[Union[str, "os.PathLike[str]"]], + stdout: Union[int, TextIO] = subprocess.PIPE, + stderr: Union[int, TextIO] = subprocess.PIPE, + stdin: Union[NotSetType, bytes, IO[Any], int] = CLOSE_STDIN, + **kw, + ): + """Invoke :py:class:`subprocess.Popen`. + + Calls :py:class:`subprocess.Popen` making sure the current working + directory is in ``PYTHONPATH``. + + You probably want to use :py:meth:`run` instead. + """ + env = os.environ.copy() + env["PYTHONPATH"] = os.pathsep.join( + filter(None, [os.getcwd(), env.get("PYTHONPATH", "")]) + ) + kw["env"] = env + + if stdin is self.CLOSE_STDIN: + kw["stdin"] = subprocess.PIPE + elif isinstance(stdin, bytes): + kw["stdin"] = subprocess.PIPE + else: + kw["stdin"] = stdin + + popen = subprocess.Popen(cmdargs, stdout=stdout, stderr=stderr, **kw) + if stdin is self.CLOSE_STDIN: + assert popen.stdin is not None + popen.stdin.close() + elif isinstance(stdin, bytes): + assert popen.stdin is not None + popen.stdin.write(stdin) + + return popen + + def run( + self, + *cmdargs: Union[str, "os.PathLike[str]"], + timeout: Optional[float] = None, + stdin: Union[NotSetType, bytes, IO[Any], int] = CLOSE_STDIN, + ) -> RunResult: + """Run a command with arguments. + + Run a process using :py:class:`subprocess.Popen` saving the stdout and + stderr. + + :param cmdargs: + The sequence of arguments to pass to :py:class:`subprocess.Popen`, + with path-like objects being converted to :py:class:`str` + automatically. + :param timeout: + The period in seconds after which to timeout and raise + :py:class:`Pytester.TimeoutExpired`. + :param stdin: + Optional standard input. + + - If it is :py:attr:`CLOSE_STDIN` (Default), then this method calls + :py:class:`subprocess.Popen` with ``stdin=subprocess.PIPE``, and + the standard input is closed immediately after the new command is + started. + + - If it is of type :py:class:`bytes`, these bytes are sent to the + standard input of the command. + + - Otherwise, it is passed through to :py:class:`subprocess.Popen`. + For further information in this case, consult the document of the + ``stdin`` parameter in :py:class:`subprocess.Popen`. 
+ """ + __tracebackhide__ = True + + cmdargs = tuple(os.fspath(arg) for arg in cmdargs) + p1 = self.path.joinpath("stdout") + p2 = self.path.joinpath("stderr") + print("running:", *cmdargs) + print(" in:", Path.cwd()) + + with p1.open("w", encoding="utf8") as f1, p2.open("w", encoding="utf8") as f2: + now = timing.time() + popen = self.popen( + cmdargs, + stdin=stdin, + stdout=f1, + stderr=f2, + close_fds=(sys.platform != "win32"), + ) + if popen.stdin is not None: + popen.stdin.close() + + def handle_timeout() -> None: + __tracebackhide__ = True + + timeout_message = ( + "{seconds} second timeout expired running:" + " {command}".format(seconds=timeout, command=cmdargs) + ) + + popen.kill() + popen.wait() + raise self.TimeoutExpired(timeout_message) + + if timeout is None: + ret = popen.wait() + else: + try: + ret = popen.wait(timeout) + except subprocess.TimeoutExpired: + handle_timeout() + + with p1.open(encoding="utf8") as f1, p2.open(encoding="utf8") as f2: + out = f1.read().splitlines() + err = f2.read().splitlines() + + self._dump_lines(out, sys.stdout) + self._dump_lines(err, sys.stderr) + + with contextlib.suppress(ValueError): + ret = ExitCode(ret) + return RunResult(ret, out, err, timing.time() - now) + + def _dump_lines(self, lines, fp): + try: + for line in lines: + print(line, file=fp) + except UnicodeEncodeError: + print(f"couldn't print to {fp} because of encoding") + + def _getpytestargs(self) -> Tuple[str, ...]: + return sys.executable, "-mpytest" + + def runpython(self, script: "os.PathLike[str]") -> RunResult: + """Run a python script using sys.executable as interpreter.""" + return self.run(sys.executable, script) + + def runpython_c(self, command: str) -> RunResult: + """Run ``python -c "command"``.""" + return self.run(sys.executable, "-c", command) + + def runpytest_subprocess( + self, *args: Union[str, "os.PathLike[str]"], timeout: Optional[float] = None + ) -> RunResult: + """Run pytest as a subprocess with given arguments. + + Any plugins added to the :py:attr:`plugins` list will be added using the + ``-p`` command line option. Additionally ``--basetemp`` is used to put + any temporary files and directories in a numbered directory prefixed + with "runpytest-" to not conflict with the normal numbered pytest + location for temporary files and directories. + + :param args: + The sequence of arguments to pass to the pytest subprocess. + :param timeout: + The period in seconds after which to timeout and raise + :py:class:`Pytester.TimeoutExpired`. + """ + __tracebackhide__ = True + p = make_numbered_dir(root=self.path, prefix="runpytest-", mode=0o700) + args = ("--basetemp=%s" % p,) + args + plugins = [x for x in self.plugins if isinstance(x, str)] + if plugins: + args = ("-p", plugins[0]) + args + args = self._getpytestargs() + args + return self.run(*args, timeout=timeout) + + def spawn_pytest( + self, string: str, expect_timeout: float = 10.0 + ) -> "pexpect.spawn": + """Run pytest using pexpect. + + This makes sure to use the right pytest and sets up the temporary + directory locations. + + The pexpect child is returned. + """ + basetemp = self.path / "temp-pexpect" + basetemp.mkdir(mode=0o700) + invoke = " ".join(map(str, self._getpytestargs())) + cmd = f"{invoke} --basetemp={basetemp} {string}" + return self.spawn(cmd, expect_timeout=expect_timeout) + + def spawn(self, cmd: str, expect_timeout: float = 10.0) -> "pexpect.spawn": + """Run a command using pexpect. + + The pexpect child is returned. 
+ """ + pexpect = importorskip("pexpect", "3.0") + if hasattr(sys, "pypy_version_info") and "64" in platform.machine(): + skip("pypy-64 bit not supported") + if not hasattr(pexpect, "spawn"): + skip("pexpect.spawn not available") + logfile = self.path.joinpath("spawn.out").open("wb") + + child = pexpect.spawn(cmd, logfile=logfile, timeout=expect_timeout) + self._request.addfinalizer(logfile.close) + return child + + +class LineComp: + def __init__(self) -> None: + self.stringio = StringIO() + """:class:`python:io.StringIO()` instance used for input.""" + + def assert_contains_lines(self, lines2: Sequence[str]) -> None: + """Assert that ``lines2`` are contained (linearly) in :attr:`stringio`'s value. + + Lines are matched using :func:`LineMatcher.fnmatch_lines `. + """ + __tracebackhide__ = True + val = self.stringio.getvalue() + self.stringio.truncate(0) + self.stringio.seek(0) + lines1 = val.split("\n") + LineMatcher(lines1).fnmatch_lines(lines2) + + +class LineMatcher: + """Flexible matching of text. + + This is a convenience class to test large texts like the output of + commands. + + The constructor takes a list of lines without their trailing newlines, i.e. + ``text.splitlines()``. + """ + + def __init__(self, lines: List[str]) -> None: + self.lines = lines + self._log_output: List[str] = [] + + def __str__(self) -> str: + """Return the entire original text. + + .. versionadded:: 6.2 + You can use :meth:`str` in older versions. + """ + return "\n".join(self.lines) + + def _getlines(self, lines2: Union[str, Sequence[str], Source]) -> Sequence[str]: + if isinstance(lines2, str): + lines2 = Source(lines2) + if isinstance(lines2, Source): + lines2 = lines2.strip().lines + return lines2 + + def fnmatch_lines_random(self, lines2: Sequence[str]) -> None: + """Check lines exist in the output in any order (using :func:`python:fnmatch.fnmatch`).""" + __tracebackhide__ = True + self._match_lines_random(lines2, fnmatch) + + def re_match_lines_random(self, lines2: Sequence[str]) -> None: + """Check lines exist in the output in any order (using :func:`python:re.match`).""" + __tracebackhide__ = True + self._match_lines_random(lines2, lambda name, pat: bool(re.match(pat, name))) + + def _match_lines_random( + self, lines2: Sequence[str], match_func: Callable[[str, str], bool] + ) -> None: + __tracebackhide__ = True + lines2 = self._getlines(lines2) + for line in lines2: + for x in self.lines: + if line == x or match_func(x, line): + self._log("matched: ", repr(line)) + break + else: + msg = "line %r not found in output" % line + self._log(msg) + self._fail(msg) + + def get_lines_after(self, fnline: str) -> Sequence[str]: + """Return all lines following the given line in the text. + + The given line can contain glob wildcards. + """ + for i, line in enumerate(self.lines): + if fnline == line or fnmatch(line, fnline): + return self.lines[i + 1 :] + raise ValueError("line %r not found in output" % fnline) + + def _log(self, *args) -> None: + self._log_output.append(" ".join(str(x) for x in args)) + + @property + def _log_text(self) -> str: + return "\n".join(self._log_output) + + def fnmatch_lines( + self, lines2: Sequence[str], *, consecutive: bool = False + ) -> None: + """Check lines exist in the output (using :func:`python:fnmatch.fnmatch`). + + The argument is a list of lines which have to match and can use glob + wildcards. If they do not match a pytest.fail() is called. The + matches and non-matches are also shown as part of the error message. + + :param lines2: String patterns to match. 
+        :param consecutive: Match lines consecutively?
+        """
+        __tracebackhide__ = True
+        self._match_lines(lines2, fnmatch, "fnmatch", consecutive=consecutive)
+
+    def re_match_lines(
+        self, lines2: Sequence[str], *, consecutive: bool = False
+    ) -> None:
+        """Check lines exist in the output (using :func:`python:re.match`).
+
+        The argument is a list of lines which have to match using ``re.match``.
+        If they do not match a pytest.fail() is called.
+
+        The matches and non-matches are also shown as part of the error message.
+
+        :param lines2: string patterns to match.
+        :param consecutive: match lines consecutively?
+        """
+        __tracebackhide__ = True
+        self._match_lines(
+            lines2,
+            lambda name, pat: bool(re.match(pat, name)),
+            "re.match",
+            consecutive=consecutive,
+        )
+
+    def _match_lines(
+        self,
+        lines2: Sequence[str],
+        match_func: Callable[[str, str], bool],
+        match_nickname: str,
+        *,
+        consecutive: bool = False,
+    ) -> None:
+        """Underlying implementation of ``fnmatch_lines`` and ``re_match_lines``.
+
+        :param Sequence[str] lines2:
+            List of string patterns to match. The actual format depends on
+            ``match_func``.
+        :param match_func:
+            A callable ``match_func(line, pattern)`` where line is the
+            captured line from stdout/stderr and pattern is the matching
+            pattern.
+        :param str match_nickname:
+            The nickname for the match function that will be logged to stdout
+            when a match occurs.
+        :param consecutive:
+            Match lines consecutively?
+        """
+        if not isinstance(lines2, collections.abc.Sequence):
+            raise TypeError(f"invalid type for lines2: {type(lines2).__name__}")
+        lines2 = self._getlines(lines2)
+        lines1 = self.lines[:]
+        extralines = []
+        __tracebackhide__ = True
+        wnick = len(match_nickname) + 1
+        started = False
+        for line in lines2:
+            nomatchprinted = False
+            while lines1:
+                nextline = lines1.pop(0)
+                if line == nextline:
+                    self._log("exact match:", repr(line))
+                    started = True
+                    break
+                elif match_func(nextline, line):
+                    self._log("%s:" % match_nickname, repr(line))
+                    self._log(
+                        "{:>{width}}".format("with:", width=wnick), repr(nextline)
+                    )
+                    started = True
+                    break
+                else:
+                    if consecutive and started:
+                        msg = f"no consecutive match: {line!r}"
+                        self._log(msg)
+                        self._log(
+                            "{:>{width}}".format("with:", width=wnick), repr(nextline)
+                        )
+                        self._fail(msg)
+                    if not nomatchprinted:
+                        self._log(
+                            "{:>{width}}".format("nomatch:", width=wnick), repr(line)
+                        )
+                        nomatchprinted = True
+                    self._log("{:>{width}}".format("and:", width=wnick), repr(nextline))
+                    extralines.append(nextline)
+            else:
+                msg = f"remains unmatched: {line!r}"
+                self._log(msg)
+                self._fail(msg)
+        self._log_output = []
+
+    def no_fnmatch_line(self, pat: str) -> None:
+        """Ensure captured lines do not match the given pattern, using ``fnmatch.fnmatch``.
+
+        :param str pat: The pattern to match lines.
+        """
+        __tracebackhide__ = True
+        self._no_match_line(pat, fnmatch, "fnmatch")
+
+    def no_re_match_line(self, pat: str) -> None:
+        """Ensure captured lines do not match the given pattern, using ``re.match``.
+
+        :param str pat: The regular expression to match lines.
+        """
+        __tracebackhide__ = True
+        self._no_match_line(
+            pat, lambda name, pat: bool(re.match(pat, name)), "re.match"
+        )
+
+    def _no_match_line(
+        self, pat: str, match_func: Callable[[str, str], bool], match_nickname: str
+    ) -> None:
+        """Ensure captured lines do not match the given pattern, using the given match function.
+
+        :param str pat: The pattern to match lines.
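+
+        Usually reached through the public wrappers, e.g. (a sketch):
+
+        .. code-block:: python
+
+            result = pytester.runpytest()
+            result.stdout.no_fnmatch_line("*internal error*")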
+ """ + __tracebackhide__ = True + nomatch_printed = False + wnick = len(match_nickname) + 1 + for line in self.lines: + if match_func(line, pat): + msg = f"{match_nickname}: {pat!r}" + self._log(msg) + self._log("{:>{width}}".format("with:", width=wnick), repr(line)) + self._fail(msg) + else: + if not nomatch_printed: + self._log("{:>{width}}".format("nomatch:", width=wnick), repr(pat)) + nomatch_printed = True + self._log("{:>{width}}".format("and:", width=wnick), repr(line)) + self._log_output = [] + + def _fail(self, msg: str) -> None: + __tracebackhide__ = True + log_text = self._log_text + self._log_output = [] + fail(log_text) + + def str(self) -> str: + """Return the entire original text.""" + return str(self) diff --git a/venv/lib/python3.10/site-packages/_pytest/pytester_assertions.py b/venv/lib/python3.10/site-packages/_pytest/pytester_assertions.py new file mode 100644 index 0000000..657e4db --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/pytester_assertions.py @@ -0,0 +1,75 @@ +"""Helper plugin for pytester; should not be loaded on its own.""" +# This plugin contains assertions used by pytester. pytester cannot +# contain them itself, since it is imported by the `pytest` module, +# hence cannot be subject to assertion rewriting, which requires a +# module to not be already imported. +from typing import Dict +from typing import Optional +from typing import Sequence +from typing import Tuple +from typing import Union + +from _pytest.reports import CollectReport +from _pytest.reports import TestReport + + +def assertoutcome( + outcomes: Tuple[ + Sequence[TestReport], + Sequence[Union[CollectReport, TestReport]], + Sequence[Union[CollectReport, TestReport]], + ], + passed: int = 0, + skipped: int = 0, + failed: int = 0, +) -> None: + __tracebackhide__ = True + + realpassed, realskipped, realfailed = outcomes + obtained = { + "passed": len(realpassed), + "skipped": len(realskipped), + "failed": len(realfailed), + } + expected = {"passed": passed, "skipped": skipped, "failed": failed} + assert obtained == expected, outcomes + + +def assert_outcomes( + outcomes: Dict[str, int], + passed: int = 0, + skipped: int = 0, + failed: int = 0, + errors: int = 0, + xpassed: int = 0, + xfailed: int = 0, + warnings: Optional[int] = None, + deselected: Optional[int] = None, +) -> None: + """Assert that the specified outcomes appear with the respective + numbers (0 means it didn't occur) in the text output from a test run.""" + __tracebackhide__ = True + + obtained = { + "passed": outcomes.get("passed", 0), + "skipped": outcomes.get("skipped", 0), + "failed": outcomes.get("failed", 0), + "errors": outcomes.get("errors", 0), + "xpassed": outcomes.get("xpassed", 0), + "xfailed": outcomes.get("xfailed", 0), + } + expected = { + "passed": passed, + "skipped": skipped, + "failed": failed, + "errors": errors, + "xpassed": xpassed, + "xfailed": xfailed, + } + if warnings is not None: + obtained["warnings"] = outcomes.get("warnings", 0) + expected["warnings"] = warnings + if deselected is not None: + obtained["deselected"] = outcomes.get("deselected", 0) + expected["deselected"] = deselected + assert obtained == expected diff --git a/venv/lib/python3.10/site-packages/_pytest/python.py b/venv/lib/python3.10/site-packages/_pytest/python.py new file mode 100644 index 0000000..cd95193 --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/python.py @@ -0,0 +1,1807 @@ +"""Python test discovery, setup and run of test functions.""" +import enum +import fnmatch +import inspect +import 
itertools +import os +import sys +import types +import warnings +from collections import Counter +from collections import defaultdict +from functools import partial +from pathlib import Path +from typing import Any +from typing import Callable +from typing import Dict +from typing import Generator +from typing import Iterable +from typing import Iterator +from typing import List +from typing import Mapping +from typing import Optional +from typing import Pattern +from typing import Sequence +from typing import Set +from typing import Tuple +from typing import TYPE_CHECKING +from typing import Union + +import attr + +import _pytest +from _pytest import fixtures +from _pytest import nodes +from _pytest._code import filter_traceback +from _pytest._code import getfslineno +from _pytest._code.code import ExceptionInfo +from _pytest._code.code import TerminalRepr +from _pytest._io import TerminalWriter +from _pytest._io.saferepr import saferepr +from _pytest.compat import ascii_escaped +from _pytest.compat import assert_never +from _pytest.compat import final +from _pytest.compat import get_default_arg_names +from _pytest.compat import get_real_func +from _pytest.compat import getimfunc +from _pytest.compat import getlocation +from _pytest.compat import is_async_function +from _pytest.compat import is_generator +from _pytest.compat import LEGACY_PATH +from _pytest.compat import NOTSET +from _pytest.compat import safe_getattr +from _pytest.compat import safe_isclass +from _pytest.compat import STRING_TYPES +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config import hookimpl +from _pytest.config.argparsing import Parser +from _pytest.deprecated import check_ispytest +from _pytest.deprecated import FSCOLLECTOR_GETHOOKPROXY_ISINITPATH +from _pytest.deprecated import INSTANCE_COLLECTOR +from _pytest.fixtures import FuncFixtureInfo +from _pytest.main import Session +from _pytest.mark import MARK_GEN +from _pytest.mark import ParameterSet +from _pytest.mark.structures import get_unpacked_marks +from _pytest.mark.structures import Mark +from _pytest.mark.structures import MarkDecorator +from _pytest.mark.structures import normalize_mark_list +from _pytest.outcomes import fail +from _pytest.outcomes import skip +from _pytest.pathlib import bestrelpath +from _pytest.pathlib import fnmatch_ex +from _pytest.pathlib import import_path +from _pytest.pathlib import ImportPathMismatchError +from _pytest.pathlib import parts +from _pytest.pathlib import visit +from _pytest.scope import Scope +from _pytest.warning_types import PytestCollectionWarning +from _pytest.warning_types import PytestUnhandledCoroutineWarning + +if TYPE_CHECKING: + from typing_extensions import Literal + from _pytest.scope import _ScopeName + + +_PYTEST_DIR = Path(_pytest.__file__).parent + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("general") + group.addoption( + "--fixtures", + "--funcargs", + action="store_true", + dest="showfixtures", + default=False, + help="show available fixtures, sorted by plugin appearance " + "(fixtures with leading '_' are only shown with '-v')", + ) + group.addoption( + "--fixtures-per-test", + action="store_true", + dest="show_fixtures_per_test", + default=False, + help="show fixtures per test", + ) + parser.addini( + "python_files", + type="args", + # NOTE: default is also used in AssertionRewritingHook. 
+ default=["test_*.py", "*_test.py"], + help="glob-style file patterns for Python test module discovery", + ) + parser.addini( + "python_classes", + type="args", + default=["Test"], + help="prefixes or glob names for Python test class discovery", + ) + parser.addini( + "python_functions", + type="args", + default=["test"], + help="prefixes or glob names for Python test function and method discovery", + ) + parser.addini( + "disable_test_id_escaping_and_forfeit_all_rights_to_community_support", + type="bool", + default=False, + help="disable string escape non-ascii characters, might cause unwanted " + "side effects(use at your own risk)", + ) + + +def pytest_cmdline_main(config: Config) -> Optional[Union[int, ExitCode]]: + if config.option.showfixtures: + showfixtures(config) + return 0 + if config.option.show_fixtures_per_test: + show_fixtures_per_test(config) + return 0 + return None + + +def pytest_generate_tests(metafunc: "Metafunc") -> None: + for marker in metafunc.definition.iter_markers(name="parametrize"): + metafunc.parametrize(*marker.args, **marker.kwargs, _param_mark=marker) + + +def pytest_configure(config: Config) -> None: + config.addinivalue_line( + "markers", + "parametrize(argnames, argvalues): call a test function multiple " + "times passing in different arguments in turn. argvalues generally " + "needs to be a list of values if argnames specifies only one name " + "or a list of tuples of values if argnames specifies multiple names. " + "Example: @parametrize('arg1', [1,2]) would lead to two calls of the " + "decorated test function, one with arg1=1 and another with arg1=2." + "see https://docs.pytest.org/en/stable/how-to/parametrize.html for more info " + "and examples.", + ) + config.addinivalue_line( + "markers", + "usefixtures(fixturename1, fixturename2, ...): mark tests as needing " + "all of the specified fixtures. 
see " + "https://docs.pytest.org/en/stable/explanation/fixtures.html#usefixtures ", + ) + + +def async_warn_and_skip(nodeid: str) -> None: + msg = "async def functions are not natively supported and have been skipped.\n" + msg += ( + "You need to install a suitable plugin for your async framework, for example:\n" + ) + msg += " - anyio\n" + msg += " - pytest-asyncio\n" + msg += " - pytest-tornasync\n" + msg += " - pytest-trio\n" + msg += " - pytest-twisted" + warnings.warn(PytestUnhandledCoroutineWarning(msg.format(nodeid))) + skip(reason="async def function and no async plugin installed (see warnings)") + + +@hookimpl(trylast=True) +def pytest_pyfunc_call(pyfuncitem: "Function") -> Optional[object]: + testfunction = pyfuncitem.obj + if is_async_function(testfunction): + async_warn_and_skip(pyfuncitem.nodeid) + funcargs = pyfuncitem.funcargs + testargs = {arg: funcargs[arg] for arg in pyfuncitem._fixtureinfo.argnames} + result = testfunction(**testargs) + if hasattr(result, "__await__") or hasattr(result, "__aiter__"): + async_warn_and_skip(pyfuncitem.nodeid) + return True + + +def pytest_collect_file(file_path: Path, parent: nodes.Collector) -> Optional["Module"]: + if file_path.suffix == ".py": + if not parent.session.isinitpath(file_path): + if not path_matches_patterns( + file_path, parent.config.getini("python_files") + ["__init__.py"] + ): + return None + ihook = parent.session.gethookproxy(file_path) + module: Module = ihook.pytest_pycollect_makemodule( + module_path=file_path, parent=parent + ) + return module + return None + + +def path_matches_patterns(path: Path, patterns: Iterable[str]) -> bool: + """Return whether path matches any of the patterns in the list of globs given.""" + return any(fnmatch_ex(pattern, path) for pattern in patterns) + + +def pytest_pycollect_makemodule(module_path: Path, parent) -> "Module": + if module_path.name == "__init__.py": + pkg: Package = Package.from_parent(parent, path=module_path) + return pkg + mod: Module = Module.from_parent(parent, path=module_path) + return mod + + +@hookimpl(trylast=True) +def pytest_pycollect_makeitem( + collector: Union["Module", "Class"], name: str, obj: object +) -> Union[None, nodes.Item, nodes.Collector, List[Union[nodes.Item, nodes.Collector]]]: + assert isinstance(collector, (Class, Module)), type(collector) + # Nothing was collected elsewhere, let's do it here. + if safe_isclass(obj): + if collector.istestclass(obj, name): + klass: Class = Class.from_parent(collector, name=name, obj=obj) + return klass + elif collector.istestfunction(obj, name): + # mock seems to store unbound methods (issue473), normalize it. + obj = getattr(obj, "__func__", obj) + # We need to try and unwrap the function if it's a functools.partial + # or a functools.wrapped. + # We mustn't if it's been wrapped with mock.patch (python 2 only). + if not (inspect.isfunction(obj) or inspect.isfunction(get_real_func(obj))): + filename, lineno = getfslineno(obj) + warnings.warn_explicit( + message=PytestCollectionWarning( + "cannot collect %r because it is not a function." 
% name + ), + category=None, + filename=str(filename), + lineno=lineno + 1, + ) + elif getattr(obj, "__test__", True): + if is_generator(obj): + res: Function = Function.from_parent(collector, name=name) + reason = "yield tests were removed in pytest 4.0 - {name} will be ignored".format( + name=name + ) + res.add_marker(MARK_GEN.xfail(run=False, reason=reason)) + res.warn(PytestCollectionWarning(reason)) + return res + else: + return list(collector._genfunctions(name, obj)) + return None + + +class PyobjMixin(nodes.Node): + """this mix-in inherits from Node to carry over the typing information + + as its intended to always mix in before a node + its position in the mro is unaffected""" + + _ALLOW_MARKERS = True + + @property + def module(self): + """Python module object this node was collected from (can be None).""" + node = self.getparent(Module) + return node.obj if node is not None else None + + @property + def cls(self): + """Python class object this node was collected from (can be None).""" + node = self.getparent(Class) + return node.obj if node is not None else None + + @property + def instance(self): + """Python instance object the function is bound to. + + Returns None if not a test method, e.g. for a standalone test function, + a staticmethod, a class or a module. + """ + node = self.getparent(Function) + return getattr(node.obj, "__self__", None) if node is not None else None + + @property + def obj(self): + """Underlying Python object.""" + obj = getattr(self, "_obj", None) + if obj is None: + self._obj = obj = self._getobj() + # XXX evil hack + # used to avoid Function marker duplication + if self._ALLOW_MARKERS: + self.own_markers.extend(get_unpacked_marks(self.obj)) + # This assumes that `obj` is called before there is a chance + # to add custom keys to `self.keywords`, so no fear of overriding. + self.keywords.update((mark.name, mark) for mark in self.own_markers) + return obj + + @obj.setter + def obj(self, value): + self._obj = value + + def _getobj(self): + """Get the underlying Python object. May be overwritten by subclasses.""" + # TODO: Improve the type of `parent` such that assert/ignore aren't needed. + assert self.parent is not None + obj = self.parent.obj # type: ignore[attr-defined] + return getattr(obj, self.name) + + def getmodpath(self, stopatmodule: bool = True, includemodule: bool = False) -> str: + """Return Python path relative to the containing module.""" + chain = self.listchain() + chain.reverse() + parts = [] + for node in chain: + name = node.name + if isinstance(node, Module): + name = os.path.splitext(name)[0] + if stopatmodule: + if includemodule: + parts.append(name) + break + parts.append(name) + parts.reverse() + return ".".join(parts) + + def reportinfo(self) -> Tuple[Union["os.PathLike[str]", str], Optional[int], str]: + # XXX caching? + obj = self.obj + compat_co_firstlineno = getattr(obj, "compat_co_firstlineno", None) + if isinstance(compat_co_firstlineno, int): + # nose compatibility + file_path = sys.modules[obj.__module__].__file__ + assert file_path is not None + if file_path.endswith(".pyc"): + file_path = file_path[:-1] + path: Union["os.PathLike[str]", str] = file_path + lineno = compat_co_firstlineno + else: + path, lineno = getfslineno(obj) + modpath = self.getmodpath() + assert isinstance(lineno, int) + return path, lineno, modpath + + +# As an optimization, these builtin attribute names are pre-ignored when +# iterating over an object during collection -- the pytest_pycollect_makeitem +# hook is not called for them. 
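+# For instance, standard module attributes such as __name__ or __doc__ are in
+# dir(types.ModuleType("empty_module")) below, so they are skipped without a
+# hook call.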
+# fmt: off +class _EmptyClass: pass # noqa: E701 +IGNORED_ATTRIBUTES = frozenset.union( # noqa: E305 + frozenset(), + # Module. + dir(types.ModuleType("empty_module")), + # Some extra module attributes the above doesn't catch. + {"__builtins__", "__file__", "__cached__"}, + # Class. + dir(_EmptyClass), + # Instance. + dir(_EmptyClass()), +) +del _EmptyClass +# fmt: on + + +class PyCollector(PyobjMixin, nodes.Collector): + def funcnamefilter(self, name: str) -> bool: + return self._matches_prefix_or_glob_option("python_functions", name) + + def isnosetest(self, obj: object) -> bool: + """Look for the __test__ attribute, which is applied by the + @nose.tools.istest decorator. + """ + # We explicitly check for "is True" here to not mistakenly treat + # classes with a custom __getattr__ returning something truthy (like a + # function) as test classes. + return safe_getattr(obj, "__test__", False) is True + + def classnamefilter(self, name: str) -> bool: + return self._matches_prefix_or_glob_option("python_classes", name) + + def istestfunction(self, obj: object, name: str) -> bool: + if self.funcnamefilter(name) or self.isnosetest(obj): + if isinstance(obj, staticmethod): + # staticmethods need to be unwrapped. + obj = safe_getattr(obj, "__func__", False) + return callable(obj) and fixtures.getfixturemarker(obj) is None + else: + return False + + def istestclass(self, obj: object, name: str) -> bool: + return self.classnamefilter(name) or self.isnosetest(obj) + + def _matches_prefix_or_glob_option(self, option_name: str, name: str) -> bool: + """Check if the given name matches the prefix or glob-pattern defined + in ini configuration.""" + for option in self.config.getini(option_name): + if name.startswith(option): + return True + # Check that name looks like a glob-string before calling fnmatch + # because this is called for every name in each collected module, + # and fnmatch is somewhat expensive to call. + elif ("*" in option or "?" in option or "[" in option) and fnmatch.fnmatch( + name, option + ): + return True + return False + + def collect(self) -> Iterable[Union[nodes.Item, nodes.Collector]]: + if not getattr(self.obj, "__test__", True): + return [] + + # Avoid random getattrs and peek in the __dict__ instead. + dicts = [getattr(self.obj, "__dict__", {})] + if isinstance(self.obj, type): + for basecls in self.obj.__mro__: + dicts.append(basecls.__dict__) + + # In each class, nodes should be definition ordered. + # __dict__ is definition ordered. + seen: Set[str] = set() + dict_values: List[List[Union[nodes.Item, nodes.Collector]]] = [] + ihook = self.ihook + for dic in dicts: + values: List[Union[nodes.Item, nodes.Collector]] = [] + # Note: seems like the dict can change during iteration - + # be careful not to remove the list() without consideration. + for name, obj in list(dic.items()): + if name in IGNORED_ATTRIBUTES: + continue + if name in seen: + continue + seen.add(name) + res = ihook.pytest_pycollect_makeitem( + collector=self, name=name, obj=obj + ) + if res is None: + continue + elif isinstance(res, list): + values.extend(res) + else: + values.append(res) + dict_values.append(values) + + # Between classes in the class hierarchy, reverse-MRO order -- nodes + # inherited from base classes should come before subclasses. 
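+        # For example (illustrative): if Base defines test_base and
+        # TestSub(Base) defines test_sub, the MRO walk above collects the
+        # subclass __dict__ first, so reversing dict_values yields test_base
+        # before test_sub in the final result.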
+        result = []
+        for values in reversed(dict_values):
+            result.extend(values)
+        return result
+
+    def _genfunctions(self, name: str, funcobj) -> Iterator["Function"]:
+        modulecol = self.getparent(Module)
+        assert modulecol is not None
+        module = modulecol.obj
+        clscol = self.getparent(Class)
+        cls = clscol and clscol.obj or None
+
+        definition = FunctionDefinition.from_parent(self, name=name, callobj=funcobj)
+        fixtureinfo = definition._fixtureinfo
+
+        # pytest_generate_tests impls call metafunc.parametrize() which fills
+        # metafunc._calls, the outcome of the hook.
+        metafunc = Metafunc(
+            definition=definition,
+            fixtureinfo=fixtureinfo,
+            config=self.config,
+            cls=cls,
+            module=module,
+            _ispytest=True,
+        )
+        methods = []
+        if hasattr(module, "pytest_generate_tests"):
+            methods.append(module.pytest_generate_tests)
+        if cls is not None and hasattr(cls, "pytest_generate_tests"):
+            methods.append(cls().pytest_generate_tests)
+        self.ihook.pytest_generate_tests.call_extra(methods, dict(metafunc=metafunc))
+
+        if not metafunc._calls:
+            yield Function.from_parent(self, name=name, fixtureinfo=fixtureinfo)
+        else:
+            # Add funcargs() as fixturedefs to fixtureinfo.arg2fixturedefs.
+            fm = self.session._fixturemanager
+            fixtures.add_funcarg_pseudo_fixture_def(self, metafunc, fm)
+
+            # `add_funcarg_pseudo_fixture_def` may have shadowed some fixtures
+            # with direct parametrization, so make sure we update what the
+            # function really needs.
+            fixtureinfo.prune_dependency_tree()
+
+            for callspec in metafunc._calls:
+                subname = f"{name}[{callspec.id}]"
+                yield Function.from_parent(
+                    self,
+                    name=subname,
+                    callspec=callspec,
+                    fixtureinfo=fixtureinfo,
+                    keywords={callspec.id: True},
+                    originalname=name,
+                )
+
+
+class Module(nodes.File, PyCollector):
+    """Collector for test classes and functions."""
+
+    def _getobj(self):
+        return self._importtestmodule()
+
+    def collect(self) -> Iterable[Union[nodes.Item, nodes.Collector]]:
+        self._inject_setup_module_fixture()
+        self._inject_setup_function_fixture()
+        self.session._fixturemanager.parsefactories(self)
+        return super().collect()
+
+    def _inject_setup_module_fixture(self) -> None:
+        """Inject a hidden autouse, module-scoped fixture into the collected module object
+        that invokes setUpModule/tearDownModule if either or both are available.
+
+        Using a fixture to invoke these methods ensures we play nicely and unsurprisingly with
+        other fixtures (#517).
+        """
+        has_nose = self.config.pluginmanager.has_plugin("nose")
+        setup_module = _get_first_non_fixture_func(
+            self.obj, ("setUpModule", "setup_module")
+        )
+        if setup_module is None and has_nose:
+            # The name "setup" is too common - only treat it as the setup
+            # function if it is callable.
+            setup_module = _get_first_non_fixture_func(self.obj, ("setup",))
+            if not callable(setup_module):
+                setup_module = None
+        teardown_module = _get_first_non_fixture_func(
+            self.obj, ("tearDownModule", "teardown_module")
+        )
+        if teardown_module is None and has_nose:
+            teardown_module = _get_first_non_fixture_func(self.obj, ("teardown",))
+            # Same as "setup" above - only treat it as the teardown function
+            # if it is callable.
+            if not callable(teardown_module):
+                teardown_module = None
+
+        if setup_module is None and teardown_module is None:
+            return
+
+        @fixtures.fixture(
+            autouse=True,
+            scope="module",
+            # Use a unique name to speed up lookup.
+            name=f"_xunit_setup_module_fixture_{self.obj.__name__}",
+        )
+        def xunit_setup_module_fixture(request) -> Generator[None, None, None]:
+            if setup_module is not None:
+                _call_with_optional_argument(setup_module, request.module)
+            yield
+            if teardown_module is not None:
+                _call_with_optional_argument(teardown_module, request.module)
+
+        self.obj.__pytest_setup_module = xunit_setup_module_fixture
+
+    def _inject_setup_function_fixture(self) -> None:
+        """Inject a hidden autouse, function-scoped fixture into the collected module object
+        that invokes setup_function/teardown_function if either or both are available.
+
+        Using a fixture to invoke these methods ensures we play nicely and unsurprisingly with
+        other fixtures (#517).
+        """
+        setup_function = _get_first_non_fixture_func(self.obj, ("setup_function",))
+        teardown_function = _get_first_non_fixture_func(
+            self.obj, ("teardown_function",)
+        )
+        if setup_function is None and teardown_function is None:
+            return
+
+        @fixtures.fixture(
+            autouse=True,
+            scope="function",
+            # Use a unique name to speed up lookup.
+            name=f"_xunit_setup_function_fixture_{self.obj.__name__}",
+        )
+        def xunit_setup_function_fixture(request) -> Generator[None, None, None]:
+            if request.instance is not None:
+                # In this case we are bound to an instance, so we need to let
+                # setup_method handle this.
+                yield
+                return
+            if setup_function is not None:
+                _call_with_optional_argument(setup_function, request.function)
+            yield
+            if teardown_function is not None:
+                _call_with_optional_argument(teardown_function, request.function)
+
+        self.obj.__pytest_setup_function = xunit_setup_function_fixture
+
+    def _importtestmodule(self):
+        # We assume we are only called once per module.
+        importmode = self.config.getoption("--import-mode")
+        try:
+            mod = import_path(self.path, mode=importmode, root=self.config.rootpath)
+        except SyntaxError as e:
+            raise self.CollectError(
+                ExceptionInfo.from_current().getrepr(style="short")
+            ) from e
+        except ImportPathMismatchError as e:
+            raise self.CollectError(
+                "import file mismatch:\n"
+                "imported module %r has this __file__ attribute:\n"
+                "  %s\n"
+                "which is not the same as the test file we want to collect:\n"
+                "  %s\n"
+                "HINT: remove __pycache__ / .pyc files and/or use a "
+                "unique basename for your test file modules" % e.args
+            ) from e
+        except ImportError as e:
+            exc_info = ExceptionInfo.from_current()
+            if self.config.getoption("verbose") < 2:
+                exc_info.traceback = exc_info.traceback.filter(filter_traceback)
+            exc_repr = (
+                exc_info.getrepr(style="short")
+                if exc_info.traceback
+                else exc_info.exconly()
+            )
+            formatted_tb = str(exc_repr)
+            raise self.CollectError(
+                "ImportError while importing test module '{path}'.\n"
+                "Hint: make sure your test modules/packages have valid Python names.\n"
+                "Traceback:\n"
+                "{traceback}".format(path=self.path, traceback=formatted_tb)
+            ) from e
+        except skip.Exception as e:
+            if e.allow_module_level:
+                raise
+            raise self.CollectError(
+                "Using pytest.skip outside of a test will skip the entire module. "
+                "If that's your intention, pass `allow_module_level=True`. "
+                "If you want to skip a specific test or an entire class, "
+                "use the @pytest.mark.skip or @pytest.mark.skipif decorators."
+            ) from e
+        self.config.pluginmanager.consider_module(mod)
+        return mod
+
+
+class Package(Module):
+    def __init__(
+        self,
+        fspath: Optional[LEGACY_PATH],
+        parent: nodes.Collector,
+        # NOTE: following args are unused:
+        config=None,
+        session=None,
+        nodeid=None,
+        path: Optional[Path] = None,
+    ) -> None:
+        # NOTE: Could be just the following, but kept as-is for compat.
+        # nodes.FSCollector.__init__(self, fspath, parent=parent)
+        session = parent.session
+        nodes.FSCollector.__init__(
+            self,
+            fspath=fspath,
+            path=path,
+            parent=parent,
+            config=config,
+            session=session,
+            nodeid=nodeid,
+        )
+        self.name = self.path.parent.name
+
+    def setup(self) -> None:
+        # Not using fixtures to call setup_module here because autouse fixtures
+        # from packages are not called automatically (#4085).
+        setup_module = _get_first_non_fixture_func(
+            self.obj, ("setUpModule", "setup_module")
+        )
+        if setup_module is not None:
+            _call_with_optional_argument(setup_module, self.obj)
+
+        teardown_module = _get_first_non_fixture_func(
+            self.obj, ("tearDownModule", "teardown_module")
+        )
+        if teardown_module is not None:
+            func = partial(_call_with_optional_argument, teardown_module, self.obj)
+            self.addfinalizer(func)
+
+    def gethookproxy(self, fspath: "os.PathLike[str]"):
+        warnings.warn(FSCOLLECTOR_GETHOOKPROXY_ISINITPATH, stacklevel=2)
+        return self.session.gethookproxy(fspath)
+
+    def isinitpath(self, path: Union[str, "os.PathLike[str]"]) -> bool:
+        warnings.warn(FSCOLLECTOR_GETHOOKPROXY_ISINITPATH, stacklevel=2)
+        return self.session.isinitpath(path)
+
+    def _recurse(self, direntry: "os.DirEntry[str]") -> bool:
+        if direntry.name == "__pycache__":
+            return False
+        fspath = Path(direntry.path)
+        ihook = self.session.gethookproxy(fspath.parent)
+        if ihook.pytest_ignore_collect(collection_path=fspath, config=self.config):
+            return False
+        norecursepatterns = self.config.getini("norecursedirs")
+        if any(fnmatch_ex(pat, fspath) for pat in norecursepatterns):
+            return False
+        return True
+
+    def _collectfile(
+        self, fspath: Path, handle_dupes: bool = True
+    ) -> Sequence[nodes.Collector]:
+        assert (
+            fspath.is_file()
+        ), "{!r} is not a file (isdir={!r}, exists={!r}, islink={!r})".format(
+            fspath, fspath.is_dir(), fspath.exists(), fspath.is_symlink()
+        )
+        ihook = self.session.gethookproxy(fspath)
+        if not self.session.isinitpath(fspath):
+            if ihook.pytest_ignore_collect(collection_path=fspath, config=self.config):
+                return ()
+
+        if handle_dupes:
+            keepduplicates = self.config.getoption("keepduplicates")
+            if not keepduplicates:
+                duplicate_paths = self.config.pluginmanager._duplicatepaths
+                if fspath in duplicate_paths:
+                    return ()
+                else:
+                    duplicate_paths.add(fspath)
+
+        return ihook.pytest_collect_file(file_path=fspath, parent=self)  # type: ignore[no-any-return]
+
+    def collect(self) -> Iterable[Union[nodes.Item, nodes.Collector]]:
+        this_path = self.path.parent
+        init_module = this_path / "__init__.py"
+        if init_module.is_file() and path_matches_patterns(
+            init_module, self.config.getini("python_files")
+        ):
+            yield Module.from_parent(self, path=init_module)
+        pkg_prefixes: Set[Path] = set()
+        for direntry in visit(str(this_path), recurse=self._recurse):
+            path = Path(direntry.path)
+
+            # We will visit our own __init__.py file, in which case we skip it.
+            if direntry.is_file():
+                if direntry.name == "__init__.py" and path.parent == this_path:
+                    continue
+
+            parts_ = parts(direntry.path)
+            if any(
+                str(pkg_prefix) in parts_ and pkg_prefix / "__init__.py" != path
+                for pkg_prefix in pkg_prefixes
+            ):
+                continue
+
+            if direntry.is_file():
+                yield from self._collectfile(path)
+            elif not direntry.is_dir():
+                # Broken symlink or invalid/missing file.
+                continue
+            elif path.joinpath("__init__.py").is_file():
+                pkg_prefixes.add(path)
+
+
+def _call_with_optional_argument(func, arg) -> None:
+    """Call the given function with the given argument if func accepts one argument;
+    otherwise call func without arguments."""
+    arg_count = func.__code__.co_argcount
+    if inspect.ismethod(func):
+        arg_count -= 1
+    if arg_count:
+        func(arg)
+    else:
+        func()
+
+
+def _get_first_non_fixture_func(obj: object, names: Iterable[str]) -> Optional[object]:
+    """Return the attribute from the given object to be used as a setup/teardown
+    xunit-style function, but only if not marked as a fixture, to avoid calling it twice."""
+    for name in names:
+        meth: Optional[object] = getattr(obj, name, None)
+        if meth is not None and fixtures.getfixturemarker(meth) is None:
+            return meth
+    return None
+
+
+class Class(PyCollector):
+    """Collector for test methods."""
+
+    @classmethod
+    def from_parent(cls, parent, *, name, obj=None, **kw):
+        """The public constructor."""
+        return super().from_parent(name=name, parent=parent, **kw)
+
+    def newinstance(self):
+        return self.obj()
+
+    def collect(self) -> Iterable[Union[nodes.Item, nodes.Collector]]:
+        if not safe_getattr(self.obj, "__test__", True):
+            return []
+        if hasinit(self.obj):
+            assert self.parent is not None
+            self.warn(
+                PytestCollectionWarning(
+                    "cannot collect test class %r because it has a "
+                    "__init__ constructor (from: %s)"
+                    % (self.obj.__name__, self.parent.nodeid)
+                )
+            )
+            return []
+        elif hasnew(self.obj):
+            assert self.parent is not None
+            self.warn(
+                PytestCollectionWarning(
+                    "cannot collect test class %r because it has a "
+                    "__new__ constructor (from: %s)"
+                    % (self.obj.__name__, self.parent.nodeid)
+                )
+            )
+            return []
+
+        self._inject_setup_class_fixture()
+        self._inject_setup_method_fixture()
+
+        self.session._fixturemanager.parsefactories(self.newinstance(), self.nodeid)
+
+        return super().collect()
+
+    def _inject_setup_class_fixture(self) -> None:
+        """Inject a hidden autouse, class-scoped fixture into the collected class object
+        that invokes setup_class/teardown_class if either or both are available.
+
+        Using a fixture to invoke these methods ensures we play nicely and unsurprisingly with
+        other fixtures (#517).
+        """
+        setup_class = _get_first_non_fixture_func(self.obj, ("setup_class",))
+        teardown_class = getattr(self.obj, "teardown_class", None)
+        if setup_class is None and teardown_class is None:
+            return
+
+        @fixtures.fixture(
+            autouse=True,
+            scope="class",
+            # Use a unique name to speed up lookup.
+            name=f"_xunit_setup_class_fixture_{self.obj.__qualname__}",
+        )
+        def xunit_setup_class_fixture(cls) -> Generator[None, None, None]:
+            if setup_class is not None:
+                func = getimfunc(setup_class)
+                _call_with_optional_argument(func, self.obj)
+            yield
+            if teardown_class is not None:
+                func = getimfunc(teardown_class)
+                _call_with_optional_argument(func, self.obj)
+
+        self.obj.__pytest_setup_class = xunit_setup_class_fixture
+
+    def _inject_setup_method_fixture(self) -> None:
+        """Inject a hidden autouse, function-scoped fixture into the collected class object
+        that invokes setup_method/teardown_method if either or both are available.
+
+        Using a fixture to invoke these methods ensures we play nicely and unsurprisingly with
+        other fixtures (#517).
+        """
+        has_nose = self.config.pluginmanager.has_plugin("nose")
+        setup_name = "setup_method"
+        setup_method = _get_first_non_fixture_func(self.obj, (setup_name,))
+        if setup_method is None and has_nose:
+            setup_name = "setup"
+            setup_method = _get_first_non_fixture_func(self.obj, (setup_name,))
+        teardown_name = "teardown_method"
+        teardown_method = getattr(self.obj, teardown_name, None)
+        if teardown_method is None and has_nose:
+            teardown_name = "teardown"
+            teardown_method = getattr(self.obj, teardown_name, None)
+        if setup_method is None and teardown_method is None:
+            return
+
+        @fixtures.fixture(
+            autouse=True,
+            scope="function",
+            # Use a unique name to speed up lookup.
+            name=f"_xunit_setup_method_fixture_{self.obj.__qualname__}",
+        )
+        def xunit_setup_method_fixture(self, request) -> Generator[None, None, None]:
+            method = request.function
+            if setup_method is not None:
+                func = getattr(self, setup_name)
+                _call_with_optional_argument(func, method)
+            yield
+            if teardown_method is not None:
+                func = getattr(self, teardown_name)
+                _call_with_optional_argument(func, method)
+
+        self.obj.__pytest_setup_method = xunit_setup_method_fixture
+
+
+class InstanceDummy:
+    """Instance used to be a node type between Class and Function. It has been
+    removed in pytest 7.0. Some plugins exist which reference `pytest.Instance`
+    only to ignore it; this dummy class keeps them working. This will be removed
+    in pytest 8."""
+
+
+def __getattr__(name: str) -> object:
+    if name == "Instance":
+        warnings.warn(INSTANCE_COLLECTOR, stacklevel=2)
+        return InstanceDummy
+    raise AttributeError(f"module {__name__} has no attribute {name}")
+
+
+def hasinit(obj: object) -> bool:
+    init: object = getattr(obj, "__init__", None)
+    if init:
+        return init != object.__init__
+    return False
+
+
+def hasnew(obj: object) -> bool:
+    new: object = getattr(obj, "__new__", None)
+    if new:
+        return new != object.__new__
+    return False
+
+
+@final
+@attr.s(frozen=True, auto_attribs=True, slots=True)
+class IdMaker:
+    """Make IDs for a parametrization."""
+
+    # The argnames of the parametrization.
+    argnames: Sequence[str]
+    # The ParameterSets of the parametrization.
+    parametersets: Sequence[ParameterSet]
+    # Optionally, a user-provided callable to make IDs for parameters in a
+    # ParameterSet.
+    idfn: Optional[Callable[[Any], Optional[object]]]
+    # Optionally, explicit IDs for ParameterSets by index.
+    ids: Optional[Sequence[Optional[object]]]
+    # Optionally, the pytest config.
+    # Used for controlling ASCII escaping, and for calling the
+    # :hook:`pytest_make_parametrize_id` hook.
+    config: Optional[Config]
+    # Optionally, the ID of the node being parametrized.
+    # Used only for clearer error messages.
+    nodeid: Optional[str]
+    # Optionally, the ID of the function being parametrized.
+    # Used only for clearer error messages.
+    func_name: Optional[str]
+
+    def make_unique_parameterset_ids(self) -> List[str]:
+        """Make a unique identifier for each ParameterSet, that may be used to
+        identify the parametrization in a node ID.
+
+        Format is <prm_1_token>-...-<prm_n_token>[counter], where prm_x_token is
+        - user-provided id, if given
+        - else an id derived from the value, applicable for certain types
+        - else <argname><parameterset index>
+        The counter suffix is appended only in case a string wouldn't be unique
+        otherwise.
+        """
+        resolved_ids = list(self._resolve_ids())
+        # All IDs must be unique!
+        if len(resolved_ids) != len(set(resolved_ids)):
+            # Record the number of occurrences of each ID.
+            id_counts = Counter(resolved_ids)
+            # Map the ID to its next suffix.
+            id_suffixes: Dict[str, int] = defaultdict(int)
+            # Suffix non-unique IDs to make them unique.
+            for index, id in enumerate(resolved_ids):
+                if id_counts[id] > 1:
+                    resolved_ids[index] = f"{id}{id_suffixes[id]}"
+                    id_suffixes[id] += 1
+        return resolved_ids
+
+    def _resolve_ids(self) -> Iterable[str]:
+        """Resolve IDs for all ParameterSets (may contain duplicates)."""
+        for idx, parameterset in enumerate(self.parametersets):
+            if parameterset.id is not None:
+                # ID provided directly - pytest.param(..., id="...")
+                yield parameterset.id
+            elif self.ids and idx < len(self.ids) and self.ids[idx] is not None:
+                # ID provided in the IDs list - parametrize(..., ids=[...]).
+                yield self._idval_from_value_required(self.ids[idx], idx)
+            else:
+                # ID not provided - generate it.
+                yield "-".join(
+                    self._idval(val, argname, idx)
+                    for val, argname in zip(parameterset.values, self.argnames)
+                )
+
+    def _idval(self, val: object, argname: str, idx: int) -> str:
+        """Make an ID for a parameter in a ParameterSet."""
+        idval = self._idval_from_function(val, argname, idx)
+        if idval is not None:
+            return idval
+        idval = self._idval_from_hook(val, argname)
+        if idval is not None:
+            return idval
+        idval = self._idval_from_value(val)
+        if idval is not None:
+            return idval
+        return self._idval_from_argname(argname, idx)
+
+    def _idval_from_function(
+        self, val: object, argname: str, idx: int
+    ) -> Optional[str]:
+        """Try to make an ID for a parameter in a ParameterSet using the
+        user-provided id callable, if given."""
+        if self.idfn is None:
+            return None
+        try:
+            id = self.idfn(val)
+        except Exception as e:
+            prefix = f"{self.nodeid}: " if self.nodeid is not None else ""
+            msg = "error raised while trying to determine id of parameter '{}' at position {}"
+            msg = prefix + msg.format(argname, idx)
+            raise ValueError(msg) from e
+        if id is None:
+            return None
+        return self._idval_from_value(id)
+
+    def _idval_from_hook(self, val: object, argname: str) -> Optional[str]:
+        """Try to make an ID for a parameter in a ParameterSet by calling the
+        :hook:`pytest_make_parametrize_id` hook."""
+        if self.config:
+            id: Optional[str] = self.config.hook.pytest_make_parametrize_id(
+                config=self.config, val=val, argname=argname
+            )
+            return id
+        return None
+
+    def _idval_from_value(self, val: object) -> Optional[str]:
+        """Try to make an ID for a parameter in a ParameterSet from its value,
+        if the value type is supported."""
+        if isinstance(val, STRING_TYPES):
+            return _ascii_escaped_by_config(val, self.config)
+        elif val is None or isinstance(val, (float, int, bool, complex)):
+            return str(val)
+        elif isinstance(val, Pattern):
+            return ascii_escaped(val.pattern)
+        elif val is NOTSET:
+            # Fallback to default. Note that NOTSET is an enum.Enum.
+ pass + elif isinstance(val, enum.Enum): + return str(val) + elif isinstance(getattr(val, "__name__", None), str): + # Name of a class, function, module, etc. + name: str = getattr(val, "__name__") + return name + return None + + def _idval_from_value_required(self, val: object, idx: int) -> str: + """Like _idval_from_value(), but fails if the type is not supported.""" + id = self._idval_from_value(val) + if id is not None: + return id + + # Fail. + if self.func_name is not None: + prefix = f"In {self.func_name}: " + elif self.nodeid is not None: + prefix = f"In {self.nodeid}: " + else: + prefix = "" + msg = ( + f"{prefix}ids contains unsupported value {saferepr(val)} (type: {type(val)!r}) at index {idx}. " + "Supported types are: str, bytes, int, float, complex, bool, enum, regex or anything with a __name__." + ) + fail(msg, pytrace=False) + + @staticmethod + def _idval_from_argname(argname: str, idx: int) -> str: + """Make an ID for a parameter in a ParameterSet from the argument name + and the index of the ParameterSet.""" + return str(argname) + str(idx) + + +@final +@attr.s(frozen=True, slots=True, auto_attribs=True) +class CallSpec2: + """A planned parameterized invocation of a test function. + + Calculated during collection for a given test function's Metafunc. + Once collection is over, each callspec is turned into a single Item + and stored in item.callspec. + """ + + # arg name -> arg value which will be passed to the parametrized test + # function (direct parameterization). + funcargs: Dict[str, object] = attr.Factory(dict) + # arg name -> arg value which will be passed to a fixture of the same name + # (indirect parametrization). + params: Dict[str, object] = attr.Factory(dict) + # arg name -> arg index. + indices: Dict[str, int] = attr.Factory(dict) + # Used for sorting parametrized resources. + _arg2scope: Dict[str, Scope] = attr.Factory(dict) + # Parts which will be added to the item's name in `[..]` separated by "-". + _idlist: List[str] = attr.Factory(list) + # Marks which will be applied to the item. + marks: List[Mark] = attr.Factory(list) + + def setmulti( + self, + *, + valtypes: Mapping[str, "Literal['params', 'funcargs']"], + argnames: Iterable[str], + valset: Iterable[object], + id: str, + marks: Iterable[Union[Mark, MarkDecorator]], + scope: Scope, + param_index: int, + ) -> "CallSpec2": + funcargs = self.funcargs.copy() + params = self.params.copy() + indices = self.indices.copy() + arg2scope = self._arg2scope.copy() + for arg, val in zip(argnames, valset): + if arg in params or arg in funcargs: + raise ValueError(f"duplicate {arg!r}") + valtype_for_arg = valtypes[arg] + if valtype_for_arg == "params": + params[arg] = val + elif valtype_for_arg == "funcargs": + funcargs[arg] = val + else: + assert_never(valtype_for_arg) + indices[arg] = param_index + arg2scope[arg] = scope + return CallSpec2( + funcargs=funcargs, + params=params, + arg2scope=arg2scope, + indices=indices, + idlist=[*self._idlist, id], + marks=[*self.marks, *normalize_mark_list(marks)], + ) + + def getparam(self, name: str) -> object: + try: + return self.params[name] + except KeyError as e: + raise ValueError(name) from e + + @property + def id(self) -> str: + return "-".join(self._idlist) + + +@final +class Metafunc: + """Objects passed to the :hook:`pytest_generate_tests` hook. + + They help to inspect a test function and to generate tests according to + test configuration or values specified in the class or module where a + test function is defined. 
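+
+    A minimal sketch of a typical hook implementation (``stringinput`` is an
+    assumed argument name, shown only for illustration)::
+
+        def pytest_generate_tests(metafunc):
+            if "stringinput" in metafunc.fixturenames:
+                metafunc.parametrize("stringinput", ["a", "b"])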
+    """
+
+    def __init__(
+        self,
+        definition: "FunctionDefinition",
+        fixtureinfo: fixtures.FuncFixtureInfo,
+        config: Config,
+        cls=None,
+        module=None,
+        *,
+        _ispytest: bool = False,
+    ) -> None:
+        check_ispytest(_ispytest)
+
+        #: Access to the underlying :class:`_pytest.python.FunctionDefinition`.
+        self.definition = definition
+
+        #: Access to the :class:`pytest.Config` object for the test session.
+        self.config = config
+
+        #: The module object where the test function is defined in.
+        self.module = module
+
+        #: Underlying Python test function.
+        self.function = definition.obj
+
+        #: Set of fixture names required by the test function.
+        self.fixturenames = fixtureinfo.names_closure
+
+        #: Class object where the test function is defined in or ``None``.
+        self.cls = cls
+
+        self._arg2fixturedefs = fixtureinfo.name2fixturedefs
+
+        # Result of parametrize().
+        self._calls: List[CallSpec2] = []
+
+    def parametrize(
+        self,
+        argnames: Union[str, List[str], Tuple[str, ...]],
+        argvalues: Iterable[Union[ParameterSet, Sequence[object], object]],
+        indirect: Union[bool, Sequence[str]] = False,
+        ids: Optional[
+            Union[Iterable[Optional[object]], Callable[[Any], Optional[object]]]
+        ] = None,
+        scope: "Optional[_ScopeName]" = None,
+        *,
+        _param_mark: Optional[Mark] = None,
+    ) -> None:
+        """Add new invocations to the underlying test function using the list
+        of argvalues for the given argnames. Parametrization is performed
+        during the collection phase. If you need to set up expensive resources,
+        consider passing ``indirect`` so that the setup happens during test
+        setup rather than at collection time.
+
+        Can be called multiple times, in which case each call parametrizes all
+        previous parametrizations, e.g.
+
+        ::
+
+            unparametrized:         t
+            parametrize ["x", "y"]: t[x], t[y]
+            parametrize [1, 2]:     t[x-1], t[x-2], t[y-1], t[y-2]
+
+        :param argnames:
+            A comma-separated string denoting one or more argument names, or
+            a list/tuple of argument strings.
+
+        :param argvalues:
+            The list of argvalues determines how often a test is invoked with
+            different argument values.
+
+            If only one argname was specified argvalues is a list of values.
+            If N argnames were specified, argvalues must be a list of
+            N-tuples, where each tuple-element specifies a value for its
+            respective argname.
+
+        :param indirect:
+            A list of arguments' names (subset of argnames) or a boolean.
+            If True the list contains all names from the argnames. Each
+            argvalue corresponding to an argname in this list will
+            be passed as request.param to its respective argname fixture
+            function so that it can perform more expensive setups during the
+            setup phase of a test rather than at collection time.
+
+        :param ids:
+            Sequence of (or generator for) ids for ``argvalues``,
+            or a callable to return part of the id for each argvalue.
+
+            With sequences (and generators like ``itertools.count()``) the
+            returned ids should be of type ``string``, ``int``, ``float``,
+            ``bool``, or ``None``.
+            They are mapped to the corresponding index in ``argvalues``.
+            ``None`` means to use the auto-generated id.
+
+            If it is a callable it will be called for each entry in
+            ``argvalues``, and the return value is used as part of the
+            auto-generated id for the whole set (where parts are joined with
+            dashes ("-")).
+            This is useful to provide more specific ids for certain items, e.g.
+            dates. Returning ``None`` will use an auto-generated id.
+
+            If no ids are provided they will be generated automatically from
+            the argvalues.
+
+        :param scope:
+            If specified it denotes the scope of the parameters.
+            The scope is used for grouping tests by parameter instances.
+            It will also override any fixture-function defined scope, allowing
+            to set a dynamic scope using test context or configuration.
+        """
+        argnames, parametersets = ParameterSet._for_parametrize(
+            argnames,
+            argvalues,
+            self.function,
+            self.config,
+            nodeid=self.definition.nodeid,
+        )
+        del argvalues
+
+        if "request" in argnames:
+            fail(
+                "'request' is a reserved name and cannot be used in @pytest.mark.parametrize",
+                pytrace=False,
+            )
+
+        if scope is not None:
+            scope_ = Scope.from_user(
+                scope, descr=f"parametrize() call in {self.function.__name__}"
+            )
+        else:
+            scope_ = _find_parametrized_scope(argnames, self._arg2fixturedefs, indirect)
+
+        self._validate_if_using_arg_names(argnames, indirect)
+
+        arg_values_types = self._resolve_arg_value_types(argnames, indirect)
+
+        # Use any already (possibly) generated ids with parametrize Marks.
+        if _param_mark and _param_mark._param_ids_from:
+            generated_ids = _param_mark._param_ids_from._param_ids_generated
+            if generated_ids is not None:
+                ids = generated_ids
+
+        ids = self._resolve_parameter_set_ids(
+            argnames, ids, parametersets, nodeid=self.definition.nodeid
+        )
+
+        # Store used (possibly generated) ids with parametrize Marks.
+        if _param_mark and _param_mark._param_ids_from and generated_ids is None:
+            object.__setattr__(_param_mark._param_ids_from, "_param_ids_generated", ids)
+
+        # Create the new calls: if parametrize() was applied multiple times (by
+        # using the decorator more than once) we accumulate those calls,
+        # generating the cartesian product of all of them.
+        newcalls = []
+        for callspec in self._calls or [CallSpec2()]:
+            for param_index, (param_id, param_set) in enumerate(
+                zip(ids, parametersets)
+            ):
+                newcallspec = callspec.setmulti(
+                    valtypes=arg_values_types,
+                    argnames=argnames,
+                    valset=param_set.values,
+                    id=param_id,
+                    marks=param_set.marks,
+                    scope=scope_,
+                    param_index=param_index,
+                )
+                newcalls.append(newcallspec)
+        self._calls = newcalls
+
+    def _resolve_parameter_set_ids(
+        self,
+        argnames: Sequence[str],
+        ids: Optional[
+            Union[Iterable[Optional[object]], Callable[[Any], Optional[object]]]
+        ],
+        parametersets: Sequence[ParameterSet],
+        nodeid: str,
+    ) -> List[str]:
+        """Resolve the actual ids for the given parameter sets.
+
+        :param argnames:
+            Argument names passed to ``parametrize()``.
+        :param ids:
+            The `ids` parameter of the ``parametrize()`` call (see docs).
+        :param parametersets:
+            The parameter sets, each containing a set of values corresponding
+            to ``argnames``.
+        :param str nodeid:
+            The nodeid of the definition item that generated this
+            parametrization.
+        :returns:
+            List with ids for each parameter set given.
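+
+        For example (illustrative only), ``parametrize("x", [0, 1],
+        ids=["zero", None])`` resolves to ``["zero", "1"]``: the explicit id
+        is kept, and the ``None`` entry falls back to an id derived from the
+        value.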
+        """
+        if ids is None:
+            idfn = None
+            ids_ = None
+        elif callable(ids):
+            idfn = ids
+            ids_ = None
+        else:
+            idfn = None
+            ids_ = self._validate_ids(ids, parametersets, self.function.__name__)
+        id_maker = IdMaker(
+            argnames,
+            parametersets,
+            idfn,
+            ids_,
+            self.config,
+            nodeid=nodeid,
+            func_name=self.function.__name__,
+        )
+        return id_maker.make_unique_parameterset_ids()
+
+    def _validate_ids(
+        self,
+        ids: Iterable[Optional[object]],
+        parametersets: Sequence[ParameterSet],
+        func_name: str,
+    ) -> List[Optional[object]]:
+        try:
+            num_ids = len(ids)  # type: ignore[arg-type]
+        except TypeError:
+            try:
+                iter(ids)
+            except TypeError as e:
+                raise TypeError("ids must be a callable or an iterable") from e
+            num_ids = len(parametersets)
+
+        # num_ids == 0 is a special case: https://github.com/pytest-dev/pytest/issues/1849
+        if num_ids != len(parametersets) and num_ids != 0:
+            msg = "In {}: {} parameter sets specified, with different number of ids: {}"
+            fail(msg.format(func_name, len(parametersets), num_ids), pytrace=False)
+
+        return list(itertools.islice(ids, num_ids))
+
+    def _resolve_arg_value_types(
+        self,
+        argnames: Sequence[str],
+        indirect: Union[bool, Sequence[str]],
+    ) -> Dict[str, "Literal['params', 'funcargs']"]:
+        """Resolve whether each parametrized argument must be considered a
+        parameter to a fixture or a "funcarg" to the function, based on the
+        ``indirect`` parameter of the ``parametrize()`` call.
+
+        :param List[str] argnames: List of argument names passed to ``parametrize()``.
+        :param indirect: Same as the ``indirect`` parameter of ``parametrize()``.
+        :rtype: Dict[str, str]
+            A dict mapping each arg name to either:
+            * "params" if the argname should be the parameter of a fixture of the same name.
+            * "funcargs" if the argname should be a parameter to the parametrized test function.
+        """
+        if isinstance(indirect, bool):
+            valtypes: Dict[str, Literal["params", "funcargs"]] = dict.fromkeys(
+                argnames, "params" if indirect else "funcargs"
+            )
+        elif isinstance(indirect, Sequence):
+            valtypes = dict.fromkeys(argnames, "funcargs")
+            for arg in indirect:
+                if arg not in argnames:
+                    fail(
+                        "In {}: indirect fixture '{}' doesn't exist".format(
+                            self.function.__name__, arg
+                        ),
+                        pytrace=False,
+                    )
+                valtypes[arg] = "params"
+        else:
+            fail(
+                "In {func}: expected Sequence or boolean for indirect, got {type}".format(
+                    type=type(indirect).__name__, func=self.function.__name__
+                ),
+                pytrace=False,
+            )
+        return valtypes
+
+    def _validate_if_using_arg_names(
+        self,
+        argnames: Sequence[str],
+        indirect: Union[bool, Sequence[str]],
+    ) -> None:
+        """Check that every argname is actually used by the function, directly
+        or as a fixture, and does not clash with an argument that already has
+        a default value.
+
+        :param List[str] argnames: List of argument names passed to ``parametrize()``.
+        :param indirect: Same as the ``indirect`` parameter of ``parametrize()``.
+        :raises ValueError: If validation fails.
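+
+        For example (illustrative only), parametrizing ``"y"`` for
+        ``def test_foo(x): ...`` fails with ``In test_foo: function uses no
+        argument 'y'``.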
+ """ + default_arg_names = set(get_default_arg_names(self.function)) + func_name = self.function.__name__ + for arg in argnames: + if arg not in self.fixturenames: + if arg in default_arg_names: + fail( + "In {}: function already takes an argument '{}' with a default value".format( + func_name, arg + ), + pytrace=False, + ) + else: + if isinstance(indirect, Sequence): + name = "fixture" if arg in indirect else "argument" + else: + name = "fixture" if indirect else "argument" + fail( + f"In {func_name}: function uses no {name} '{arg}'", + pytrace=False, + ) + + +def _find_parametrized_scope( + argnames: Sequence[str], + arg2fixturedefs: Mapping[str, Sequence[fixtures.FixtureDef[object]]], + indirect: Union[bool, Sequence[str]], +) -> Scope: + """Find the most appropriate scope for a parametrized call based on its arguments. + + When there's at least one direct argument, always use "function" scope. + + When a test function is parametrized and all its arguments are indirect + (e.g. fixtures), return the most narrow scope based on the fixtures used. + + Related to issue #1832, based on code posted by @Kingdread. + """ + if isinstance(indirect, Sequence): + all_arguments_are_fixtures = len(indirect) == len(argnames) + else: + all_arguments_are_fixtures = bool(indirect) + + if all_arguments_are_fixtures: + fixturedefs = arg2fixturedefs or {} + used_scopes = [ + fixturedef[0]._scope + for name, fixturedef in fixturedefs.items() + if name in argnames + ] + # Takes the most narrow scope from used fixtures. + return min(used_scopes, default=Scope.Function) + + return Scope.Function + + +def _ascii_escaped_by_config(val: Union[str, bytes], config: Optional[Config]) -> str: + if config is None: + escape_option = False + else: + escape_option = config.getini( + "disable_test_id_escaping_and_forfeit_all_rights_to_community_support" + ) + # TODO: If escaping is turned off and the user passes bytes, + # will return a bytes. For now we ignore this but the + # code *probably* doesn't handle this case. + return val if escape_option else ascii_escaped(val) # type: ignore + + +def _pretty_fixture_path(func) -> str: + cwd = Path.cwd() + loc = Path(getlocation(func, str(cwd))) + prefix = Path("...", "_pytest") + try: + return str(prefix / loc.relative_to(_PYTEST_DIR)) + except ValueError: + return bestrelpath(cwd, loc) + + +def show_fixtures_per_test(config): + from _pytest.main import wrap_session + + return wrap_session(config, _show_fixtures_per_test) + + +def _show_fixtures_per_test(config: Config, session: Session) -> None: + import _pytest.config + + session.perform_collect() + curdir = Path.cwd() + tw = _pytest.config.create_terminal_writer(config) + verbose = config.getvalue("verbose") + + def get_best_relpath(func) -> str: + loc = getlocation(func, str(curdir)) + return bestrelpath(curdir, Path(loc)) + + def write_fixture(fixture_def: fixtures.FixtureDef[object]) -> None: + argname = fixture_def.argname + if verbose <= 0 and argname.startswith("_"): + return + prettypath = _pretty_fixture_path(fixture_def.func) + tw.write(f"{argname}", green=True) + tw.write(f" -- {prettypath}", yellow=True) + tw.write("\n") + fixture_doc = inspect.getdoc(fixture_def.func) + if fixture_doc: + write_docstring( + tw, fixture_doc.split("\n\n")[0] if verbose <= 0 else fixture_doc + ) + else: + tw.line(" no docstring available", red=True) + + def write_item(item: nodes.Item) -> None: + # Not all items have _fixtureinfo attribute. 
+        info: Optional[FuncFixtureInfo] = getattr(item, "_fixtureinfo", None)
+        if info is None or not info.name2fixturedefs:
+            # This test item does not use any fixtures.
+            return
+        tw.line()
+        tw.sep("-", f"fixtures used by {item.name}")
+        # TODO: Fix this type ignore.
+        tw.sep("-", f"({get_best_relpath(item.function)})")  # type: ignore[attr-defined]
+        # dict key not used in loop but needed for sorting.
+        for _, fixturedefs in sorted(info.name2fixturedefs.items()):
+            assert fixturedefs is not None
+            if not fixturedefs:
+                continue
+            # Last item is expected to be the one used by the test item.
+            write_fixture(fixturedefs[-1])
+
+    for session_item in session.items:
+        write_item(session_item)
+
+
+def showfixtures(config: Config) -> Union[int, ExitCode]:
+    from _pytest.main import wrap_session
+
+    return wrap_session(config, _showfixtures_main)
+
+
+def _showfixtures_main(config: Config, session: Session) -> None:
+    import _pytest.config
+
+    session.perform_collect()
+    curdir = Path.cwd()
+    tw = _pytest.config.create_terminal_writer(config)
+    verbose = config.getvalue("verbose")
+
+    fm = session._fixturemanager
+
+    available = []
+    seen: Set[Tuple[str, str]] = set()
+
+    for argname, fixturedefs in fm._arg2fixturedefs.items():
+        assert fixturedefs is not None
+        if not fixturedefs:
+            continue
+        for fixturedef in fixturedefs:
+            loc = getlocation(fixturedef.func, str(curdir))
+            if (fixturedef.argname, loc) in seen:
+                continue
+            seen.add((fixturedef.argname, loc))
+            available.append(
+                (
+                    len(fixturedef.baseid),
+                    fixturedef.func.__module__,
+                    _pretty_fixture_path(fixturedef.func),
+                    fixturedef.argname,
+                    fixturedef,
+                )
+            )
+
+    available.sort()
+    currentmodule = None
+    for baseid, module, prettypath, argname, fixturedef in available:
+        if currentmodule != module:
+            if not module.startswith("_pytest."):
+                tw.line()
+                tw.sep("-", f"fixtures defined from {module}")
+                currentmodule = module
+        if verbose <= 0 and argname.startswith("_"):
+            continue
+        tw.write(f"{argname}", green=True)
+        if fixturedef.scope != "function":
+            tw.write(" [%s scope]" % fixturedef.scope, cyan=True)
+        tw.write(f" -- {prettypath}", yellow=True)
+        tw.write("\n")
+        doc = inspect.getdoc(fixturedef.func)
+        if doc:
+            write_docstring(tw, doc.split("\n\n")[0] if verbose <= 0 else doc)
+        else:
+            tw.line("    no docstring available", red=True)
+    tw.line()
+
+
+def write_docstring(tw: TerminalWriter, doc: str, indent: str = "    ") -> None:
+    for line in doc.split("\n"):
+        tw.line(indent + line)
+
+
+class Function(PyobjMixin, nodes.Item):
+    """An Item responsible for setting up and executing a Python test function.
+
+    :param name:
+        The full function name, including any decorations like those
+        added by parametrization (``my_func[my_param]``).
+    :param parent:
+        The parent Node.
+    :param config:
+        The pytest Config object.
+    :param callspec:
+        If given, this function has been parametrized and the callspec contains
+        meta information about the parametrization.
+    :param callobj:
+        If given, the object which will be called when the Function is invoked,
+        otherwise the callobj will be obtained from ``parent`` using ``originalname``.
+    :param keywords:
+        Keywords bound to the function object for "-k" matching.
+    :param session:
+        The pytest Session object.
+    :param fixtureinfo:
+        Fixture information already resolved at this fixture node.
+    :param originalname:
+        The attribute name to use for accessing the underlying function object.
+        Defaults to ``name``.
Set this if name is different from the original name, + for example when it contains decorations like those added by parametrization + (``my_func[my_param]``). + """ + + # Disable since functions handle it themselves. + _ALLOW_MARKERS = False + + def __init__( + self, + name: str, + parent, + config: Optional[Config] = None, + callspec: Optional[CallSpec2] = None, + callobj=NOTSET, + keywords: Optional[Mapping[str, Any]] = None, + session: Optional[Session] = None, + fixtureinfo: Optional[FuncFixtureInfo] = None, + originalname: Optional[str] = None, + ) -> None: + super().__init__(name, parent, config=config, session=session) + + if callobj is not NOTSET: + self.obj = callobj + + #: Original function name, without any decorations (for example + #: parametrization adds a ``"[...]"`` suffix to function names), used to access + #: the underlying function object from ``parent`` (in case ``callobj`` is not given + #: explicitly). + #: + #: .. versionadded:: 3.0 + self.originalname = originalname or name + + # Note: when FunctionDefinition is introduced, we should change ``originalname`` + # to a readonly property that returns FunctionDefinition.name. + + self.own_markers.extend(get_unpacked_marks(self.obj)) + if callspec: + self.callspec = callspec + self.own_markers.extend(callspec.marks) + + # todo: this is a hell of a hack + # https://github.com/pytest-dev/pytest/issues/4569 + # Note: the order of the updates is important here; indicates what + # takes priority (ctor argument over function attributes over markers). + # Take own_markers only; NodeKeywords handles parent traversal on its own. + self.keywords.update((mark.name, mark) for mark in self.own_markers) + self.keywords.update(self.obj.__dict__) + if keywords: + self.keywords.update(keywords) + + if fixtureinfo is None: + fixtureinfo = self.session._fixturemanager.getfixtureinfo( + self, self.obj, self.cls, funcargs=True + ) + self._fixtureinfo: FuncFixtureInfo = fixtureinfo + self.fixturenames = fixtureinfo.names_closure + self._initrequest() + + @classmethod + def from_parent(cls, parent, **kw): # todo: determine sound type limitations + """The public constructor.""" + return super().from_parent(parent=parent, **kw) + + def _initrequest(self) -> None: + self.funcargs: Dict[str, object] = {} + self._request = fixtures.FixtureRequest(self, _ispytest=True) + + @property + def function(self): + """Underlying python 'function' object.""" + return getimfunc(self.obj) + + def _getobj(self): + assert self.parent is not None + if isinstance(self.parent, Class): + # Each Function gets a fresh class instance. 
+            parent_obj = self.parent.newinstance()
+        else:
+            parent_obj = self.parent.obj  # type: ignore[attr-defined]
+        return getattr(parent_obj, self.originalname)
+
+    @property
+    def _pyfuncitem(self):
+        """(compatonly) for code expecting pytest-2.2 style request objects."""
+        return self
+
+    def runtest(self) -> None:
+        """Execute the underlying test function."""
+        self.ihook.pytest_pyfunc_call(pyfuncitem=self)
+
+    def setup(self) -> None:
+        self._request._fillfixtures()
+
+    def _prunetraceback(self, excinfo: ExceptionInfo[BaseException]) -> None:
+        if hasattr(self, "_obj") and not self.config.getoption("fulltrace", False):
+            code = _pytest._code.Code.from_function(get_real_func(self.obj))
+            path, firstlineno = code.path, code.firstlineno
+            traceback = excinfo.traceback
+            ntraceback = traceback.cut(path=path, firstlineno=firstlineno)
+            if ntraceback == traceback:
+                ntraceback = ntraceback.cut(path=path)
+                if ntraceback == traceback:
+                    ntraceback = ntraceback.filter(filter_traceback)
+                    if not ntraceback:
+                        ntraceback = traceback
+
+            excinfo.traceback = ntraceback.filter()
+            # issue364: mark all but first and last frames to
+            # only show a single-line message for each frame.
+            if self.config.getoption("tbstyle", "auto") == "auto":
+                if len(excinfo.traceback) > 2:
+                    for entry in excinfo.traceback[1:-1]:
+                        entry.set_repr_style("short")
+
+    # TODO: Type ignored -- breaks Liskov Substitution.
+    def repr_failure(  # type: ignore[override]
+        self,
+        excinfo: ExceptionInfo[BaseException],
+    ) -> Union[str, TerminalRepr]:
+        style = self.config.getoption("tbstyle", "auto")
+        if style == "auto":
+            style = "long"
+        return self._repr_failure_py(excinfo, style=style)
+
+
+class FunctionDefinition(Function):
+    """
+    This class is a stopgap solution until we evolve to have actual function
+    definition nodes and manage to get rid of ``metafunc``.
+ """ + + def runtest(self) -> None: + raise RuntimeError("function definitions are not supposed to be run as tests") + + setup = runtest diff --git a/venv/lib/python3.10/site-packages/_pytest/python_api.py b/venv/lib/python3.10/site-packages/_pytest/python_api.py new file mode 100644 index 0000000..5fa2196 --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/python_api.py @@ -0,0 +1,975 @@ +import math +import pprint +from collections.abc import Collection +from collections.abc import Sized +from decimal import Decimal +from numbers import Complex +from types import TracebackType +from typing import Any +from typing import Callable +from typing import cast +from typing import Generic +from typing import List +from typing import Mapping +from typing import Optional +from typing import overload +from typing import Pattern +from typing import Sequence +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +if TYPE_CHECKING: + from numpy import ndarray + + +import _pytest._code +from _pytest.compat import final +from _pytest.compat import STRING_TYPES +from _pytest.outcomes import fail + + +def _non_numeric_type_error(value, at: Optional[str]) -> TypeError: + at_str = f" at {at}" if at else "" + return TypeError( + "cannot make approximate comparisons to non-numeric values: {!r} {}".format( + value, at_str + ) + ) + + +def _compare_approx( + full_object: object, + message_data: Sequence[Tuple[str, str, str]], + number_of_elements: int, + different_ids: Sequence[object], + max_abs_diff: float, + max_rel_diff: float, +) -> List[str]: + message_list = list(message_data) + message_list.insert(0, ("Index", "Obtained", "Expected")) + max_sizes = [0, 0, 0] + for index, obtained, expected in message_list: + max_sizes[0] = max(max_sizes[0], len(index)) + max_sizes[1] = max(max_sizes[1], len(obtained)) + max_sizes[2] = max(max_sizes[2], len(expected)) + explanation = [ + f"comparison failed. Mismatched elements: {len(different_ids)} / {number_of_elements}:", + f"Max absolute difference: {max_abs_diff}", + f"Max relative difference: {max_rel_diff}", + ] + [ + f"{indexes:<{max_sizes[0]}} | {obtained:<{max_sizes[1]}} | {expected:<{max_sizes[2]}}" + for indexes, obtained, expected in message_list + ] + return explanation + + +# builtin pytest.approx helper + + +class ApproxBase: + """Provide shared utilities for making approximate comparisons between + numbers or sequences of numbers.""" + + # Tell numpy to use our `__eq__` operator instead of its. + __array_ufunc__ = None + __array_priority__ = 100 + + def __init__(self, expected, rel=None, abs=None, nan_ok: bool = False) -> None: + __tracebackhide__ = True + self.expected = expected + self.abs = abs + self.rel = rel + self.nan_ok = nan_ok + self._check_type() + + def __repr__(self) -> str: + raise NotImplementedError + + def _repr_compare(self, other_side: Any) -> List[str]: + return [ + "comparison failed", + f"Obtained: {other_side}", + f"Expected: {self}", + ] + + def __eq__(self, actual) -> bool: + return all( + a == self._approx_scalar(x) for a, x in self._yield_comparisons(actual) + ) + + def __bool__(self): + __tracebackhide__ = True + raise AssertionError( + "approx() is not supported in a boolean context.\nDid you mean: `assert a == approx(b)`?" + ) + + # Ignore type because of https://github.com/python/mypy/issues/4266. 
+ __hash__ = None # type: ignore + + def __ne__(self, actual) -> bool: + return not (actual == self) + + def _approx_scalar(self, x) -> "ApproxScalar": + if isinstance(x, Decimal): + return ApproxDecimal(x, rel=self.rel, abs=self.abs, nan_ok=self.nan_ok) + return ApproxScalar(x, rel=self.rel, abs=self.abs, nan_ok=self.nan_ok) + + def _yield_comparisons(self, actual): + """Yield all the pairs of numbers to be compared. + + This is used to implement the `__eq__` method. + """ + raise NotImplementedError + + def _check_type(self) -> None: + """Raise a TypeError if the expected value is not a valid type.""" + # This is only a concern if the expected value is a sequence. In every + # other case, the approx() function ensures that the expected value has + # a numeric type. For this reason, the default is to do nothing. The + # classes that deal with sequences should reimplement this method to + # raise if there are any non-numeric elements in the sequence. + + +def _recursive_list_map(f, x): + if isinstance(x, list): + return [_recursive_list_map(f, xi) for xi in x] + else: + return f(x) + + +class ApproxNumpy(ApproxBase): + """Perform approximate comparisons where the expected value is numpy array.""" + + def __repr__(self) -> str: + list_scalars = _recursive_list_map(self._approx_scalar, self.expected.tolist()) + return f"approx({list_scalars!r})" + + def _repr_compare(self, other_side: "ndarray") -> List[str]: + import itertools + import math + + def get_value_from_nested_list( + nested_list: List[Any], nd_index: Tuple[Any, ...] + ) -> Any: + """ + Helper function to get the value out of a nested list, given an n-dimensional index. + This mimics numpy's indexing, but for raw nested python lists. + """ + value: Any = nested_list + for i in nd_index: + value = value[i] + return value + + np_array_shape = self.expected.shape + approx_side_as_list = _recursive_list_map( + self._approx_scalar, self.expected.tolist() + ) + + if np_array_shape != other_side.shape: + return [ + "Impossible to compare arrays with different shapes.", + f"Shapes: {np_array_shape} and {other_side.shape}", + ] + + number_of_elements = self.expected.size + max_abs_diff = -math.inf + max_rel_diff = -math.inf + different_ids = [] + for index in itertools.product(*(range(i) for i in np_array_shape)): + approx_value = get_value_from_nested_list(approx_side_as_list, index) + other_value = get_value_from_nested_list(other_side, index) + if approx_value != other_value: + abs_diff = abs(approx_value.expected - other_value) + max_abs_diff = max(max_abs_diff, abs_diff) + if other_value == 0.0: + max_rel_diff = math.inf + else: + max_rel_diff = max(max_rel_diff, abs_diff / abs(other_value)) + different_ids.append(index) + + message_data = [ + ( + str(index), + str(get_value_from_nested_list(other_side, index)), + str(get_value_from_nested_list(approx_side_as_list, index)), + ) + for index in different_ids + ] + return _compare_approx( + self.expected, + message_data, + number_of_elements, + different_ids, + max_abs_diff, + max_rel_diff, + ) + + def __eq__(self, actual) -> bool: + import numpy as np + + # self.expected is supposed to always be an array here. 
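+        # For illustration: a plain Python list on the other side is accepted
+        # too, since it is coerced with ``np.asarray`` below, e.g.
+        # ``[0.1, 0.2] == approx(np.array([0.1, 0.2]))`` holds.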
+
+        if not np.isscalar(actual):
+            try:
+                actual = np.asarray(actual)
+            except Exception as e:
+                raise TypeError(f"cannot compare '{actual}' to numpy.ndarray") from e
+
+        if not np.isscalar(actual) and actual.shape != self.expected.shape:
+            return False
+
+        return super().__eq__(actual)
+
+    def _yield_comparisons(self, actual):
+        import numpy as np
+
+        # `actual` can either be a numpy array or a scalar; it is treated in
+        # `__eq__` before being passed to `ApproxBase.__eq__`, which is the
+        # only method that calls this one.
+
+        if np.isscalar(actual):
+            for i in np.ndindex(self.expected.shape):
+                yield actual, self.expected[i].item()
+        else:
+            for i in np.ndindex(self.expected.shape):
+                yield actual[i].item(), self.expected[i].item()
+
+
+class ApproxMapping(ApproxBase):
+    """Perform approximate comparisons where the expected value is a mapping
+    with numeric values (the keys can be anything)."""
+
+    def __repr__(self) -> str:
+        return "approx({!r})".format(
+            {k: self._approx_scalar(v) for k, v in self.expected.items()}
+        )
+
+    def _repr_compare(self, other_side: Mapping[object, float]) -> List[str]:
+        import math
+
+        approx_side_as_map = {
+            k: self._approx_scalar(v) for k, v in self.expected.items()
+        }
+
+        number_of_elements = len(approx_side_as_map)
+        max_abs_diff = -math.inf
+        max_rel_diff = -math.inf
+        different_ids = []
+        for (approx_key, approx_value), other_value in zip(
+            approx_side_as_map.items(), other_side.values()
+        ):
+            if approx_value != other_value:
+                max_abs_diff = max(
+                    max_abs_diff, abs(approx_value.expected - other_value)
+                )
+                max_rel_diff = max(
+                    max_rel_diff,
+                    abs((approx_value.expected - other_value) / approx_value.expected),
+                )
+                different_ids.append(approx_key)
+
+        message_data = [
+            (str(key), str(other_side[key]), str(approx_side_as_map[key]))
+            for key in different_ids
+        ]
+
+        return _compare_approx(
+            self.expected,
+            message_data,
+            number_of_elements,
+            different_ids,
+            max_abs_diff,
+            max_rel_diff,
+        )
+
+    def __eq__(self, actual) -> bool:
+        try:
+            if set(actual.keys()) != set(self.expected.keys()):
+                return False
+        except AttributeError:
+            return False
+
+        return super().__eq__(actual)
+
+    def _yield_comparisons(self, actual):
+        for k in self.expected.keys():
+            yield actual[k], self.expected[k]
+
+    def _check_type(self) -> None:
+        __tracebackhide__ = True
+        for key, value in self.expected.items():
+            if isinstance(value, type(self.expected)):
+                msg = "pytest.approx() does not support nested dictionaries: key={!r} value={!r}\n  full mapping={}"
+                raise TypeError(msg.format(key, value, pprint.pformat(self.expected)))
+
+
+class ApproxSequenceLike(ApproxBase):
+    """Perform approximate comparisons where the expected value is a sequence of numbers."""
+
+    def __repr__(self) -> str:
+        seq_type = type(self.expected)
+        if seq_type not in (tuple, list):
+            seq_type = list
+        return "approx({!r})".format(
+            seq_type(self._approx_scalar(x) for x in self.expected)
+        )
+
+    def _repr_compare(self, other_side: Sequence[float]) -> List[str]:
+        import math
+
+        if len(self.expected) != len(other_side):
+            return [
+                "Impossible to compare lists with different sizes.",
+                f"Lengths: {len(self.expected)} and {len(other_side)}",
+            ]
+
+        approx_side_as_map = _recursive_list_map(self._approx_scalar, self.expected)
+
+        number_of_elements = len(approx_side_as_map)
+        max_abs_diff = -math.inf
+        max_rel_diff = -math.inf
+        different_ids = []
+        for i, (approx_value, other_value) in enumerate(
+            zip(approx_side_as_map, other_side)
+        ):
+            if approx_value != other_value:
+                abs_diff = abs(approx_value.expected - other_value)
+                max_abs_diff = max(max_abs_diff, abs_diff)
+                if other_value == 0.0:
+                    max_rel_diff = math.inf
+                else:
+                    max_rel_diff = max(max_rel_diff, abs_diff / abs(other_value))
+                different_ids.append(i)
+
+        message_data = [
+            (str(i), str(other_side[i]), str(approx_side_as_map[i]))
+            for i in different_ids
+        ]
+
+        return _compare_approx(
+            self.expected,
+            message_data,
+            number_of_elements,
+            different_ids,
+            max_abs_diff,
+            max_rel_diff,
+        )
+
+    def __eq__(self, actual) -> bool:
+        try:
+            if len(actual) != len(self.expected):
+                return False
+        except TypeError:
+            return False
+        return super().__eq__(actual)
+
+    def _yield_comparisons(self, actual):
+        return zip(actual, self.expected)
+
+    def _check_type(self) -> None:
+        __tracebackhide__ = True
+        for index, x in enumerate(self.expected):
+            if isinstance(x, type(self.expected)):
+                msg = "pytest.approx() does not support nested data structures: {!r} at index {}\n  full sequence: {}"
+                raise TypeError(msg.format(x, index, pprint.pformat(self.expected)))
+
+
+class ApproxScalar(ApproxBase):
+    """Perform approximate comparisons where the expected value is a single number."""
+
+    # Using Real should be better than this Union, but not possible yet:
+    # https://github.com/python/typeshed/pull/3108
+    DEFAULT_ABSOLUTE_TOLERANCE: Union[float, Decimal] = 1e-12
+    DEFAULT_RELATIVE_TOLERANCE: Union[float, Decimal] = 1e-6
+
+    def __repr__(self) -> str:
+        """Return a string communicating both the expected value and the
+        tolerance for the comparison being made.
+
+        For example, ``1.0 ± 1e-6``, ``(3+4j) ± 5e-6 ∠ ±180°``.
+        """
+        # Don't show a tolerance for values that aren't compared using
+        # tolerances, i.e. non-numerics and infinities. Need to call abs to
+        # handle complex numbers, e.g. (inf + 1j).
+        if (not isinstance(self.expected, (Complex, Decimal))) or math.isinf(
+            abs(self.expected)  # type: ignore[arg-type]
+        ):
+            return str(self.expected)
+
+        # If a sensible tolerance can't be calculated, self.tolerance will
+        # raise a ValueError. In this case, display '???'.
+        try:
+            vetted_tolerance = f"{self.tolerance:.1e}"
+            if (
+                isinstance(self.expected, Complex)
+                and self.expected.imag
+                and not math.isinf(self.tolerance)
+            ):
+                vetted_tolerance += " ∠ ±180°"
+        except ValueError:
+            vetted_tolerance = "???"
+
+        return f"{self.expected} ± {vetted_tolerance}"
+
+    def __eq__(self, actual) -> bool:
+        """Return whether the given value is equal to the expected value
+        within the pre-specified tolerance."""
+        asarray = _as_numpy_array(actual)
+        if asarray is not None:
+            # Call ``__eq__()`` manually to prevent infinite-recursion with
+            # numpy<1.13. See #3748.
+            return all(self.__eq__(a) for a in asarray.flat)
+
+        # Short-circuit exact equality.
+        if actual == self.expected:
+            return True
+
+        # If either type is non-numeric, fall back to strict equality.
+        # NB: we need Complex, rather than just Number, to ensure that __abs__,
+        # __sub__, and __float__ are defined.
+        if not (
+            isinstance(self.expected, (Complex, Decimal))
+            and isinstance(actual, (Complex, Decimal))
+        ):
+            return False
+
+        # Allow the user to control whether NaNs are considered equal to each
+        # other or not. The abs() calls are for compatibility with complex
+        # numbers.
+ if math.isnan(abs(self.expected)): # type: ignore[arg-type] + return self.nan_ok and math.isnan(abs(actual)) # type: ignore[arg-type] + + # Infinity shouldn't be approximately equal to anything but itself, but + # if there's a relative tolerance, it will be infinite and infinity + # will seem approximately equal to everything. The equal-to-itself + # case would have been short circuited above, so here we can just + # return false if the expected value is infinite. The abs() call is + # for compatibility with complex numbers. + if math.isinf(abs(self.expected)): # type: ignore[arg-type] + return False + + # Return true if the two numbers are within the tolerance. + result: bool = abs(self.expected - actual) <= self.tolerance + return result + + # Ignore type because of https://github.com/python/mypy/issues/4266. + __hash__ = None # type: ignore + + @property + def tolerance(self): + """Return the tolerance for the comparison. + + This could be either an absolute tolerance or a relative tolerance, + depending on what the user specified or which would be larger. + """ + + def set_default(x, default): + return x if x is not None else default + + # Figure out what the absolute tolerance should be. ``self.abs`` is + # either None or a value specified by the user. + absolute_tolerance = set_default(self.abs, self.DEFAULT_ABSOLUTE_TOLERANCE) + + if absolute_tolerance < 0: + raise ValueError( + f"absolute tolerance can't be negative: {absolute_tolerance}" + ) + if math.isnan(absolute_tolerance): + raise ValueError("absolute tolerance can't be NaN.") + + # If the user specified an absolute tolerance but not a relative one, + # just return the absolute tolerance. + if self.rel is None: + if self.abs is not None: + return absolute_tolerance + + # Figure out what the relative tolerance should be. ``self.rel`` is + # either None or a value specified by the user. This is done after + # we've made sure the user didn't ask for an absolute tolerance only, + # because we don't want to raise errors about the relative tolerance if + # we aren't even going to use it. + relative_tolerance = set_default( + self.rel, self.DEFAULT_RELATIVE_TOLERANCE + ) * abs(self.expected) + + if relative_tolerance < 0: + raise ValueError( + f"relative tolerance can't be negative: {relative_tolerance}" + ) + if math.isnan(relative_tolerance): + raise ValueError("relative tolerance can't be NaN.") + + # Return the larger of the relative and absolute tolerances. + return max(relative_tolerance, absolute_tolerance) + + +class ApproxDecimal(ApproxScalar): + """Perform approximate comparisons where the expected value is a Decimal.""" + + DEFAULT_ABSOLUTE_TOLERANCE = Decimal("1e-12") + DEFAULT_RELATIVE_TOLERANCE = Decimal("1e-6") + + +def approx(expected, rel=None, abs=None, nan_ok: bool = False) -> ApproxBase: + """Assert that two numbers (or two ordered sequences of numbers) are equal to each other + within some tolerance. + + Due to the :std:doc:`tutorial/floatingpoint`, numbers that we + would intuitively expect to be equal are not always so:: + + >>> 0.1 + 0.2 == 0.3 + False + + This problem is commonly encountered when writing tests, e.g. when making + sure that floating-point values are what you expect them to be. One way to + deal with this problem is to assert that two floating-point numbers are + equal to within some appropriate tolerance:: + + >>> abs((0.1 + 0.2) - 0.3) < 1e-6 + True + + However, comparisons like this are tedious to write and difficult to + understand. 
Furthermore, absolute comparisons like the one above are + usually discouraged because there's no tolerance that works well for all + situations. ``1e-6`` is good for numbers around ``1``, but too small for + very big numbers and too big for very small ones. It's better to express + the tolerance as a fraction of the expected value, but relative comparisons + like that are even more difficult to write correctly and concisely. + + The ``approx`` class performs floating-point comparisons using a syntax + that's as intuitive as possible:: + + >>> from pytest import approx + >>> 0.1 + 0.2 == approx(0.3) + True + + The same syntax also works for ordered sequences of numbers:: + + >>> (0.1 + 0.2, 0.2 + 0.4) == approx((0.3, 0.6)) + True + + ``numpy`` arrays:: + + >>> import numpy as np # doctest: +SKIP + >>> np.array([0.1, 0.2]) + np.array([0.2, 0.4]) == approx(np.array([0.3, 0.6])) # doctest: +SKIP + True + + And for a ``numpy`` array against a scalar:: + + >>> import numpy as np # doctest: +SKIP + >>> np.array([0.1, 0.2]) + np.array([0.2, 0.1]) == approx(0.3) # doctest: +SKIP + True + + Only ordered sequences are supported, because ``approx`` needs + to infer the relative position of the sequences without ambiguity. This means + ``sets`` and other unordered sequences are not supported. + + Finally, dictionary *values* can also be compared:: + + >>> {'a': 0.1 + 0.2, 'b': 0.2 + 0.4} == approx({'a': 0.3, 'b': 0.6}) + True + + The comparison will be true if both mappings have the same keys and their + respective values match the expected tolerances. + + **Tolerances** + + By default, ``approx`` considers numbers within a relative tolerance of + ``1e-6`` (i.e. one part in a million) of its expected value to be equal. + This treatment would lead to surprising results if the expected value was + ``0.0``, because nothing but ``0.0`` itself is relatively close to ``0.0``. + To handle this case less surprisingly, ``approx`` also considers numbers + within an absolute tolerance of ``1e-12`` of its expected value to be + equal. Infinity and NaN are special cases. Infinity is only considered + equal to itself, regardless of the relative tolerance. NaN is not + considered equal to anything by default, but you can make it be equal to + itself by setting the ``nan_ok`` argument to True. (This is meant to + facilitate comparing arrays that use NaN to mean "no data".) + + Both the relative and absolute tolerances can be changed by passing + arguments to the ``approx`` constructor:: + + >>> 1.0001 == approx(1) + False + >>> 1.0001 == approx(1, rel=1e-3) + True + >>> 1.0001 == approx(1, abs=1e-3) + True + + If you specify ``abs`` but not ``rel``, the comparison will not consider + the relative tolerance at all. In other words, two numbers that are within + the default relative tolerance of ``1e-6`` will still be considered unequal + if they exceed the specified absolute tolerance. If you specify both + ``abs`` and ``rel``, the numbers will be considered equal if either + tolerance is met:: + + >>> 1 + 1e-8 == approx(1) + True + >>> 1 + 1e-8 == approx(1, abs=1e-12) + False + >>> 1 + 1e-8 == approx(1, rel=1e-6, abs=1e-12) + True + + You can also use ``approx`` to compare nonnumeric types, or dicts and + sequences containing nonnumeric types, in which case it falls back to + strict equality. 
This can be useful for comparing dicts and sequences that
+    can contain optional values::
+
+        >>> {"required": 1.0000005, "optional": None} == approx({"required": 1, "optional": None})
+        True
+        >>> [None, 1.0000005] == approx([None,1])
+        True
+        >>> ["foo", 1.0000005] == approx([None,1])
+        False
+
+    If you're thinking about using ``approx``, then you might want to know how
+    it compares to other good ways of comparing floating-point numbers. All of
+    these algorithms are based on relative and absolute tolerances and should
+    agree for the most part, but they do have meaningful differences:
+
+    - ``math.isclose(a, b, rel_tol=1e-9, abs_tol=0.0)``: True if the relative
+      tolerance is met w.r.t. either ``a`` or ``b`` or if the absolute
+      tolerance is met. Because the relative tolerance is calculated w.r.t.
+      both ``a`` and ``b``, this test is symmetric (i.e. neither ``a`` nor
+      ``b`` is a "reference value"). You have to specify an absolute tolerance
+      if you want to compare to ``0.0`` because there is no tolerance by
+      default. More information: :py:func:`math.isclose`.
+
+    - ``numpy.isclose(a, b, rtol=1e-5, atol=1e-8)``: True if the difference
+      between ``a`` and ``b`` is less than the sum of the relative tolerance
+      w.r.t. ``b`` and the absolute tolerance. Because the relative tolerance
+      is only calculated w.r.t. ``b``, this test is asymmetric and you can
+      think of ``b`` as the reference value. Support for comparing sequences
+      is provided by :py:func:`numpy.allclose`. More information:
+      :std:doc:`numpy:reference/generated/numpy.isclose`.
+
+    - ``unittest.TestCase.assertAlmostEqual(a, b)``: True if ``a`` and ``b``
+      are within an absolute tolerance of ``1e-7``. No relative tolerance is
+      considered, so this function is not appropriate for very large or very
+      small numbers. Also, it's only available in subclasses of ``unittest.TestCase``
+      and it's ugly because it doesn't follow PEP 8. More information:
+      :py:meth:`unittest.TestCase.assertAlmostEqual`.
+
+    - ``a == pytest.approx(b, rel=1e-6, abs=1e-12)``: True if the relative
+      tolerance is met w.r.t. ``b`` or if the absolute tolerance is met.
+      Because the relative tolerance is only calculated w.r.t. ``b``, this test
+      is asymmetric and you can think of ``b`` as the reference value. In the
+      special case that you explicitly specify an absolute tolerance but not a
+      relative tolerance, only the absolute tolerance is considered.
+
+    .. note::
+
+        ``approx`` can handle numpy arrays, but we recommend the
+        specialised test helpers in :std:doc:`numpy:reference/routines.testing`
+        if you need support for comparisons, NaNs, or ULP-based tolerances.
+
+    .. warning::
+
+       .. versionchanged:: 3.2
+
+       In order to avoid inconsistent behavior, :py:exc:`TypeError` is
+       raised for ``>``, ``>=``, ``<`` and ``<=`` comparisons.
+       The example below illustrates the problem::
+
+           assert approx(0.1) > 0.1 + 1e-10  # calls approx(0.1).__gt__(0.1 + 1e-10)
+           assert 0.1 + 1e-10 > approx(0.1)  # calls approx(0.1).__lt__(0.1 + 1e-10)
+
+       In the second example one expects ``approx(0.1).__le__(0.1 + 1e-10)``
+       to be called. But instead, ``approx(0.1).__lt__(0.1 + 1e-10)`` is used
+       for the comparison. This is because the call hierarchy of rich comparisons
+       follows a fixed behavior. More information: :py:meth:`object.__ge__`
+
+    .. versionchanged:: 3.7.1
+       ``approx`` raises ``TypeError`` when it encounters a dict value or
+       sequence element of nonnumeric type.
+
+    .. versionchanged:: 6.1.0
+       ``approx`` falls back to strict equality for nonnumeric types instead
+       of raising ``TypeError``.
+    """
+
+    # Delegate the comparison to a class that knows how to deal with the type
+    # of the expected value (e.g. int, float, list, dict, numpy.array, etc).
+    #
+    # The primary responsibility of these classes is to implement ``__eq__()``
+    # and ``__repr__()``. The former is used to actually check if some
+    # "actual" value is equivalent to the given expected value within the
+    # allowed tolerance. The latter is used to show the user the expected
+    # value and tolerance, in the case that a test failed.
+    #
+    # The actual logic for making approximate comparisons can be found in
+    # ApproxScalar, which is used to compare individual numbers. All of the
+    # other Approx classes eventually delegate to this class. The ApproxBase
+    # class provides some convenient methods and overloads, but isn't really
+    # essential.
+
+    __tracebackhide__ = True
+
+    if isinstance(expected, Decimal):
+        cls: Type[ApproxBase] = ApproxDecimal
+    elif isinstance(expected, Mapping):
+        cls = ApproxMapping
+    elif _is_numpy_array(expected):
+        expected = _as_numpy_array(expected)
+        cls = ApproxNumpy
+    elif (
+        hasattr(expected, "__getitem__")
+        and isinstance(expected, Sized)
+        # Type ignored because the error is wrong -- not unreachable.
+        and not isinstance(expected, STRING_TYPES)  # type: ignore[unreachable]
+    ):
+        cls = ApproxSequenceLike
+    elif (
+        isinstance(expected, Collection)
+        # Type ignored because the error is wrong -- not unreachable.
+        and not isinstance(expected, STRING_TYPES)  # type: ignore[unreachable]
+    ):
+        msg = f"pytest.approx() only supports ordered sequences, but got: {repr(expected)}"
+        raise TypeError(msg)
+    else:
+        cls = ApproxScalar
+
+    return cls(expected, rel, abs, nan_ok)
+
+
+def _is_numpy_array(obj: object) -> bool:
+    """
+    Return true if the given object is implicitly convertible to ndarray,
+    and numpy is already imported.
+    """
+    return _as_numpy_array(obj) is not None
+
+
+def _as_numpy_array(obj: object) -> Optional["ndarray"]:
+    """
+    Return an ndarray if the given object is implicitly convertible to ndarray,
+    and numpy is already imported, otherwise None.
+    """
+    import sys
+
+    np: Any = sys.modules.get("numpy")
+    if np is not None:
+        # avoid infinite recursion on numpy scalars, which have __array__
+        if np.isscalar(obj):
+            return None
+        elif isinstance(obj, np.ndarray):
+            return obj
+        elif hasattr(obj, "__array__") or hasattr(obj, "__array_interface__"):
+            return np.asarray(obj)
+    return None
+
+
+# builtin pytest.raises helper
+
+E = TypeVar("E", bound=BaseException)
+
+
+@overload
+def raises(
+    expected_exception: Union[Type[E], Tuple[Type[E], ...]],
+    *,
+    match: Optional[Union[str, Pattern[str]]] = ...,
+) -> "RaisesContext[E]":
+    ...
+
+
+@overload
+def raises(
+    expected_exception: Union[Type[E], Tuple[Type[E], ...]],
+    func: Callable[..., Any],
+    *args: Any,
+    **kwargs: Any,
+) -> _pytest._code.ExceptionInfo[E]:
+    ...
+
+
+def raises(
+    expected_exception: Union[Type[E], Tuple[Type[E], ...]], *args: Any, **kwargs: Any
+) -> Union["RaisesContext[E]", _pytest._code.ExceptionInfo[E]]:
+    r"""Assert that a code block/function call raises ``expected_exception``
+    or raise a failure exception otherwise.
+
+    :kwparam match:
+        If specified, a string containing a regular expression,
+        or a regular expression object, that is tested against the string
+        representation of the exception using :py:func:`re.search`. To match a literal
+        string that may contain :std:ref:`special characters <re-syntax>`, the pattern can
+        first be escaped with :py:func:`re.escape`.
+
+        (This is only used when :py:func:`pytest.raises` is used as a context manager,
+        and passed through to the function otherwise.
+        When using :py:func:`pytest.raises` as a function, you can use:
+        ``pytest.raises(Exc, func, match="passed on").match("my pattern")``.)
+
+    .. currentmodule:: _pytest._code
+
+    Use ``pytest.raises`` as a context manager, which will capture the exception of the given
+    type::
+
+        >>> import pytest
+        >>> with pytest.raises(ZeroDivisionError):
+        ...    1/0
+
+    If the code block does not raise the expected exception (``ZeroDivisionError`` in the example
+    above), or no exception at all, the check will fail instead.
+
+    You can also use the keyword argument ``match`` to assert that the
+    exception matches a text or regex::
+
+        >>> with pytest.raises(ValueError, match='must be 0 or None'):
+        ...     raise ValueError("value must be 0 or None")
+
+        >>> with pytest.raises(ValueError, match=r'must be \d+$'):
+        ...     raise ValueError("value must be 42")
+
+    The context manager produces an :class:`ExceptionInfo` object which can be used to inspect the
+    details of the captured exception::
+
+        >>> with pytest.raises(ValueError) as exc_info:
+        ...     raise ValueError("value must be 42")
+        >>> assert exc_info.type is ValueError
+        >>> assert exc_info.value.args[0] == "value must be 42"
+
+    .. note::
+
+        When using ``pytest.raises`` as a context manager, it's worthwhile to
+        note that normal context manager rules apply and that the exception
+        raised *must* be the final line in the scope of the context manager.
+        Lines of code after that, within the scope of the context manager will
+        not be executed. For example::
+
+            >>> value = 15
+            >>> with pytest.raises(ValueError) as exc_info:
+            ...     if value > 10:
+            ...         raise ValueError("value must be <= 10")
+            ...     assert exc_info.type is ValueError  # this will not execute
+
+        Instead, the following approach must be taken (note the difference in
+        scope)::
+
+            >>> with pytest.raises(ValueError) as exc_info:
+            ...     if value > 10:
+            ...         raise ValueError("value must be <= 10")
+            ...
+            >>> assert exc_info.type is ValueError
+
+    **Using with** ``pytest.mark.parametrize``
+
+    When using :ref:`pytest.mark.parametrize ref`
+    it is possible to parametrize tests such that
+    some runs raise an exception and others do not.
+
+    See :ref:`parametrizing_conditional_raising` for an example.
+
+    **Legacy form**
+
+    It is possible to specify a callable by passing a to-be-called lambda::
+
+        >>> raises(ZeroDivisionError, lambda: 1/0)
+        <ExceptionInfo ...>
+
+    or you can specify an arbitrary callable with arguments::
+
+        >>> def f(x): return 1/x
+        ...
+        >>> raises(ZeroDivisionError, f, 0)
+        <ExceptionInfo ...>
+        >>> raises(ZeroDivisionError, f, x=0)
+        <ExceptionInfo ...>
+
+    The form above is fully supported but discouraged for new code because the
+    context manager form is regarded as more readable and less error-prone.
+
+    .. note::
+        Similar to caught exception objects in Python, explicitly clearing
+        local references to returned ``ExceptionInfo`` objects can
+        help the Python interpreter speed up its garbage collection.
+ + Clearing those references breaks a reference cycle + (``ExceptionInfo`` --> caught exception --> frame stack raising + the exception --> current frame stack --> local variables --> + ``ExceptionInfo``) which makes Python keep all objects referenced + from that cycle (including all local variables in the current + frame) alive until the next cyclic garbage collection run. + More detailed information can be found in the official Python + documentation for :ref:`the try statement `. + """ + __tracebackhide__ = True + + if isinstance(expected_exception, type): + excepted_exceptions: Tuple[Type[E], ...] = (expected_exception,) + else: + excepted_exceptions = expected_exception + for exc in excepted_exceptions: + if not isinstance(exc, type) or not issubclass(exc, BaseException): + msg = "expected exception must be a BaseException type, not {}" # type: ignore[unreachable] + not_a = exc.__name__ if isinstance(exc, type) else type(exc).__name__ + raise TypeError(msg.format(not_a)) + + message = f"DID NOT RAISE {expected_exception}" + + if not args: + match: Optional[Union[str, Pattern[str]]] = kwargs.pop("match", None) + if kwargs: + msg = "Unexpected keyword arguments passed to pytest.raises: " + msg += ", ".join(sorted(kwargs)) + msg += "\nUse context-manager form instead?" + raise TypeError(msg) + return RaisesContext(expected_exception, message, match) + else: + func = args[0] + if not callable(func): + raise TypeError(f"{func!r} object (type: {type(func)}) must be callable") + try: + func(*args[1:], **kwargs) + except expected_exception as e: + # We just caught the exception - there is a traceback. + assert e.__traceback__ is not None + return _pytest._code.ExceptionInfo.from_exc_info( + (type(e), e, e.__traceback__) + ) + fail(message) + + +# This doesn't work with mypy for now. Use fail.Exception instead. +raises.Exception = fail.Exception # type: ignore + + +@final +class RaisesContext(Generic[E]): + def __init__( + self, + expected_exception: Union[Type[E], Tuple[Type[E], ...]], + message: str, + match_expr: Optional[Union[str, Pattern[str]]] = None, + ) -> None: + self.expected_exception = expected_exception + self.message = message + self.match_expr = match_expr + self.excinfo: Optional[_pytest._code.ExceptionInfo[E]] = None + + def __enter__(self) -> _pytest._code.ExceptionInfo[E]: + self.excinfo = _pytest._code.ExceptionInfo.for_later() + return self.excinfo + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> bool: + __tracebackhide__ = True + if exc_type is None: + fail(self.message) + assert self.excinfo is not None + if not issubclass(exc_type, self.expected_exception): + return False + # Cast to narrow the exception type now that it's verified. 
+ exc_info = cast(Tuple[Type[E], E, TracebackType], (exc_type, exc_val, exc_tb)) + self.excinfo.fill_unfilled(exc_info) + if self.match_expr is not None: + self.excinfo.match(self.match_expr) + return True diff --git a/venv/lib/python3.10/site-packages/_pytest/python_path.py b/venv/lib/python3.10/site-packages/_pytest/python_path.py new file mode 100644 index 0000000..cceabbc --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/python_path.py @@ -0,0 +1,24 @@ +import sys + +import pytest +from pytest import Config +from pytest import Parser + + +def pytest_addoption(parser: Parser) -> None: + parser.addini("pythonpath", type="paths", help="Add paths to sys.path", default=[]) + + +@pytest.hookimpl(tryfirst=True) +def pytest_load_initial_conftests(early_config: Config) -> None: + # `pythonpath = a b` will set `sys.path` to `[a, b, x, y, z, ...]` + for path in reversed(early_config.getini("pythonpath")): + sys.path.insert(0, str(path)) + + +@pytest.hookimpl(trylast=True) +def pytest_unconfigure(config: Config) -> None: + for path in config.getini("pythonpath"): + path_str = str(path) + if path_str in sys.path: + sys.path.remove(path_str) diff --git a/venv/lib/python3.10/site-packages/_pytest/recwarn.py b/venv/lib/python3.10/site-packages/_pytest/recwarn.py new file mode 100644 index 0000000..175b571 --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/recwarn.py @@ -0,0 +1,296 @@ +"""Record warnings during test function execution.""" +import re +import warnings +from types import TracebackType +from typing import Any +from typing import Callable +from typing import Generator +from typing import Iterator +from typing import List +from typing import Optional +from typing import overload +from typing import Pattern +from typing import Tuple +from typing import Type +from typing import TypeVar +from typing import Union + +from _pytest.compat import final +from _pytest.deprecated import check_ispytest +from _pytest.deprecated import WARNS_NONE_ARG +from _pytest.fixtures import fixture +from _pytest.outcomes import fail + + +T = TypeVar("T") + + +@fixture +def recwarn() -> Generator["WarningsRecorder", None, None]: + """Return a :class:`WarningsRecorder` instance that records all warnings emitted by test functions. + + See https://docs.python.org/library/how-to/capture-warnings.html for information + on warning categories. + """ + wrec = WarningsRecorder(_ispytest=True) + with wrec: + warnings.simplefilter("default") + yield wrec + + +@overload +def deprecated_call( + *, match: Optional[Union[str, Pattern[str]]] = ... +) -> "WarningsRecorder": + ... + + +@overload +def deprecated_call(func: Callable[..., T], *args: Any, **kwargs: Any) -> T: + ... + + +def deprecated_call( + func: Optional[Callable[..., Any]] = None, *args: Any, **kwargs: Any +) -> Union["WarningsRecorder", Any]: + """Assert that code produces a ``DeprecationWarning`` or ``PendingDeprecationWarning``. + + This function can be used as a context manager:: + + >>> import warnings + >>> def api_call_v2(): + ... warnings.warn('use v3 of this api', DeprecationWarning) + ... return 200 + + >>> import pytest + >>> with pytest.deprecated_call(): + ... assert api_call_v2() == 200 + + It can also be used by passing a function and ``*args`` and ``**kwargs``, + in which case it will ensure calling ``func(*args, **kwargs)`` produces one of + the warnings types above. The return value is the return value of the function. 
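+
+    For example, reusing ``api_call_v2`` from the snippet above (a minimal
+    sketch of the function form)::
+
+        >>> assert pytest.deprecated_call(api_call_v2) == 200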
+ + In the context manager form you may use the keyword argument ``match`` to assert + that the warning matches a text or regex. + + The context manager produces a list of :class:`warnings.WarningMessage` objects, + one for each warning raised. + """ + __tracebackhide__ = True + if func is not None: + args = (func,) + args + return warns((DeprecationWarning, PendingDeprecationWarning), *args, **kwargs) + + +@overload +def warns( + expected_warning: Union[Type[Warning], Tuple[Type[Warning], ...]] = ..., + *, + match: Optional[Union[str, Pattern[str]]] = ..., +) -> "WarningsChecker": + ... + + +@overload +def warns( + expected_warning: Union[Type[Warning], Tuple[Type[Warning], ...]], + func: Callable[..., T], + *args: Any, + **kwargs: Any, +) -> T: + ... + + +def warns( + expected_warning: Union[Type[Warning], Tuple[Type[Warning], ...]] = Warning, + *args: Any, + match: Optional[Union[str, Pattern[str]]] = None, + **kwargs: Any, +) -> Union["WarningsChecker", Any]: + r"""Assert that code raises a particular class of warning. + + Specifically, the parameter ``expected_warning`` can be a warning class or + sequence of warning classes, and the inside the ``with`` block must issue a warning of that class or + classes. + + This helper produces a list of :class:`warnings.WarningMessage` objects, + one for each warning raised. + + This function can be used as a context manager, or any of the other ways + :func:`pytest.raises` can be used:: + + >>> import pytest + >>> with pytest.warns(RuntimeWarning): + ... warnings.warn("my warning", RuntimeWarning) + + In the context manager form you may use the keyword argument ``match`` to assert + that the warning matches a text or regex:: + + >>> with pytest.warns(UserWarning, match='must be 0 or None'): + ... warnings.warn("value must be 0 or None", UserWarning) + + >>> with pytest.warns(UserWarning, match=r'must be \d+$'): + ... warnings.warn("value must be 42", UserWarning) + + >>> with pytest.warns(UserWarning, match=r'must be \d+$'): + ... warnings.warn("this is not here", UserWarning) + Traceback (most recent call last): + ... + Failed: DID NOT WARN. No warnings of type ...UserWarning... were emitted... + + """ + __tracebackhide__ = True + if not args: + if kwargs: + msg = "Unexpected keyword arguments passed to pytest.warns: " + msg += ", ".join(sorted(kwargs)) + msg += "\nUse context-manager form instead?" + raise TypeError(msg) + return WarningsChecker(expected_warning, match_expr=match, _ispytest=True) + else: + func = args[0] + if not callable(func): + raise TypeError(f"{func!r} object (type: {type(func)}) must be callable") + with WarningsChecker(expected_warning, _ispytest=True): + return func(*args[1:], **kwargs) + + +class WarningsRecorder(warnings.catch_warnings): + """A context manager to record raised warnings. + + Adapted from `warnings.catch_warnings`. + """ + + def __init__(self, *, _ispytest: bool = False) -> None: + check_ispytest(_ispytest) + # Type ignored due to the way typeshed handles warnings.catch_warnings. 
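+        # record=True makes the base catch_warnings collect each warning in a
+        # list (returned from __enter__) instead of letting it propagate;
+        # WarningsRecorder keeps that list in self._list.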
+ super().__init__(record=True) # type: ignore[call-arg] + self._entered = False + self._list: List[warnings.WarningMessage] = [] + + @property + def list(self) -> List["warnings.WarningMessage"]: + """The list of recorded warnings.""" + return self._list + + def __getitem__(self, i: int) -> "warnings.WarningMessage": + """Get a recorded warning by index.""" + return self._list[i] + + def __iter__(self) -> Iterator["warnings.WarningMessage"]: + """Iterate through the recorded warnings.""" + return iter(self._list) + + def __len__(self) -> int: + """The number of recorded warnings.""" + return len(self._list) + + def pop(self, cls: Type[Warning] = Warning) -> "warnings.WarningMessage": + """Pop the first recorded warning, raise exception if not exists.""" + for i, w in enumerate(self._list): + if issubclass(w.category, cls): + return self._list.pop(i) + __tracebackhide__ = True + raise AssertionError("%r not found in warning list" % cls) + + def clear(self) -> None: + """Clear the list of recorded warnings.""" + self._list[:] = [] + + # Type ignored because it doesn't exactly warnings.catch_warnings.__enter__ + # -- it returns a List but we only emulate one. + def __enter__(self) -> "WarningsRecorder": # type: ignore + if self._entered: + __tracebackhide__ = True + raise RuntimeError("Cannot enter %r twice" % self) + _list = super().__enter__() + # record=True means it's None. + assert _list is not None + self._list = _list + warnings.simplefilter("always") + return self + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: + if not self._entered: + __tracebackhide__ = True + raise RuntimeError("Cannot exit %r without entering first" % self) + + super().__exit__(exc_type, exc_val, exc_tb) + + # Built-in catch_warnings does not reset entered state so we do it + # manually here for this context manager to become reusable. + self._entered = False + + +@final +class WarningsChecker(WarningsRecorder): + def __init__( + self, + expected_warning: Optional[ + Union[Type[Warning], Tuple[Type[Warning], ...]] + ] = Warning, + match_expr: Optional[Union[str, Pattern[str]]] = None, + *, + _ispytest: bool = False, + ) -> None: + check_ispytest(_ispytest) + super().__init__(_ispytest=True) + + msg = "exceptions must be derived from Warning, not %s" + if expected_warning is None: + warnings.warn(WARNS_NONE_ARG, stacklevel=4) + expected_warning_tup = None + elif isinstance(expected_warning, tuple): + for exc in expected_warning: + if not issubclass(exc, Warning): + raise TypeError(msg % type(exc)) + expected_warning_tup = expected_warning + elif issubclass(expected_warning, Warning): + expected_warning_tup = (expected_warning,) + else: + raise TypeError(msg % type(expected_warning)) + + self.expected_warning = expected_warning_tup + self.match_expr = match_expr + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: + super().__exit__(exc_type, exc_val, exc_tb) + + __tracebackhide__ = True + + # only check if we're not currently handling an exception + if exc_type is None and exc_val is None and exc_tb is None: + if self.expected_warning is not None: + if not any(issubclass(r.category, self.expected_warning) for r in self): + __tracebackhide__ = True + fail( + "DID NOT WARN. No warnings of type {} were emitted. 
" + "The list of emitted warnings is: {}.".format( + self.expected_warning, [each.message for each in self] + ) + ) + elif self.match_expr is not None: + for r in self: + if issubclass(r.category, self.expected_warning): + if re.compile(self.match_expr).search(str(r.message)): + break + else: + fail( + "DID NOT WARN. No warnings of type {} matching" + " ('{}') were emitted. The list of emitted warnings" + " is: {}.".format( + self.expected_warning, + self.match_expr, + [each.message for each in self], + ) + ) diff --git a/venv/lib/python3.10/site-packages/_pytest/reports.py b/venv/lib/python3.10/site-packages/_pytest/reports.py new file mode 100644 index 0000000..725fdf6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/reports.py @@ -0,0 +1,599 @@ +import os +from io import StringIO +from pprint import pprint +from typing import Any +from typing import cast +from typing import Dict +from typing import Iterable +from typing import Iterator +from typing import List +from typing import Mapping +from typing import Optional +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +import attr + +from _pytest._code.code import ExceptionChainRepr +from _pytest._code.code import ExceptionInfo +from _pytest._code.code import ExceptionRepr +from _pytest._code.code import ReprEntry +from _pytest._code.code import ReprEntryNative +from _pytest._code.code import ReprExceptionInfo +from _pytest._code.code import ReprFileLocation +from _pytest._code.code import ReprFuncArgs +from _pytest._code.code import ReprLocals +from _pytest._code.code import ReprTraceback +from _pytest._code.code import TerminalRepr +from _pytest._io import TerminalWriter +from _pytest.compat import final +from _pytest.config import Config +from _pytest.nodes import Collector +from _pytest.nodes import Item +from _pytest.outcomes import skip + +if TYPE_CHECKING: + from typing import NoReturn + from typing_extensions import Literal + + from _pytest.runner import CallInfo + + +def getworkerinfoline(node): + try: + return node._workerinfocache + except AttributeError: + d = node.workerinfo + ver = "%s.%s.%s" % d["version_info"][:3] + node._workerinfocache = s = "[{}] {} -- Python {} {}".format( + d["id"], d["sysplatform"], ver, d["executable"] + ) + return s + + +_R = TypeVar("_R", bound="BaseReport") + + +class BaseReport: + when: Optional[str] + location: Optional[Tuple[str, Optional[int], str]] + longrepr: Union[ + None, ExceptionInfo[BaseException], Tuple[str, int, str], str, TerminalRepr + ] + sections: List[Tuple[str, str]] + nodeid: str + outcome: "Literal['passed', 'failed', 'skipped']" + + def __init__(self, **kw: Any) -> None: + self.__dict__.update(kw) + + if TYPE_CHECKING: + # Can have arbitrary fields given to __init__(). + def __getattr__(self, key: str) -> Any: + ... 
+
+    def toterminal(self, out: TerminalWriter) -> None:
+        if hasattr(self, "node"):
+            worker_info = getworkerinfoline(self.node)
+            if worker_info:
+                out.line(worker_info)
+
+        longrepr = self.longrepr
+        if longrepr is None:
+            return
+
+        if hasattr(longrepr, "toterminal"):
+            longrepr_terminal = cast(TerminalRepr, longrepr)
+            longrepr_terminal.toterminal(out)
+        else:
+            try:
+                s = str(longrepr)
+            except UnicodeEncodeError:
+                s = "<unprintable longrepr>"
+            out.line(s)
+
+    def get_sections(self, prefix: str) -> Iterator[Tuple[str, str]]:
+        for name, content in self.sections:
+            if name.startswith(prefix):
+                yield prefix, content
+
+    @property
+    def longreprtext(self) -> str:
+        """Read-only property that returns the full string representation of
+        ``longrepr``.
+
+        .. versionadded:: 3.0
+        """
+        file = StringIO()
+        tw = TerminalWriter(file)
+        tw.hasmarkup = False
+        self.toterminal(tw)
+        exc = file.getvalue()
+        return exc.strip()
+
+    @property
+    def caplog(self) -> str:
+        """Return captured log lines, if log capturing is enabled.
+
+        .. versionadded:: 3.5
+        """
+        return "\n".join(
+            content for (prefix, content) in self.get_sections("Captured log")
+        )
+
+    @property
+    def capstdout(self) -> str:
+        """Return captured text from stdout, if capturing is enabled.
+
+        .. versionadded:: 3.0
+        """
+        return "".join(
+            content for (prefix, content) in self.get_sections("Captured stdout")
+        )
+
+    @property
+    def capstderr(self) -> str:
+        """Return captured text from stderr, if capturing is enabled.
+
+        .. versionadded:: 3.0
+        """
+        return "".join(
+            content for (prefix, content) in self.get_sections("Captured stderr")
+        )
+
+    @property
+    def passed(self) -> bool:
+        """Whether the outcome is passed."""
+        return self.outcome == "passed"
+
+    @property
+    def failed(self) -> bool:
+        """Whether the outcome is failed."""
+        return self.outcome == "failed"
+
+    @property
+    def skipped(self) -> bool:
+        """Whether the outcome is skipped."""
+        return self.outcome == "skipped"
+
+    @property
+    def fspath(self) -> str:
+        """The path portion of the reported node, as a string."""
+        return self.nodeid.split("::")[0]
+
+    @property
+    def count_towards_summary(self) -> bool:
+        """**Experimental** Whether this report should be counted towards the
+        totals shown at the end of the test session: "1 passed, 1 failure, etc".
+
+        .. note::
+
+            This function is considered **experimental**, so beware that it is subject to changes
+            even in patch releases.
+        """
+        return True
+
+    @property
+    def head_line(self) -> Optional[str]:
+        """**Experimental** The head line shown with longrepr output for this
+        report, more commonly during traceback representation during
+        failures::
+
+            ________ Test.foo ________
+
+
+        In the example above, the head_line is "Test.foo".
+
+        .. note::
+
+            This function is considered **experimental**, so beware that it is subject to changes
+            even in patch releases.
+        """
+        if self.location is not None:
+            fspath, lineno, domain = self.location
+            return domain
+        return None
+
+    def _get_verbose_word(self, config: Config):
+        _category, _short, verbose = config.hook.pytest_report_teststatus(
+            report=self, config=config
+        )
+        return verbose
+
+    def _to_json(self) -> Dict[str, Any]:
+        """Return the contents of this report as a dict of builtin entries,
+        suitable for serialization.
+
+        This was originally the serialize_report() function from xdist (ca03269).
+
+        Experimental method.
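+
+        A sketch of the intended round trip (``report`` here stands for a
+        hypothetical ``TestReport`` instance)::
+
+            data = report._to_json()
+            rebuilt = TestReport._from_json(data)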
+ """ + return _report_to_json(self) + + @classmethod + def _from_json(cls: Type[_R], reportdict: Dict[str, object]) -> _R: + """Create either a TestReport or CollectReport, depending on the calling class. + + It is the callers responsibility to know which class to pass here. + + This was originally the serialize_report() function from xdist (ca03269). + + Experimental method. + """ + kwargs = _report_kwargs_from_json(reportdict) + return cls(**kwargs) + + +def _report_unserialization_failure( + type_name: str, report_class: Type[BaseReport], reportdict +) -> "NoReturn": + url = "https://github.com/pytest-dev/pytest/issues" + stream = StringIO() + pprint("-" * 100, stream=stream) + pprint("INTERNALERROR: Unknown entry type returned: %s" % type_name, stream=stream) + pprint("report_name: %s" % report_class, stream=stream) + pprint(reportdict, stream=stream) + pprint("Please report this bug at %s" % url, stream=stream) + pprint("-" * 100, stream=stream) + raise RuntimeError(stream.getvalue()) + + +@final +class TestReport(BaseReport): + """Basic test report object (also used for setup and teardown calls if + they fail). + + Reports can contain arbitrary extra attributes. + """ + + __test__ = False + + def __init__( + self, + nodeid: str, + location: Tuple[str, Optional[int], str], + keywords: Mapping[str, Any], + outcome: "Literal['passed', 'failed', 'skipped']", + longrepr: Union[ + None, ExceptionInfo[BaseException], Tuple[str, int, str], str, TerminalRepr + ], + when: "Literal['setup', 'call', 'teardown']", + sections: Iterable[Tuple[str, str]] = (), + duration: float = 0, + user_properties: Optional[Iterable[Tuple[str, object]]] = None, + **extra, + ) -> None: + #: Normalized collection nodeid. + self.nodeid = nodeid + + #: A (filesystempath, lineno, domaininfo) tuple indicating the + #: actual location of a test item - it might be different from the + #: collected one e.g. if a method is inherited from a different module. + self.location: Tuple[str, Optional[int], str] = location + + #: A name -> value dictionary containing all keywords and + #: markers associated with a test invocation. + self.keywords = keywords + + #: Test outcome, always one of "passed", "failed", "skipped". + self.outcome = outcome + + #: None or a failure representation. + self.longrepr = longrepr + + #: One of 'setup', 'call', 'teardown' to indicate runtest phase. + self.when = when + + #: User properties is a list of tuples (name, value) that holds user + #: defined properties of the test. + self.user_properties = list(user_properties or []) + + #: Tuples of str ``(heading, content)`` with extra information + #: for the test report. Used by pytest to add text captured + #: from ``stdout``, ``stderr``, and intercepted logging events. May + #: be used by other plugins to add arbitrary information to reports. + self.sections = list(sections) + + #: Time it took to run just the test. + self.duration = duration + + self.__dict__.update(extra) + + def __repr__(self) -> str: + return "<{} {!r} when={!r} outcome={!r}>".format( + self.__class__.__name__, self.nodeid, self.when, self.outcome + ) + + @classmethod + def from_item_and_call(cls, item: Item, call: "CallInfo[None]") -> "TestReport": + """Create and fill a TestReport with standard item and call info.""" + when = call.when + # Remove "collect" from the Literal type -- only for collection calls. 
+        assert when != "collect"
+        duration = call.duration
+        keywords = {x: 1 for x in item.keywords}
+        excinfo = call.excinfo
+        sections = []
+        if not call.excinfo:
+            outcome: Literal["passed", "failed", "skipped"] = "passed"
+            longrepr: Union[
+                None,
+                ExceptionInfo[BaseException],
+                Tuple[str, int, str],
+                str,
+                TerminalRepr,
+            ] = None
+        else:
+            if not isinstance(excinfo, ExceptionInfo):
+                outcome = "failed"
+                longrepr = excinfo
+            elif isinstance(excinfo.value, skip.Exception):
+                outcome = "skipped"
+                r = excinfo._getreprcrash()
+                if excinfo.value._use_item_location:
+                    path, line = item.reportinfo()[:2]
+                    assert line is not None
+                    longrepr = os.fspath(path), line + 1, r.message
+                else:
+                    longrepr = (str(r.path), r.lineno, r.message)
+            else:
+                outcome = "failed"
+                if call.when == "call":
+                    longrepr = item.repr_failure(excinfo)
+                else:  # exception in setup or teardown
+                    longrepr = item._repr_failure_py(
+                        excinfo, style=item.config.getoption("tbstyle", "auto")
+                    )
+        for rwhen, key, content in item._report_sections:
+            sections.append((f"Captured {key} {rwhen}", content))
+        return cls(
+            item.nodeid,
+            item.location,
+            keywords,
+            outcome,
+            longrepr,
+            when,
+            sections,
+            duration,
+            user_properties=item.user_properties,
+        )
+
+
+@final
+class CollectReport(BaseReport):
+    """Collection report object.
+
+    Reports can contain arbitrary extra attributes.
+    """
+
+    when = "collect"
+
+    def __init__(
+        self,
+        nodeid: str,
+        outcome: "Literal['passed', 'failed', 'skipped']",
+        longrepr: Union[
+            None, ExceptionInfo[BaseException], Tuple[str, int, str], str, TerminalRepr
+        ],
+        result: Optional[List[Union[Item, Collector]]],
+        sections: Iterable[Tuple[str, str]] = (),
+        **extra,
+    ) -> None:
+        #: Normalized collection nodeid.
+        self.nodeid = nodeid
+
+        #: Test outcome, always one of "passed", "failed", "skipped".
+        self.outcome = outcome
+
+        #: None or a failure representation.
+        self.longrepr = longrepr
+
+        #: The collected items and collection nodes.
+        self.result = result or []
+
+        #: Tuples of str ``(heading, content)`` with extra information
+        #: for the test report. Used by pytest to add text captured
+        #: from ``stdout``, ``stderr``, and intercepted logging events. May
+        #: be used by other plugins to add arbitrary information to reports.
+        self.sections = list(sections)
+
+        self.__dict__.update(extra)
+
+    @property
+    def location(self):
+        return (self.fspath, None, self.fspath)
+
+    def __repr__(self) -> str:
+        return "<CollectReport {!r} lenresult={} outcome={!r}>".format(
+            self.nodeid, len(self.result), self.outcome
+        )
+
+
+class CollectErrorRepr(TerminalRepr):
+    def __init__(self, msg: str) -> None:
+        self.longrepr = msg
+
+    def toterminal(self, out: TerminalWriter) -> None:
+        out.line(self.longrepr, red=True)
+
+
+def pytest_report_to_serializable(
+    report: Union[CollectReport, TestReport]
+) -> Optional[Dict[str, Any]]:
+    if isinstance(report, (TestReport, CollectReport)):
+        data = report._to_json()
+        data["$report_type"] = report.__class__.__name__
+        return data
+    # TODO: Check if this is actually reachable.
+ return None # type: ignore[unreachable] + + +def pytest_report_from_serializable( + data: Dict[str, Any], +) -> Optional[Union[CollectReport, TestReport]]: + if "$report_type" in data: + if data["$report_type"] == "TestReport": + return TestReport._from_json(data) + elif data["$report_type"] == "CollectReport": + return CollectReport._from_json(data) + assert False, "Unknown report_type unserialize data: {}".format( + data["$report_type"] + ) + return None + + +def _report_to_json(report: BaseReport) -> Dict[str, Any]: + """Return the contents of this report as a dict of builtin entries, + suitable for serialization. + + This was originally the serialize_report() function from xdist (ca03269). + """ + + def serialize_repr_entry( + entry: Union[ReprEntry, ReprEntryNative] + ) -> Dict[str, Any]: + data = attr.asdict(entry) + for key, value in data.items(): + if hasattr(value, "__dict__"): + data[key] = attr.asdict(value) + entry_data = {"type": type(entry).__name__, "data": data} + return entry_data + + def serialize_repr_traceback(reprtraceback: ReprTraceback) -> Dict[str, Any]: + result = attr.asdict(reprtraceback) + result["reprentries"] = [ + serialize_repr_entry(x) for x in reprtraceback.reprentries + ] + return result + + def serialize_repr_crash( + reprcrash: Optional[ReprFileLocation], + ) -> Optional[Dict[str, Any]]: + if reprcrash is not None: + return attr.asdict(reprcrash) + else: + return None + + def serialize_exception_longrepr(rep: BaseReport) -> Dict[str, Any]: + assert rep.longrepr is not None + # TODO: Investigate whether the duck typing is really necessary here. + longrepr = cast(ExceptionRepr, rep.longrepr) + result: Dict[str, Any] = { + "reprcrash": serialize_repr_crash(longrepr.reprcrash), + "reprtraceback": serialize_repr_traceback(longrepr.reprtraceback), + "sections": longrepr.sections, + } + if isinstance(longrepr, ExceptionChainRepr): + result["chain"] = [] + for repr_traceback, repr_crash, description in longrepr.chain: + result["chain"].append( + ( + serialize_repr_traceback(repr_traceback), + serialize_repr_crash(repr_crash), + description, + ) + ) + else: + result["chain"] = None + return result + + d = report.__dict__.copy() + if hasattr(report.longrepr, "toterminal"): + if hasattr(report.longrepr, "reprtraceback") and hasattr( + report.longrepr, "reprcrash" + ): + d["longrepr"] = serialize_exception_longrepr(report) + else: + d["longrepr"] = str(report.longrepr) + else: + d["longrepr"] = report.longrepr + for name in d: + if isinstance(d[name], os.PathLike): + d[name] = os.fspath(d[name]) + elif name == "result": + d[name] = None # for now + return d + + +def _report_kwargs_from_json(reportdict: Dict[str, Any]) -> Dict[str, Any]: + """Return **kwargs that can be used to construct a TestReport or + CollectReport instance. + + This was originally the serialize_report() function from xdist (ca03269). 
+ """ + + def deserialize_repr_entry(entry_data): + data = entry_data["data"] + entry_type = entry_data["type"] + if entry_type == "ReprEntry": + reprfuncargs = None + reprfileloc = None + reprlocals = None + if data["reprfuncargs"]: + reprfuncargs = ReprFuncArgs(**data["reprfuncargs"]) + if data["reprfileloc"]: + reprfileloc = ReprFileLocation(**data["reprfileloc"]) + if data["reprlocals"]: + reprlocals = ReprLocals(data["reprlocals"]["lines"]) + + reprentry: Union[ReprEntry, ReprEntryNative] = ReprEntry( + lines=data["lines"], + reprfuncargs=reprfuncargs, + reprlocals=reprlocals, + reprfileloc=reprfileloc, + style=data["style"], + ) + elif entry_type == "ReprEntryNative": + reprentry = ReprEntryNative(data["lines"]) + else: + _report_unserialization_failure(entry_type, TestReport, reportdict) + return reprentry + + def deserialize_repr_traceback(repr_traceback_dict): + repr_traceback_dict["reprentries"] = [ + deserialize_repr_entry(x) for x in repr_traceback_dict["reprentries"] + ] + return ReprTraceback(**repr_traceback_dict) + + def deserialize_repr_crash(repr_crash_dict: Optional[Dict[str, Any]]): + if repr_crash_dict is not None: + return ReprFileLocation(**repr_crash_dict) + else: + return None + + if ( + reportdict["longrepr"] + and "reprcrash" in reportdict["longrepr"] + and "reprtraceback" in reportdict["longrepr"] + ): + + reprtraceback = deserialize_repr_traceback( + reportdict["longrepr"]["reprtraceback"] + ) + reprcrash = deserialize_repr_crash(reportdict["longrepr"]["reprcrash"]) + if reportdict["longrepr"]["chain"]: + chain = [] + for repr_traceback_data, repr_crash_data, description in reportdict[ + "longrepr" + ]["chain"]: + chain.append( + ( + deserialize_repr_traceback(repr_traceback_data), + deserialize_repr_crash(repr_crash_data), + description, + ) + ) + exception_info: Union[ + ExceptionChainRepr, ReprExceptionInfo + ] = ExceptionChainRepr(chain) + else: + exception_info = ReprExceptionInfo(reprtraceback, reprcrash) + + for section in reportdict["longrepr"]["sections"]: + exception_info.addsection(*section) + reportdict["longrepr"] = exception_info + + return reportdict diff --git a/venv/lib/python3.10/site-packages/_pytest/runner.py b/venv/lib/python3.10/site-packages/_pytest/runner.py new file mode 100644 index 0000000..df6eecd --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/runner.py @@ -0,0 +1,541 @@ +"""Basic collect and runtest protocol implementations.""" +import bdb +import os +import sys +from typing import Callable +from typing import cast +from typing import Dict +from typing import Generic +from typing import List +from typing import Optional +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +import attr + +from .reports import BaseReport +from .reports import CollectErrorRepr +from .reports import CollectReport +from .reports import TestReport +from _pytest import timing +from _pytest._code.code import ExceptionChainRepr +from _pytest._code.code import ExceptionInfo +from _pytest._code.code import TerminalRepr +from _pytest.compat import final +from _pytest.config.argparsing import Parser +from _pytest.deprecated import check_ispytest +from _pytest.nodes import Collector +from _pytest.nodes import Item +from _pytest.nodes import Node +from _pytest.outcomes import Exit +from _pytest.outcomes import OutcomeException +from _pytest.outcomes import Skipped +from _pytest.outcomes import TEST_OUTCOME + +if TYPE_CHECKING: + from typing_extensions import 
Literal + + from _pytest.main import Session + from _pytest.terminal import TerminalReporter + +# +# pytest plugin hooks. + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("terminal reporting", "reporting", after="general") + group.addoption( + "--durations", + action="store", + type=int, + default=None, + metavar="N", + help="show N slowest setup/test durations (N=0 for all).", + ) + group.addoption( + "--durations-min", + action="store", + type=float, + default=0.005, + metavar="N", + help="Minimal duration in seconds for inclusion in slowest list. Default 0.005", + ) + + +def pytest_terminal_summary(terminalreporter: "TerminalReporter") -> None: + durations = terminalreporter.config.option.durations + durations_min = terminalreporter.config.option.durations_min + verbose = terminalreporter.config.getvalue("verbose") + if durations is None: + return + tr = terminalreporter + dlist = [] + for replist in tr.stats.values(): + for rep in replist: + if hasattr(rep, "duration"): + dlist.append(rep) + if not dlist: + return + dlist.sort(key=lambda x: x.duration, reverse=True) # type: ignore[no-any-return] + if not durations: + tr.write_sep("=", "slowest durations") + else: + tr.write_sep("=", "slowest %s durations" % durations) + dlist = dlist[:durations] + + for i, rep in enumerate(dlist): + if verbose < 2 and rep.duration < durations_min: + tr.write_line("") + tr.write_line( + "(%s durations < %gs hidden. Use -vv to show these durations.)" + % (len(dlist) - i, durations_min) + ) + break + tr.write_line(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}") + + +def pytest_sessionstart(session: "Session") -> None: + session._setupstate = SetupState() + + +def pytest_sessionfinish(session: "Session") -> None: + session._setupstate.teardown_exact(None) + + +def pytest_runtest_protocol(item: Item, nextitem: Optional[Item]) -> bool: + ihook = item.ihook + ihook.pytest_runtest_logstart(nodeid=item.nodeid, location=item.location) + runtestprotocol(item, nextitem=nextitem) + ihook.pytest_runtest_logfinish(nodeid=item.nodeid, location=item.location) + return True + + +def runtestprotocol( + item: Item, log: bool = True, nextitem: Optional[Item] = None +) -> List[TestReport]: + hasrequest = hasattr(item, "_request") + if hasrequest and not item._request: # type: ignore[attr-defined] + # This only happens if the item is re-run, as is done by + # pytest-rerunfailures. + item._initrequest() # type: ignore[attr-defined] + rep = call_and_report(item, "setup", log) + reports = [rep] + if rep.passed: + if item.config.getoption("setupshow", False): + show_test_item(item) + if not item.config.getoption("setuponly", False): + reports.append(call_and_report(item, "call", log)) + reports.append(call_and_report(item, "teardown", log, nextitem=nextitem)) + # After all teardown hooks have been called + # want funcargs and request info to go away. 
+ if hasrequest: + item._request = False # type: ignore[attr-defined] + item.funcargs = None # type: ignore[attr-defined] + return reports + + +def show_test_item(item: Item) -> None: + """Show test function, parameters and the fixtures of the test item.""" + tw = item.config.get_terminal_writer() + tw.line() + tw.write(" " * 8) + tw.write(item.nodeid) + used_fixtures = sorted(getattr(item, "fixturenames", [])) + if used_fixtures: + tw.write(" (fixtures used: {})".format(", ".join(used_fixtures))) + tw.flush() + + +def pytest_runtest_setup(item: Item) -> None: + _update_current_test_var(item, "setup") + item.session._setupstate.setup(item) + + +def pytest_runtest_call(item: Item) -> None: + _update_current_test_var(item, "call") + try: + del sys.last_type + del sys.last_value + del sys.last_traceback + except AttributeError: + pass + try: + item.runtest() + except Exception as e: + # Store trace info to allow postmortem debugging + sys.last_type = type(e) + sys.last_value = e + assert e.__traceback__ is not None + # Skip *this* frame + sys.last_traceback = e.__traceback__.tb_next + raise e + + +def pytest_runtest_teardown(item: Item, nextitem: Optional[Item]) -> None: + _update_current_test_var(item, "teardown") + item.session._setupstate.teardown_exact(nextitem) + _update_current_test_var(item, None) + + +def _update_current_test_var( + item: Item, when: Optional["Literal['setup', 'call', 'teardown']"] +) -> None: + """Update :envvar:`PYTEST_CURRENT_TEST` to reflect the current item and stage. + + If ``when`` is None, delete ``PYTEST_CURRENT_TEST`` from the environment. + """ + var_name = "PYTEST_CURRENT_TEST" + if when: + value = f"{item.nodeid} ({when})" + # don't allow null bytes on environment variables (see #2644, #2957) + value = value.replace("\x00", "(null)") + os.environ[var_name] = value + else: + os.environ.pop(var_name) + + +def pytest_report_teststatus(report: BaseReport) -> Optional[Tuple[str, str, str]]: + if report.when in ("setup", "teardown"): + if report.failed: + # category, shortletter, verbose-word + return "error", "E", "ERROR" + elif report.skipped: + return "skipped", "s", "SKIPPED" + else: + return "", "", "" + return None + + +# +# Implementation + + +def call_and_report( + item: Item, when: "Literal['setup', 'call', 'teardown']", log: bool = True, **kwds +) -> TestReport: + call = call_runtest_hook(item, when, **kwds) + hook = item.ihook + report: TestReport = hook.pytest_runtest_makereport(item=item, call=call) + if log: + hook.pytest_runtest_logreport(report=report) + if check_interactive_exception(call, report): + hook.pytest_exception_interact(node=item, call=call, report=report) + return report + + +def check_interactive_exception(call: "CallInfo[object]", report: BaseReport) -> bool: + """Check whether the call raised an exception that should be reported as + interactive.""" + if call.excinfo is None: + # Didn't raise. + return False + if hasattr(report, "wasxfail"): + # Exception was expected. + return False + if isinstance(call.excinfo.value, (Skipped, bdb.BdbQuit)): + # Special control flow exception. 
+ return False + return True + + +def call_runtest_hook( + item: Item, when: "Literal['setup', 'call', 'teardown']", **kwds +) -> "CallInfo[None]": + if when == "setup": + ihook: Callable[..., None] = item.ihook.pytest_runtest_setup + elif when == "call": + ihook = item.ihook.pytest_runtest_call + elif when == "teardown": + ihook = item.ihook.pytest_runtest_teardown + else: + assert False, f"Unhandled runtest hook case: {when}" + reraise: Tuple[Type[BaseException], ...] = (Exit,) + if not item.config.getoption("usepdb", False): + reraise += (KeyboardInterrupt,) + return CallInfo.from_call( + lambda: ihook(item=item, **kwds), when=when, reraise=reraise + ) + + +TResult = TypeVar("TResult", covariant=True) + + +@final +@attr.s(repr=False, init=False, auto_attribs=True) +class CallInfo(Generic[TResult]): + """Result/Exception info of a function invocation.""" + + _result: Optional[TResult] + #: The captured exception of the call, if it raised. + excinfo: Optional[ExceptionInfo[BaseException]] + #: The system time when the call started, in seconds since the epoch. + start: float + #: The system time when the call ended, in seconds since the epoch. + stop: float + #: The call duration, in seconds. + duration: float + #: The context of invocation: "collect", "setup", "call" or "teardown". + when: "Literal['collect', 'setup', 'call', 'teardown']" + + def __init__( + self, + result: Optional[TResult], + excinfo: Optional[ExceptionInfo[BaseException]], + start: float, + stop: float, + duration: float, + when: "Literal['collect', 'setup', 'call', 'teardown']", + *, + _ispytest: bool = False, + ) -> None: + check_ispytest(_ispytest) + self._result = result + self.excinfo = excinfo + self.start = start + self.stop = stop + self.duration = duration + self.when = when + + @property + def result(self) -> TResult: + """The return value of the call, if it didn't raise. + + Can only be accessed if excinfo is None. + """ + if self.excinfo is not None: + raise AttributeError(f"{self!r} has no valid result") + # The cast is safe because an exception wasn't raised, hence + # _result has the expected function return type (which may be + # None, that's why a cast and not an assert). + return cast(TResult, self._result) + + @classmethod + def from_call( + cls, + func: "Callable[[], TResult]", + when: "Literal['collect', 'setup', 'call', 'teardown']", + reraise: Optional[ + Union[Type[BaseException], Tuple[Type[BaseException], ...]] + ] = None, + ) -> "CallInfo[TResult]": + """Call func, wrapping the result in a CallInfo. + + :param func: + The function to call. Called without arguments. + :param when: + The phase in which the function is called. + :param reraise: + Exception or exceptions that shall propagate if raised by the + function, instead of being wrapped in the CallInfo. 
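+
+        A rough usage sketch (illustrative, not part of the original
+        docstring)::
+
+            info = CallInfo.from_call(lambda: 42, when="call")
+            assert info.result == 42 and info.excinfo is None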
+        """
+        excinfo = None
+        start = timing.time()
+        precise_start = timing.perf_counter()
+        try:
+            result: Optional[TResult] = func()
+        except BaseException:
+            excinfo = ExceptionInfo.from_current()
+            if reraise is not None and isinstance(excinfo.value, reraise):
+                raise
+            result = None
+        # use the perf counter
+        precise_stop = timing.perf_counter()
+        duration = precise_stop - precise_start
+        stop = timing.time()
+        return cls(
+            start=start,
+            stop=stop,
+            duration=duration,
+            when=when,
+            result=result,
+            excinfo=excinfo,
+            _ispytest=True,
+        )
+
+    def __repr__(self) -> str:
+        if self.excinfo is None:
+            return f"<CallInfo when={self.when!r} result: {self._result!r}>"
+        return f"<CallInfo when={self.when!r} excinfo={self.excinfo!r}>"
+
+
+def pytest_runtest_makereport(item: Item, call: CallInfo[None]) -> TestReport:
+    return TestReport.from_item_and_call(item, call)
+
+
+def pytest_make_collect_report(collector: Collector) -> CollectReport:
+    call = CallInfo.from_call(lambda: list(collector.collect()), "collect")
+    longrepr: Union[None, Tuple[str, int, str], str, TerminalRepr] = None
+    if not call.excinfo:
+        outcome: Literal["passed", "skipped", "failed"] = "passed"
+    else:
+        skip_exceptions = [Skipped]
+        unittest = sys.modules.get("unittest")
+        if unittest is not None:
+            # Type ignored because unittest is loaded dynamically.
+            skip_exceptions.append(unittest.SkipTest)  # type: ignore
+        if isinstance(call.excinfo.value, tuple(skip_exceptions)):
+            outcome = "skipped"
+            r_ = collector._repr_failure_py(call.excinfo, "line")
+            assert isinstance(r_, ExceptionChainRepr), repr(r_)
+            r = r_.reprcrash
+            assert r
+            longrepr = (str(r.path), r.lineno, r.message)
+        else:
+            outcome = "failed"
+            errorinfo = collector.repr_failure(call.excinfo)
+            if not hasattr(errorinfo, "toterminal"):
+                assert isinstance(errorinfo, str)
+                errorinfo = CollectErrorRepr(errorinfo)
+            longrepr = errorinfo
+    result = call.result if not call.excinfo else None
+    rep = CollectReport(collector.nodeid, outcome, longrepr, result)
+    rep.call = call  # type: ignore # see collect_one_node
+    return rep
+
+
+class SetupState:
+    """Shared state for setting up/tearing down test items or collectors
+    in a session.
+
+    Suppose we have a collection tree as follows:
+
+    <Session session>
+        <Module mod1>
+            <Function item1>
+        <Module mod2>
+            <Function item2>
+
+    The SetupState maintains a stack. The stack starts out empty:
+
+        []
+
+    During the setup phase of item1, setup(item1) is called. What it does
+    is:
+
+        push session to stack, run session.setup()
+        push mod1 to stack, run mod1.setup()
+        push item1 to stack, run item1.setup()
+
+    The stack is:
+
+        [session, mod1, item1]
+
+    While the stack is in this shape, it is allowed to add finalizers to
+    each of session, mod1, item1 using addfinalizer().
+
+    During the teardown phase of item1, teardown_exact(item2) is called,
+    where item2 is the item after item1. What it does is:
+
+        pop item1 from stack, run its teardowns
+        pop mod1 from stack, run its teardowns
+
+    mod1 was popped because it ended its purpose with item1. The stack is:
+
+        [session]
+
+    During the setup phase of item2, setup(item2) is called. What it does
+    is:
+
+        push mod2 to stack, run mod2.setup()
+        push item2 to stack, run item2.setup()
+
+    Stack:
+
+        [session, mod2, item2]
+
+    During the teardown phase of item2, teardown_exact(None) is called,
+    because item2 is the last item. What it does is:
+
+        pop item2 from stack, run its teardowns
+        pop mod2 from stack, run its teardowns
+        pop session from stack, run its teardowns
+
+    Stack:
+
+        []
+
+    The end!
+    """
+
+    def __init__(self) -> None:
+        # The stack is in the dict insertion order.
+        self.stack: Dict[
+            Node,
+            Tuple[
+                # Node's finalizers.
+ List[Callable[[], object]], + # Node's exception, if its setup raised. + Optional[Union[OutcomeException, Exception]], + ], + ] = {} + + def setup(self, item: Item) -> None: + """Setup objects along the collector chain to the item.""" + needed_collectors = item.listchain() + + # If a collector fails its setup, fail its entire subtree of items. + # The setup is not retried for each item - the same exception is used. + for col, (finalizers, exc) in self.stack.items(): + assert col in needed_collectors, "previous item was not torn down properly" + if exc: + raise exc + + for col in needed_collectors[len(self.stack) :]: + assert col not in self.stack + # Push onto the stack. + self.stack[col] = ([col.teardown], None) + try: + col.setup() + except TEST_OUTCOME as exc: + self.stack[col] = (self.stack[col][0], exc) + raise exc + + def addfinalizer(self, finalizer: Callable[[], object], node: Node) -> None: + """Attach a finalizer to the given node. + + The node must be currently active in the stack. + """ + assert node and not isinstance(node, tuple) + assert callable(finalizer) + assert node in self.stack, (node, self.stack) + self.stack[node][0].append(finalizer) + + def teardown_exact(self, nextitem: Optional[Item]) -> None: + """Teardown the current stack up until reaching nodes that nextitem + also descends from. + + When nextitem is None (meaning we're at the last item), the entire + stack is torn down. + """ + needed_collectors = nextitem and nextitem.listchain() or [] + exc = None + while self.stack: + if list(self.stack.keys()) == needed_collectors[: len(self.stack)]: + break + node, (finalizers, _) = self.stack.popitem() + while finalizers: + fin = finalizers.pop() + try: + fin() + except TEST_OUTCOME as e: + # XXX Only first exception will be seen by user, + # ideally all should be reported. + if exc is None: + exc = e + if exc: + raise exc + if nextitem is None: + assert not self.stack + + +def collect_one_node(collector: Collector) -> CollectReport: + ihook = collector.ihook + ihook.pytest_collectstart(collector=collector) + rep: CollectReport = ihook.pytest_make_collect_report(collector=collector) + call = rep.__dict__.pop("call", None) + if call and check_interactive_exception(call, rep): + ihook.pytest_exception_interact(node=collector, call=call, report=rep) + return rep diff --git a/venv/lib/python3.10/site-packages/_pytest/scope.py b/venv/lib/python3.10/site-packages/_pytest/scope.py new file mode 100644 index 0000000..7a746fb --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/scope.py @@ -0,0 +1,91 @@ +""" +Scope definition and related utilities. + +Those are defined here, instead of in the 'fixtures' module because +their use is spread across many other pytest modules, and centralizing it in 'fixtures' +would cause circular references. + +Also this makes the module light to import, as it should. +""" +from enum import Enum +from functools import total_ordering +from typing import Optional +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from typing_extensions import Literal + + _ScopeName = Literal["session", "package", "module", "class", "function"] + + +@total_ordering +class Scope(Enum): + """ + Represents one of the possible fixture scopes in pytest. + + Scopes are ordered from lower to higher, that is: + + ->>> higher ->>> + + Function < Class < Module < Package < Session + + <<<- lower <<<- + """ + + # Scopes need to be listed from lower to higher. 
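+    # For example, given the ordering below and the @total_ordering
+    # comparison defined later in this class (editor's sketch):
+    #     Scope.Function < Scope.Session    -> True
+    #     Scope.Module.next_higher()        -> Scope.Package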
+ Function: "_ScopeName" = "function" + Class: "_ScopeName" = "class" + Module: "_ScopeName" = "module" + Package: "_ScopeName" = "package" + Session: "_ScopeName" = "session" + + def next_lower(self) -> "Scope": + """Return the next lower scope.""" + index = _SCOPE_INDICES[self] + if index == 0: + raise ValueError(f"{self} is the lower-most scope") + return _ALL_SCOPES[index - 1] + + def next_higher(self) -> "Scope": + """Return the next higher scope.""" + index = _SCOPE_INDICES[self] + if index == len(_SCOPE_INDICES) - 1: + raise ValueError(f"{self} is the upper-most scope") + return _ALL_SCOPES[index + 1] + + def __lt__(self, other: "Scope") -> bool: + self_index = _SCOPE_INDICES[self] + other_index = _SCOPE_INDICES[other] + return self_index < other_index + + @classmethod + def from_user( + cls, scope_name: "_ScopeName", descr: str, where: Optional[str] = None + ) -> "Scope": + """ + Given a scope name from the user, return the equivalent Scope enum. Should be used + whenever we want to convert a user provided scope name to its enum object. + + If the scope name is invalid, construct a user friendly message and call pytest.fail. + """ + from _pytest.outcomes import fail + + try: + # Holding this reference is necessary for mypy at the moment. + scope = Scope(scope_name) + except ValueError: + fail( + "{} {}got an unexpected scope value '{}'".format( + descr, f"from {where} " if where else "", scope_name + ), + pytrace=False, + ) + return scope + + +_ALL_SCOPES = list(Scope) +_SCOPE_INDICES = {scope: index for index, scope in enumerate(_ALL_SCOPES)} + + +# Ordered list of scopes which can contain many tests (in practice all except Function). +HIGH_SCOPES = [x for x in Scope if x is not Scope.Function] diff --git a/venv/lib/python3.10/site-packages/_pytest/setuponly.py b/venv/lib/python3.10/site-packages/_pytest/setuponly.py new file mode 100644 index 0000000..531131c --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/setuponly.py @@ -0,0 +1,97 @@ +from typing import Generator +from typing import Optional +from typing import Union + +import pytest +from _pytest._io.saferepr import saferepr +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config.argparsing import Parser +from _pytest.fixtures import FixtureDef +from _pytest.fixtures import SubRequest +from _pytest.scope import Scope + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("debugconfig") + group.addoption( + "--setuponly", + "--setup-only", + action="store_true", + help="only setup fixtures, do not execute tests.", + ) + group.addoption( + "--setupshow", + "--setup-show", + action="store_true", + help="show setup of fixtures while executing tests.", + ) + + +@pytest.hookimpl(hookwrapper=True) +def pytest_fixture_setup( + fixturedef: FixtureDef[object], request: SubRequest +) -> Generator[None, None, None]: + yield + if request.config.option.setupshow: + if hasattr(request, "param"): + # Save the fixture parameter so ._show_fixture_action() can + # display it now and during the teardown (in .finish()). 
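+            # For example (editor's sketch): with
+            #     @pytest.fixture(params=[2, 3], ids=lambda p: f"n{p}")
+            # the resolved id is stored here and --setup-show later prints
+            # something like "SETUP    F my_fixture['n2']".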
+ if fixturedef.ids: + if callable(fixturedef.ids): + param = fixturedef.ids(request.param) + else: + param = fixturedef.ids[request.param_index] + else: + param = request.param + fixturedef.cached_param = param # type: ignore[attr-defined] + _show_fixture_action(fixturedef, "SETUP") + + +def pytest_fixture_post_finalizer(fixturedef: FixtureDef[object]) -> None: + if fixturedef.cached_result is not None: + config = fixturedef._fixturemanager.config + if config.option.setupshow: + _show_fixture_action(fixturedef, "TEARDOWN") + if hasattr(fixturedef, "cached_param"): + del fixturedef.cached_param # type: ignore[attr-defined] + + +def _show_fixture_action(fixturedef: FixtureDef[object], msg: str) -> None: + config = fixturedef._fixturemanager.config + capman = config.pluginmanager.getplugin("capturemanager") + if capman: + capman.suspend_global_capture() + + tw = config.get_terminal_writer() + tw.line() + # Use smaller indentation the higher the scope: Session = 0, Package = 1, etc. + scope_indent = list(reversed(Scope)).index(fixturedef._scope) + tw.write(" " * 2 * scope_indent) + tw.write( + "{step} {scope} {fixture}".format( + step=msg.ljust(8), # align the output to TEARDOWN + scope=fixturedef.scope[0].upper(), + fixture=fixturedef.argname, + ) + ) + + if msg == "SETUP": + deps = sorted(arg for arg in fixturedef.argnames if arg != "request") + if deps: + tw.write(" (fixtures used: {})".format(", ".join(deps))) + + if hasattr(fixturedef, "cached_param"): + tw.write(f"[{saferepr(fixturedef.cached_param, maxsize=42)}]") # type: ignore[attr-defined] + + tw.flush() + + if capman: + capman.resume_global_capture() + + +@pytest.hookimpl(tryfirst=True) +def pytest_cmdline_main(config: Config) -> Optional[Union[int, ExitCode]]: + if config.option.setuponly: + config.option.setupshow = True + return None diff --git a/venv/lib/python3.10/site-packages/_pytest/setupplan.py b/venv/lib/python3.10/site-packages/_pytest/setupplan.py new file mode 100644 index 0000000..9ba81cc --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/setupplan.py @@ -0,0 +1,40 @@ +from typing import Optional +from typing import Union + +import pytest +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config.argparsing import Parser +from _pytest.fixtures import FixtureDef +from _pytest.fixtures import SubRequest + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("debugconfig") + group.addoption( + "--setupplan", + "--setup-plan", + action="store_true", + help="show what fixtures and tests would be executed but " + "don't execute anything.", + ) + + +@pytest.hookimpl(tryfirst=True) +def pytest_fixture_setup( + fixturedef: FixtureDef[object], request: SubRequest +) -> Optional[object]: + # Will return a dummy fixture if the setuponly option is provided. 
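+    # cached_result is a (result, cache_key, exc_info) triple; seeding it as
+    # (None, my_cache_key, None) makes the fixture look already set up, so
+    # its real body never runs while the plan is still shown (editor's note).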
+ if request.config.option.setupplan: + my_cache_key = fixturedef.cache_key(request) + fixturedef.cached_result = (None, my_cache_key, None) + return fixturedef.cached_result + return None + + +@pytest.hookimpl(tryfirst=True) +def pytest_cmdline_main(config: Config) -> Optional[Union[int, ExitCode]]: + if config.option.setupplan: + config.option.setuponly = True + config.option.setupshow = True + return None diff --git a/venv/lib/python3.10/site-packages/_pytest/skipping.py b/venv/lib/python3.10/site-packages/_pytest/skipping.py new file mode 100644 index 0000000..ac7216f --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/skipping.py @@ -0,0 +1,296 @@ +"""Support for skip/xfail functions and markers.""" +import os +import platform +import sys +import traceback +from collections.abc import Mapping +from typing import Generator +from typing import Optional +from typing import Tuple +from typing import Type + +import attr + +from _pytest.config import Config +from _pytest.config import hookimpl +from _pytest.config.argparsing import Parser +from _pytest.mark.structures import Mark +from _pytest.nodes import Item +from _pytest.outcomes import fail +from _pytest.outcomes import skip +from _pytest.outcomes import xfail +from _pytest.reports import BaseReport +from _pytest.runner import CallInfo +from _pytest.stash import StashKey + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("general") + group.addoption( + "--runxfail", + action="store_true", + dest="runxfail", + default=False, + help="report the results of xfail tests as if they were not marked", + ) + + parser.addini( + "xfail_strict", + "default for the strict parameter of xfail " + "markers when not given explicitly (default: False)", + default=False, + type="bool", + ) + + +def pytest_configure(config: Config) -> None: + if config.option.runxfail: + # yay a hack + import pytest + + old = pytest.xfail + config.add_cleanup(lambda: setattr(pytest, "xfail", old)) + + def nop(*args, **kwargs): + pass + + nop.Exception = xfail.Exception # type: ignore[attr-defined] + setattr(pytest, "xfail", nop) + + config.addinivalue_line( + "markers", + "skip(reason=None): skip the given test function with an optional reason. " + 'Example: skip(reason="no way of currently testing this") skips the ' + "test.", + ) + config.addinivalue_line( + "markers", + "skipif(condition, ..., *, reason=...): " + "skip the given test function if any of the conditions evaluate to True. " + "Example: skipif(sys.platform == 'win32') skips the test if we are on the win32 platform. " + "See https://docs.pytest.org/en/stable/reference/reference.html#pytest-mark-skipif", + ) + config.addinivalue_line( + "markers", + "xfail(condition, ..., *, reason=..., run=True, raises=None, strict=xfail_strict): " + "mark the test function as an expected failure if any of the conditions " + "evaluate to True. Optionally specify a reason for better reporting " + "and run=False if you don't even want to execute the test function. " + "If only specific exception(s) are expected, you can list them in " + "raises, and if the test fails in other ways, it will be reported as " + "a true failure. See https://docs.pytest.org/en/stable/reference/reference.html#pytest-mark-xfail", + ) + + +def evaluate_condition(item: Item, mark: Mark, condition: object) -> Tuple[bool, str]: + """Evaluate a single skipif/xfail condition. + + If an old-style string condition is given, it is eval()'d, otherwise the + condition is bool()'d. 
If this fails, an appropriately formatted pytest.fail + is raised. + + Returns (result, reason). The reason is only relevant if the result is True. + """ + # String condition. + if isinstance(condition, str): + globals_ = { + "os": os, + "sys": sys, + "platform": platform, + "config": item.config, + } + for dictionary in reversed( + item.ihook.pytest_markeval_namespace(config=item.config) + ): + if not isinstance(dictionary, Mapping): + raise ValueError( + "pytest_markeval_namespace() needs to return a dict, got {!r}".format( + dictionary + ) + ) + globals_.update(dictionary) + if hasattr(item, "obj"): + globals_.update(item.obj.__globals__) # type: ignore[attr-defined] + try: + filename = f"<{mark.name} condition>" + condition_code = compile(condition, filename, "eval") + result = eval(condition_code, globals_) + except SyntaxError as exc: + msglines = [ + "Error evaluating %r condition" % mark.name, + " " + condition, + " " + " " * (exc.offset or 0) + "^", + "SyntaxError: invalid syntax", + ] + fail("\n".join(msglines), pytrace=False) + except Exception as exc: + msglines = [ + "Error evaluating %r condition" % mark.name, + " " + condition, + *traceback.format_exception_only(type(exc), exc), + ] + fail("\n".join(msglines), pytrace=False) + + # Boolean condition. + else: + try: + result = bool(condition) + except Exception as exc: + msglines = [ + "Error evaluating %r condition as a boolean" % mark.name, + *traceback.format_exception_only(type(exc), exc), + ] + fail("\n".join(msglines), pytrace=False) + + reason = mark.kwargs.get("reason", None) + if reason is None: + if isinstance(condition, str): + reason = "condition: " + condition + else: + # XXX better be checked at collection time + msg = ( + "Error evaluating %r: " % mark.name + + "you need to specify reason=STRING when using booleans as conditions." + ) + fail(msg, pytrace=False) + + return result, reason + + +@attr.s(slots=True, frozen=True, auto_attribs=True) +class Skip: + """The result of evaluate_skip_marks().""" + + reason: str = "unconditional skip" + + +def evaluate_skip_marks(item: Item) -> Optional[Skip]: + """Evaluate skip and skipif marks on item, returning Skip if triggered.""" + for mark in item.iter_markers(name="skipif"): + if "condition" not in mark.kwargs: + conditions = mark.args + else: + conditions = (mark.kwargs["condition"],) + + # Unconditional. + if not conditions: + reason = mark.kwargs.get("reason", "") + return Skip(reason) + + # If any of the conditions are true. + for condition in conditions: + result, reason = evaluate_condition(item, mark, condition) + if result: + return Skip(reason) + + for mark in item.iter_markers(name="skip"): + try: + return Skip(*mark.args, **mark.kwargs) + except TypeError as e: + raise TypeError(str(e) + " - maybe you meant pytest.mark.skipif?") from None + + return None + + +@attr.s(slots=True, frozen=True, auto_attribs=True) +class Xfail: + """The result of evaluate_xfail_marks().""" + + reason: str + run: bool + strict: bool + raises: Optional[Tuple[Type[BaseException], ...]] + + +def evaluate_xfail_marks(item: Item) -> Optional[Xfail]: + """Evaluate xfail marks on item, returning Xfail if triggered.""" + for mark in item.iter_markers(name="xfail"): + run = mark.kwargs.get("run", True) + strict = mark.kwargs.get("strict", item.config.getini("xfail_strict")) + raises = mark.kwargs.get("raises", None) + if "condition" not in mark.kwargs: + conditions = mark.args + else: + conditions = (mark.kwargs["condition"],) + + # Unconditional. 
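+        # e.g. @pytest.mark.xfail(reason="known bug") passes no condition
+        # arguments, so the mark applies unconditionally (editor's example).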
+ if not conditions: + reason = mark.kwargs.get("reason", "") + return Xfail(reason, run, strict, raises) + + # If any of the conditions are true. + for condition in conditions: + result, reason = evaluate_condition(item, mark, condition) + if result: + return Xfail(reason, run, strict, raises) + + return None + + +# Saves the xfail mark evaluation. Can be refreshed during call if None. +xfailed_key = StashKey[Optional[Xfail]]() + + +@hookimpl(tryfirst=True) +def pytest_runtest_setup(item: Item) -> None: + skipped = evaluate_skip_marks(item) + if skipped: + raise skip.Exception(skipped.reason, _use_item_location=True) + + item.stash[xfailed_key] = xfailed = evaluate_xfail_marks(item) + if xfailed and not item.config.option.runxfail and not xfailed.run: + xfail("[NOTRUN] " + xfailed.reason) + + +@hookimpl(hookwrapper=True) +def pytest_runtest_call(item: Item) -> Generator[None, None, None]: + xfailed = item.stash.get(xfailed_key, None) + if xfailed is None: + item.stash[xfailed_key] = xfailed = evaluate_xfail_marks(item) + + if xfailed and not item.config.option.runxfail and not xfailed.run: + xfail("[NOTRUN] " + xfailed.reason) + + yield + + # The test run may have added an xfail mark dynamically. + xfailed = item.stash.get(xfailed_key, None) + if xfailed is None: + item.stash[xfailed_key] = xfailed = evaluate_xfail_marks(item) + + +@hookimpl(hookwrapper=True) +def pytest_runtest_makereport(item: Item, call: CallInfo[None]): + outcome = yield + rep = outcome.get_result() + xfailed = item.stash.get(xfailed_key, None) + if item.config.option.runxfail: + pass # don't interfere + elif call.excinfo and isinstance(call.excinfo.value, xfail.Exception): + assert call.excinfo.value.msg is not None + rep.wasxfail = "reason: " + call.excinfo.value.msg + rep.outcome = "skipped" + elif not rep.skipped and xfailed: + if call.excinfo: + raises = xfailed.raises + if raises is not None and not isinstance(call.excinfo.value, raises): + rep.outcome = "failed" + else: + rep.outcome = "skipped" + rep.wasxfail = xfailed.reason + elif call.when == "call": + if xfailed.strict: + rep.outcome = "failed" + rep.longrepr = "[XPASS(strict)] " + xfailed.reason + else: + rep.outcome = "passed" + rep.wasxfail = xfailed.reason + + +def pytest_report_teststatus(report: BaseReport) -> Optional[Tuple[str, str, str]]: + if hasattr(report, "wasxfail"): + if report.skipped: + return "xfailed", "x", "XFAIL" + elif report.passed: + return "xpassed", "X", "XPASS" + return None diff --git a/venv/lib/python3.10/site-packages/_pytest/stash.py b/venv/lib/python3.10/site-packages/_pytest/stash.py new file mode 100644 index 0000000..e61d75b --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/stash.py @@ -0,0 +1,112 @@ +from typing import Any +from typing import cast +from typing import Dict +from typing import Generic +from typing import TypeVar +from typing import Union + + +__all__ = ["Stash", "StashKey"] + + +T = TypeVar("T") +D = TypeVar("D") + + +class StashKey(Generic[T]): + """``StashKey`` is an object used as a key to a :class:`Stash`. + + A ``StashKey`` is associated with the type ``T`` of the value of the key. + + A ``StashKey`` is unique and cannot conflict with another key. + """ + + __slots__ = () + + +class Stash: + r"""``Stash`` is a type-safe heterogeneous mutable mapping that + allows keys and value types to be defined separately from + where it (the ``Stash``) is created. 
+
+    Usually you will be given an object which has a ``Stash``, for example
+    :class:`~pytest.Config` or a :class:`~_pytest.nodes.Node`:
+
+    .. code-block:: python
+
+        stash: Stash = some_object.stash
+
+    If a module or plugin wants to store data in this ``Stash``, it creates
+    :class:`StashKey`\s for its keys (at the module level):
+
+    .. code-block:: python
+
+        # At the top-level of the module
+        some_str_key = StashKey[str]()
+        some_bool_key = StashKey[bool]()
+
+    To store information:
+
+    .. code-block:: python
+
+        # Value type must match the key.
+        stash[some_str_key] = "value"
+        stash[some_bool_key] = True
+
+    To retrieve the information:
+
+    .. code-block:: python
+
+        # The static type of some_str is str.
+        some_str = stash[some_str_key]
+        # The static type of some_bool is bool.
+        some_bool = stash[some_bool_key]
+    """
+
+    __slots__ = ("_storage",)
+
+    def __init__(self) -> None:
+        self._storage: Dict[StashKey[Any], object] = {}
+
+    def __setitem__(self, key: StashKey[T], value: T) -> None:
+        """Set a value for key."""
+        self._storage[key] = value
+
+    def __getitem__(self, key: StashKey[T]) -> T:
+        """Get the value for key.
+
+        Raises ``KeyError`` if the key wasn't set before.
+        """
+        return cast(T, self._storage[key])
+
+    def get(self, key: StashKey[T], default: D) -> Union[T, D]:
+        """Get the value for key, or return default if the key wasn't set
+        before."""
+        try:
+            return self[key]
+        except KeyError:
+            return default
+
+    def setdefault(self, key: StashKey[T], default: T) -> T:
+        """Return the value of key if already set, otherwise set the value
+        of key to default and return default."""
+        try:
+            return self[key]
+        except KeyError:
+            self[key] = default
+            return default
+
+    def __delitem__(self, key: StashKey[T]) -> None:
+        """Delete the value for key.
+
+        Raises ``KeyError`` if the key wasn't set before.
+        """
+        del self._storage[key]
+
+    def __contains__(self, key: StashKey[T]) -> bool:
+        """Return whether key was set."""
+        return key in self._storage
+
+    def __len__(self) -> int:
+        """Return how many items exist in the stash."""
+        return len(self._storage)
diff --git a/venv/lib/python3.10/site-packages/_pytest/stepwise.py b/venv/lib/python3.10/site-packages/_pytest/stepwise.py
new file mode 100644
index 0000000..4d95a96
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/_pytest/stepwise.py
@@ -0,0 +1,122 @@
+from typing import List
+from typing import Optional
+from typing import TYPE_CHECKING
+
+import pytest
+from _pytest import nodes
+from _pytest.config import Config
+from _pytest.config.argparsing import Parser
+from _pytest.main import Session
+from _pytest.reports import TestReport
+
+if TYPE_CHECKING:
+    from _pytest.cacheprovider import Cache
+
+STEPWISE_CACHE_DIR = "cache/stepwise"
+
+
+def pytest_addoption(parser: Parser) -> None:
+    group = parser.getgroup("general")
+    group.addoption(
+        "--sw",
+        "--stepwise",
+        action="store_true",
+        default=False,
+        dest="stepwise",
+        help="exit on test failure and continue from last failing test next time",
+    )
+    group.addoption(
+        "--sw-skip",
+        "--stepwise-skip",
+        action="store_true",
+        default=False,
+        dest="stepwise_skip",
+        help="ignore the first failing test but stop on the next failing test.\n"
+        "implicitly enables --stepwise.",
+    )
+
+
+@pytest.hookimpl
+def pytest_configure(config: Config) -> None:
+    if config.option.stepwise_skip:
+        # allow --stepwise-skip to work on its own merits.
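+        # i.e. plain `pytest --sw-skip` behaves like
+        # `pytest --sw --sw-skip` (editor's note).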
+ config.option.stepwise = True + if config.getoption("stepwise"): + config.pluginmanager.register(StepwisePlugin(config), "stepwiseplugin") + + +def pytest_sessionfinish(session: Session) -> None: + if not session.config.getoption("stepwise"): + assert session.config.cache is not None + # Clear the list of failing tests if the plugin is not active. + session.config.cache.set(STEPWISE_CACHE_DIR, []) + + +class StepwisePlugin: + def __init__(self, config: Config) -> None: + self.config = config + self.session: Optional[Session] = None + self.report_status = "" + assert config.cache is not None + self.cache: Cache = config.cache + self.lastfailed: Optional[str] = self.cache.get(STEPWISE_CACHE_DIR, None) + self.skip: bool = config.getoption("stepwise_skip") + + def pytest_sessionstart(self, session: Session) -> None: + self.session = session + + def pytest_collection_modifyitems( + self, config: Config, items: List[nodes.Item] + ) -> None: + if not self.lastfailed: + self.report_status = "no previously failed tests, not skipping." + return + + # check all item nodes until we find a match on last failed + failed_index = None + for index, item in enumerate(items): + if item.nodeid == self.lastfailed: + failed_index = index + break + + # If the previously failed test was not found among the test items, + # do not skip any tests. + if failed_index is None: + self.report_status = "previously failed test not found, not skipping." + else: + self.report_status = f"skipping {failed_index} already passed items." + deselected = items[:failed_index] + del items[:failed_index] + config.hook.pytest_deselected(items=deselected) + + def pytest_runtest_logreport(self, report: TestReport) -> None: + if report.failed: + if self.skip: + # Remove test from the failed ones (if it exists) and unset the skip option + # to make sure the following tests will not be skipped. + if report.nodeid == self.lastfailed: + self.lastfailed = None + + self.skip = False + else: + # Mark test as the last failing and interrupt the test session. + self.lastfailed = report.nodeid + assert self.session is not None + self.session.shouldstop = ( + "Test failed, continuing from this test next run." + ) + + else: + # If the test was actually run and did pass. + if report.when == "call": + # Remove test from the failed ones, if exists. + if report.nodeid == self.lastfailed: + self.lastfailed = None + + def pytest_report_collectionfinish(self) -> Optional[str]: + if self.config.getoption("verbose") >= 0 and self.report_status: + return f"stepwise: {self.report_status}" + return None + + def pytest_sessionfinish(self) -> None: + self.cache.set(STEPWISE_CACHE_DIR, self.lastfailed) diff --git a/venv/lib/python3.10/site-packages/_pytest/terminal.py b/venv/lib/python3.10/site-packages/_pytest/terminal.py new file mode 100644 index 0000000..b4848c4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/terminal.py @@ -0,0 +1,1400 @@ +"""Terminal reporting of the full testing process. + +This is a good source for looking at the various reporting hooks. 
+""" +import argparse +import datetime +import inspect +import platform +import sys +import warnings +from collections import Counter +from functools import partial +from pathlib import Path +from typing import Any +from typing import Callable +from typing import cast +from typing import ClassVar +from typing import Dict +from typing import Generator +from typing import List +from typing import Mapping +from typing import Optional +from typing import Sequence +from typing import Set +from typing import TextIO +from typing import Tuple +from typing import TYPE_CHECKING +from typing import Union + +import attr +import pluggy + +import _pytest._version +from _pytest import nodes +from _pytest import timing +from _pytest._code import ExceptionInfo +from _pytest._code.code import ExceptionRepr +from _pytest._io.wcwidth import wcswidth +from _pytest.compat import final +from _pytest.config import _PluggyPlugin +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config import hookimpl +from _pytest.config.argparsing import Parser +from _pytest.nodes import Item +from _pytest.nodes import Node +from _pytest.pathlib import absolutepath +from _pytest.pathlib import bestrelpath +from _pytest.reports import BaseReport +from _pytest.reports import CollectReport +from _pytest.reports import TestReport + +if TYPE_CHECKING: + from typing_extensions import Literal + + from _pytest.main import Session + + +REPORT_COLLECTING_RESOLUTION = 0.5 + +KNOWN_TYPES = ( + "failed", + "passed", + "skipped", + "deselected", + "xfailed", + "xpassed", + "warnings", + "error", +) + +_REPORTCHARS_DEFAULT = "fE" + + +class MoreQuietAction(argparse.Action): + """A modified copy of the argparse count action which counts down and updates + the legacy quiet attribute at the same time. + + Used to unify verbosity handling. + """ + + def __init__( + self, + option_strings: Sequence[str], + dest: str, + default: object = None, + required: bool = False, + help: Optional[str] = None, + ) -> None: + super().__init__( + option_strings=option_strings, + dest=dest, + nargs=0, + default=default, + required=required, + help=help, + ) + + def __call__( + self, + parser: argparse.ArgumentParser, + namespace: argparse.Namespace, + values: Union[str, Sequence[object], None], + option_string: Optional[str] = None, + ) -> None: + new_count = getattr(namespace, self.dest, 0) - 1 + setattr(namespace, self.dest, new_count) + # todo Deprecate config.quiet + namespace.quiet = getattr(namespace, "quiet", 0) + 1 + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("terminal reporting", "reporting", after="general") + group._addoption( + "-v", + "--verbose", + action="count", + default=0, + dest="verbose", + help="increase verbosity.", + ) + group._addoption( + "--no-header", + action="store_true", + default=False, + dest="no_header", + help="disable header", + ) + group._addoption( + "--no-summary", + action="store_true", + default=False, + dest="no_summary", + help="disable summary", + ) + group._addoption( + "-q", + "--quiet", + action=MoreQuietAction, + default=0, + dest="verbose", + help="decrease verbosity.", + ) + group._addoption( + "--verbosity", + dest="verbose", + type=int, + default=0, + help="set verbosity. 
Default is 0.", + ) + group._addoption( + "-r", + action="store", + dest="reportchars", + default=_REPORTCHARS_DEFAULT, + metavar="chars", + help="show extra test summary info as specified by chars: (f)ailed, " + "(E)rror, (s)kipped, (x)failed, (X)passed, " + "(p)assed, (P)assed with output, (a)ll except passed (p/P), or (A)ll. " + "(w)arnings are enabled by default (see --disable-warnings), " + "'N' can be used to reset the list. (default: 'fE').", + ) + group._addoption( + "--disable-warnings", + "--disable-pytest-warnings", + default=False, + dest="disable_warnings", + action="store_true", + help="disable warnings summary", + ) + group._addoption( + "-l", + "--showlocals", + action="store_true", + dest="showlocals", + default=False, + help="show locals in tracebacks (disabled by default).", + ) + group._addoption( + "--tb", + metavar="style", + action="store", + dest="tbstyle", + default="auto", + choices=["auto", "long", "short", "no", "line", "native"], + help="traceback print mode (auto/long/short/line/native/no).", + ) + group._addoption( + "--show-capture", + action="store", + dest="showcapture", + choices=["no", "stdout", "stderr", "log", "all"], + default="all", + help="Controls how captured stdout/stderr/log is shown on failed tests. " + "Default is 'all'.", + ) + group._addoption( + "--fulltrace", + "--full-trace", + action="store_true", + default=False, + help="don't cut any tracebacks (default is to cut).", + ) + group._addoption( + "--color", + metavar="color", + action="store", + dest="color", + default="auto", + choices=["yes", "no", "auto"], + help="color terminal output (yes/no/auto).", + ) + group._addoption( + "--code-highlight", + default="yes", + choices=["yes", "no"], + help="Whether code should be highlighted (only if --color is also enabled)", + ) + + parser.addini( + "console_output_style", + help='console output: "classic", or with additional progress information ("progress" (percentage) | "count").', + default="progress", + ) + + +def pytest_configure(config: Config) -> None: + reporter = TerminalReporter(config, sys.stdout) + config.pluginmanager.register(reporter, "terminalreporter") + if config.option.debug or config.option.traceconfig: + + def mywriter(tags, args): + msg = " ".join(map(str, args)) + reporter.write_line("[traceconfig] " + msg) + + config.trace.root.setprocessor("pytest:config", mywriter) + + +def getreportopt(config: Config) -> str: + reportchars: str = config.option.reportchars + + old_aliases = {"F", "S"} + reportopts = "" + for char in reportchars: + if char in old_aliases: + char = char.lower() + if char == "a": + reportopts = "sxXEf" + elif char == "A": + reportopts = "PpsxXEf" + elif char == "N": + reportopts = "" + elif char not in reportopts: + reportopts += char + + if not config.option.disable_warnings and "w" not in reportopts: + reportopts = "w" + reportopts + elif config.option.disable_warnings and "w" in reportopts: + reportopts = reportopts.replace("w", "") + + return reportopts + + +@hookimpl(trylast=True) # after _pytest.runner +def pytest_report_teststatus(report: BaseReport) -> Tuple[str, str, str]: + letter = "F" + if report.passed: + letter = "." + elif report.skipped: + letter = "s" + + outcome: str = report.outcome + if report.when in ("collect", "setup", "teardown") and outcome == "failed": + outcome = "error" + letter = "E" + + return outcome, letter, outcome.upper() + + +@attr.s(auto_attribs=True) +class WarningReport: + """Simple structure to hold warnings information captured by ``pytest_warning_recorded``. 
+ + :ivar str message: + User friendly message about the warning. + :ivar str|None nodeid: + nodeid that generated the warning (see ``get_location``). + :ivar tuple fslocation: + File system location of the source of the warning (see ``get_location``). + """ + + message: str + nodeid: Optional[str] = None + fslocation: Optional[Tuple[str, int]] = None + + count_towards_summary: ClassVar = True + + def get_location(self, config: Config) -> Optional[str]: + """Return the more user-friendly information about the location of a warning, or None.""" + if self.nodeid: + return self.nodeid + if self.fslocation: + filename, linenum = self.fslocation + relpath = bestrelpath(config.invocation_params.dir, absolutepath(filename)) + return f"{relpath}:{linenum}" + return None + + +@final +class TerminalReporter: + def __init__(self, config: Config, file: Optional[TextIO] = None) -> None: + import _pytest.config + + self.config = config + self._numcollected = 0 + self._session: Optional[Session] = None + self._showfspath: Optional[bool] = None + + self.stats: Dict[str, List[Any]] = {} + self._main_color: Optional[str] = None + self._known_types: Optional[List[str]] = None + self.startpath = config.invocation_params.dir + if file is None: + file = sys.stdout + self._tw = _pytest.config.create_terminal_writer(config, file) + self._screen_width = self._tw.fullwidth + self.currentfspath: Union[None, Path, str, int] = None + self.reportchars = getreportopt(config) + self.hasmarkup = self._tw.hasmarkup + self.isatty = file.isatty() + self._progress_nodeids_reported: Set[str] = set() + self._show_progress_info = self._determine_show_progress_info() + self._collect_report_last_write: Optional[float] = None + self._already_displayed_warnings: Optional[int] = None + self._keyboardinterrupt_memo: Optional[ExceptionRepr] = None + + def _determine_show_progress_info(self) -> "Literal['progress', 'count', False]": + """Return whether we should display progress information based on the current config.""" + # do not show progress if we are not capturing output (#3038) + if self.config.getoption("capture", "no") == "no": + return False + # do not show progress if we are showing fixture setup/teardown + if self.config.getoption("setupshow", False): + return False + cfg: str = self.config.getini("console_output_style") + if cfg == "progress": + return "progress" + elif cfg == "count": + return "count" + else: + return False + + @property + def verbosity(self) -> int: + verbosity: int = self.config.option.verbose + return verbosity + + @property + def showheader(self) -> bool: + return self.verbosity >= 0 + + @property + def no_header(self) -> bool: + return bool(self.config.option.no_header) + + @property + def no_summary(self) -> bool: + return bool(self.config.option.no_summary) + + @property + def showfspath(self) -> bool: + if self._showfspath is None: + return self.verbosity >= 0 + return self._showfspath + + @showfspath.setter + def showfspath(self, value: Optional[bool]) -> None: + self._showfspath = value + + @property + def showlongtestinfo(self) -> bool: + return self.verbosity > 0 + + def hasopt(self, char: str) -> bool: + char = {"xfailed": "x", "skipped": "s"}.get(char, char) + return char in self.reportchars + + def write_fspath_result(self, nodeid: str, res, **markup: bool) -> None: + fspath = self.config.rootpath / nodeid.split("::")[0] + if self.currentfspath is None or fspath != self.currentfspath: + if self.currentfspath is not None and self._show_progress_info: + 
self._write_progress_information_filling_space() + self.currentfspath = fspath + relfspath = bestrelpath(self.startpath, fspath) + self._tw.line() + self._tw.write(relfspath + " ") + self._tw.write(res, flush=True, **markup) + + def write_ensure_prefix(self, prefix: str, extra: str = "", **kwargs) -> None: + if self.currentfspath != prefix: + self._tw.line() + self.currentfspath = prefix + self._tw.write(prefix) + if extra: + self._tw.write(extra, **kwargs) + self.currentfspath = -2 + + def ensure_newline(self) -> None: + if self.currentfspath: + self._tw.line() + self.currentfspath = None + + def write(self, content: str, *, flush: bool = False, **markup: bool) -> None: + self._tw.write(content, flush=flush, **markup) + + def flush(self) -> None: + self._tw.flush() + + def write_line(self, line: Union[str, bytes], **markup: bool) -> None: + if not isinstance(line, str): + line = str(line, errors="replace") + self.ensure_newline() + self._tw.line(line, **markup) + + def rewrite(self, line: str, **markup: bool) -> None: + """Rewinds the terminal cursor to the beginning and writes the given line. + + :param erase: + If True, will also add spaces until the full terminal width to ensure + previous lines are properly erased. + + The rest of the keyword arguments are markup instructions. + """ + erase = markup.pop("erase", False) + if erase: + fill_count = self._tw.fullwidth - len(line) - 1 + fill = " " * fill_count + else: + fill = "" + line = str(line) + self._tw.write("\r" + line + fill, **markup) + + def write_sep( + self, + sep: str, + title: Optional[str] = None, + fullwidth: Optional[int] = None, + **markup: bool, + ) -> None: + self.ensure_newline() + self._tw.sep(sep, title, fullwidth, **markup) + + def section(self, title: str, sep: str = "=", **kw: bool) -> None: + self._tw.sep(sep, title, **kw) + + def line(self, msg: str, **kw: bool) -> None: + self._tw.line(msg, **kw) + + def _add_stats(self, category: str, items: Sequence[Any]) -> None: + set_main_color = category not in self.stats + self.stats.setdefault(category, []).extend(items) + if set_main_color: + self._set_main_color() + + def pytest_internalerror(self, excrepr: ExceptionRepr) -> bool: + for line in str(excrepr).split("\n"): + self.write_line("INTERNALERROR> " + line) + return True + + def pytest_warning_recorded( + self, + warning_message: warnings.WarningMessage, + nodeid: str, + ) -> None: + from _pytest.warnings import warning_record_to_str + + fslocation = warning_message.filename, warning_message.lineno + message = warning_record_to_str(warning_message) + + warning_report = WarningReport( + fslocation=fslocation, message=message, nodeid=nodeid + ) + self._add_stats("warnings", [warning_report]) + + def pytest_plugin_registered(self, plugin: _PluggyPlugin) -> None: + if self.config.option.traceconfig: + msg = f"PLUGIN registered: {plugin}" + # XXX This event may happen during setup/teardown time + # which unfortunately captures our output here + # which garbles our output if we use self.write_line. + self.write_line(msg) + + def pytest_deselected(self, items: Sequence[Item]) -> None: + self._add_stats("deselected", items) + + def pytest_runtest_logstart( + self, nodeid: str, location: Tuple[str, Optional[int], str] + ) -> None: + # Ensure that the path is printed before the + # 1st test of a module starts running. 
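+        # With -v (showlongtestinfo) each test starts its own line, e.g.
+        # "test_mod.py::test_a"; otherwise only the file path is written and
+        # the per-test status letters follow on that line (editor's note).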
+ if self.showlongtestinfo: + line = self._locationline(nodeid, *location) + self.write_ensure_prefix(line, "") + self.flush() + elif self.showfspath: + self.write_fspath_result(nodeid, "") + self.flush() + + def pytest_runtest_logreport(self, report: TestReport) -> None: + self._tests_ran = True + rep = report + res: Tuple[ + str, str, Union[str, Tuple[str, Mapping[str, bool]]] + ] = self.config.hook.pytest_report_teststatus(report=rep, config=self.config) + category, letter, word = res + if not isinstance(word, tuple): + markup = None + else: + word, markup = word + self._add_stats(category, [rep]) + if not letter and not word: + # Probably passed setup/teardown. + return + running_xdist = hasattr(rep, "node") + if markup is None: + was_xfail = hasattr(report, "wasxfail") + if rep.passed and not was_xfail: + markup = {"green": True} + elif rep.passed and was_xfail: + markup = {"yellow": True} + elif rep.failed: + markup = {"red": True} + elif rep.skipped: + markup = {"yellow": True} + else: + markup = {} + if self.verbosity <= 0: + self._tw.write(letter, **markup) + else: + self._progress_nodeids_reported.add(rep.nodeid) + line = self._locationline(rep.nodeid, *rep.location) + if not running_xdist: + self.write_ensure_prefix(line, word, **markup) + if rep.skipped or hasattr(report, "wasxfail"): + reason = _get_raw_skip_reason(rep) + if self.config.option.verbose < 2: + available_width = ( + (self._tw.fullwidth - self._tw.width_of_current_line) + - len(" [100%]") + - 1 + ) + formatted_reason = _format_trimmed( + " ({})", reason, available_width + ) + else: + formatted_reason = f" ({reason})" + + if reason and formatted_reason is not None: + self._tw.write(formatted_reason) + if self._show_progress_info: + self._write_progress_information_filling_space() + else: + self.ensure_newline() + self._tw.write("[%s]" % rep.node.gateway.id) + if self._show_progress_info: + self._tw.write( + self._get_progress_information_message() + " ", cyan=True + ) + else: + self._tw.write(" ") + self._tw.write(word, **markup) + self._tw.write(" " + line) + self.currentfspath = -2 + self.flush() + + @property + def _is_last_item(self) -> bool: + assert self._session is not None + return len(self._progress_nodeids_reported) == self._session.testscollected + + def pytest_runtest_logfinish(self, nodeid: str) -> None: + assert self._session + if self.verbosity <= 0 and self._show_progress_info: + if self._show_progress_info == "count": + num_tests = self._session.testscollected + progress_length = len(f" [{num_tests}/{num_tests}]") + else: + progress_length = len(" [100%]") + + self._progress_nodeids_reported.add(nodeid) + + if self._is_last_item: + self._write_progress_information_filling_space() + else: + main_color, _ = self._get_main_color() + w = self._width_of_current_line + past_edge = w + progress_length + 1 >= self._screen_width + if past_edge: + msg = self._get_progress_information_message() + self._tw.write(msg + "\n", **{main_color: True}) + + def _get_progress_information_message(self) -> str: + assert self._session + collected = self._session.testscollected + if self._show_progress_info == "count": + if collected: + progress = self._progress_nodeids_reported + counter_format = f"{{:{len(str(collected))}d}}" + format_string = f" [{counter_format}/{{}}]" + return format_string.format(len(progress), collected) + return f" [ {collected} / {collected} ]" + else: + if collected: + return " [{:3d}%]".format( + len(self._progress_nodeids_reported) * 100 // collected + ) + return " [100%]" + + def 
_write_progress_information_filling_space(self) -> None: + color, _ = self._get_main_color() + msg = self._get_progress_information_message() + w = self._width_of_current_line + fill = self._tw.fullwidth - w - 1 + self.write(msg.rjust(fill), flush=True, **{color: True}) + + @property + def _width_of_current_line(self) -> int: + """Return the width of the current line.""" + return self._tw.width_of_current_line + + def pytest_collection(self) -> None: + if self.isatty: + if self.config.option.verbose >= 0: + self.write("collecting ... ", flush=True, bold=True) + self._collect_report_last_write = timing.time() + elif self.config.option.verbose >= 1: + self.write("collecting ... ", flush=True, bold=True) + + def pytest_collectreport(self, report: CollectReport) -> None: + if report.failed: + self._add_stats("error", [report]) + elif report.skipped: + self._add_stats("skipped", [report]) + items = [x for x in report.result if isinstance(x, Item)] + self._numcollected += len(items) + if self.isatty: + self.report_collect() + + def report_collect(self, final: bool = False) -> None: + if self.config.option.verbose < 0: + return + + if not final: + # Only write "collecting" report every 0.5s. + t = timing.time() + if ( + self._collect_report_last_write is not None + and self._collect_report_last_write > t - REPORT_COLLECTING_RESOLUTION + ): + return + self._collect_report_last_write = t + + errors = len(self.stats.get("error", [])) + skipped = len(self.stats.get("skipped", [])) + deselected = len(self.stats.get("deselected", [])) + selected = self._numcollected - deselected + line = "collected " if final else "collecting " + line += ( + str(self._numcollected) + " item" + ("" if self._numcollected == 1 else "s") + ) + if errors: + line += " / %d error%s" % (errors, "s" if errors != 1 else "") + if deselected: + line += " / %d deselected" % deselected + if skipped: + line += " / %d skipped" % skipped + if self._numcollected > selected: + line += " / %d selected" % selected + if self.isatty: + self.rewrite(line, bold=True, erase=True) + if final: + self.write("\n") + else: + self.write_line(line) + + @hookimpl(trylast=True) + def pytest_sessionstart(self, session: "Session") -> None: + self._session = session + self._sessionstarttime = timing.time() + if not self.showheader: + return + self.write_sep("=", "test session starts", bold=True) + verinfo = platform.python_version() + if not self.no_header: + msg = f"platform {sys.platform} -- Python {verinfo}" + pypy_version_info = getattr(sys, "pypy_version_info", None) + if pypy_version_info: + verinfo = ".".join(map(str, pypy_version_info[:3])) + msg += f"[pypy-{verinfo}-{pypy_version_info[3]}]" + msg += ", pytest-{}, pluggy-{}".format( + _pytest._version.version, pluggy.__version__ + ) + if ( + self.verbosity > 0 + or self.config.option.debug + or getattr(self.config.option, "pastebin", None) + ): + msg += " -- " + str(sys.executable) + self.write_line(msg) + lines = self.config.hook.pytest_report_header( + config=self.config, start_path=self.startpath + ) + self._write_report_lines_from_hooks(lines) + + def _write_report_lines_from_hooks( + self, lines: Sequence[Union[str, Sequence[str]]] + ) -> None: + for line_or_lines in reversed(lines): + if isinstance(line_or_lines, str): + self.write_line(line_or_lines) + else: + for line in line_or_lines: + self.write_line(line) + + def pytest_report_header(self, config: Config) -> List[str]: + line = "rootdir: %s" % config.rootpath + + if config.inipath: + line += ", configfile: " + 
bestrelpath(config.rootpath, config.inipath) + + testpaths: List[str] = config.getini("testpaths") + if config.invocation_params.dir == config.rootpath and config.args == testpaths: + line += ", testpaths: {}".format(", ".join(testpaths)) + + result = [line] + + plugininfo = config.pluginmanager.list_plugin_distinfo() + if plugininfo: + result.append("plugins: %s" % ", ".join(_plugin_nameversions(plugininfo))) + return result + + def pytest_collection_finish(self, session: "Session") -> None: + self.report_collect(True) + + lines = self.config.hook.pytest_report_collectionfinish( + config=self.config, + start_path=self.startpath, + items=session.items, + ) + self._write_report_lines_from_hooks(lines) + + if self.config.getoption("collectonly"): + if session.items: + if self.config.option.verbose > -1: + self._tw.line("") + self._printcollecteditems(session.items) + + failed = self.stats.get("failed") + if failed: + self._tw.sep("!", "collection failures") + for rep in failed: + rep.toterminal(self._tw) + + def _printcollecteditems(self, items: Sequence[Item]) -> None: + if self.config.option.verbose < 0: + if self.config.option.verbose < -1: + counts = Counter(item.nodeid.split("::", 1)[0] for item in items) + for name, count in sorted(counts.items()): + self._tw.line("%s: %d" % (name, count)) + else: + for item in items: + self._tw.line(item.nodeid) + return + stack: List[Node] = [] + indent = "" + for item in items: + needed_collectors = item.listchain()[1:] # strip root node + while stack: + if stack == needed_collectors[: len(stack)]: + break + stack.pop() + for col in needed_collectors[len(stack) :]: + stack.append(col) + indent = (len(stack) - 1) * " " + self._tw.line(f"{indent}{col}") + if self.config.option.verbose >= 1: + obj = getattr(col, "obj", None) + doc = inspect.getdoc(obj) if obj else None + if doc: + for line in doc.splitlines(): + self._tw.line("{}{}".format(indent + " ", line)) + + @hookimpl(hookwrapper=True) + def pytest_sessionfinish( + self, session: "Session", exitstatus: Union[int, ExitCode] + ): + outcome = yield + outcome.get_result() + self._tw.line("") + summary_exit_codes = ( + ExitCode.OK, + ExitCode.TESTS_FAILED, + ExitCode.INTERRUPTED, + ExitCode.USAGE_ERROR, + ExitCode.NO_TESTS_COLLECTED, + ) + if exitstatus in summary_exit_codes and not self.no_summary: + self.config.hook.pytest_terminal_summary( + terminalreporter=self, exitstatus=exitstatus, config=self.config + ) + if session.shouldfail: + self.write_sep("!", str(session.shouldfail), red=True) + if exitstatus == ExitCode.INTERRUPTED: + self._report_keyboardinterrupt() + self._keyboardinterrupt_memo = None + elif session.shouldstop: + self.write_sep("!", str(session.shouldstop), red=True) + self.summary_stats() + + @hookimpl(hookwrapper=True) + def pytest_terminal_summary(self) -> Generator[None, None, None]: + self.summary_errors() + self.summary_failures() + self.summary_warnings() + self.summary_passes() + yield + self.short_test_summary() + # Display any extra warnings from teardown here (if any). 
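+        # summary_warnings() already ran before the yield; thanks to
+        # _already_displayed_warnings this second call only reports warnings
+        # recorded since then, e.g. from fixture finalizers (editor's note).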
+ self.summary_warnings() + + def pytest_keyboard_interrupt(self, excinfo: ExceptionInfo[BaseException]) -> None: + self._keyboardinterrupt_memo = excinfo.getrepr(funcargs=True) + + def pytest_unconfigure(self) -> None: + if self._keyboardinterrupt_memo is not None: + self._report_keyboardinterrupt() + + def _report_keyboardinterrupt(self) -> None: + excrepr = self._keyboardinterrupt_memo + assert excrepr is not None + assert excrepr.reprcrash is not None + msg = excrepr.reprcrash.message + self.write_sep("!", msg) + if "KeyboardInterrupt" in msg: + if self.config.option.fulltrace: + excrepr.toterminal(self._tw) + else: + excrepr.reprcrash.toterminal(self._tw) + self._tw.line( + "(to show a full traceback on KeyboardInterrupt use --full-trace)", + yellow=True, + ) + + def _locationline( + self, nodeid: str, fspath: str, lineno: Optional[int], domain: str + ) -> str: + def mkrel(nodeid: str) -> str: + line = self.config.cwd_relative_nodeid(nodeid) + if domain and line.endswith(domain): + line = line[: -len(domain)] + values = domain.split("[") + values[0] = values[0].replace(".", "::") # don't replace '.' in params + line += "[".join(values) + return line + + # collect_fspath comes from testid which has a "/"-normalized path. + if fspath: + res = mkrel(nodeid) + if self.verbosity >= 2 and nodeid.split("::")[0] != fspath.replace( + "\\", nodes.SEP + ): + res += " <- " + bestrelpath(self.startpath, Path(fspath)) + else: + res = "[location]" + return res + " " + + def _getfailureheadline(self, rep): + head_line = rep.head_line + if head_line: + return head_line + return "test session" # XXX? + + def _getcrashline(self, rep): + try: + return str(rep.longrepr.reprcrash) + except AttributeError: + try: + return str(rep.longrepr)[:50] + except AttributeError: + return "" + + # + # Summaries for sessionfinish. 
+ # + def getreports(self, name: str): + return [x for x in self.stats.get(name, ()) if not hasattr(x, "_pdbshown")] + + def summary_warnings(self) -> None: + if self.hasopt("w"): + all_warnings: Optional[List[WarningReport]] = self.stats.get("warnings") + if not all_warnings: + return + + final = self._already_displayed_warnings is not None + if final: + warning_reports = all_warnings[self._already_displayed_warnings :] + else: + warning_reports = all_warnings + self._already_displayed_warnings = len(warning_reports) + if not warning_reports: + return + + reports_grouped_by_message: Dict[str, List[WarningReport]] = {} + for wr in warning_reports: + reports_grouped_by_message.setdefault(wr.message, []).append(wr) + + def collapsed_location_report(reports: List[WarningReport]) -> str: + locations = [] + for w in reports: + location = w.get_location(self.config) + if location: + locations.append(location) + + if len(locations) < 10: + return "\n".join(map(str, locations)) + + counts_by_filename = Counter( + str(loc).split("::", 1)[0] for loc in locations + ) + return "\n".join( + "{}: {} warning{}".format(k, v, "s" if v > 1 else "") + for k, v in counts_by_filename.items() + ) + + title = "warnings summary (final)" if final else "warnings summary" + self.write_sep("=", title, yellow=True, bold=False) + for message, message_reports in reports_grouped_by_message.items(): + maybe_location = collapsed_location_report(message_reports) + if maybe_location: + self._tw.line(maybe_location) + lines = message.splitlines() + indented = "\n".join(" " + x for x in lines) + message = indented.rstrip() + else: + message = message.rstrip() + self._tw.line(message) + self._tw.line() + self._tw.line( + "-- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html" + ) + + def summary_passes(self) -> None: + if self.config.option.tbstyle != "no": + if self.hasopt("P"): + reports: List[TestReport] = self.getreports("passed") + if not reports: + return + self.write_sep("=", "PASSES") + for rep in reports: + if rep.sections: + msg = self._getfailureheadline(rep) + self.write_sep("_", msg, green=True, bold=True) + self._outrep_summary(rep) + self._handle_teardown_sections(rep.nodeid) + + def _get_teardown_reports(self, nodeid: str) -> List[TestReport]: + reports = self.getreports("") + return [ + report + for report in reports + if report.when == "teardown" and report.nodeid == nodeid + ] + + def _handle_teardown_sections(self, nodeid: str) -> None: + for report in self._get_teardown_reports(nodeid): + self.print_teardown_sections(report) + + def print_teardown_sections(self, rep: TestReport) -> None: + showcapture = self.config.option.showcapture + if showcapture == "no": + return + for secname, content in rep.sections: + if showcapture != "all" and showcapture not in secname: + continue + if "teardown" in secname: + self._tw.sep("-", secname) + if content[-1:] == "\n": + content = content[:-1] + self._tw.line(content) + + def summary_failures(self) -> None: + if self.config.option.tbstyle != "no": + reports: List[BaseReport] = self.getreports("failed") + if not reports: + return + self.write_sep("=", "FAILURES") + if self.config.option.tbstyle == "line": + for rep in reports: + line = self._getcrashline(rep) + self.write_line(line) + else: + for rep in reports: + msg = self._getfailureheadline(rep) + self.write_sep("_", msg, red=True, bold=True) + self._outrep_summary(rep) + self._handle_teardown_sections(rep.nodeid) + + def summary_errors(self) -> None: + if self.config.option.tbstyle != "no": + 
reports: List[BaseReport] = self.getreports("error") + if not reports: + return + self.write_sep("=", "ERRORS") + for rep in self.stats["error"]: + msg = self._getfailureheadline(rep) + if rep.when == "collect": + msg = "ERROR collecting " + msg + else: + msg = f"ERROR at {rep.when} of {msg}" + self.write_sep("_", msg, red=True, bold=True) + self._outrep_summary(rep) + + def _outrep_summary(self, rep: BaseReport) -> None: + rep.toterminal(self._tw) + showcapture = self.config.option.showcapture + if showcapture == "no": + return + for secname, content in rep.sections: + if showcapture != "all" and showcapture not in secname: + continue + self._tw.sep("-", secname) + if content[-1:] == "\n": + content = content[:-1] + self._tw.line(content) + + def summary_stats(self) -> None: + if self.verbosity < -1: + return + + session_duration = timing.time() - self._sessionstarttime + (parts, main_color) = self.build_summary_stats_line() + line_parts = [] + + display_sep = self.verbosity >= 0 + if display_sep: + fullwidth = self._tw.fullwidth + for text, markup in parts: + with_markup = self._tw.markup(text, **markup) + if display_sep: + fullwidth += len(with_markup) - len(text) + line_parts.append(with_markup) + msg = ", ".join(line_parts) + + main_markup = {main_color: True} + duration = f" in {format_session_duration(session_duration)}" + duration_with_markup = self._tw.markup(duration, **main_markup) + if display_sep: + fullwidth += len(duration_with_markup) - len(duration) + msg += duration_with_markup + + if display_sep: + markup_for_end_sep = self._tw.markup("", **main_markup) + if markup_for_end_sep.endswith("\x1b[0m"): + markup_for_end_sep = markup_for_end_sep[:-4] + fullwidth += len(markup_for_end_sep) + msg += markup_for_end_sep + + if display_sep: + self.write_sep("=", msg, fullwidth=fullwidth, **main_markup) + else: + self.write_line(msg, **main_markup) + + def short_test_summary(self) -> None: + if not self.reportchars: + return + + def show_simple(stat, lines: List[str]) -> None: + failed = self.stats.get(stat, []) + if not failed: + return + termwidth = self._tw.fullwidth + config = self.config + for rep in failed: + line = _get_line_with_reprcrash_message(config, rep, termwidth) + lines.append(line) + + def show_xfailed(lines: List[str]) -> None: + xfailed = self.stats.get("xfailed", []) + for rep in xfailed: + verbose_word = rep._get_verbose_word(self.config) + pos = _get_pos(self.config, rep) + lines.append(f"{verbose_word} {pos}") + reason = rep.wasxfail + if reason: + lines.append(" " + str(reason)) + + def show_xpassed(lines: List[str]) -> None: + xpassed = self.stats.get("xpassed", []) + for rep in xpassed: + verbose_word = rep._get_verbose_word(self.config) + pos = _get_pos(self.config, rep) + reason = rep.wasxfail + lines.append(f"{verbose_word} {pos} {reason}") + + def show_skipped(lines: List[str]) -> None: + skipped: List[CollectReport] = self.stats.get("skipped", []) + fskips = _folded_skips(self.startpath, skipped) if skipped else [] + if not fskips: + return + verbose_word = skipped[0]._get_verbose_word(self.config) + for num, fspath, lineno, reason in fskips: + if reason.startswith("Skipped: "): + reason = reason[9:] + if lineno is not None: + lines.append( + "%s [%d] %s:%d: %s" + % (verbose_word, num, fspath, lineno, reason) + ) + else: + lines.append("%s [%d] %s: %s" % (verbose_word, num, fspath, reason)) + + REPORTCHAR_ACTIONS: Mapping[str, Callable[[List[str]], None]] = { + "x": show_xfailed, + "X": show_xpassed, + "f": partial(show_simple, "failed"), + "s": 
show_skipped,
+            "p": partial(show_simple, "passed"),
+            "E": partial(show_simple, "error"),
+        }
+
+        lines: List[str] = []
+        for char in self.reportchars:
+            action = REPORTCHAR_ACTIONS.get(char)
+            if action:  # skipping e.g. "P" (passed with output) here.
+                action(lines)
+
+        if lines:
+            self.write_sep("=", "short test summary info")
+            for line in lines:
+                self.write_line(line)
+
+    def _get_main_color(self) -> Tuple[str, List[str]]:
+        if self._main_color is None or self._known_types is None or self._is_last_item:
+            self._set_main_color()
+            assert self._main_color
+            assert self._known_types
+        return self._main_color, self._known_types
+
+    def _determine_main_color(self, unknown_type_seen: bool) -> str:
+        stats = self.stats
+        if "failed" in stats or "error" in stats:
+            main_color = "red"
+        elif "warnings" in stats or "xpassed" in stats or unknown_type_seen:
+            main_color = "yellow"
+        elif "passed" in stats or not self._is_last_item:
+            main_color = "green"
+        else:
+            main_color = "yellow"
+        return main_color
+
+    def _set_main_color(self) -> None:
+        unknown_types: List[str] = []
+        for found_type in self.stats.keys():
+            if found_type:  # setup/teardown reports have an empty key, ignore them
+                if found_type not in KNOWN_TYPES and found_type not in unknown_types:
+                    unknown_types.append(found_type)
+        self._known_types = list(KNOWN_TYPES) + unknown_types
+        self._main_color = self._determine_main_color(bool(unknown_types))
+
+    def build_summary_stats_line(self) -> Tuple[List[Tuple[str, Dict[str, bool]]], str]:
+        """
+        Build the parts used in the last summary stats line.
+
+        The summary stats line is the line shown at the end, "=== 12 passed, 2 errors in Xs ===".
+
+        This function builds a list of the "parts" that make up the text of that line, in
+        the example above it would be:
+
+            [
+                ("12 passed", {"green": True}),
+                ("2 errors", {"red": True}),
+            ]
+
+        The dict in each part is a "markup dictionary", used by TerminalWriter to
+        color output.
+
+        The final color of the line is also determined by this function, and is the second
+        element of the returned tuple.
+        """
+        if self.config.getoption("collectonly"):
+            return self._build_collect_only_summary_stats_line()
+        else:
+            return self._build_normal_summary_stats_line()
+
+    def _get_reports_to_display(self, key: str) -> List[Any]:
+        """Get test/collection reports for the given status key, such as `passed` or `error`."""
+        reports = self.stats.get(key, [])
+        return [x for x in reports if getattr(x, "count_towards_summary", True)]
+
+    def _build_normal_summary_stats_line(
+        self,
+    ) -> Tuple[List[Tuple[str, Dict[str, bool]]], str]:
+        main_color, known_types = self._get_main_color()
+        parts = []
+
+        for key in known_types:
+            reports = self._get_reports_to_display(key)
+            if reports:
+                count = len(reports)
+                color = _color_for_type.get(key, _color_for_type_default)
+                markup = {color: True, "bold": color == main_color}
+                parts.append(("%d %s" % pluralize(count, key), markup))
+
+        if not parts:
+            parts = [("no tests ran", {_color_for_type_default: True})]
+
+        return parts, main_color
+
+    def _build_collect_only_summary_stats_line(
+        self,
+    ) -> Tuple[List[Tuple[str, Dict[str, bool]]], str]:
+        deselected = len(self._get_reports_to_display("deselected"))
+        errors = len(self._get_reports_to_display("error"))
+
+        if self._numcollected == 0:
+            parts = [("no tests collected", {"yellow": True})]
+            main_color = "yellow"
+
+        elif deselected == 0:
+            main_color = "green"
+            collected_output = "%d %s collected" % pluralize(self._numcollected, "test")
+            parts = [(collected_output, {main_color: True})]
+        else:
+            all_tests_were_deselected = self._numcollected == deselected
+            if all_tests_were_deselected:
+                main_color = "yellow"
+                collected_output = f"no tests collected ({deselected} deselected)"
+            else:
+                main_color = "green"
+                selected = self._numcollected - deselected
+                collected_output = f"{selected}/{self._numcollected} tests collected ({deselected} deselected)"
+
+            parts = [(collected_output, {main_color: True})]
+
+        if errors:
+            main_color = _color_for_type["error"]
+            parts += [("%d %s" % pluralize(errors, "error"), {main_color: True})]
+
+        return parts, main_color
+
+
+def _get_pos(config: Config, rep: BaseReport):
+    nodeid = config.cwd_relative_nodeid(rep.nodeid)
+    return nodeid
+
+
+def _format_trimmed(format: str, msg: str, available_width: int) -> Optional[str]:
+    """Format msg into format, ellipsizing it if it doesn't fit in available_width.
+
+    Returns None if even the ellipsis can't fit.
+    """
+    # Only use the first line.
+    i = msg.find("\n")
+    if i != -1:
+        msg = msg[:i]
+
+    ellipsis = "..."
+    format_width = wcswidth(format.format(""))
+    if format_width + len(ellipsis) > available_width:
+        return None
+
+    if format_width + wcswidth(msg) > available_width:
+        available_width -= len(ellipsis)
+        msg = msg[:available_width]
+        while format_width + wcswidth(msg) > available_width:
+            msg = msg[:-1]
+        msg += ellipsis
+
+    return format.format(msg)
+
+
+def _get_line_with_reprcrash_message(
+    config: Config, rep: BaseReport, termwidth: int
+) -> str:
+    """Get summary line for a report, trying to add reprcrash message."""
+    verbose_word = rep._get_verbose_word(config)
+    pos = _get_pos(config, rep)
+
+    line = f"{verbose_word} {pos}"
+    line_width = wcswidth(line)
+
+    try:
+        # Type ignored intentionally -- possible AttributeError expected.
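+        # rep.longrepr may be None or a plain str for some report kinds, in
+        # which case reading .reprcrash raises AttributeError (handled below).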
+        msg = rep.longrepr.reprcrash.message  # type: ignore[union-attr]
+    except AttributeError:
+        pass
+    else:
+        available_width = termwidth - line_width
+        msg = _format_trimmed(" - {}", msg, available_width)
+        if msg is not None:
+            line += msg
+
+    return line
+
+
+def _folded_skips(
+    startpath: Path,
+    skipped: Sequence[CollectReport],
+) -> List[Tuple[int, str, Optional[int], str]]:
+    d: Dict[Tuple[str, Optional[int], str], List[CollectReport]] = {}
+    for event in skipped:
+        assert event.longrepr is not None
+        assert isinstance(event.longrepr, tuple), (event, event.longrepr)
+        assert len(event.longrepr) == 3, (event, event.longrepr)
+        fspath, lineno, reason = event.longrepr
+        # For consistency, report all fspaths in relative form.
+        fspath = bestrelpath(startpath, Path(fspath))
+        keywords = getattr(event, "keywords", {})
+        # Folding reports with global pytestmark variable.
+        # This is a workaround, because for now we cannot identify the scope of a skip marker.
+        # TODO: Revisit once the scope of marks can be identified.
+        if (
+            event.when == "setup"
+            and "skip" in keywords
+            and "pytestmark" not in keywords
+        ):
+            key: Tuple[str, Optional[int], str] = (fspath, None, reason)
+        else:
+            key = (fspath, lineno, reason)
+        d.setdefault(key, []).append(event)
+    values: List[Tuple[int, str, Optional[int], str]] = []
+    for key, events in d.items():
+        values.append((len(events), *key))
+    return values
+
+
+_color_for_type = {
+    "failed": "red",
+    "error": "red",
+    "warnings": "yellow",
+    "passed": "green",
+}
+_color_for_type_default = "yellow"
+
+
+def pluralize(count: int, noun: str) -> Tuple[int, str]:
+    # No need to pluralize words such as `failed` or `passed`.
+    if noun not in ["error", "warnings", "test"]:
+        return count, noun
+
+    # The `warnings` key is plural. To avoid API breakage, we keep it that way but
+    # set it to singular here so we can determine plurality in the same way as we do
+    # for `error`.
+    noun = noun.replace("warnings", "warning")
+
+    return count, noun + "s" if count != 1 else noun
+
+
+def _plugin_nameversions(plugininfo) -> List[str]:
+    values: List[str] = []
+    for plugin, dist in plugininfo:
+        # Gets us name and version!
+        name = "{dist.project_name}-{dist.version}".format(dist=dist)
+        # Questionable convenience, but it keeps things short.
+        if name.startswith("pytest-"):
+            name = name[7:]
+        # We decided to print python package names; they can have more than one plugin.
+        if name not in values:
+            values.append(name)
+    return values
+
+
+def format_session_duration(seconds: float) -> str:
+    """Format the given seconds in a human readable manner to show in the final summary."""
+    if seconds < 60:
+        return f"{seconds:.2f}s"
+    else:
+        dt = datetime.timedelta(seconds=int(seconds))
+        return f"{seconds:.2f}s ({dt})"
+
+
+def _get_raw_skip_reason(report: TestReport) -> str:
+    """Get the reason string of a skip/xfail/xpass test report.
+
+    The string is just the part given by the user.
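+
+    For example, ``@pytest.mark.skip(reason="flaky on CI")`` yields
+    ``"flaky on CI"``, while a bare ``pytest.skip()`` yields ``""``.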
+ """ + if hasattr(report, "wasxfail"): + reason = cast(str, report.wasxfail) + if reason.startswith("reason: "): + reason = reason[len("reason: ") :] + return reason + else: + assert report.skipped + assert isinstance(report.longrepr, tuple) + _, _, reason = report.longrepr + if reason.startswith("Skipped: "): + reason = reason[len("Skipped: ") :] + elif reason == "Skipped": + reason = "" + return reason diff --git a/venv/lib/python3.10/site-packages/_pytest/threadexception.py b/venv/lib/python3.10/site-packages/_pytest/threadexception.py new file mode 100644 index 0000000..43341e7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/threadexception.py @@ -0,0 +1,88 @@ +import threading +import traceback +import warnings +from types import TracebackType +from typing import Any +from typing import Callable +from typing import Generator +from typing import Optional +from typing import Type + +import pytest + + +# Copied from cpython/Lib/test/support/threading_helper.py, with modifications. +class catch_threading_exception: + """Context manager catching threading.Thread exception using + threading.excepthook. + + Storing exc_value using a custom hook can create a reference cycle. The + reference cycle is broken explicitly when the context manager exits. + + Storing thread using a custom hook can resurrect it if it is set to an + object which is being finalized. Exiting the context manager clears the + stored object. + + Usage: + with threading_helper.catch_threading_exception() as cm: + # code spawning a thread which raises an exception + ... + # check the thread exception: use cm.args + ... + # cm.args attribute no longer exists at this point + # (to break a reference cycle) + """ + + def __init__(self) -> None: + self.args: Optional["threading.ExceptHookArgs"] = None + self._old_hook: Optional[Callable[["threading.ExceptHookArgs"], Any]] = None + + def _hook(self, args: "threading.ExceptHookArgs") -> None: + self.args = args + + def __enter__(self) -> "catch_threading_exception": + self._old_hook = threading.excepthook + threading.excepthook = self._hook + return self + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: + assert self._old_hook is not None + threading.excepthook = self._old_hook + self._old_hook = None + del self.args + + +def thread_exception_runtest_hook() -> Generator[None, None, None]: + with catch_threading_exception() as cm: + yield + if cm.args: + thread_name = "" if cm.args.thread is None else cm.args.thread.name + msg = f"Exception in thread {thread_name}\n\n" + msg += "".join( + traceback.format_exception( + cm.args.exc_type, + cm.args.exc_value, + cm.args.exc_traceback, + ) + ) + warnings.warn(pytest.PytestUnhandledThreadExceptionWarning(msg)) + + +@pytest.hookimpl(hookwrapper=True, trylast=True) +def pytest_runtest_setup() -> Generator[None, None, None]: + yield from thread_exception_runtest_hook() + + +@pytest.hookimpl(hookwrapper=True, tryfirst=True) +def pytest_runtest_call() -> Generator[None, None, None]: + yield from thread_exception_runtest_hook() + + +@pytest.hookimpl(hookwrapper=True, tryfirst=True) +def pytest_runtest_teardown() -> Generator[None, None, None]: + yield from thread_exception_runtest_hook() diff --git a/venv/lib/python3.10/site-packages/_pytest/timing.py b/venv/lib/python3.10/site-packages/_pytest/timing.py new file mode 100644 index 0000000..925163a --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/timing.py @@ -0,0 
+1,12 @@ +"""Indirection for time functions. + +We intentionally grab some "time" functions internally to avoid tests mocking "time" to affect +pytest runtime information (issue #185). + +Fixture "mock_timing" also interacts with this module for pytest's own tests. +""" +from time import perf_counter +from time import sleep +from time import time + +__all__ = ["perf_counter", "sleep", "time"] diff --git a/venv/lib/python3.10/site-packages/_pytest/tmpdir.py b/venv/lib/python3.10/site-packages/_pytest/tmpdir.py new file mode 100644 index 0000000..12dc463 --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/tmpdir.py @@ -0,0 +1,212 @@ +"""Support for providing temporary directories to test functions.""" +import os +import re +import sys +import tempfile +from pathlib import Path +from typing import Optional + +import attr + +from .pathlib import LOCK_TIMEOUT +from .pathlib import make_numbered_dir +from .pathlib import make_numbered_dir_with_cleanup +from .pathlib import rm_rf +from _pytest.compat import final +from _pytest.config import Config +from _pytest.deprecated import check_ispytest +from _pytest.fixtures import fixture +from _pytest.fixtures import FixtureRequest +from _pytest.monkeypatch import MonkeyPatch + + +@final +@attr.s(init=False) +class TempPathFactory: + """Factory for temporary directories under the common base temp directory. + + The base directory can be configured using the ``--basetemp`` option. + """ + + _given_basetemp = attr.ib(type=Optional[Path]) + _trace = attr.ib() + _basetemp = attr.ib(type=Optional[Path]) + + def __init__( + self, + given_basetemp: Optional[Path], + trace, + basetemp: Optional[Path] = None, + *, + _ispytest: bool = False, + ) -> None: + check_ispytest(_ispytest) + if given_basetemp is None: + self._given_basetemp = None + else: + # Use os.path.abspath() to get absolute path instead of resolve() as it + # does not work the same in all platforms (see #4427). + # Path.absolute() exists, but it is not public (see https://bugs.python.org/issue25012). + self._given_basetemp = Path(os.path.abspath(str(given_basetemp))) + self._trace = trace + self._basetemp = basetemp + + @classmethod + def from_config( + cls, + config: Config, + *, + _ispytest: bool = False, + ) -> "TempPathFactory": + """Create a factory according to pytest configuration. + + :meta private: + """ + check_ispytest(_ispytest) + return cls( + given_basetemp=config.option.basetemp, + trace=config.trace.get("tmpdir"), + _ispytest=True, + ) + + def _ensure_relative_to_basetemp(self, basename: str) -> str: + basename = os.path.normpath(basename) + if (self.getbasetemp() / basename).resolve().parent != self.getbasetemp(): + raise ValueError(f"{basename} is not a normalized and relative path") + return basename + + def mktemp(self, basename: str, numbered: bool = True) -> Path: + """Create a new temporary directory managed by the factory. + + :param basename: + Directory base name, must be a relative path. + + :param numbered: + If ``True``, ensure the directory is unique by adding a numbered + suffix greater than any existing one: ``basename="foo-"`` and ``numbered=True`` + means that this function will create directories named ``"foo-0"``, + ``"foo-1"``, ``"foo-2"`` and so on. + + :returns: + The path to the new directory. 
+ """ + basename = self._ensure_relative_to_basetemp(basename) + if not numbered: + p = self.getbasetemp().joinpath(basename) + p.mkdir(mode=0o700) + else: + p = make_numbered_dir(root=self.getbasetemp(), prefix=basename, mode=0o700) + self._trace("mktemp", p) + return p + + def getbasetemp(self) -> Path: + """Return the base temporary directory, creating it if needed.""" + if self._basetemp is not None: + return self._basetemp + + if self._given_basetemp is not None: + basetemp = self._given_basetemp + if basetemp.exists(): + rm_rf(basetemp) + basetemp.mkdir(mode=0o700) + basetemp = basetemp.resolve() + else: + from_env = os.environ.get("PYTEST_DEBUG_TEMPROOT") + temproot = Path(from_env or tempfile.gettempdir()).resolve() + user = get_user() or "unknown" + # use a sub-directory in the temproot to speed-up + # make_numbered_dir() call + rootdir = temproot.joinpath(f"pytest-of-{user}") + try: + rootdir.mkdir(mode=0o700, exist_ok=True) + except OSError: + # getuser() likely returned illegal characters for the platform, use unknown back off mechanism + rootdir = temproot.joinpath("pytest-of-unknown") + rootdir.mkdir(mode=0o700, exist_ok=True) + # Because we use exist_ok=True with a predictable name, make sure + # we are the owners, to prevent any funny business (on unix, where + # temproot is usually shared). + # Also, to keep things private, fixup any world-readable temp + # rootdir's permissions. Historically 0o755 was used, so we can't + # just error out on this, at least for a while. + if sys.platform != "win32": + uid = os.getuid() + rootdir_stat = rootdir.stat() + # getuid shouldn't fail, but cpython defines such a case. + # Let's hope for the best. + if uid != -1: + if rootdir_stat.st_uid != uid: + raise OSError( + f"The temporary directory {rootdir} is not owned by the current user. " + "Fix this and try again." + ) + if (rootdir_stat.st_mode & 0o077) != 0: + os.chmod(rootdir, rootdir_stat.st_mode & ~0o077) + basetemp = make_numbered_dir_with_cleanup( + prefix="pytest-", + root=rootdir, + keep=3, + lock_timeout=LOCK_TIMEOUT, + mode=0o700, + ) + assert basetemp is not None, basetemp + self._basetemp = basetemp + self._trace("new basetemp", basetemp) + return basetemp + + +def get_user() -> Optional[str]: + """Return the current user name, or None if getuser() does not work + in the current environment (see #1010).""" + try: + # In some exotic environments, getpass may not be importable. + import getpass + + return getpass.getuser() + except (ImportError, KeyError): + return None + + +def pytest_configure(config: Config) -> None: + """Create a TempPathFactory and attach it to the config object. + + This is to comply with existing plugins which expect the handler to be + available at pytest_configure time, but ideally should be moved entirely + to the tmp_path_factory session fixture. + """ + mp = MonkeyPatch() + config.add_cleanup(mp.undo) + _tmp_path_factory = TempPathFactory.from_config(config, _ispytest=True) + mp.setattr(config, "_tmp_path_factory", _tmp_path_factory, raising=False) + + +@fixture(scope="session") +def tmp_path_factory(request: FixtureRequest) -> TempPathFactory: + """Return a :class:`pytest.TempPathFactory` instance for the test session.""" + # Set dynamically by pytest_configure() above. 
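+    # Illustrative use from a session-scoped fixture (hypothetical example):
+    #
+    #     @pytest.fixture(scope="session")
+    #     def image_dir(tmp_path_factory):
+    #         return tmp_path_factory.mktemp("images")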
+    return request.config._tmp_path_factory  # type: ignore
+
+
+def _mk_tmp(request: FixtureRequest, factory: TempPathFactory) -> Path:
+    name = request.node.name
+    name = re.sub(r"[\W]", "_", name)
+    MAXVAL = 30
+    name = name[:MAXVAL]
+    return factory.mktemp(name, numbered=True)
+
+
+@fixture
+def tmp_path(request: FixtureRequest, tmp_path_factory: TempPathFactory) -> Path:
+    """Return a temporary directory path object which is unique to each test
+    function invocation, created as a subdirectory of the base temporary
+    directory.
+
+    By default, a new base temporary directory is created each test session,
+    and old bases are removed after 3 sessions, to aid in debugging. If
+    ``--basetemp`` is used then it is cleared each session. See :ref:`base
+    temporary directory`.
+
+    The returned object is a :class:`pathlib.Path` object.
+    """
+
+    return _mk_tmp(request, tmp_path_factory)
diff --git a/venv/lib/python3.10/site-packages/_pytest/unittest.py b/venv/lib/python3.10/site-packages/_pytest/unittest.py
new file mode 100644
index 0000000..851e494
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/_pytest/unittest.py
@@ -0,0 +1,414 @@
+"""Discover and run std-library "unittest" style tests."""
+import sys
+import traceback
+import types
+from typing import Any
+from typing import Callable
+from typing import Generator
+from typing import Iterable
+from typing import List
+from typing import Optional
+from typing import Tuple
+from typing import Type
+from typing import TYPE_CHECKING
+from typing import Union
+
+import _pytest._code
+import pytest
+from _pytest.compat import getimfunc
+from _pytest.compat import is_async_function
+from _pytest.config import hookimpl
+from _pytest.fixtures import FixtureRequest
+from _pytest.nodes import Collector
+from _pytest.nodes import Item
+from _pytest.outcomes import exit
+from _pytest.outcomes import fail
+from _pytest.outcomes import skip
+from _pytest.outcomes import xfail
+from _pytest.python import Class
+from _pytest.python import Function
+from _pytest.python import Module
+from _pytest.runner import CallInfo
+from _pytest.scope import Scope
+
+if TYPE_CHECKING:
+    import unittest
+    import twisted.trial.unittest
+
+    _SysExcInfoType = Union[
+        Tuple[Type[BaseException], BaseException, types.TracebackType],
+        Tuple[None, None, None],
+    ]
+
+
+def pytest_pycollect_makeitem(
+    collector: Union[Module, Class], name: str, obj: object
+) -> Optional["UnitTestCase"]:
+    # Has unittest been imported and is obj a subclass of its TestCase?
+    try:
+        ut = sys.modules["unittest"]
+        # Type ignored because `ut` is an opaque module.
+        if not issubclass(obj, ut.TestCase):  # type: ignore
+            return None
+    except Exception:
+        return None
+    # Yes, so let's collect it.
+    item: UnitTestCase = UnitTestCase.from_parent(collector, name=name, obj=obj)
+    return item
+
+
+class UnitTestCase(Class):
+    # Marker for fixturemanager.getfixtureinfo()
+    # to declare that our children do not support funcargs.
+ nofuncargs = True + + def collect(self) -> Iterable[Union[Item, Collector]]: + from unittest import TestLoader + + cls = self.obj + if not getattr(cls, "__test__", True): + return + + skipped = _is_skipped(cls) + if not skipped: + self._inject_setup_teardown_fixtures(cls) + self._inject_setup_class_fixture() + + self.session._fixturemanager.parsefactories(self, unittest=True) + loader = TestLoader() + foundsomething = False + for name in loader.getTestCaseNames(self.obj): + x = getattr(self.obj, name) + if not getattr(x, "__test__", True): + continue + funcobj = getimfunc(x) + yield TestCaseFunction.from_parent(self, name=name, callobj=funcobj) + foundsomething = True + + if not foundsomething: + runtest = getattr(self.obj, "runTest", None) + if runtest is not None: + ut = sys.modules.get("twisted.trial.unittest", None) + # Type ignored because `ut` is an opaque module. + if ut is None or runtest != ut.TestCase.runTest: # type: ignore + yield TestCaseFunction.from_parent(self, name="runTest") + + def _inject_setup_teardown_fixtures(self, cls: type) -> None: + """Injects a hidden auto-use fixture to invoke setUpClass/setup_method and corresponding + teardown functions (#517).""" + class_fixture = _make_xunit_fixture( + cls, + "setUpClass", + "tearDownClass", + "doClassCleanups", + scope=Scope.Class, + pass_self=False, + ) + if class_fixture: + cls.__pytest_class_setup = class_fixture # type: ignore[attr-defined] + + method_fixture = _make_xunit_fixture( + cls, + "setup_method", + "teardown_method", + None, + scope=Scope.Function, + pass_self=True, + ) + if method_fixture: + cls.__pytest_method_setup = method_fixture # type: ignore[attr-defined] + + +def _make_xunit_fixture( + obj: type, + setup_name: str, + teardown_name: str, + cleanup_name: Optional[str], + scope: Scope, + pass_self: bool, +): + setup = getattr(obj, setup_name, None) + teardown = getattr(obj, teardown_name, None) + if setup is None and teardown is None: + return None + + if cleanup_name: + cleanup = getattr(obj, cleanup_name, lambda *args: None) + else: + + def cleanup(*args): + pass + + @pytest.fixture( + scope=scope.value, + autouse=True, + # Use a unique name to speed up lookup. + name=f"_unittest_{setup_name}_fixture_{obj.__qualname__}", + ) + def fixture(self, request: FixtureRequest) -> Generator[None, None, None]: + if _is_skipped(self): + reason = self.__unittest_skip_why__ + raise pytest.skip.Exception(reason, _use_item_location=True) + if setup is not None: + try: + if pass_self: + setup(self, request.function) + else: + setup() + # unittest does not call the cleanup function for every BaseException, so we + # follow this here. + except Exception: + if pass_self: + cleanup(self) + else: + cleanup() + + raise + yield + try: + if teardown is not None: + if pass_self: + teardown(self, request.function) + else: + teardown() + finally: + if pass_self: + cleanup(self) + else: + cleanup() + + return fixture + + +class TestCaseFunction(Function): + nofuncargs = True + _excinfo: Optional[List[_pytest._code.ExceptionInfo[BaseException]]] = None + _testcase: Optional["unittest.TestCase"] = None + + def _getobj(self): + assert self.parent is not None + # Unlike a regular Function in a Class, where `item.obj` returns + # a *bound* method (attached to an instance), TestCaseFunction's + # `obj` returns an *unbound* method (not attached to an instance). + # This inconsistency is probably not desirable, but needs some + # consideration before changing. 
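+        # self.parent.obj here is the TestCase subclass itself, so this lookup
+        # goes through the class and yields a plain function object.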
+ return getattr(self.parent.obj, self.originalname) # type: ignore[attr-defined] + + def setup(self) -> None: + # A bound method to be called during teardown() if set (see 'runtest()'). + self._explicit_tearDown: Optional[Callable[[], None]] = None + assert self.parent is not None + self._testcase = self.parent.obj(self.name) # type: ignore[attr-defined] + self._obj = getattr(self._testcase, self.name) + if hasattr(self, "_request"): + self._request._fillfixtures() + + def teardown(self) -> None: + if self._explicit_tearDown is not None: + self._explicit_tearDown() + self._explicit_tearDown = None + self._testcase = None + self._obj = None + + def startTest(self, testcase: "unittest.TestCase") -> None: + pass + + def _addexcinfo(self, rawexcinfo: "_SysExcInfoType") -> None: + # Unwrap potential exception info (see twisted trial support below). + rawexcinfo = getattr(rawexcinfo, "_rawexcinfo", rawexcinfo) + try: + excinfo = _pytest._code.ExceptionInfo[BaseException].from_exc_info(rawexcinfo) # type: ignore[arg-type] + # Invoke the attributes to trigger storing the traceback + # trial causes some issue there. + excinfo.value + excinfo.traceback + except TypeError: + try: + try: + values = traceback.format_exception(*rawexcinfo) + values.insert( + 0, + "NOTE: Incompatible Exception Representation, " + "displaying natively:\n\n", + ) + fail("".join(values), pytrace=False) + except (fail.Exception, KeyboardInterrupt): + raise + except BaseException: + fail( + "ERROR: Unknown Incompatible Exception " + "representation:\n%r" % (rawexcinfo,), + pytrace=False, + ) + except KeyboardInterrupt: + raise + except fail.Exception: + excinfo = _pytest._code.ExceptionInfo.from_current() + self.__dict__.setdefault("_excinfo", []).append(excinfo) + + def addError( + self, testcase: "unittest.TestCase", rawexcinfo: "_SysExcInfoType" + ) -> None: + try: + if isinstance(rawexcinfo[1], exit.Exception): + exit(rawexcinfo[1].msg) + except TypeError: + pass + self._addexcinfo(rawexcinfo) + + def addFailure( + self, testcase: "unittest.TestCase", rawexcinfo: "_SysExcInfoType" + ) -> None: + self._addexcinfo(rawexcinfo) + + def addSkip(self, testcase: "unittest.TestCase", reason: str) -> None: + try: + raise pytest.skip.Exception(reason, _use_item_location=True) + except skip.Exception: + self._addexcinfo(sys.exc_info()) + + def addExpectedFailure( + self, + testcase: "unittest.TestCase", + rawexcinfo: "_SysExcInfoType", + reason: str = "", + ) -> None: + try: + xfail(str(reason)) + except xfail.Exception: + self._addexcinfo(sys.exc_info()) + + def addUnexpectedSuccess( + self, + testcase: "unittest.TestCase", + reason: Optional["twisted.trial.unittest.Todo"] = None, + ) -> None: + msg = "Unexpected success" + if reason: + msg += f": {reason.reason}" + # Preserve unittest behaviour - fail the test. Explicitly not an XPASS. + try: + fail(msg, pytrace=False) + except fail.Exception: + self._addexcinfo(sys.exc_info()) + + def addSuccess(self, testcase: "unittest.TestCase") -> None: + pass + + def stopTest(self, testcase: "unittest.TestCase") -> None: + pass + + def runtest(self) -> None: + from _pytest.debugging import maybe_wrap_pytest_function_for_tracing + + assert self._testcase is not None + + maybe_wrap_pytest_function_for_tracing(self) + + # Let the unittest framework handle async functions. + if is_async_function(self.obj): + # Type ignored because self acts as the TestResult, but is not actually one. 
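+            # Calling a TestCase instance is equivalent to TestCase.run(result):
+            # it drives setUp/test/tearDown and reports outcomes back into this
+            # object through the addSuccess/addFailure/... methods above.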
+ self._testcase(result=self) # type: ignore[arg-type] + else: + # When --pdb is given, we want to postpone calling tearDown() otherwise + # when entering the pdb prompt, tearDown() would have probably cleaned up + # instance variables, which makes it difficult to debug. + # Arguably we could always postpone tearDown(), but this changes the moment where the + # TestCase instance interacts with the results object, so better to only do it + # when absolutely needed. + if self.config.getoption("usepdb") and not _is_skipped(self.obj): + self._explicit_tearDown = self._testcase.tearDown + setattr(self._testcase, "tearDown", lambda *args: None) + + # We need to update the actual bound method with self.obj, because + # wrap_pytest_function_for_tracing replaces self.obj by a wrapper. + setattr(self._testcase, self.name, self.obj) + try: + self._testcase(result=self) # type: ignore[arg-type] + finally: + delattr(self._testcase, self.name) + + def _prunetraceback( + self, excinfo: _pytest._code.ExceptionInfo[BaseException] + ) -> None: + super()._prunetraceback(excinfo) + traceback = excinfo.traceback.filter( + lambda x: not x.frame.f_globals.get("__unittest") + ) + if traceback: + excinfo.traceback = traceback + + +@hookimpl(tryfirst=True) +def pytest_runtest_makereport(item: Item, call: CallInfo[None]) -> None: + if isinstance(item, TestCaseFunction): + if item._excinfo: + call.excinfo = item._excinfo.pop(0) + try: + del call.result + except AttributeError: + pass + + # Convert unittest.SkipTest to pytest.skip. + # This is actually only needed for nose, which reuses unittest.SkipTest for + # its own nose.SkipTest. For unittest TestCases, SkipTest is already + # handled internally, and doesn't reach here. + unittest = sys.modules.get("unittest") + if ( + unittest + and call.excinfo + and isinstance(call.excinfo.value, unittest.SkipTest) # type: ignore[attr-defined] + ): + excinfo = call.excinfo + call2 = CallInfo[None].from_call( + lambda: pytest.skip(str(excinfo.value)), call.when + ) + call.excinfo = call2.excinfo + + +# Twisted trial support. 
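+# The hookwrapper below temporarily replaces twisted.python.failure.Failure.__init__
+# with a version that stashes the raw exc_info on the Failure, so that
+# TestCaseFunction._addexcinfo() can unwrap it via the "_rawexcinfo" attribute.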
+ + +@hookimpl(hookwrapper=True) +def pytest_runtest_protocol(item: Item) -> Generator[None, None, None]: + if isinstance(item, TestCaseFunction) and "twisted.trial.unittest" in sys.modules: + ut: Any = sys.modules["twisted.python.failure"] + Failure__init__ = ut.Failure.__init__ + check_testcase_implements_trial_reporter() + + def excstore( + self, exc_value=None, exc_type=None, exc_tb=None, captureVars=None + ): + if exc_value is None: + self._rawexcinfo = sys.exc_info() + else: + if exc_type is None: + exc_type = type(exc_value) + self._rawexcinfo = (exc_type, exc_value, exc_tb) + try: + Failure__init__( + self, exc_value, exc_type, exc_tb, captureVars=captureVars + ) + except TypeError: + Failure__init__(self, exc_value, exc_type, exc_tb) + + ut.Failure.__init__ = excstore + yield + ut.Failure.__init__ = Failure__init__ + else: + yield + + +def check_testcase_implements_trial_reporter(done: List[int] = []) -> None: + if done: + return + from zope.interface import classImplements + from twisted.trial.itrial import IReporter + + classImplements(TestCaseFunction, IReporter) + done.append(1) + + +def _is_skipped(obj) -> bool: + """Return True if the given object has been marked with @unittest.skip.""" + return bool(getattr(obj, "__unittest_skip__", False)) diff --git a/venv/lib/python3.10/site-packages/_pytest/unraisableexception.py b/venv/lib/python3.10/site-packages/_pytest/unraisableexception.py new file mode 100644 index 0000000..fcb5d82 --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/unraisableexception.py @@ -0,0 +1,93 @@ +import sys +import traceback +import warnings +from types import TracebackType +from typing import Any +from typing import Callable +from typing import Generator +from typing import Optional +from typing import Type + +import pytest + + +# Copied from cpython/Lib/test/support/__init__.py, with modifications. +class catch_unraisable_exception: + """Context manager catching unraisable exception using sys.unraisablehook. + + Storing the exception value (cm.unraisable.exc_value) creates a reference + cycle. The reference cycle is broken explicitly when the context manager + exits. + + Storing the object (cm.unraisable.object) can resurrect it if it is set to + an object which is being finalized. Exiting the context manager clears the + stored object. + + Usage: + with catch_unraisable_exception() as cm: + # code creating an "unraisable exception" + ... + # check the unraisable exception: use cm.unraisable + ... + # cm.unraisable attribute no longer exists at this point + # (to break a reference cycle) + """ + + def __init__(self) -> None: + self.unraisable: Optional["sys.UnraisableHookArgs"] = None + self._old_hook: Optional[Callable[["sys.UnraisableHookArgs"], Any]] = None + + def _hook(self, unraisable: "sys.UnraisableHookArgs") -> None: + # Storing unraisable.object can resurrect an object which is being + # finalized. Storing unraisable.exc_value creates a reference cycle. 
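+        # Both hazards are undone in __exit__, which restores the previous hook
+        # and deletes self.unraisable.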
+ self.unraisable = unraisable + + def __enter__(self) -> "catch_unraisable_exception": + self._old_hook = sys.unraisablehook + sys.unraisablehook = self._hook + return self + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: + assert self._old_hook is not None + sys.unraisablehook = self._old_hook + self._old_hook = None + del self.unraisable + + +def unraisable_exception_runtest_hook() -> Generator[None, None, None]: + with catch_unraisable_exception() as cm: + yield + if cm.unraisable: + if cm.unraisable.err_msg is not None: + err_msg = cm.unraisable.err_msg + else: + err_msg = "Exception ignored in" + msg = f"{err_msg}: {cm.unraisable.object!r}\n\n" + msg += "".join( + traceback.format_exception( + cm.unraisable.exc_type, + cm.unraisable.exc_value, + cm.unraisable.exc_traceback, + ) + ) + warnings.warn(pytest.PytestUnraisableExceptionWarning(msg)) + + +@pytest.hookimpl(hookwrapper=True, tryfirst=True) +def pytest_runtest_setup() -> Generator[None, None, None]: + yield from unraisable_exception_runtest_hook() + + +@pytest.hookimpl(hookwrapper=True, tryfirst=True) +def pytest_runtest_call() -> Generator[None, None, None]: + yield from unraisable_exception_runtest_hook() + + +@pytest.hookimpl(hookwrapper=True, tryfirst=True) +def pytest_runtest_teardown() -> Generator[None, None, None]: + yield from unraisable_exception_runtest_hook() diff --git a/venv/lib/python3.10/site-packages/_pytest/warning_types.py b/venv/lib/python3.10/site-packages/_pytest/warning_types.py new file mode 100644 index 0000000..ac79bb5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/_pytest/warning_types.py @@ -0,0 +1,138 @@ +from typing import Any +from typing import Generic +from typing import Type +from typing import TypeVar + +import attr + +from _pytest.compat import final + + +class PytestWarning(UserWarning): + """Base class for all warnings emitted by pytest.""" + + __module__ = "pytest" + + +@final +class PytestAssertRewriteWarning(PytestWarning): + """Warning emitted by the pytest assert rewrite module.""" + + __module__ = "pytest" + + +@final +class PytestCacheWarning(PytestWarning): + """Warning emitted by the cache plugin in various situations.""" + + __module__ = "pytest" + + +@final +class PytestConfigWarning(PytestWarning): + """Warning emitted for configuration issues.""" + + __module__ = "pytest" + + +@final +class PytestCollectionWarning(PytestWarning): + """Warning emitted when pytest is not able to collect a file or symbol in a module.""" + + __module__ = "pytest" + + +class PytestDeprecationWarning(PytestWarning, DeprecationWarning): + """Warning class for features that will be removed in a future version.""" + + __module__ = "pytest" + + +@final +class PytestRemovedIn8Warning(PytestDeprecationWarning): + """Warning class for features that will be removed in pytest 8.""" + + __module__ = "pytest" + + +@final +class PytestExperimentalApiWarning(PytestWarning, FutureWarning): + """Warning category used to denote experiments in pytest. + + Use sparingly as the API might change or even be removed completely in a + future version. + """ + + __module__ = "pytest" + + @classmethod + def simple(cls, apiname: str) -> "PytestExperimentalApiWarning": + return cls( + "{apiname} is an experimental api that may change over time".format( + apiname=apiname + ) + ) + + +@final +class PytestUnhandledCoroutineWarning(PytestWarning): + """Warning emitted for an unhandled coroutine. 
+
+    A coroutine was encountered when collecting test functions, but was not
+    handled by any async-aware plugin.
+    Coroutine test functions are not natively supported.
+    """
+
+    __module__ = "pytest"
+
+
+@final
+class PytestUnknownMarkWarning(PytestWarning):
+    """Warning emitted on use of unknown markers.
+
+    See :ref:`mark` for details.
+    """
+
+    __module__ = "pytest"
+
+
+@final
+class PytestUnraisableExceptionWarning(PytestWarning):
+    """An unraisable exception was reported.
+
+    Unraisable exceptions are exceptions raised in :meth:`__del__ <object.__del__>`
+    implementations and similar situations when the exception cannot be raised
+    as normal.
+    """
+
+    __module__ = "pytest"
+
+
+@final
+class PytestUnhandledThreadExceptionWarning(PytestWarning):
+    """An unhandled exception occurred in a :class:`~threading.Thread`.
+
+    Such exceptions don't propagate normally.
+    """
+
+    __module__ = "pytest"
+
+
+_W = TypeVar("_W", bound=PytestWarning)
+
+
+@final
+@attr.s(auto_attribs=True)
+class UnformattedWarning(Generic[_W]):
+    """A warning meant to be formatted during runtime.
+
+    This is used to hold warnings that need to format their message at runtime,
+    as opposed to a direct message.
+    """
+
+    category: Type["_W"]
+    template: str
+
+    def format(self, **kwargs: Any) -> _W:
+        """Return an instance of the warning category, formatted with given kwargs."""
+        return self.category(self.template.format(**kwargs))
diff --git a/venv/lib/python3.10/site-packages/_pytest/warnings.py b/venv/lib/python3.10/site-packages/_pytest/warnings.py
new file mode 100644
index 0000000..4aaa944
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/_pytest/warnings.py
@@ -0,0 +1,148 @@
+import sys
+import warnings
+from contextlib import contextmanager
+from typing import Generator
+from typing import Optional
+from typing import TYPE_CHECKING
+
+import pytest
+from _pytest.config import apply_warning_filters
+from _pytest.config import Config
+from _pytest.config import parse_warning_filter
+from _pytest.main import Session
+from _pytest.nodes import Item
+from _pytest.terminal import TerminalReporter
+
+if TYPE_CHECKING:
+    from typing_extensions import Literal
+
+
+def pytest_configure(config: Config) -> None:
+    config.addinivalue_line(
+        "markers",
+        "filterwarnings(warning): add a warning filter to the given test. "
+        "see https://docs.pytest.org/en/stable/how-to/capture-warnings.html#pytest-mark-filterwarnings ",
+    )
+
+
+@contextmanager
+def catch_warnings_for_item(
+    config: Config,
+    ihook,
+    when: "Literal['config', 'collect', 'runtest']",
+    item: Optional[Item],
+) -> Generator[None, None, None]:
+    """Context manager that catches warnings generated in the contained execution block.
+
+    ``item`` can be None if we are not in the context of an item execution.
+
+    Each warning captured triggers the ``pytest_warning_recorded`` hook.
+    """
+    config_filters = config.getini("filterwarnings")
+    cmdline_filters = config.known_args_namespace.pythonwarnings or []
+    with warnings.catch_warnings(record=True) as log:
+        # mypy can't infer that record=True means log is not None; help it.
+        assert log is not None
+
+        if not sys.warnoptions:
+            # If user is not explicitly configuring warning filters, show deprecation warnings by default (#2908).
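+            # "always" (rather than "default") records every occurrence, not
+            # just the first one per source location.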
+ warnings.filterwarnings("always", category=DeprecationWarning) + warnings.filterwarnings("always", category=PendingDeprecationWarning) + + apply_warning_filters(config_filters, cmdline_filters) + + # apply filters from "filterwarnings" marks + nodeid = "" if item is None else item.nodeid + if item is not None: + for mark in item.iter_markers(name="filterwarnings"): + for arg in mark.args: + warnings.filterwarnings(*parse_warning_filter(arg, escape=False)) + + yield + + for warning_message in log: + ihook.pytest_warning_recorded.call_historic( + kwargs=dict( + warning_message=warning_message, + nodeid=nodeid, + when=when, + location=None, + ) + ) + + +def warning_record_to_str(warning_message: warnings.WarningMessage) -> str: + """Convert a warnings.WarningMessage to a string.""" + warn_msg = warning_message.message + msg = warnings.formatwarning( + str(warn_msg), + warning_message.category, + warning_message.filename, + warning_message.lineno, + warning_message.line, + ) + if warning_message.source is not None: + try: + import tracemalloc + except ImportError: + pass + else: + tb = tracemalloc.get_object_traceback(warning_message.source) + if tb is not None: + formatted_tb = "\n".join(tb.format()) + # Use a leading new line to better separate the (large) output + # from the traceback to the previous warning text. + msg += f"\nObject allocated at:\n{formatted_tb}" + else: + # No need for a leading new line. + url = "https://docs.pytest.org/en/stable/how-to/capture-warnings.html#resource-warnings" + msg += "Enable tracemalloc to get traceback where the object was allocated.\n" + msg += f"See {url} for more info." + return msg + + +@pytest.hookimpl(hookwrapper=True, tryfirst=True) +def pytest_runtest_protocol(item: Item) -> Generator[None, None, None]: + with catch_warnings_for_item( + config=item.config, ihook=item.ihook, when="runtest", item=item + ): + yield + + +@pytest.hookimpl(hookwrapper=True, tryfirst=True) +def pytest_collection(session: Session) -> Generator[None, None, None]: + config = session.config + with catch_warnings_for_item( + config=config, ihook=config.hook, when="collect", item=None + ): + yield + + +@pytest.hookimpl(hookwrapper=True) +def pytest_terminal_summary( + terminalreporter: TerminalReporter, +) -> Generator[None, None, None]: + config = terminalreporter.config + with catch_warnings_for_item( + config=config, ihook=config.hook, when="config", item=None + ): + yield + + +@pytest.hookimpl(hookwrapper=True) +def pytest_sessionfinish(session: Session) -> Generator[None, None, None]: + config = session.config + with catch_warnings_for_item( + config=config, ihook=config.hook, when="config", item=None + ): + yield + + +@pytest.hookimpl(hookwrapper=True) +def pytest_load_initial_conftests( + early_config: "Config", +) -> Generator[None, None, None]: + with catch_warnings_for_item( + config=early_config, ihook=early_config.hook, when="config", item=None + ): + yield diff --git a/venv/lib/python3.10/site-packages/_virtualenv.pth b/venv/lib/python3.10/site-packages/_virtualenv.pth deleted file mode 100644 index 1c3ff99..0000000 --- a/venv/lib/python3.10/site-packages/_virtualenv.pth +++ /dev/null @@ -1 +0,0 @@ -import _virtualenv \ No newline at end of file diff --git a/venv/lib/python3.10/site-packages/_virtualenv.py b/venv/lib/python3.10/site-packages/_virtualenv.py deleted file mode 100644 index faee64c..0000000 --- a/venv/lib/python3.10/site-packages/_virtualenv.py +++ /dev/null @@ -1,130 +0,0 @@ -"""Patches that are applied at runtime to the virtual environment""" 
-# -*- coding: utf-8 -*-
-
-import os
-import sys
-
-VIRTUALENV_PATCH_FILE = os.path.join(__file__)
-
-
-def patch_dist(dist):
-    """
-    Distutils allows user to configure some arguments via a configuration file:
-    https://docs.python.org/3/install/index.html#distutils-configuration-files
-
-    Some of this arguments though don't make sense in context of the virtual environment files, let's fix them up.
-    """
-    # we cannot allow some install config as that would get packages installed outside of the virtual environment
-    old_parse_config_files = dist.Distribution.parse_config_files
-
-    def parse_config_files(self, *args, **kwargs):
-        result = old_parse_config_files(self, *args, **kwargs)
-        install = self.get_option_dict("install")
-
-        if "prefix" in install:  # the prefix governs where to install the libraries
-            install["prefix"] = VIRTUALENV_PATCH_FILE, os.path.abspath(sys.prefix)
-        for base in ("purelib", "platlib", "headers", "scripts", "data"):
-            key = "install_{}".format(base)
-            if key in install:  # do not allow global configs to hijack venv paths
-                install.pop(key, None)
-        return result
-
-    dist.Distribution.parse_config_files = parse_config_files
-
-
-# Import hook that patches some modules to ignore configuration values that break package installation in case
-# of virtual environments.
-_DISTUTILS_PATCH = "distutils.dist", "setuptools.dist"
-if sys.version_info > (3, 4):
-    # https://docs.python.org/3/library/importlib.html#setting-up-an-importer
-
-    class _Finder:
-        """A meta path finder that allows patching the imported distutils modules"""
-
-        fullname = None
-
-        # lock[0] is threading.Lock(), but initialized lazily to avoid importing threading very early at startup,
-        # because there are gevent-based applications that need to be first to import threading by themselves.
-        # See https://github.com/pypa/virtualenv/issues/1895 for details.
-        lock = []
-
-        def find_spec(self, fullname, path, target=None):  # noqa: U100
-            if fullname in _DISTUTILS_PATCH and self.fullname is None:
-                # initialize lock[0] lazily
-                if len(self.lock) == 0:
-                    import threading
-
-                    lock = threading.Lock()
-                    # there is possibility that two threads T1 and T2 are simultaneously running into find_spec,
-                    # observing .lock as empty, and further going into hereby initialization. However due to the GIL,
-                    # list.append() operation is atomic and this way only one of the threads will "win" to put the lock
-                    # - that every thread will use - into .lock[0].
-                    # https://docs.python.org/3/faq/library.html#what-kinds-of-global-value-mutation-are-thread-safe
-                    self.lock.append(lock)
-
-                from functools import partial
-                from importlib.util import find_spec
-
-                with self.lock[0]:
-                    self.fullname = fullname
-                    try:
-                        spec = find_spec(fullname, path)
-                        if spec is not None:
-                            # https://www.python.org/dev/peps/pep-0451/#how-loading-will-work
-                            is_new_api = hasattr(spec.loader, "exec_module")
-                            func_name = "exec_module" if is_new_api else "load_module"
-                            old = getattr(spec.loader, func_name)
-                            func = self.exec_module if is_new_api else self.load_module
-                            if old is not func:
-                                try:
-                                    setattr(spec.loader, func_name, partial(func, old))
-                                except AttributeError:
-                                    pass  # C-Extension loaders are r/o such as zipimporter with <3.7
diff --git a/venv/lib/python3.10/site-packages/attr/__init__.py b/venv/lib/python3.10/site-packages/attr/__init__.py
+def _make_getattr(mod_name: str) -> Callable:
+    """
+    Create a metadata proxy for packaging information that uses *mod_name* in
+    its warnings and errors.
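+
+    For example, ``attr.__version__`` is resolved lazily through the returned
+    ``__getattr__`` (emitting a DeprecationWarning) rather than being set at
+    import time.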
+ """ + + def __getattr__(name: str) -> str: + dunder_to_metadata = { + "__title__": "Name", + "__copyright__": "", + "__version__": "version", + "__version_info__": "version", + "__description__": "summary", + "__uri__": "", + "__url__": "", + "__author__": "", + "__email__": "", + "__license__": "license", + } + if name not in dunder_to_metadata.keys(): + raise AttributeError(f"module {mod_name} has no attribute {name}") + + import sys + import warnings + + if sys.version_info < (3, 8): + from importlib_metadata import metadata + else: + from importlib.metadata import metadata + + if name != "__version_info__": + warnings.warn( + f"Accessing {mod_name}.{name} is deprecated and will be " + "removed in a future release. Use importlib.metadata directly " + "to query for attrs's packaging metadata.", + DeprecationWarning, + stacklevel=2, + ) + + meta = metadata("attrs") + if name == "__license__": + return "MIT" + elif name == "__copyright__": + return "Copyright (c) 2015 Hynek Schlawack" + elif name in ("__uri__", "__url__"): + return meta["Project-URL"].split(" ", 1)[-1] + elif name == "__version_info__": + return VersionInfo._from_version_string(meta["version"]) + elif name == "__author__": + return meta["Author-email"].rsplit(" ", 1)[0] + elif name == "__email__": + return meta["Author-email"].rsplit("<", 1)[1][:-1] + + return meta[dunder_to_metadata[name]] + + return __getattr__ + + +__getattr__ = _make_getattr(__name__) diff --git a/venv/lib/python3.10/site-packages/attr/__init__.pyi b/venv/lib/python3.10/site-packages/attr/__init__.pyi new file mode 100644 index 0000000..ced5a3f --- /dev/null +++ b/venv/lib/python3.10/site-packages/attr/__init__.pyi @@ -0,0 +1,571 @@ +import enum +import sys + +from typing import ( + Any, + Callable, + Dict, + Generic, + List, + Mapping, + Optional, + Protocol, + Sequence, + Tuple, + Type, + TypeVar, + Union, + overload, +) + +# `import X as X` is required to make these public +from . import converters as converters +from . import exceptions as exceptions +from . import filters as filters +from . import setters as setters +from . import validators as validators +from ._cmp import cmp_using as cmp_using +from ._typing_compat import AttrsInstance_ +from ._version_info import VersionInfo + +if sys.version_info >= (3, 10): + from typing import TypeGuard +else: + from typing_extensions import TypeGuard + +__version__: str +__version_info__: VersionInfo +__title__: str +__description__: str +__url__: str +__uri__: str +__author__: str +__email__: str +__license__: str +__copyright__: str + +_T = TypeVar("_T") +_C = TypeVar("_C", bound=type) + +_EqOrderType = Union[bool, Callable[[Any], Any]] +_ValidatorType = Callable[[Any, "Attribute[_T]", _T], Any] +_ConverterType = Callable[[Any], Any] +_FilterType = Callable[["Attribute[_T]", _T], bool] +_ReprType = Callable[[Any], str] +_ReprArgType = Union[bool, _ReprType] +_OnSetAttrType = Callable[[Any, "Attribute[Any]", Any], Any] +_OnSetAttrArgType = Union[ + _OnSetAttrType, List[_OnSetAttrType], setters._NoOpType +] +_FieldTransformer = Callable[ + [type, List["Attribute[Any]"]], List["Attribute[Any]"] +] +# FIXME: in reality, if multiple validators are passed they must be in a list +# or tuple, but those are invariant and so would prevent subtypes of +# _ValidatorType from working when passed in a list or tuple. +_ValidatorArgType = Union[_ValidatorType[_T], Sequence[_ValidatorType[_T]]] + +# We subclass this here to keep the protocol's qualified name clean. 
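+# (AttrsInstance_ comes from the private _typing_compat module; subclassing it
+# here keeps the public name rendered as attr.AttrsInstance in type errors.)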
+class AttrsInstance(AttrsInstance_, Protocol):
+    pass
+
+_A = TypeVar("_A", bound=AttrsInstance)
+# _make --
+
+class _Nothing(enum.Enum):
+    NOTHING = enum.auto()
+
+NOTHING = _Nothing.NOTHING
+
+# NOTE: Factory lies about its return type to make this possible:
+# `x: List[int] # = Factory(list)`
+# Work around mypy issue #4554 in the common case by using an overload.
+if sys.version_info >= (3, 8):
+    from typing import Literal
+    @overload
+    def Factory(factory: Callable[[], _T]) -> _T: ...
+    @overload
+    def Factory(
+        factory: Callable[[Any], _T],
+        takes_self: Literal[True],
+    ) -> _T: ...
+    @overload
+    def Factory(
+        factory: Callable[[], _T],
+        takes_self: Literal[False],
+    ) -> _T: ...
+
+else:
+    @overload
+    def Factory(factory: Callable[[], _T]) -> _T: ...
+    @overload
+    def Factory(
+        factory: Union[Callable[[Any], _T], Callable[[], _T]],
+        takes_self: bool = ...,
+    ) -> _T: ...
+
+# Static type inference support via __dataclass_transform__ implemented as per:
+# https://github.com/microsoft/pyright/blob/1.1.135/specs/dataclass_transforms.md
+# This annotation must be applied to all overloads of "define" and "attrs"
+#
+# NOTE: This is a typing construct and does not exist at runtime. Extensions
+# wrapping attrs decorators should declare a separate __dataclass_transform__
+# signature in the extension module using the specification linked above to
+# provide pyright support.
+def __dataclass_transform__(
+    *,
+    eq_default: bool = True,
+    order_default: bool = False,
+    kw_only_default: bool = False,
+    frozen_default: bool = False,
+    field_descriptors: Tuple[Union[type, Callable[..., Any]], ...] = (()),
+) -> Callable[[_T], _T]: ...
+
+class Attribute(Generic[_T]):
+    name: str
+    default: Optional[_T]
+    validator: Optional[_ValidatorType[_T]]
+    repr: _ReprArgType
+    cmp: _EqOrderType
+    eq: _EqOrderType
+    order: _EqOrderType
+    hash: Optional[bool]
+    init: bool
+    converter: Optional[_ConverterType]
+    metadata: Dict[Any, Any]
+    type: Optional[Type[_T]]
+    kw_only: bool
+    on_setattr: _OnSetAttrType
+    alias: Optional[str]
+
+    def evolve(self, **changes: Any) -> "Attribute[Any]": ...
+
+# NOTE: We had several choices for the annotation to use for type arg:
+# 1) Type[_T]
+#   - Pros: Handles simple cases correctly
+#   - Cons: Might produce less informative errors in the case of conflicting
+#     TypeVars e.g. `attr.ib(default='bad', type=int)`
+# 2) Callable[..., _T]
+#   - Pros: Better error messages than #1 for conflicting TypeVars
+#   - Cons: Terrible error messages for validator checks.
+#     e.g. attr.ib(type=int, validator=validate_str)
+#         -> error: Cannot infer function type argument
+# 3) type (and do all of the work in the mypy plugin)
+#   - Pros: Simple here, and we could customize the plugin with our own errors.
+#   - Cons: Would need to write mypy plugin code to handle all the cases.
+# We chose option #1.
+
+# `attr` lies about its return type to make the following possible:
+#     attr()    -> Any
+#     attr(8)   -> int
+#     attr(validator=<some callable>)  -> Whatever the callable expects.
+# This makes this type of assignment possible:
+#     x: int = attr(8)
+#
+# This form catches explicit None or no default but with no other arguments
+# returns Any.
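+# For example, under this first overload (illustrative):
+#     x = attrib()        # x: Any
+#     y: int = attrib()   # accepted as well, since Any is assignable to int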
+@overload +def attrib( + default: None = ..., + validator: None = ..., + repr: _ReprArgType = ..., + cmp: Optional[_EqOrderType] = ..., + hash: Optional[bool] = ..., + init: bool = ..., + metadata: Optional[Mapping[Any, Any]] = ..., + type: None = ..., + converter: None = ..., + factory: None = ..., + kw_only: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + alias: Optional[str] = ..., +) -> Any: ... + +# This form catches an explicit None or no default and infers the type from the +# other arguments. +@overload +def attrib( + default: None = ..., + validator: Optional[_ValidatorArgType[_T]] = ..., + repr: _ReprArgType = ..., + cmp: Optional[_EqOrderType] = ..., + hash: Optional[bool] = ..., + init: bool = ..., + metadata: Optional[Mapping[Any, Any]] = ..., + type: Optional[Type[_T]] = ..., + converter: Optional[_ConverterType] = ..., + factory: Optional[Callable[[], _T]] = ..., + kw_only: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + alias: Optional[str] = ..., +) -> _T: ... + +# This form catches an explicit default argument. +@overload +def attrib( + default: _T, + validator: Optional[_ValidatorArgType[_T]] = ..., + repr: _ReprArgType = ..., + cmp: Optional[_EqOrderType] = ..., + hash: Optional[bool] = ..., + init: bool = ..., + metadata: Optional[Mapping[Any, Any]] = ..., + type: Optional[Type[_T]] = ..., + converter: Optional[_ConverterType] = ..., + factory: Optional[Callable[[], _T]] = ..., + kw_only: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + alias: Optional[str] = ..., +) -> _T: ... + +# This form covers type=non-Type: e.g. forward references (str), Any +@overload +def attrib( + default: Optional[_T] = ..., + validator: Optional[_ValidatorArgType[_T]] = ..., + repr: _ReprArgType = ..., + cmp: Optional[_EqOrderType] = ..., + hash: Optional[bool] = ..., + init: bool = ..., + metadata: Optional[Mapping[Any, Any]] = ..., + type: object = ..., + converter: Optional[_ConverterType] = ..., + factory: Optional[Callable[[], _T]] = ..., + kw_only: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + alias: Optional[str] = ..., +) -> Any: ... +@overload +def field( + *, + default: None = ..., + validator: None = ..., + repr: _ReprArgType = ..., + hash: Optional[bool] = ..., + init: bool = ..., + metadata: Optional[Mapping[Any, Any]] = ..., + converter: None = ..., + factory: None = ..., + kw_only: bool = ..., + eq: Optional[bool] = ..., + order: Optional[bool] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + alias: Optional[str] = ..., + type: Optional[type] = ..., +) -> Any: ... + +# This form catches an explicit None or no default and infers the type from the +# other arguments. +@overload +def field( + *, + default: None = ..., + validator: Optional[_ValidatorArgType[_T]] = ..., + repr: _ReprArgType = ..., + hash: Optional[bool] = ..., + init: bool = ..., + metadata: Optional[Mapping[Any, Any]] = ..., + converter: Optional[_ConverterType] = ..., + factory: Optional[Callable[[], _T]] = ..., + kw_only: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + alias: Optional[str] = ..., + type: Optional[type] = ..., +) -> _T: ... 
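+
+# Illustrative only (a hedged sketch, not part of the stub itself): under the
+# overloads above and below, a type checker resolves e.g.
+#
+#     @define
+#     class C:
+#         x: int = field(default=42)     # explicit default -> _T is int
+#         y: list = field(factory=list)  # _T inferred from the factory
+#         z = field()                    # no hints -> falls back to Any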
+ +# This form catches an explicit default argument. +@overload +def field( + *, + default: _T, + validator: Optional[_ValidatorArgType[_T]] = ..., + repr: _ReprArgType = ..., + hash: Optional[bool] = ..., + init: bool = ..., + metadata: Optional[Mapping[Any, Any]] = ..., + converter: Optional[_ConverterType] = ..., + factory: Optional[Callable[[], _T]] = ..., + kw_only: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + alias: Optional[str] = ..., + type: Optional[type] = ..., +) -> _T: ... + +# This form covers type=non-Type: e.g. forward references (str), Any +@overload +def field( + *, + default: Optional[_T] = ..., + validator: Optional[_ValidatorArgType[_T]] = ..., + repr: _ReprArgType = ..., + hash: Optional[bool] = ..., + init: bool = ..., + metadata: Optional[Mapping[Any, Any]] = ..., + converter: Optional[_ConverterType] = ..., + factory: Optional[Callable[[], _T]] = ..., + kw_only: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + alias: Optional[str] = ..., + type: Optional[type] = ..., +) -> Any: ... +@overload +@__dataclass_transform__(order_default=True, field_descriptors=(attrib, field)) +def attrs( + maybe_cls: _C, + these: Optional[Dict[str, Any]] = ..., + repr_ns: Optional[str] = ..., + repr: bool = ..., + cmp: Optional[_EqOrderType] = ..., + hash: Optional[bool] = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + auto_detect: bool = ..., + collect_by_mro: bool = ..., + getstate_setstate: Optional[bool] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + field_transformer: Optional[_FieldTransformer] = ..., + match_args: bool = ..., + unsafe_hash: Optional[bool] = ..., +) -> _C: ... +@overload +@__dataclass_transform__(order_default=True, field_descriptors=(attrib, field)) +def attrs( + maybe_cls: None = ..., + these: Optional[Dict[str, Any]] = ..., + repr_ns: Optional[str] = ..., + repr: bool = ..., + cmp: Optional[_EqOrderType] = ..., + hash: Optional[bool] = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + auto_detect: bool = ..., + collect_by_mro: bool = ..., + getstate_setstate: Optional[bool] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + field_transformer: Optional[_FieldTransformer] = ..., + match_args: bool = ..., + unsafe_hash: Optional[bool] = ..., +) -> Callable[[_C], _C]: ... 
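+
+# Hedged usage sketch (illustrative, not part of the stub): the two `attrs`
+# overloads above distinguish bare decoration from parametrized decoration:
+#
+#     @attr.s
+#     class Plain:          # maybe_cls is the class -> returns _C
+#         x = attr.ib()
+#
+#     @attr.s(frozen=True)  # maybe_cls is None -> returns Callable[[_C], _C]
+#     class Locked:
+#         x = attr.ib()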
+@overload +@__dataclass_transform__(field_descriptors=(attrib, field)) +def define( + maybe_cls: _C, + *, + these: Optional[Dict[str, Any]] = ..., + repr: bool = ..., + unsafe_hash: Optional[bool] = ..., + hash: Optional[bool] = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: Optional[bool] = ..., + order: Optional[bool] = ..., + auto_detect: bool = ..., + getstate_setstate: Optional[bool] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + field_transformer: Optional[_FieldTransformer] = ..., + match_args: bool = ..., +) -> _C: ... +@overload +@__dataclass_transform__(field_descriptors=(attrib, field)) +def define( + maybe_cls: None = ..., + *, + these: Optional[Dict[str, Any]] = ..., + repr: bool = ..., + unsafe_hash: Optional[bool] = ..., + hash: Optional[bool] = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: Optional[bool] = ..., + order: Optional[bool] = ..., + auto_detect: bool = ..., + getstate_setstate: Optional[bool] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + field_transformer: Optional[_FieldTransformer] = ..., + match_args: bool = ..., +) -> Callable[[_C], _C]: ... + +mutable = define + +@overload +@__dataclass_transform__( + frozen_default=True, field_descriptors=(attrib, field) +) +def frozen( + maybe_cls: _C, + *, + these: Optional[Dict[str, Any]] = ..., + repr: bool = ..., + unsafe_hash: Optional[bool] = ..., + hash: Optional[bool] = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: Optional[bool] = ..., + order: Optional[bool] = ..., + auto_detect: bool = ..., + getstate_setstate: Optional[bool] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + field_transformer: Optional[_FieldTransformer] = ..., + match_args: bool = ..., +) -> _C: ... +@overload +@__dataclass_transform__( + frozen_default=True, field_descriptors=(attrib, field) +) +def frozen( + maybe_cls: None = ..., + *, + these: Optional[Dict[str, Any]] = ..., + repr: bool = ..., + unsafe_hash: Optional[bool] = ..., + hash: Optional[bool] = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: Optional[bool] = ..., + order: Optional[bool] = ..., + auto_detect: bool = ..., + getstate_setstate: Optional[bool] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + field_transformer: Optional[_FieldTransformer] = ..., + match_args: bool = ..., +) -> Callable[[_C], _C]: ... +def fields(cls: Type[AttrsInstance]) -> Any: ... +def fields_dict(cls: Type[AttrsInstance]) -> Dict[str, Attribute[Any]]: ... +def validate(inst: AttrsInstance) -> None: ... +def resolve_types( + cls: _A, + globalns: Optional[Dict[str, Any]] = ..., + localns: Optional[Dict[str, Any]] = ..., + attribs: Optional[List[Attribute[Any]]] = ..., + include_extras: bool = ..., +) -> _A: ... + +# TODO: add support for returning a proper attrs class from the mypy plugin +# we use Any instead of _CountingAttr so that e.g. 
`make_class('Foo', +# [attr.ib()])` is valid +def make_class( + name: str, + attrs: Union[List[str], Tuple[str, ...], Dict[str, Any]], + bases: Tuple[type, ...] = ..., + repr_ns: Optional[str] = ..., + repr: bool = ..., + cmp: Optional[_EqOrderType] = ..., + hash: Optional[bool] = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + collect_by_mro: bool = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + field_transformer: Optional[_FieldTransformer] = ..., +) -> type: ... + +# _funcs -- + +# TODO: add support for returning TypedDict from the mypy plugin +# FIXME: asdict/astuple do not honor their factory args. Waiting on one of +# these: +# https://github.com/python/mypy/issues/4236 +# https://github.com/python/typing/issues/253 +# XXX: remember to fix attrs.asdict/astuple too! +def asdict( + inst: AttrsInstance, + recurse: bool = ..., + filter: Optional[_FilterType[Any]] = ..., + dict_factory: Type[Mapping[Any, Any]] = ..., + retain_collection_types: bool = ..., + value_serializer: Optional[ + Callable[[type, Attribute[Any], Any], Any] + ] = ..., + tuple_keys: Optional[bool] = ..., +) -> Dict[str, Any]: ... + +# TODO: add support for returning NamedTuple from the mypy plugin +def astuple( + inst: AttrsInstance, + recurse: bool = ..., + filter: Optional[_FilterType[Any]] = ..., + tuple_factory: Type[Sequence[Any]] = ..., + retain_collection_types: bool = ..., +) -> Tuple[Any, ...]: ... +def has(cls: type) -> TypeGuard[Type[AttrsInstance]]: ... +def assoc(inst: _T, **changes: Any) -> _T: ... +def evolve(inst: _T, **changes: Any) -> _T: ... + +# _config -- + +def set_run_validators(run: bool) -> None: ... +def get_run_validators() -> bool: ... + +# aliases -- + +s = attributes = attrs +ib = attr = attrib +dataclass = attrs # Technically, partial(attrs, auto_attribs=True) ;) diff --git a/venv/lib/python3.10/site-packages/attr/_cmp.py b/venv/lib/python3.10/site-packages/attr/_cmp.py new file mode 100644 index 0000000..d9cbe22 --- /dev/null +++ b/venv/lib/python3.10/site-packages/attr/_cmp.py @@ -0,0 +1,155 @@ +# SPDX-License-Identifier: MIT + + +import functools +import types + +from ._make import _make_ne + + +_operation_names = {"eq": "==", "lt": "<", "le": "<=", "gt": ">", "ge": ">="} + + +def cmp_using( + eq=None, + lt=None, + le=None, + gt=None, + ge=None, + require_same_type=True, + class_name="Comparable", +): + """ + Create a class that can be passed into `attrs.field`'s ``eq``, ``order``, + and ``cmp`` arguments to customize field comparison. + + The resulting class will have a full set of ordering methods if at least + one of ``{lt, le, gt, ge}`` and ``eq`` are provided. + + :param Optional[callable] eq: `callable` used to evaluate equality of two + objects. + :param Optional[callable] lt: `callable` used to evaluate whether one + object is less than another object. + :param Optional[callable] le: `callable` used to evaluate whether one + object is less than or equal to another object. + :param Optional[callable] gt: `callable` used to evaluate whether one + object is greater than another object. + :param Optional[callable] ge: `callable` used to evaluate whether one + object is greater than or equal to another object. 
+
+    :param bool require_same_type: When `True`, equality and ordering methods
+        will return `NotImplemented` if objects are not of the same type.
+
+    :param Optional[str] class_name: Name of class. Defaults to 'Comparable'.
+
+    See `comparison` for more details.
+
+    .. versionadded:: 21.1.0
+    """
+
+    body = {
+        "__slots__": ["value"],
+        "__init__": _make_init(),
+        "_requirements": [],
+        "_is_comparable_to": _is_comparable_to,
+    }
+
+    # Add operations.
+    num_order_functions = 0
+    has_eq_function = False
+
+    if eq is not None:
+        has_eq_function = True
+        body["__eq__"] = _make_operator("eq", eq)
+        body["__ne__"] = _make_ne()
+
+    if lt is not None:
+        num_order_functions += 1
+        body["__lt__"] = _make_operator("lt", lt)
+
+    if le is not None:
+        num_order_functions += 1
+        body["__le__"] = _make_operator("le", le)
+
+    if gt is not None:
+        num_order_functions += 1
+        body["__gt__"] = _make_operator("gt", gt)
+
+    if ge is not None:
+        num_order_functions += 1
+        body["__ge__"] = _make_operator("ge", ge)
+
+    type_ = types.new_class(
+        class_name, (object,), {}, lambda ns: ns.update(body)
+    )
+
+    # Add same type requirement.
+    if require_same_type:
+        type_._requirements.append(_check_same_type)
+
+    # Add total ordering if at least one operation was defined.
+    if 0 < num_order_functions < 4:
+        if not has_eq_function:
+            # functools.total_ordering requires __eq__ to be defined,
+            # so raise early error here to keep a nice stack.
+            raise ValueError(
+                "eq must be defined in order to complete ordering from "
+                "lt, le, gt, ge."
+            )
+        type_ = functools.total_ordering(type_)
+
+    return type_
+
+
+def _make_init():
+    """
+    Create __init__ method.
+    """
+
+    def __init__(self, value):
+        """
+        Initialize object with *value*.
+        """
+        self.value = value
+
+    return __init__
+
+
+def _make_operator(name, func):
+    """
+    Create operator method.
+    """
+
+    def method(self, other):
+        if not self._is_comparable_to(other):
+            return NotImplemented
+
+        result = func(self.value, other.value)
+        if result is NotImplemented:
+            return NotImplemented
+
+        return result
+
+    method.__name__ = f"__{name}__"
+    method.__doc__ = (
+        f"Return a {_operation_names[name]} b. Computed by attrs."
+    )
+
+    return method
+
+
+def _is_comparable_to(self, other):
+    """
+    Check whether `other` is comparable to `self`.
+    """
+    for func in self._requirements:
+        if not func(self, other):
+            return False
+    return True
+
+
+def _check_same_type(self, other):
+    """
+    Return True if *self* and *other* are of the same type, False otherwise.
+    """
+    return other.value.__class__ is self.value.__class__
diff --git a/venv/lib/python3.10/site-packages/attr/_cmp.pyi b/venv/lib/python3.10/site-packages/attr/_cmp.pyi
new file mode 100644
index 0000000..f3dcdc1
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/attr/_cmp.pyi
@@ -0,0 +1,13 @@
+from typing import Any, Callable, Optional, Type
+
+_CompareWithType = Callable[[Any, Any], bool]
+
+def cmp_using(
+    eq: Optional[_CompareWithType] = ...,
+    lt: Optional[_CompareWithType] = ...,
+    le: Optional[_CompareWithType] = ...,
+    gt: Optional[_CompareWithType] = ...,
+    ge: Optional[_CompareWithType] = ...,
+    require_same_type: bool = ...,
+    class_name: str = ...,
+) -> Type: ...
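(A hedged aside on the `cmp_using` helper added above -- a minimal usage
sketch, not part of the diff itself. It builds a wrapper class whose dunder
methods delegate to the supplied callables; given `eq` plus at least one
ordering function, `functools.total_ordering` derives the rest:

    from attr import cmp_using

    # Wrapper ordered by absolute value; `eq` is required so that
    # total_ordering can fill in __le__, __gt__ and __ge__ from __lt__.
    AbsComparable = cmp_using(
        eq=lambda a, b: abs(a) == abs(b),
        lt=lambda a, b: abs(a) < abs(b),
        class_name="AbsComparable",
    )

    assert AbsComparable(-3) == AbsComparable(3)
    assert AbsComparable(2) < AbsComparable(-5)

Per its docstring, the resulting class is what `attr.ib()` / `attrs.field()`
accept in their ``eq``, ``order``, and ``cmp`` arguments to customize
per-field comparison.)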
diff --git a/venv/lib/python3.10/site-packages/attr/_compat.py b/venv/lib/python3.10/site-packages/attr/_compat.py new file mode 100644 index 0000000..c3bf5e3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/attr/_compat.py @@ -0,0 +1,185 @@ +# SPDX-License-Identifier: MIT + + +import inspect +import platform +import sys +import threading +import types +import warnings + +from collections.abc import Mapping, Sequence # noqa +from typing import _GenericAlias + + +PYPY = platform.python_implementation() == "PyPy" +PY_3_9_PLUS = sys.version_info[:2] >= (3, 9) +PY310 = sys.version_info[:2] >= (3, 10) +PY_3_12_PLUS = sys.version_info[:2] >= (3, 12) + + +def just_warn(*args, **kw): + warnings.warn( + "Running interpreter doesn't sufficiently support code object " + "introspection. Some features like bare super() or accessing " + "__class__ will not work with slotted classes.", + RuntimeWarning, + stacklevel=2, + ) + + +class _AnnotationExtractor: + """ + Extract type annotations from a callable, returning None whenever there + is none. + """ + + __slots__ = ["sig"] + + def __init__(self, callable): + try: + self.sig = inspect.signature(callable) + except (ValueError, TypeError): # inspect failed + self.sig = None + + def get_first_param_type(self): + """ + Return the type annotation of the first argument if it's not empty. + """ + if not self.sig: + return None + + params = list(self.sig.parameters.values()) + if params and params[0].annotation is not inspect.Parameter.empty: + return params[0].annotation + + return None + + def get_return_type(self): + """ + Return the return type if it's not empty. + """ + if ( + self.sig + and self.sig.return_annotation is not inspect.Signature.empty + ): + return self.sig.return_annotation + + return None + + +def make_set_closure_cell(): + """Return a function of two arguments (cell, value) which sets + the value stored in the closure cell `cell` to `value`. + """ + # pypy makes this easy. (It also supports the logic below, but + # why not do the easy/fast thing?) + if PYPY: + + def set_closure_cell(cell, value): + cell.__setstate__((value,)) + + return set_closure_cell + + # Otherwise gotta do it the hard way. + + try: + if sys.version_info >= (3, 8): + + def set_closure_cell(cell, value): + cell.cell_contents = value + + else: + # Create a function that will set its first cellvar to `value`. + def set_first_cellvar_to(value): + x = value + return + + # This function will be eliminated as dead code, but + # not before its reference to `x` forces `x` to be + # represented as a closure cell rather than a local. + def force_x_to_be_a_cell(): # pragma: no cover + return x + + # Extract the code object and make sure our assumptions about + # the closure behavior are correct. + co = set_first_cellvar_to.__code__ + if co.co_cellvars != ("x",) or co.co_freevars != (): + raise AssertionError # pragma: no cover + + # Convert this code object to a code object that sets the + # function's first _freevar_ (not cellvar) to the argument. + args = [co.co_argcount] + args.append(co.co_kwonlyargcount) + args.extend( + [ + co.co_nlocals, + co.co_stacksize, + co.co_flags, + co.co_code, + co.co_consts, + co.co_names, + co.co_varnames, + co.co_filename, + co.co_name, + co.co_firstlineno, + co.co_lnotab, + # These two arguments are reversed: + co.co_cellvars, + co.co_freevars, + ] + ) + set_first_freevar_code = types.CodeType(*args) + + def set_closure_cell(cell, value): + # Create a function using the set_first_freevar_code, + # whose first closure cell is `cell`. 
Calling it will + # change the value of that cell. + setter = types.FunctionType( + set_first_freevar_code, {}, "setter", (), (cell,) + ) + # And call it to set the cell. + setter(value) + + # Make sure it works on this interpreter: + def make_func_with_cell(): + x = None + + def func(): + return x # pragma: no cover + + return func + + cell = make_func_with_cell().__closure__[0] + set_closure_cell(cell, 100) + if cell.cell_contents != 100: + raise AssertionError # pragma: no cover + + except Exception: + return just_warn + else: + return set_closure_cell + + +set_closure_cell = make_set_closure_cell() + +# Thread-local global to track attrs instances which are already being repr'd. +# This is needed because there is no other (thread-safe) way to pass info +# about the instances that are already being repr'd through the call stack +# in order to ensure we don't perform infinite recursion. +# +# For instance, if an instance contains a dict which contains that instance, +# we need to know that we're already repr'ing the outside instance from within +# the dict's repr() call. +# +# This lives here rather than in _make.py so that the functions in _make.py +# don't have a direct reference to the thread-local in their globals dict. +# If they have such a reference, it breaks cloudpickle. +repr_context = threading.local() + + +def get_generic_base(cl): + """If this is a generic class (A[str]), return the generic base for it.""" + if cl.__class__ is _GenericAlias: + return cl.__origin__ + return None diff --git a/venv/lib/python3.10/site-packages/attr/_config.py b/venv/lib/python3.10/site-packages/attr/_config.py new file mode 100644 index 0000000..96d4200 --- /dev/null +++ b/venv/lib/python3.10/site-packages/attr/_config.py @@ -0,0 +1,31 @@ +# SPDX-License-Identifier: MIT + + +__all__ = ["set_run_validators", "get_run_validators"] + +_run_validators = True + + +def set_run_validators(run): + """ + Set whether or not validators are run. By default, they are run. + + .. deprecated:: 21.3.0 It will not be removed, but it also will not be + moved to new ``attrs`` namespace. Use `attrs.validators.set_disabled()` + instead. + """ + if not isinstance(run, bool): + raise TypeError("'run' must be bool.") + global _run_validators + _run_validators = run + + +def get_run_validators(): + """ + Return whether or not validators are run. + + .. deprecated:: 21.3.0 It will not be removed, but it also will not be + moved to new ``attrs`` namespace. Use `attrs.validators.get_disabled()` + instead. + """ + return _run_validators diff --git a/venv/lib/python3.10/site-packages/attr/_funcs.py b/venv/lib/python3.10/site-packages/attr/_funcs.py new file mode 100644 index 0000000..7f5d961 --- /dev/null +++ b/venv/lib/python3.10/site-packages/attr/_funcs.py @@ -0,0 +1,477 @@ +# SPDX-License-Identifier: MIT + + +import copy + +from ._compat import PY_3_9_PLUS, get_generic_base +from ._make import NOTHING, _obj_setattr, fields +from .exceptions import AttrsAttributeNotFoundError + + +def asdict( + inst, + recurse=True, + filter=None, + dict_factory=dict, + retain_collection_types=False, + value_serializer=None, +): + """ + Return the *attrs* attribute values of *inst* as a dict. + + Optionally recurse into other *attrs*-decorated classes. + + :param inst: Instance of an *attrs*-decorated class. + :param bool recurse: Recurse into classes that are also + *attrs*-decorated. + :param callable filter: A callable whose return code determines whether an + attribute or element is included (``True``) or dropped (``False``). 
Is + called with the `attrs.Attribute` as the first argument and the + value as the second argument. + :param callable dict_factory: A callable to produce dictionaries from. For + example, to produce ordered dictionaries instead of normal Python + dictionaries, pass in ``collections.OrderedDict``. + :param bool retain_collection_types: Do not convert to ``list`` when + encountering an attribute whose type is ``tuple`` or ``set``. Only + meaningful if ``recurse`` is ``True``. + :param Optional[callable] value_serializer: A hook that is called for every + attribute or dict key/value. It receives the current instance, field + and value and must return the (updated) value. The hook is run *after* + the optional *filter* has been applied. + + :rtype: return type of *dict_factory* + + :raise attrs.exceptions.NotAnAttrsClassError: If *cls* is not an *attrs* + class. + + .. versionadded:: 16.0.0 *dict_factory* + .. versionadded:: 16.1.0 *retain_collection_types* + .. versionadded:: 20.3.0 *value_serializer* + .. versionadded:: 21.3.0 If a dict has a collection for a key, it is + serialized as a tuple. + """ + attrs = fields(inst.__class__) + rv = dict_factory() + for a in attrs: + v = getattr(inst, a.name) + if filter is not None and not filter(a, v): + continue + + if value_serializer is not None: + v = value_serializer(inst, a, v) + + if recurse is True: + if has(v.__class__): + rv[a.name] = asdict( + v, + recurse=True, + filter=filter, + dict_factory=dict_factory, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ) + elif isinstance(v, (tuple, list, set, frozenset)): + cf = v.__class__ if retain_collection_types is True else list + rv[a.name] = cf( + [ + _asdict_anything( + i, + is_key=False, + filter=filter, + dict_factory=dict_factory, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ) + for i in v + ] + ) + elif isinstance(v, dict): + df = dict_factory + rv[a.name] = df( + ( + _asdict_anything( + kk, + is_key=True, + filter=filter, + dict_factory=df, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ), + _asdict_anything( + vv, + is_key=False, + filter=filter, + dict_factory=df, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ), + ) + for kk, vv in v.items() + ) + else: + rv[a.name] = v + else: + rv[a.name] = v + return rv + + +def _asdict_anything( + val, + is_key, + filter, + dict_factory, + retain_collection_types, + value_serializer, +): + """ + ``asdict`` only works on attrs instances, this works on anything. + """ + if getattr(val.__class__, "__attrs_attrs__", None) is not None: + # Attrs class. 
+ rv = asdict( + val, + recurse=True, + filter=filter, + dict_factory=dict_factory, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ) + elif isinstance(val, (tuple, list, set, frozenset)): + if retain_collection_types is True: + cf = val.__class__ + elif is_key: + cf = tuple + else: + cf = list + + rv = cf( + [ + _asdict_anything( + i, + is_key=False, + filter=filter, + dict_factory=dict_factory, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ) + for i in val + ] + ) + elif isinstance(val, dict): + df = dict_factory + rv = df( + ( + _asdict_anything( + kk, + is_key=True, + filter=filter, + dict_factory=df, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ), + _asdict_anything( + vv, + is_key=False, + filter=filter, + dict_factory=df, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ), + ) + for kk, vv in val.items() + ) + else: + rv = val + if value_serializer is not None: + rv = value_serializer(None, None, rv) + + return rv + + +def astuple( + inst, + recurse=True, + filter=None, + tuple_factory=tuple, + retain_collection_types=False, +): + """ + Return the *attrs* attribute values of *inst* as a tuple. + + Optionally recurse into other *attrs*-decorated classes. + + :param inst: Instance of an *attrs*-decorated class. + :param bool recurse: Recurse into classes that are also + *attrs*-decorated. + :param callable filter: A callable whose return code determines whether an + attribute or element is included (``True``) or dropped (``False``). Is + called with the `attrs.Attribute` as the first argument and the + value as the second argument. + :param callable tuple_factory: A callable to produce tuples from. For + example, to produce lists instead of tuples. + :param bool retain_collection_types: Do not convert to ``list`` + or ``dict`` when encountering an attribute which type is + ``tuple``, ``dict`` or ``set``. Only meaningful if ``recurse`` is + ``True``. + + :rtype: return type of *tuple_factory* + + :raise attrs.exceptions.NotAnAttrsClassError: If *cls* is not an *attrs* + class. + + .. versionadded:: 16.2.0 + """ + attrs = fields(inst.__class__) + rv = [] + retain = retain_collection_types # Very long. :/ + for a in attrs: + v = getattr(inst, a.name) + if filter is not None and not filter(a, v): + continue + if recurse is True: + if has(v.__class__): + rv.append( + astuple( + v, + recurse=True, + filter=filter, + tuple_factory=tuple_factory, + retain_collection_types=retain, + ) + ) + elif isinstance(v, (tuple, list, set, frozenset)): + cf = v.__class__ if retain is True else list + rv.append( + cf( + [ + astuple( + j, + recurse=True, + filter=filter, + tuple_factory=tuple_factory, + retain_collection_types=retain, + ) + if has(j.__class__) + else j + for j in v + ] + ) + ) + elif isinstance(v, dict): + df = v.__class__ if retain is True else dict + rv.append( + df( + ( + astuple( + kk, + tuple_factory=tuple_factory, + retain_collection_types=retain, + ) + if has(kk.__class__) + else kk, + astuple( + vv, + tuple_factory=tuple_factory, + retain_collection_types=retain, + ) + if has(vv.__class__) + else vv, + ) + for kk, vv in v.items() + ) + ) + else: + rv.append(v) + else: + rv.append(v) + + return rv if tuple_factory is list else tuple_factory(rv) + + +def has(cls): + """ + Check whether *cls* is a class with *attrs* attributes. + + :param type cls: Class to introspect. + :raise TypeError: If *cls* is not a class. 
+
+    :rtype: bool
+    """
+    attrs = getattr(cls, "__attrs_attrs__", None)
+    if attrs is not None:
+        return True
+
+    # No attrs, maybe it's a specialized generic (A[str])?
+    generic_base = get_generic_base(cls)
+    if generic_base is not None:
+        generic_attrs = getattr(generic_base, "__attrs_attrs__", None)
+        if generic_attrs is not None:
+            # Stick it on here for speed next time.
+            cls.__attrs_attrs__ = generic_attrs
+        return generic_attrs is not None
+    return False
+
+
+def assoc(inst, **changes):
+    """
+    Copy *inst* and apply *changes*.
+
+    This is different from `evolve`, which applies the changes to the
+    arguments that create the new instance.
+
+    `evolve`'s behavior is preferable, but there are `edge cases`_ where it
+    doesn't work. Therefore `assoc` is deprecated, but will not be removed.
+
+    .. _`edge cases`: https://github.com/python-attrs/attrs/issues/251
+
+    :param inst: Instance of a class with *attrs* attributes.
+    :param changes: Keyword changes in the new copy.
+
+    :return: A copy of inst with *changes* incorporated.
+
+    :raise attrs.exceptions.AttrsAttributeNotFoundError: If *attr_name*
+        couldn't be found on *cls*.
+    :raise attrs.exceptions.NotAnAttrsClassError: If *cls* is not an *attrs*
+        class.
+
+    .. deprecated:: 17.1.0
+        Use `attrs.evolve` instead if you can.
+        This function will not be removed due to the slightly different
+        approach compared to `attrs.evolve`.
+    """
+    new = copy.copy(inst)
+    attrs = fields(inst.__class__)
+    for k, v in changes.items():
+        a = getattr(attrs, k, NOTHING)
+        if a is NOTHING:
+            raise AttrsAttributeNotFoundError(
+                f"{k} is not an attrs attribute on {new.__class__}."
+            )
+        _obj_setattr(new, k, v)
+    return new
+
+
+def evolve(*args, **changes):
+    """
+    Create a new instance, based on the first positional argument with
+    *changes* applied.
+
+    :param inst: Instance of a class with *attrs* attributes.
+    :param changes: Keyword changes in the new copy.
+
+    :return: A copy of inst with *changes* incorporated.
+
+    :raise TypeError: If *attr_name* couldn't be found in the class
+        ``__init__``.
+    :raise attrs.exceptions.NotAnAttrsClassError: If *cls* is not an *attrs*
+        class.
+
+    .. versionadded:: 17.1.0
+    .. deprecated:: 23.1.0
+       It is now deprecated to pass the instance using the keyword argument
+       *inst*. It will raise a warning until at least April 2024, after which
+       it will become an error. Always pass the instance as a positional
+       argument.
+    """
+    # Try to get instance by positional argument first.
+    # Use changes otherwise and warn it'll break.
+    if args:
+        try:
+            (inst,) = args
+        except ValueError:
+            raise TypeError(
+                f"evolve() takes 1 positional argument, but {len(args)} "
+                "were given"
+            ) from None
+    else:
+        try:
+            inst = changes.pop("inst")
+        except KeyError:
+            raise TypeError(
+                "evolve() missing 1 required positional argument: 'inst'"
+            ) from None
+
+        import warnings
+
+        warnings.warn(
+            "Passing the instance per keyword argument is deprecated and "
+            "will stop working in, or after, April 2024.",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+
+    cls = inst.__class__
+    attrs = fields(cls)
+    for a in attrs:
+        if not a.init:
+            continue
+        attr_name = a.name  # To deal with private attributes.
+        init_name = a.alias
+        if init_name not in changes:
+            changes[init_name] = getattr(inst, attr_name)
+
+    return cls(**changes)
+
+
+def resolve_types(
+    cls, globalns=None, localns=None, attribs=None, include_extras=True
+):
+    """
+    Resolve any strings and forward references in type annotations.
+
+    This is only required if you need concrete types in `Attribute`'s *type*
+    field. In other words, you don't need to resolve your types if you only
+    use them for static type checking.
+
+    With no arguments, names will be looked up in the module in which the class
+    was created. If this is not what you want, e.g. if the name only exists
+    inside a method, you may pass *globalns* or *localns* to specify other
+    dictionaries in which to look up these names. See the docs of
+    `typing.get_type_hints` for more details.
+
+    :param type cls: Class to resolve.
+    :param Optional[dict] globalns: Dictionary containing global variables.
+    :param Optional[dict] localns: Dictionary containing local variables.
+    :param Optional[list] attribs: List of attribs for the given class.
+        This is necessary when calling from inside a ``field_transformer``
+        since *cls* is not an *attrs* class yet.
+    :param bool include_extras: Resolve more accurately, if possible.
+        Pass ``include_extras`` to ``typing.get_type_hints``, if supported by
+        the typing module. On supported Python versions (3.9+), this resolves
+        the types more accurately.
+
+    :raise TypeError: If *cls* is not a class.
+    :raise attrs.exceptions.NotAnAttrsClassError: If *cls* is not an *attrs*
+        class and you didn't pass any attribs.
+    :raise NameError: If types cannot be resolved because of missing variables.
+
+    :returns: *cls* so you can use this function also as a class decorator.
+        Please note that you have to apply it **after** `attrs.define`. That
+        means the decorator has to come in the line **before** `attrs.define`.
+
+    .. versionadded:: 20.1.0
+    .. versionadded:: 21.1.0 *attribs*
+    .. versionadded:: 23.1.0 *include_extras*
+
+    """
+    # Since calling get_type_hints is expensive we cache whether we've
+    # done it already.
+    if getattr(cls, "__attrs_types_resolved__", None) != cls:
+        import typing
+
+        kwargs = {"globalns": globalns, "localns": localns}
+
+        if PY_3_9_PLUS:
+            kwargs["include_extras"] = include_extras
+
+        hints = typing.get_type_hints(cls, **kwargs)
+        for field in fields(cls) if attribs is None else attribs:
+            if field.name in hints:
+                # Since fields have been frozen we must work around it.
+                _obj_setattr(field, "type", hints[field.name])
+        # We store the class we resolved so that subclasses know they haven't
+        # been resolved.
+        cls.__attrs_types_resolved__ = cls
+
+    # Return the class so you can use it as a decorator too.
+    return cls
diff --git a/venv/lib/python3.10/site-packages/attr/_make.py b/venv/lib/python3.10/site-packages/attr/_make.py
new file mode 100644
index 0000000..d72f738
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/attr/_make.py
@@ -0,0 +1,2987 @@
+# SPDX-License-Identifier: MIT
+
+import copy
+import enum
+import linecache
+import sys
+import types
+import typing
+
+from operator import itemgetter
+
+# We need to import _compat itself in addition to the _compat members to avoid
+# having the thread-local in the globals here.
+from . import _compat, _config, setters
+from ._compat import (
+    PY310,
+    _AnnotationExtractor,
+    get_generic_base,
+    set_closure_cell,
+)
+from .exceptions import (
+    DefaultAlreadySetError,
+    FrozenInstanceError,
+    NotAnAttrsClassError,
+    UnannotatedAttributeError,
+)
+
+
+# This is used at least twice, so cache it here.
+_obj_setattr = object.__setattr__ +_init_converter_pat = "__attr_converter_%s" +_init_factory_pat = "__attr_factory_%s" +_classvar_prefixes = ( + "typing.ClassVar", + "t.ClassVar", + "ClassVar", + "typing_extensions.ClassVar", +) +# we don't use a double-underscore prefix because that triggers +# name mangling when trying to create a slot for the field +# (when slots=True) +_hash_cache_field = "_attrs_cached_hash" + +_empty_metadata_singleton = types.MappingProxyType({}) + +# Unique object for unequivocal getattr() defaults. +_sentinel = object() + +_ng_default_on_setattr = setters.pipe(setters.convert, setters.validate) + + +class _Nothing(enum.Enum): + """ + Sentinel to indicate the lack of a value when ``None`` is ambiguous. + + If extending attrs, you can use ``typing.Literal[NOTHING]`` to show + that a value may be ``NOTHING``. + + .. versionchanged:: 21.1.0 ``bool(NOTHING)`` is now False. + .. versionchanged:: 22.2.0 ``NOTHING`` is now an ``enum.Enum`` variant. + """ + + NOTHING = enum.auto() + + def __repr__(self): + return "NOTHING" + + def __bool__(self): + return False + + +NOTHING = _Nothing.NOTHING +""" +Sentinel to indicate the lack of a value when ``None`` is ambiguous. +""" + + +class _CacheHashWrapper(int): + """ + An integer subclass that pickles / copies as None + + This is used for non-slots classes with ``cache_hash=True``, to avoid + serializing a potentially (even likely) invalid hash value. Since ``None`` + is the default value for uncalculated hashes, whenever this is copied, + the copy's value for the hash should automatically reset. + + See GH #613 for more details. + """ + + def __reduce__(self, _none_constructor=type(None), _args=()): + return _none_constructor, _args + + +def attrib( + default=NOTHING, + validator=None, + repr=True, + cmp=None, + hash=None, + init=True, + metadata=None, + type=None, + converter=None, + factory=None, + kw_only=False, + eq=None, + order=None, + on_setattr=None, + alias=None, +): + """ + Create a new attribute on a class. + + .. warning:: + + Does *not* do anything unless the class is also decorated with + `attr.s` / `attrs.define` / et cetera! + + Please consider using `attrs.field` in new code (``attr.ib`` will *never* + go away, though). + + :param default: A value that is used if an *attrs*-generated ``__init__`` + is used and no value is passed while instantiating or the attribute is + excluded using ``init=False``. + + If the value is an instance of `attrs.Factory`, its callable will be + used to construct a new value (useful for mutable data types like lists + or dicts). + + If a default is not set (or set manually to `attrs.NOTHING`), a value + *must* be supplied when instantiating; otherwise a `TypeError` + will be raised. + + The default can also be set using decorator notation as shown below. + + :type default: Any value + + :param callable factory: Syntactic sugar for + ``default=attr.Factory(factory)``. + + :param validator: `callable` that is called by *attrs*-generated + ``__init__`` methods after the instance has been initialized. They + receive the initialized instance, the :func:`~attrs.Attribute`, and the + passed value. + + The return value is *not* inspected so the validator has to throw an + exception itself. + + If a `list` is passed, its items are treated as validators and must + all pass. + + Validators can be globally disabled and re-enabled using + `attrs.validators.get_disabled` / `attrs.validators.set_disabled`. + + The validator can also be set using decorator notation as shown below. 
+
+    :type validator: `callable` or a `list` of `callable`\\ s.
+
+    :param repr: Include this attribute in the generated ``__repr__``
+        method. If ``True``, include the attribute; if ``False``, omit it. By
+        default, the built-in ``repr()`` function is used. To override how the
+        attribute value is formatted, pass a ``callable`` that takes a single
+        value and returns a string. Note that the resulting string is used
+        as-is, i.e. it will be used directly *instead* of calling ``repr()``
+        (the default).
+    :type repr: a `bool` or a `callable` to use a custom function.
+
+    :param eq: If ``True`` (default), include this attribute in the
+        generated ``__eq__`` and ``__ne__`` methods that check two instances
+        for equality. To override how the attribute value is compared,
+        pass a ``callable`` that takes a single value and returns the value
+        to be compared.
+    :type eq: a `bool` or a `callable`.
+
+    :param order: If ``True`` (default), include this attribute in the
+        generated ``__lt__``, ``__le__``, ``__gt__`` and ``__ge__`` methods.
+        To override how the attribute value is ordered,
+        pass a ``callable`` that takes a single value and returns the value
+        to be ordered.
+    :type order: a `bool` or a `callable`.
+
+    :param cmp: Setting *cmp* is equivalent to setting *eq* and *order* to the
+        same value. Must not be mixed with *eq* or *order*.
+    :type cmp: a `bool` or a `callable`.
+
+    :param Optional[bool] hash: Include this attribute in the generated
+        ``__hash__`` method. If ``None`` (default), mirror *eq*'s value. This
+        is the correct behavior according to the Python spec. Setting this
+        value to anything other than ``None`` is *discouraged*.
+    :param bool init: Include this attribute in the generated ``__init__``
+        method. It is possible to set this to ``False`` and set a default
+        value. In that case this attribute is unconditionally initialized
+        with the specified default value or factory.
+    :param callable converter: `callable` that is called by
+        *attrs*-generated ``__init__`` methods to convert attribute's value
+        to the desired format. It is given the passed-in value, and the
+        returned value will be used as the new value of the attribute. The
+        value is converted before being passed to the validator, if any.
+    :param metadata: An arbitrary mapping, to be used by third-party
+        components. See `extending-metadata`.
+
+    :param type: The type of the attribute. Nowadays, the preferred method to
+        specify the type is using a variable annotation (see :pep:`526`).
+        This argument is provided for backward compatibility.
+        Regardless of the approach used, the type will be stored on
+        ``Attribute.type``.
+
+        Please note that *attrs* doesn't do anything with this metadata by
+        itself. You can use it as part of your own code or for
+        `static type checking `.
+    :param kw_only: Make this attribute keyword-only in the generated
+        ``__init__`` (if ``init`` is ``False``, this parameter is ignored).
+    :param on_setattr: Allows overwriting the *on_setattr* setting from
+        `attr.s`. If left `None`, the *on_setattr* value from `attr.s` is
+        used. Set to `attrs.setters.NO_OP` to run **no** `setattr` hooks for
+        this attribute -- regardless of the setting in `attr.s`.
+    :type on_setattr: `callable`, or a list of callables, or `None`, or
+        `attrs.setters.NO_OP`
+    :param Optional[str] alias: Override this attribute's parameter name in
+        the generated ``__init__`` method. If left `None`, defaults to
+        ``name`` stripped of leading underscores. See `private-attributes`.
+
+    .. versionadded:: 15.2.0 *convert*
+    .. versionadded:: 16.3.0 *metadata*
+    .. versionchanged:: 17.1.0 *validator* can be a ``list`` now.
+    .. versionchanged:: 17.1.0
+       *hash* is ``None`` and therefore mirrors *eq* by default.
+    .. versionadded:: 17.3.0 *type*
+    .. deprecated:: 17.4.0 *convert*
+    .. versionadded:: 17.4.0 *converter* as a replacement for the deprecated
+       *convert* to achieve consistency with other noun-based arguments.
+    .. versionadded:: 18.1.0
+       ``factory=f`` is syntactic sugar for ``default=attr.Factory(f)``.
+    .. versionadded:: 18.2.0 *kw_only*
+    .. versionchanged:: 19.2.0 *convert* keyword argument removed.
+    .. versionchanged:: 19.2.0 *repr* also accepts a custom callable.
+    .. deprecated:: 19.2.0 *cmp* Removal on or after 2021-06-01.
+    .. versionadded:: 19.2.0 *eq* and *order*
+    .. versionadded:: 20.1.0 *on_setattr*
+    .. versionchanged:: 20.3.0 *kw_only* backported to Python 2
+    .. versionchanged:: 21.1.0
+       *eq*, *order*, and *cmp* also accept a custom callable
+    .. versionchanged:: 21.1.0 *cmp* undeprecated
+    .. versionadded:: 22.2.0 *alias*
+    """
+    eq, eq_key, order, order_key = _determine_attrib_eq_order(
+        cmp, eq, order, True
+    )
+
+    if hash is not None and hash is not True and hash is not False:
+        raise TypeError(
+            "Invalid value for hash. Must be True, False, or None."
+        )
+
+    if factory is not None:
+        if default is not NOTHING:
+            raise ValueError(
+                "The `default` and `factory` arguments are mutually "
+                "exclusive."
+            )
+        if not callable(factory):
+            raise ValueError("The `factory` argument must be a callable.")
+        default = Factory(factory)
+
+    if metadata is None:
+        metadata = {}
+
+    # Apply syntactic sugar by auto-wrapping.
+    if isinstance(on_setattr, (list, tuple)):
+        on_setattr = setters.pipe(*on_setattr)
+
+    if validator and isinstance(validator, (list, tuple)):
+        validator = and_(*validator)
+
+    if converter and isinstance(converter, (list, tuple)):
+        converter = pipe(*converter)
+
+    return _CountingAttr(
+        default=default,
+        validator=validator,
+        repr=repr,
+        cmp=None,
+        hash=hash,
+        init=init,
+        converter=converter,
+        metadata=metadata,
+        type=type,
+        kw_only=kw_only,
+        eq=eq,
+        eq_key=eq_key,
+        order=order,
+        order_key=order_key,
+        on_setattr=on_setattr,
+        alias=alias,
+    )
+
+
+def _compile_and_eval(script, globs, locs=None, filename=""):
+    """
+    "Exec" the script with the given global (globs) and local (locs) variables.
+    """
+    bytecode = compile(script, filename, "exec")
+    eval(bytecode, globs, locs)
+
+
+def _make_method(name, script, filename, globs):
+    """
+    Create the method with the script given and return the method object.
+    """
+    locs = {}
+
+    # In order for debuggers like PDB to be able to step through the code,
+    # we add a fake linecache entry.
+    count = 1
+    base_filename = filename
+    while True:
+        linecache_tuple = (
+            len(script),
+            None,
+            script.splitlines(True),
+            filename,
+        )
+        old_val = linecache.cache.setdefault(filename, linecache_tuple)
+        if old_val == linecache_tuple:
+            break
+        else:
+            filename = f"{base_filename[:-1]}-{count}>"
+            count += 1
+
+    _compile_and_eval(script, globs, locs, filename)
+
+    return locs[name]
+
+
+def _make_attr_tuple_class(cls_name, attr_names):
+    """
+    Create a tuple subclass to hold `Attribute`s for an `attrs` class.
+
+    The subclass is a bare tuple with properties for names.
+ + class MyClassAttributes(tuple): + __slots__ = () + x = property(itemgetter(0)) + """ + attr_class_name = f"{cls_name}Attributes" + attr_class_template = [ + f"class {attr_class_name}(tuple):", + " __slots__ = ()", + ] + if attr_names: + for i, attr_name in enumerate(attr_names): + attr_class_template.append( + f" {attr_name} = _attrs_property(_attrs_itemgetter({i}))" + ) + else: + attr_class_template.append(" pass") + globs = {"_attrs_itemgetter": itemgetter, "_attrs_property": property} + _compile_and_eval("\n".join(attr_class_template), globs) + return globs[attr_class_name] + + +# Tuple class for extracted attributes from a class definition. +# `base_attrs` is a subset of `attrs`. +_Attributes = _make_attr_tuple_class( + "_Attributes", + [ + # all attributes to build dunder methods for + "attrs", + # attributes that have been inherited + "base_attrs", + # map inherited attributes to their originating classes + "base_attrs_map", + ], +) + + +def _is_class_var(annot): + """ + Check whether *annot* is a typing.ClassVar. + + The string comparison hack is used to avoid evaluating all string + annotations which would put attrs-based classes at a performance + disadvantage compared to plain old classes. + """ + annot = str(annot) + + # Annotation can be quoted. + if annot.startswith(("'", '"')) and annot.endswith(("'", '"')): + annot = annot[1:-1] + + return annot.startswith(_classvar_prefixes) + + +def _has_own_attribute(cls, attrib_name): + """ + Check whether *cls* defines *attrib_name* (and doesn't just inherit it). + """ + attr = getattr(cls, attrib_name, _sentinel) + if attr is _sentinel: + return False + + for base_cls in cls.__mro__[1:]: + a = getattr(base_cls, attrib_name, None) + if attr is a: + return False + + return True + + +def _get_annotations(cls): + """ + Get annotations for *cls*. + """ + if _has_own_attribute(cls, "__annotations__"): + return cls.__annotations__ + + return {} + + +def _collect_base_attrs(cls, taken_attr_names): + """ + Collect attr.ibs from base classes of *cls*, except *taken_attr_names*. + """ + base_attrs = [] + base_attr_map = {} # A dictionary of base attrs to their classes. + + # Traverse the MRO and collect attributes. + for base_cls in reversed(cls.__mro__[1:-1]): + for a in getattr(base_cls, "__attrs_attrs__", []): + if a.inherited or a.name in taken_attr_names: + continue + + a = a.evolve(inherited=True) + base_attrs.append(a) + base_attr_map[a.name] = base_cls + + # For each name, only keep the freshest definition i.e. the furthest at the + # back. base_attr_map is fine because it gets overwritten with every new + # instance. + filtered = [] + seen = set() + for a in reversed(base_attrs): + if a.name in seen: + continue + filtered.insert(0, a) + seen.add(a.name) + + return filtered, base_attr_map + + +def _collect_base_attrs_broken(cls, taken_attr_names): + """ + Collect attr.ibs from base classes of *cls*, except *taken_attr_names*. + + N.B. *taken_attr_names* will be mutated. + + Adhere to the old incorrect behavior. + + Notably it collects from the front and considers inherited attributes which + leads to the buggy behavior reported in #428. + """ + base_attrs = [] + base_attr_map = {} # A dictionary of base attrs to their classes. + + # Traverse the MRO and collect attributes. 
+    for base_cls in cls.__mro__[1:-1]:
+        for a in getattr(base_cls, "__attrs_attrs__", []):
+            if a.name in taken_attr_names:
+                continue
+
+            a = a.evolve(inherited=True)
+            taken_attr_names.add(a.name)
+            base_attrs.append(a)
+            base_attr_map[a.name] = base_cls
+
+    return base_attrs, base_attr_map
+
+
+def _transform_attrs(
+    cls, these, auto_attribs, kw_only, collect_by_mro, field_transformer
+):
+    """
+    Transform all `_CountingAttr`s on a class into `Attribute`s.
+
+    If *these* is passed, use that and don't look for them on the class.
+
+    If *collect_by_mro* is True, collect them in the correct MRO order,
+    otherwise use the old -- incorrect -- order. See #428.
+
+    Return an `_Attributes`.
+    """
+    cd = cls.__dict__
+    anns = _get_annotations(cls)
+
+    if these is not None:
+        ca_list = [(name, ca) for name, ca in these.items()]
+    elif auto_attribs is True:
+        ca_names = {
+            name
+            for name, attr in cd.items()
+            if isinstance(attr, _CountingAttr)
+        }
+        ca_list = []
+        annot_names = set()
+        for attr_name, type in anns.items():
+            if _is_class_var(type):
+                continue
+            annot_names.add(attr_name)
+            a = cd.get(attr_name, NOTHING)
+
+            if not isinstance(a, _CountingAttr):
+                if a is NOTHING:
+                    a = attrib()
+                else:
+                    a = attrib(default=a)
+            ca_list.append((attr_name, a))
+
+        unannotated = ca_names - annot_names
+        if len(unannotated) > 0:
+            raise UnannotatedAttributeError(
+                "The following `attr.ib`s lack a type annotation: "
+                + ", ".join(
+                    sorted(unannotated, key=lambda n: cd.get(n).counter)
+                )
+                + "."
+            )
+    else:
+        ca_list = sorted(
+            (
+                (name, attr)
+                for name, attr in cd.items()
+                if isinstance(attr, _CountingAttr)
+            ),
+            key=lambda e: e[1].counter,
+        )
+
+    own_attrs = [
+        Attribute.from_counting_attr(
+            name=attr_name, ca=ca, type=anns.get(attr_name)
+        )
+        for attr_name, ca in ca_list
+    ]
+
+    if collect_by_mro:
+        base_attrs, base_attr_map = _collect_base_attrs(
+            cls, {a.name for a in own_attrs}
+        )
+    else:
+        base_attrs, base_attr_map = _collect_base_attrs_broken(
+            cls, {a.name for a in own_attrs}
+        )
+
+    if kw_only:
+        own_attrs = [a.evolve(kw_only=True) for a in own_attrs]
+        base_attrs = [a.evolve(kw_only=True) for a in base_attrs]
+
+    attrs = base_attrs + own_attrs
+
+    # Mandatory vs non-mandatory attr order only matters when they are part of
+    # the __init__ signature and when they aren't kw_only (which are moved to
+    # the end and can be mandatory or non-mandatory in any order, as they will
+    # be specified as keyword args anyway). Check the order of those attrs:
+    had_default = False
+    for a in (a for a in attrs if a.init is not False and a.kw_only is False):
+        if had_default is True and a.default is NOTHING:
+            raise ValueError(
+                "No mandatory attributes allowed after an attribute with a "
+                f"default value or factory. Attribute in question: {a!r}"
+            )
+
+        if had_default is False and a.default is not NOTHING:
+            had_default = True
+
+    if field_transformer is not None:
+        attrs = field_transformer(cls, attrs)
+
+    # Resolve default field alias after executing field_transformer.
+    # This allows field_transformer to differentiate between explicit vs
+    # default aliases and supply their own defaults.
+    attrs = [
+        a.evolve(alias=_default_init_alias_for(a.name)) if not a.alias else a
+        for a in attrs
+    ]
+
+    # Create AttrsClass *after* applying the field_transformer since it may
+    # add or remove attributes!
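+    # (For context -- a hedged, hypothetical sketch, not attrs code: a
+    # field_transformer is any callable (cls, list_of_Attributes) -> list,
+    # e.g. one that removes attributes:
+    #
+    #     def drop_private(cls, fields):
+    #         return [f for f in fields if not f.name.startswith("_")]
+    #
+    # which is exactly why AttrsClass must only be built afterwards.)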
+ attr_names = [a.name for a in attrs] + AttrsClass = _make_attr_tuple_class(cls.__name__, attr_names) + + return _Attributes((AttrsClass(attrs), base_attrs, base_attr_map)) + + +def _frozen_setattrs(self, name, value): + """ + Attached to frozen classes as __setattr__. + """ + if isinstance(self, BaseException) and name in ( + "__cause__", + "__context__", + "__traceback__", + ): + BaseException.__setattr__(self, name, value) + return + + raise FrozenInstanceError() + + +def _frozen_delattrs(self, name): + """ + Attached to frozen classes as __delattr__. + """ + raise FrozenInstanceError() + + +class _ClassBuilder: + """ + Iteratively build *one* class. + """ + + __slots__ = ( + "_attr_names", + "_attrs", + "_base_attr_map", + "_base_names", + "_cache_hash", + "_cls", + "_cls_dict", + "_delete_attribs", + "_frozen", + "_has_pre_init", + "_has_post_init", + "_is_exc", + "_on_setattr", + "_slots", + "_weakref_slot", + "_wrote_own_setattr", + "_has_custom_setattr", + ) + + def __init__( + self, + cls, + these, + slots, + frozen, + weakref_slot, + getstate_setstate, + auto_attribs, + kw_only, + cache_hash, + is_exc, + collect_by_mro, + on_setattr, + has_custom_setattr, + field_transformer, + ): + attrs, base_attrs, base_map = _transform_attrs( + cls, + these, + auto_attribs, + kw_only, + collect_by_mro, + field_transformer, + ) + + self._cls = cls + self._cls_dict = dict(cls.__dict__) if slots else {} + self._attrs = attrs + self._base_names = {a.name for a in base_attrs} + self._base_attr_map = base_map + self._attr_names = tuple(a.name for a in attrs) + self._slots = slots + self._frozen = frozen + self._weakref_slot = weakref_slot + self._cache_hash = cache_hash + self._has_pre_init = bool(getattr(cls, "__attrs_pre_init__", False)) + self._has_post_init = bool(getattr(cls, "__attrs_post_init__", False)) + self._delete_attribs = not bool(these) + self._is_exc = is_exc + self._on_setattr = on_setattr + + self._has_custom_setattr = has_custom_setattr + self._wrote_own_setattr = False + + self._cls_dict["__attrs_attrs__"] = self._attrs + + if frozen: + self._cls_dict["__setattr__"] = _frozen_setattrs + self._cls_dict["__delattr__"] = _frozen_delattrs + + self._wrote_own_setattr = True + elif on_setattr in ( + _ng_default_on_setattr, + setters.validate, + setters.convert, + ): + has_validator = has_converter = False + for a in attrs: + if a.validator is not None: + has_validator = True + if a.converter is not None: + has_converter = True + + if has_validator and has_converter: + break + if ( + ( + on_setattr == _ng_default_on_setattr + and not (has_validator or has_converter) + ) + or (on_setattr == setters.validate and not has_validator) + or (on_setattr == setters.convert and not has_converter) + ): + # If class-level on_setattr is set to convert + validate, but + # there's no field to convert or validate, pretend like there's + # no on_setattr. + self._on_setattr = None + + if getstate_setstate: + ( + self._cls_dict["__getstate__"], + self._cls_dict["__setstate__"], + ) = self._make_getstate_setstate() + + def __repr__(self): + return f"<_ClassBuilder(cls={self._cls.__name__})>" + + if PY310: + import abc + + def build_class(self): + """ + Finalize class based on the accumulated configuration. + + Builder cannot be used after calling this method. + """ + if self._slots is True: + return self._create_slots_class() + + return self.abc.update_abstractmethods( + self._patch_original_class() + ) + + else: + + def build_class(self): + """ + Finalize class based on the accumulated configuration. 
+ + Builder cannot be used after calling this method. + """ + if self._slots is True: + return self._create_slots_class() + + return self._patch_original_class() + + def _patch_original_class(self): + """ + Apply accumulated methods and return the class. + """ + cls = self._cls + base_names = self._base_names + + # Clean class of attribute definitions (`attr.ib()`s). + if self._delete_attribs: + for name in self._attr_names: + if ( + name not in base_names + and getattr(cls, name, _sentinel) is not _sentinel + ): + try: + delattr(cls, name) + except AttributeError: + # This can happen if a base class defines a class + # variable and we want to set an attribute with the + # same name by using only a type annotation. + pass + + # Attach our dunder methods. + for name, value in self._cls_dict.items(): + setattr(cls, name, value) + + # If we've inherited an attrs __setattr__ and don't write our own, + # reset it to object's. + if not self._wrote_own_setattr and getattr( + cls, "__attrs_own_setattr__", False + ): + cls.__attrs_own_setattr__ = False + + if not self._has_custom_setattr: + cls.__setattr__ = _obj_setattr + + return cls + + def _create_slots_class(self): + """ + Build and return a new class with a `__slots__` attribute. + """ + cd = { + k: v + for k, v in self._cls_dict.items() + if k not in tuple(self._attr_names) + ("__dict__", "__weakref__") + } + + # If our class doesn't have its own implementation of __setattr__ + # (either from the user or by us), check the bases, if one of them has + # an attrs-made __setattr__, that needs to be reset. We don't walk the + # MRO because we only care about our immediate base classes. + # XXX: This can be confused by subclassing a slotted attrs class with + # XXX: a non-attrs class and subclass the resulting class with an attrs + # XXX: class. See `test_slotted_confused` for details. For now that's + # XXX: OK with us. + if not self._wrote_own_setattr: + cd["__attrs_own_setattr__"] = False + + if not self._has_custom_setattr: + for base_cls in self._cls.__bases__: + if base_cls.__dict__.get("__attrs_own_setattr__", False): + cd["__setattr__"] = _obj_setattr + break + + # Traverse the MRO to collect existing slots + # and check for an existing __weakref__. + existing_slots = dict() + weakref_inherited = False + for base_cls in self._cls.__mro__[1:-1]: + if base_cls.__dict__.get("__weakref__", None) is not None: + weakref_inherited = True + existing_slots.update( + { + name: getattr(base_cls, name) + for name in getattr(base_cls, "__slots__", []) + } + ) + + base_names = set(self._base_names) + + names = self._attr_names + if ( + self._weakref_slot + and "__weakref__" not in getattr(self._cls, "__slots__", ()) + and "__weakref__" not in names + and not weakref_inherited + ): + names += ("__weakref__",) + + # We only add the names of attributes that aren't inherited. + # Setting __slots__ to inherited attributes wastes memory. + slot_names = [name for name in names if name not in base_names] + # There are slots for attributes from current class + # that are defined in parent classes. 
+        # As their descriptors may be overridden by a child class,
+        # we collect them here and update the class dict
+        reused_slots = {
+            slot: slot_descriptor
+            for slot, slot_descriptor in existing_slots.items()
+            if slot in slot_names
+        }
+        slot_names = [name for name in slot_names if name not in reused_slots]
+        cd.update(reused_slots)
+        if self._cache_hash:
+            slot_names.append(_hash_cache_field)
+        cd["__slots__"] = tuple(slot_names)
+
+        cd["__qualname__"] = self._cls.__qualname__
+
+        # Create new class based on old class and our methods.
+        cls = type(self._cls)(self._cls.__name__, self._cls.__bases__, cd)
+
+        # The following is a fix for
+        # <https://github.com/python-attrs/attrs/issues/102>.
+        # If a method mentions `__class__` or uses the no-arg super(), the
+        # compiler will bake a reference to the class in the method itself
+        # as `method.__closure__`. Since we replace the class with a
+        # clone, we rewrite these references so it keeps working.
+        for item in cls.__dict__.values():
+            if isinstance(item, (classmethod, staticmethod)):
+                # Class- and staticmethods hide their functions inside.
+                # These might need to be rewritten as well.
+                closure_cells = getattr(item.__func__, "__closure__", None)
+            elif isinstance(item, property):
+                # Workaround for property `super()` shortcut (PY3-only).
+                # There is no universal way for other descriptors.
+                closure_cells = getattr(item.fget, "__closure__", None)
+            else:
+                closure_cells = getattr(item, "__closure__", None)
+
+            if not closure_cells:  # Catch None or the empty list.
+                continue
+            for cell in closure_cells:
+                try:
+                    match = cell.cell_contents is self._cls
+                except ValueError:  # ValueError: Cell is empty
+                    pass
+                else:
+                    if match:
+                        set_closure_cell(cell, cls)
+
+        return cls
+
+    def add_repr(self, ns):
+        self._cls_dict["__repr__"] = self._add_method_dunders(
+            _make_repr(self._attrs, ns, self._cls)
+        )
+        return self
+
+    def add_str(self):
+        repr = self._cls_dict.get("__repr__")
+        if repr is None:
+            raise ValueError(
+                "__str__ can only be generated if a __repr__ exists."
+            )
+
+        def __str__(self):
+            return self.__repr__()
+
+        self._cls_dict["__str__"] = self._add_method_dunders(__str__)
+        return self
+
+    def _make_getstate_setstate(self):
+        """
+        Create custom __setstate__ and __getstate__ methods.
+        """
+        # __weakref__ is not writable.
+        state_attr_names = tuple(
+            an for an in self._attr_names if an != "__weakref__"
+        )
+
+        def slots_getstate(self):
+            """
+            Automatically created by attrs.
+            """
+            return {name: getattr(self, name) for name in state_attr_names}
+
+        hash_caching_enabled = self._cache_hash
+
+        def slots_setstate(self, state):
+            """
+            Automatically created by attrs.
+            """
+            __bound_setattr = _obj_setattr.__get__(self)
+            if isinstance(state, tuple):
+                # Backward compatibility with attrs instances pickled with
+                # attrs versions before v22.2.0 which stored tuples.
+                for name, value in zip(state_attr_names, state):
+                    __bound_setattr(name, value)
+            else:
+                for name in state_attr_names:
+                    if name in state:
+                        __bound_setattr(name, state[name])
+
+            # The hash code cache is not included when the object is
+            # serialized, but it still needs to be initialized to None to
+            # indicate that the first call to __hash__ should be a cache
+            # miss.
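+            #
+            # For instance (an illustrative sketch, not from the attrs
+            # sources):
+            #
+            #     import attr, pickle
+            #
+            #     @attr.s(frozen=True, slots=True, cache_hash=True)
+            #     class P:
+            #         x = attr.ib()
+            #
+            #     p = pickle.loads(pickle.dumps(P(1)))
+            #     hash(p)   # cache starts out None, so this recomputes once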
+            if hash_caching_enabled:
+                __bound_setattr(_hash_cache_field, None)
+
+        return slots_getstate, slots_setstate
+
+    def make_unhashable(self):
+        self._cls_dict["__hash__"] = None
+        return self
+
+    def add_hash(self):
+        self._cls_dict["__hash__"] = self._add_method_dunders(
+            _make_hash(
+                self._cls,
+                self._attrs,
+                frozen=self._frozen,
+                cache_hash=self._cache_hash,
+            )
+        )
+
+        return self
+
+    def add_init(self):
+        self._cls_dict["__init__"] = self._add_method_dunders(
+            _make_init(
+                self._cls,
+                self._attrs,
+                self._has_pre_init,
+                self._has_post_init,
+                self._frozen,
+                self._slots,
+                self._cache_hash,
+                self._base_attr_map,
+                self._is_exc,
+                self._on_setattr,
+                attrs_init=False,
+            )
+        )
+
+        return self
+
+    def add_match_args(self):
+        self._cls_dict["__match_args__"] = tuple(
+            field.name
+            for field in self._attrs
+            if field.init and not field.kw_only
+        )
+
+    def add_attrs_init(self):
+        self._cls_dict["__attrs_init__"] = self._add_method_dunders(
+            _make_init(
+                self._cls,
+                self._attrs,
+                self._has_pre_init,
+                self._has_post_init,
+                self._frozen,
+                self._slots,
+                self._cache_hash,
+                self._base_attr_map,
+                self._is_exc,
+                self._on_setattr,
+                attrs_init=True,
+            )
+        )
+
+        return self
+
+    def add_eq(self):
+        cd = self._cls_dict
+
+        cd["__eq__"] = self._add_method_dunders(
+            _make_eq(self._cls, self._attrs)
+        )
+        cd["__ne__"] = self._add_method_dunders(_make_ne())
+
+        return self
+
+    def add_order(self):
+        cd = self._cls_dict
+
+        cd["__lt__"], cd["__le__"], cd["__gt__"], cd["__ge__"] = (
+            self._add_method_dunders(meth)
+            for meth in _make_order(self._cls, self._attrs)
+        )
+
+        return self
+
+    def add_setattr(self):
+        if self._frozen:
+            return self
+
+        sa_attrs = {}
+        for a in self._attrs:
+            on_setattr = a.on_setattr or self._on_setattr
+            if on_setattr and on_setattr is not setters.NO_OP:
+                sa_attrs[a.name] = a, on_setattr
+
+        if not sa_attrs:
+            return self
+
+        if self._has_custom_setattr:
+            # We need to write a __setattr__ but there already is one!
+            raise ValueError(
+                "Can't combine custom __setattr__ with on_setattr hooks."
+            )
+
+        # docstring comes from _add_method_dunders
+        def __setattr__(self, name, val):
+            try:
+                a, hook = sa_attrs[name]
+            except KeyError:
+                nval = val
+            else:
+                nval = hook(self, a, val)
+
+            _obj_setattr(self, name, nval)
+
+        self._cls_dict["__attrs_own_setattr__"] = True
+        self._cls_dict["__setattr__"] = self._add_method_dunders(__setattr__)
+        self._wrote_own_setattr = True
+
+        return self
+
+    def _add_method_dunders(self, method):
+        """
+        Add __module__ and __qualname__ to a *method* if possible.
+        """
+        try:
+            method.__module__ = self._cls.__module__
+        except AttributeError:
+            pass
+
+        try:
+            method.__qualname__ = ".".join(
+                (self._cls.__qualname__, method.__name__)
+            )
+        except AttributeError:
+            pass
+
+        try:
+            method.__doc__ = (
+                "Method generated by attrs for class "
+                f"{self._cls.__qualname__}."
+            )
+        except AttributeError:
+            pass
+
+        return method
+
+
+def _determine_attrs_eq_order(cmp, eq, order, default_eq):
+    """
+    Validate the combination of *cmp*, *eq*, and *order*. Derive the effective
+    values of eq and order. If *eq* is None, set it to *default_eq*.
+    """
+    if cmp is not None and any((eq is not None, order is not None)):
+        raise ValueError("Don't mix `cmp` with `eq` and `order`.")
+
+    # cmp takes precedence due to bw-compatibility.
+    if cmp is not None:
+        return cmp, cmp
+
+    # If left None, equality is set to the specified default and ordering
+    # mirrors equality.
+    if eq is None:
+        eq = default_eq
+
+    if order is None:
+        order = eq
+
+    if eq is False and order is True:
+        raise ValueError("`order` can only be True if `eq` is True too.")
+
+    return eq, order
+
+
+def _determine_attrib_eq_order(cmp, eq, order, default_eq):
+    """
+    Validate the combination of *cmp*, *eq*, and *order*. Derive the effective
+    values of eq and order. If *eq* is None, set it to *default_eq*.
+    """
+    if cmp is not None and any((eq is not None, order is not None)):
+        raise ValueError("Don't mix `cmp` with `eq` and `order`.")
+
+    def decide_callable_or_boolean(value):
+        """
+        Decide whether a key function is used.
+        """
+        if callable(value):
+            value, key = True, value
+        else:
+            key = None
+        return value, key
+
+    # cmp takes precedence due to bw-compatibility.
+    if cmp is not None:
+        cmp, cmp_key = decide_callable_or_boolean(cmp)
+        return cmp, cmp_key, cmp, cmp_key
+
+    # If left None, equality is set to the specified default and ordering
+    # mirrors equality.
+    if eq is None:
+        eq, eq_key = default_eq, None
+    else:
+        eq, eq_key = decide_callable_or_boolean(eq)
+
+    if order is None:
+        order, order_key = eq, eq_key
+    else:
+        order, order_key = decide_callable_or_boolean(order)
+
+    if eq is False and order is True:
+        raise ValueError("`order` can only be True if `eq` is True too.")
+
+    return eq, eq_key, order, order_key
+
+
+def _determine_whether_to_implement(
+    cls, flag, auto_detect, dunders, default=True
+):
+    """
+    Check whether we should implement a set of methods for *cls*.
+
+    *flag* is the argument passed into @attr.s like 'init', *auto_detect* the
+    same as passed into @attr.s and *dunders* is a tuple of attribute names
+    whose presence signals that the user has implemented it themselves.
+
+    Return *default* if no reason for either for or against is found.
+    """
+    if flag is True or flag is False:
+        return flag
+
+    if flag is None and auto_detect is False:
+        return default
+
+    # Logically, flag is None and auto_detect is True here.
+    for dunder in dunders:
+        if _has_own_attribute(cls, dunder):
+            return False
+
+    return default
+
+
+def attrs(
+    maybe_cls=None,
+    these=None,
+    repr_ns=None,
+    repr=None,
+    cmp=None,
+    hash=None,
+    init=None,
+    slots=False,
+    frozen=False,
+    weakref_slot=True,
+    str=False,
+    auto_attribs=False,
+    kw_only=False,
+    cache_hash=False,
+    auto_exc=False,
+    eq=None,
+    order=None,
+    auto_detect=False,
+    collect_by_mro=False,
+    getstate_setstate=None,
+    on_setattr=None,
+    field_transformer=None,
+    match_args=True,
+    unsafe_hash=None,
+):
+    r"""
+    A class decorator that adds :term:`dunder methods` according to the
+    specified attributes using `attr.ib` or the *these* argument.
+
+    Please consider using `attrs.define` / `attrs.frozen` in new code
+    (``attr.s`` will *never* go away, though).
+
+    :param these: A dictionary of name to `attr.ib` mappings. This is
+        useful to avoid the definition of your attributes within the class body
+        because you can't (e.g. if you want to add ``__repr__`` methods to
+        Django models) or don't want to.
+
+        If *these* is not ``None``, *attrs* will *not* search the class body
+        for attributes and will *not* remove any attributes from it.
+
+        The order is deduced from the order of the attributes inside *these*.
+
+    :type these: `dict` of `str` to `attr.ib`
+
+    :param str repr_ns: When using nested classes, there's no way in Python 2
+        to automatically detect that. Therefore it's possible to set the
+        namespace explicitly for a more meaningful ``repr`` output.
+    :param bool auto_detect: Instead of setting the *init*, *repr*, *eq*,
+        *order*, and *hash* arguments explicitly, assume they are set to
+        ``True`` **unless any** of the involved methods for one of the
+        arguments is implemented in the *current* class (i.e. it is *not*
+        inherited from some base class).
+
+        So for example by implementing ``__eq__`` on a class yourself,
+        *attrs* will deduce ``eq=False`` and will create *neither*
+        ``__eq__`` *nor* ``__ne__`` (but Python classes come with a sensible
+        ``__ne__`` by default, so it *should* be enough to only implement
+        ``__eq__`` in most cases).
+
+        .. warning::
+
+           If you prevent *attrs* from creating the ordering methods for you
+           (``order=False``, e.g. by implementing ``__le__``), it becomes
+           *your* responsibility to make sure its ordering is sound. The best
+           way is to use the `functools.total_ordering` decorator.
+
+
+        Passing ``True`` or ``False`` to *init*, *repr*, *eq*, *order*,
+        *cmp*, or *hash* overrides whatever *auto_detect* would determine.
+
+    :param bool repr: Create a ``__repr__`` method with a human readable
+        representation of *attrs* attributes.
+    :param bool str: Create a ``__str__`` method that is identical to
+        ``__repr__``. This is usually not necessary except for
+        `Exception`\ s.
+    :param Optional[bool] eq: If ``True`` or ``None`` (default), add ``__eq__``
+        and ``__ne__`` methods that check two instances for equality.
+
+        They compare the instances as if they were tuples of their *attrs*
+        attributes if and only if the types of both classes are *identical*!
+    :param Optional[bool] order: If ``True``, add ``__lt__``, ``__le__``,
+        ``__gt__``, and ``__ge__`` methods that behave like *eq* above and
+        allow instances to be ordered. If ``None`` (default) mirror value of
+        *eq*.
+    :param Optional[bool] cmp: Setting *cmp* is equivalent to setting *eq*
+        and *order* to the same value. Must not be mixed with *eq* or *order*.
+    :param Optional[bool] unsafe_hash: If ``None`` (default), the ``__hash__``
+        method is generated according to how *eq* and *frozen* are set.
+
+        1. If *both* are True, *attrs* will generate a ``__hash__`` for you.
+        2. If *eq* is True and *frozen* is False, ``__hash__`` will be set to
+           None, marking it unhashable (which it is).
+        3. If *eq* is False, ``__hash__`` will be left untouched meaning the
+           ``__hash__`` method of the base class will be used (if base class is
+           ``object``, this means it will fall back to id-based hashing).
+
+        Although not recommended, you can decide for yourself and force
+        *attrs* to create one (e.g. if the class is immutable even though you
+        didn't freeze it programmatically) by passing ``True`` or not. Both of
+        these cases are rather special and should be used carefully.
+
+        See our documentation on `hashing`, Python's documentation on
+        `object.__hash__`, and the `GitHub issue that led to the default \
+        behavior <https://github.com/python-attrs/attrs/issues/136>`_ for more
+        details.
+    :param Optional[bool] hash: Alias for *unsafe_hash*. *unsafe_hash* takes
+        precedence.
+    :param bool init: Create a ``__init__`` method that initializes the
+        *attrs* attributes. Leading underscores are stripped for the argument
+        name. If a ``__attrs_pre_init__`` method exists on the class, it will
+        be called before the class is initialized. If a ``__attrs_post_init__``
+        method exists on the class, it will be called after the class is fully
+        initialized.
+
+        If ``init`` is ``False``, an ``__attrs_init__`` method will be
+        injected instead. This allows you to define a custom ``__init__``
+        method that can do pre-init work such as ``super().__init__()``,
+        and then call ``__attrs_init__()`` and ``__attrs_post_init__()``.
+    :param bool slots: Create a :term:`slotted class <slotted classes>` that's
+        more memory-efficient. Slotted classes are generally superior to the
+        default dict classes, but have some gotchas you should know about, so
+        we encourage you to read the :term:`glossary entry <slotted classes>`.
+    :param bool frozen: Make instances immutable after initialization. If
+        someone attempts to modify a frozen instance,
+        `attrs.exceptions.FrozenInstanceError` is raised.
+
+        .. note::
+
+            1. This is achieved by installing a custom ``__setattr__`` method
+               on your class, so you can't implement your own.
+
+            2. True immutability is impossible in Python.
+
+            3. This *does* have a minor runtime performance `impact
+               <how-frozen>` when initializing new instances. In other words:
+               ``__init__`` is slightly slower with ``frozen=True``.
+
+            4. If a class is frozen, you cannot modify ``self`` in
+               ``__attrs_post_init__`` or a self-written ``__init__``. You can
+               circumvent that limitation by using
+               ``object.__setattr__(self, "attribute_name", value)``.
+
+            5. Subclasses of a frozen class are frozen too.
+
+    :param bool weakref_slot: Make instances weak-referenceable. This has no
+        effect unless ``slots`` is also enabled.
+    :param bool auto_attribs: If ``True``, collect :pep:`526`-annotated
+        attributes from the class body.
+
+        In this case, you **must** annotate every field. If *attrs*
+        encounters a field that is set to an `attr.ib` but lacks a type
+        annotation, an `attr.exceptions.UnannotatedAttributeError` is
+        raised. Use ``field_name: typing.Any = attr.ib(...)`` if you don't
+        want to set a type.
+
+        If you assign a value to those attributes (e.g. ``x: int = 42``), that
+        value becomes the default value like if it were passed using
+        ``attr.ib(default=42)``. Passing an instance of `attrs.Factory` also
+        works as expected in most cases (see warning below).
+
+        Attributes annotated as `typing.ClassVar`, and attributes that are
+        neither annotated nor set to an `attr.ib` are **ignored**.
+
+        .. warning::
+           For features that use the attribute name to create decorators (e.g.
+           :ref:`validators <validators>`), you still *must* assign `attr.ib`
+           to them. Otherwise Python will either not find the name or try to
+           use the default value to call e.g. ``validator`` on it.
+
+           These errors can be quite confusing and probably the most common bug
+           report on our bug tracker.
+
+    :param bool kw_only: Make all attributes keyword-only
+        in the generated ``__init__`` (if ``init`` is ``False``, this
+        parameter is ignored).
+    :param bool cache_hash: Ensure that the object's hash code is computed
+        only once and stored on the object. If this is set to ``True``,
+        hashing must be either explicitly or implicitly enabled for this
+        class. If the hash code is cached, avoid any reassignments of
+        fields involved in hash code computation or mutations of the objects
+        those fields point to after object creation. If such changes occur,
+        the behavior of the object's hash code is undefined.
+    :param bool auto_exc: If the class subclasses `BaseException`
+        (which implicitly includes any subclass of any exception), the
+        following happens to behave like a well-behaved Python exception
+        class:
+
+        - the values for *eq*, *order*, and *hash* are ignored and the
+          instances compare and hash by the instance's ids (N.B. *attrs* will
+          *not* remove existing implementations of ``__hash__`` or the equality
+          methods. It just won't add own ones.),
+        - all attributes that are either passed into ``__init__`` or have a
+          default value are additionally available as a tuple in the ``args``
+          attribute,
+        - the value of *str* is ignored leaving ``__str__`` to base classes.
+    :param bool collect_by_mro: Setting this to `True` fixes the way *attrs*
+        collects attributes from base classes. The default behavior is
+        incorrect in certain cases of multiple inheritance. It should be on by
+        default but is kept off for backward-compatibility.
+
+        See issue `#428 <https://github.com/python-attrs/attrs/issues/428>`_
+        for more details.
+
+    :param Optional[bool] getstate_setstate:
+        .. note::
+            This is usually only interesting for slotted classes and you should
+            probably just set *auto_detect* to `True`.
+
+        If `True`, ``__getstate__`` and
+        ``__setstate__`` are generated and attached to the class. This is
+        necessary for slotted classes to be pickleable. If left `None`, it's
+        `True` by default for slotted classes and ``False`` for dict classes.
+
+        If *auto_detect* is `True`, and *getstate_setstate* is left `None`,
+        and **either** ``__getstate__`` or ``__setstate__`` is detected directly
+        on the class (i.e. not inherited), it is set to `False` (this is usually
+        what you want).
+
+    :param on_setattr: A callable that is run whenever the user attempts to set
+        an attribute (either by assignment like ``i.x = 42`` or by using
+        `setattr` like ``setattr(i, "x", 42)``). It receives the same arguments
+        as validators: the instance, the attribute that is being modified, and
+        the new value.
+
+        If no exception is raised, the attribute is set to the return value of
+        the callable.
+
+        If a list of callables is passed, they're automatically wrapped in an
+        `attrs.setters.pipe`.
+    :type on_setattr: `callable`, or a list of callables, or `None`, or
+        `attrs.setters.NO_OP`
+
+    :param Optional[callable] field_transformer:
+        A function that is called with the original class object and all
+        fields right before *attrs* finalizes the class. You can use
+        this, e.g., to automatically add converters or validators to
+        fields based on their types. See `transform-fields` for more details.
+
+    :param bool match_args:
+        If `True` (default), set ``__match_args__`` on the class to support
+        :pep:`634` (Structural Pattern Matching). It is a tuple of all
+        non-keyword-only ``__init__`` parameter names on Python 3.10 and later.
+        Ignored on older Python versions.
+
+    .. versionadded:: 16.0.0 *slots*
+    .. versionadded:: 16.1.0 *frozen*
+    .. versionadded:: 16.3.0 *str*
+    .. versionadded:: 16.3.0 Support for ``__attrs_post_init__``.
+    .. versionchanged:: 17.1.0
+       *hash* supports ``None`` as value which is also the default now.
+    .. versionadded:: 17.3.0 *auto_attribs*
+    .. versionchanged:: 18.1.0
+       If *these* is passed, no attributes are deleted from the class body.
+    .. versionchanged:: 18.1.0 If *these* is ordered, the order is retained.
+    .. versionadded:: 18.2.0 *weakref_slot*
+    .. deprecated:: 18.2.0
+       ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now raise a
+       `DeprecationWarning` if the classes compared are subclasses of
+       each other. ``__eq__`` and ``__ne__`` never tried to compare subclasses
+       to each other.
+    .. versionchanged:: 19.2.0
+       ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now do not consider
+       subclasses comparable anymore.
+    .. versionadded:: 18.2.0 *kw_only*
+    .. versionadded:: 18.2.0 *cache_hash*
+    .. versionadded:: 19.1.0 *auto_exc*
+    ..
deprecated:: 19.2.0 *cmp* Removal on or after 2021-06-01. + .. versionadded:: 19.2.0 *eq* and *order* + .. versionadded:: 20.1.0 *auto_detect* + .. versionadded:: 20.1.0 *collect_by_mro* + .. versionadded:: 20.1.0 *getstate_setstate* + .. versionadded:: 20.1.0 *on_setattr* + .. versionadded:: 20.3.0 *field_transformer* + .. versionchanged:: 21.1.0 + ``init=False`` injects ``__attrs_init__`` + .. versionchanged:: 21.1.0 Support for ``__attrs_pre_init__`` + .. versionchanged:: 21.1.0 *cmp* undeprecated + .. versionadded:: 21.3.0 *match_args* + .. versionadded:: 22.2.0 + *unsafe_hash* as an alias for *hash* (for :pep:`681` compliance). + """ + eq_, order_ = _determine_attrs_eq_order(cmp, eq, order, None) + + # unsafe_hash takes precedence due to PEP 681. + if unsafe_hash is not None: + hash = unsafe_hash + + if isinstance(on_setattr, (list, tuple)): + on_setattr = setters.pipe(*on_setattr) + + def wrap(cls): + is_frozen = frozen or _has_frozen_base_class(cls) + is_exc = auto_exc is True and issubclass(cls, BaseException) + has_own_setattr = auto_detect and _has_own_attribute( + cls, "__setattr__" + ) + + if has_own_setattr and is_frozen: + raise ValueError("Can't freeze a class with a custom __setattr__.") + + builder = _ClassBuilder( + cls, + these, + slots, + is_frozen, + weakref_slot, + _determine_whether_to_implement( + cls, + getstate_setstate, + auto_detect, + ("__getstate__", "__setstate__"), + default=slots, + ), + auto_attribs, + kw_only, + cache_hash, + is_exc, + collect_by_mro, + on_setattr, + has_own_setattr, + field_transformer, + ) + if _determine_whether_to_implement( + cls, repr, auto_detect, ("__repr__",) + ): + builder.add_repr(repr_ns) + if str is True: + builder.add_str() + + eq = _determine_whether_to_implement( + cls, eq_, auto_detect, ("__eq__", "__ne__") + ) + if not is_exc and eq is True: + builder.add_eq() + if not is_exc and _determine_whether_to_implement( + cls, order_, auto_detect, ("__lt__", "__le__", "__gt__", "__ge__") + ): + builder.add_order() + + builder.add_setattr() + + nonlocal hash + if ( + hash is None + and auto_detect is True + and _has_own_attribute(cls, "__hash__") + ): + hash = False + + if hash is not True and hash is not False and hash is not None: + # Can't use `hash in` because 1 == True for example. + raise TypeError( + "Invalid value for hash. Must be True, False, or None." + ) + elif hash is False or (hash is None and eq is False) or is_exc: + # Don't do anything. Should fall back to __object__'s __hash__ + # which is by id. + if cache_hash: + raise TypeError( + "Invalid value for cache_hash. To use hash caching," + " hashing must be either explicitly or implicitly " + "enabled." + ) + elif hash is True or ( + hash is None and eq is True and is_frozen is True + ): + # Build a __hash__ if told so, or if it's safe. + builder.add_hash() + else: + # Raise TypeError on attempts to hash. + if cache_hash: + raise TypeError( + "Invalid value for cache_hash. To use hash caching," + " hashing must be either explicitly or implicitly " + "enabled." + ) + builder.make_unhashable() + + if _determine_whether_to_implement( + cls, init, auto_detect, ("__init__",) + ): + builder.add_init() + else: + builder.add_attrs_init() + if cache_hash: + raise TypeError( + "Invalid value for cache_hash. To use hash caching," + " init must be True." + ) + + if ( + PY310 + and match_args + and not _has_own_attribute(cls, "__match_args__") + ): + builder.add_match_args() + + return builder.build_class() + + # maybe_cls's type depends on the usage of the decorator. 
It's a class
+    # if it's used as `@attrs` but ``None`` if used as `@attrs()`.
+    if maybe_cls is None:
+        return wrap
+    else:
+        return wrap(maybe_cls)
+
+
+_attrs = attrs
+"""
+Internal alias so we can use it in functions that take an argument called
+*attrs*.
+"""
+
+
+def _has_frozen_base_class(cls):
+    """
+    Check whether *cls* has a frozen ancestor by looking at its
+    __setattr__.
+    """
+    return cls.__setattr__ is _frozen_setattrs
+
+
+def _generate_unique_filename(cls, func_name):
+    """
+    Create a "filename" suitable for a function being generated.
+    """
+    return (
+        f"<attrs generated {func_name} {cls.__module__}."
+        f"{getattr(cls, '__qualname__', cls.__name__)}>"
+    )
+
+
+def _make_hash(cls, attrs, frozen, cache_hash):
+    attrs = tuple(
+        a for a in attrs if a.hash is True or (a.hash is None and a.eq is True)
+    )
+
+    tab = "        "
+
+    unique_filename = _generate_unique_filename(cls, "hash")
+    type_hash = hash(unique_filename)
+    # If eq is custom generated, we need to include the functions in globs
+    globs = {}
+
+    hash_def = "def __hash__(self"
+    hash_func = "hash(("
+    closing_braces = "))"
+    if not cache_hash:
+        hash_def += "):"
+    else:
+        hash_def += ", *"
+
+        hash_def += (
+            ", _cache_wrapper="
+            + "__import__('attr._make')._make._CacheHashWrapper):"
+        )
+        hash_func = "_cache_wrapper(" + hash_func
+        closing_braces += ")"
+
+    method_lines = [hash_def]
+
+    def append_hash_computation_lines(prefix, indent):
+        """
+        Generate the code for actually computing the hash code.
+        Below this will either be returned directly or used to compute
+        a value which is then cached, depending on the value of cache_hash
+        """
+
+        method_lines.extend(
+            [
+                indent + prefix + hash_func,
+                indent + f"        {type_hash},",
+            ]
+        )
+
+        for a in attrs:
+            if a.eq_key:
+                cmp_name = f"_{a.name}_key"
+                globs[cmp_name] = a.eq_key
+                method_lines.append(
+                    indent + f"        {cmp_name}(self.{a.name}),"
+                )
+            else:
+                method_lines.append(indent + f"        self.{a.name},")
+
+        method_lines.append(indent + "    " + closing_braces)
+
+    if cache_hash:
+        method_lines.append(tab + f"if self.{_hash_cache_field} is None:")
+        if frozen:
+            append_hash_computation_lines(
+                f"object.__setattr__(self, '{_hash_cache_field}', ", tab * 2
+            )
+            method_lines.append(tab * 2 + ")")  # close __setattr__
+        else:
+            append_hash_computation_lines(
+                f"self.{_hash_cache_field} = ", tab * 2
+            )
+        method_lines.append(tab + f"return self.{_hash_cache_field}")
+    else:
+        append_hash_computation_lines("return ", tab)
+
+    script = "\n".join(method_lines)
+    return _make_method("__hash__", script, unique_filename, globs)
+
+
+def _add_hash(cls, attrs):
+    """
+    Add a hash method to *cls*.
+    """
+    cls.__hash__ = _make_hash(cls, attrs, frozen=False, cache_hash=False)
+    return cls
+
+
+def _make_ne():
+    """
+    Create __ne__ method.
+    """
+
+    def __ne__(self, other):
+        """
+        Check equality and either forward a NotImplemented or
+        return the result negated.
+        """
+        result = self.__eq__(other)
+        if result is NotImplemented:
+            return NotImplemented
+
+        return not result
+
+    return __ne__
+
+
+def _make_eq(cls, attrs):
+    """
+    Create __eq__ method for *cls* with *attrs*.
+    """
+    attrs = [a for a in attrs if a.eq]
+
+    unique_filename = _generate_unique_filename(cls, "eq")
+    lines = [
+        "def __eq__(self, other):",
+        "    if other.__class__ is not self.__class__:",
+        "        return NotImplemented",
+    ]
+
+    # We can't just do a big self.x = other.x and... clause due to
+    # irregularities like nan == nan is false but (nan,) == (nan,) is true.
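+    #
+    # Concretely (illustrative):
+    #
+    #     x = float("nan")
+    #     x == x         # False -- NaN is never equal to itself
+    #     (x,) == (x,)   # True  -- tuple comparison short-circuits on identity
+    #
+    # so the generated __eq__ compares tuples of fields, matching the
+    # behavior of tuples and of dataclasses.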
+ globs = {} + if attrs: + lines.append(" return (") + others = [" ) == ("] + for a in attrs: + if a.eq_key: + cmp_name = f"_{a.name}_key" + # Add the key function to the global namespace + # of the evaluated function. + globs[cmp_name] = a.eq_key + lines.append(f" {cmp_name}(self.{a.name}),") + others.append(f" {cmp_name}(other.{a.name}),") + else: + lines.append(f" self.{a.name},") + others.append(f" other.{a.name},") + + lines += others + [" )"] + else: + lines.append(" return True") + + script = "\n".join(lines) + + return _make_method("__eq__", script, unique_filename, globs) + + +def _make_order(cls, attrs): + """ + Create ordering methods for *cls* with *attrs*. + """ + attrs = [a for a in attrs if a.order] + + def attrs_to_tuple(obj): + """ + Save us some typing. + """ + return tuple( + key(value) if key else value + for value, key in ( + (getattr(obj, a.name), a.order_key) for a in attrs + ) + ) + + def __lt__(self, other): + """ + Automatically created by attrs. + """ + if other.__class__ is self.__class__: + return attrs_to_tuple(self) < attrs_to_tuple(other) + + return NotImplemented + + def __le__(self, other): + """ + Automatically created by attrs. + """ + if other.__class__ is self.__class__: + return attrs_to_tuple(self) <= attrs_to_tuple(other) + + return NotImplemented + + def __gt__(self, other): + """ + Automatically created by attrs. + """ + if other.__class__ is self.__class__: + return attrs_to_tuple(self) > attrs_to_tuple(other) + + return NotImplemented + + def __ge__(self, other): + """ + Automatically created by attrs. + """ + if other.__class__ is self.__class__: + return attrs_to_tuple(self) >= attrs_to_tuple(other) + + return NotImplemented + + return __lt__, __le__, __gt__, __ge__ + + +def _add_eq(cls, attrs=None): + """ + Add equality methods to *cls* with *attrs*. + """ + if attrs is None: + attrs = cls.__attrs_attrs__ + + cls.__eq__ = _make_eq(cls, attrs) + cls.__ne__ = _make_ne() + + return cls + + +def _make_repr(attrs, ns, cls): + unique_filename = _generate_unique_filename(cls, "repr") + # Figure out which attributes to include, and which function to use to + # format them. The a.repr value can be either bool or a custom + # callable. + attr_names_with_reprs = tuple( + (a.name, (repr if a.repr is True else a.repr), a.init) + for a in attrs + if a.repr is not False + ) + globs = { + name + "_repr": r for name, r, _ in attr_names_with_reprs if r != repr + } + globs["_compat"] = _compat + globs["AttributeError"] = AttributeError + globs["NOTHING"] = NOTHING + attribute_fragments = [] + for name, r, i in attr_names_with_reprs: + accessor = ( + "self." 
+ name if i else 'getattr(self, "' + name + '", NOTHING)'
+        )
+        fragment = (
+            "%s={%s!r}" % (name, accessor)
+            if r == repr
+            else "%s={%s_repr(%s)}" % (name, name, accessor)
+        )
+        attribute_fragments.append(fragment)
+    repr_fragment = ", ".join(attribute_fragments)
+
+    if ns is None:
+        cls_name_fragment = '{self.__class__.__qualname__.rsplit(">.", 1)[-1]}'
+    else:
+        cls_name_fragment = ns + ".{self.__class__.__name__}"
+
+    lines = [
+        "def __repr__(self):",
+        "    try:",
+        "        already_repring = _compat.repr_context.already_repring",
+        "    except AttributeError:",
+        "        already_repring = {id(self),}",
+        "        _compat.repr_context.already_repring = already_repring",
+        "    else:",
+        "        if id(self) in already_repring:",
+        "            return '...'",
+        "        else:",
+        "            already_repring.add(id(self))",
+        "    try:",
+        f"        return f'{cls_name_fragment}({repr_fragment})'",
+        "    finally:",
+        "        already_repring.remove(id(self))",
+    ]
+
+    return _make_method(
+        "__repr__", "\n".join(lines), unique_filename, globs=globs
+    )
+
+
+def _add_repr(cls, ns=None, attrs=None):
+    """
+    Add a repr method to *cls*.
+    """
+    if attrs is None:
+        attrs = cls.__attrs_attrs__
+
+    cls.__repr__ = _make_repr(attrs, ns, cls)
+    return cls
+
+
+def fields(cls):
+    """
+    Return the tuple of *attrs* attributes for a class.
+
+    The tuple also allows accessing the fields by their names (see below for
+    examples).
+
+    :param type cls: Class to introspect.
+
+    :raise TypeError: If *cls* is not a class.
+    :raise attrs.exceptions.NotAnAttrsClassError: If *cls* is not an *attrs*
+        class.
+
+    :rtype: tuple (with name accessors) of `attrs.Attribute`
+
+    .. versionchanged:: 16.2.0 Returned tuple allows accessing the fields
+       by name.
+    .. versionchanged:: 23.1.0 Add support for generic classes.
+    """
+    generic_base = get_generic_base(cls)
+
+    if generic_base is None and not isinstance(cls, type):
+        raise TypeError("Passed object must be a class.")
+
+    attrs = getattr(cls, "__attrs_attrs__", None)
+
+    if attrs is None:
+        if generic_base is not None:
+            attrs = getattr(generic_base, "__attrs_attrs__", None)
+            if attrs is not None:
+                # Even though this is global state, stick it on here to speed
+                # it up. We rely on `cls` being cached for this to be
+                # efficient.
+                cls.__attrs_attrs__ = attrs
+                return attrs
+        raise NotAnAttrsClassError(f"{cls!r} is not an attrs-decorated class.")
+
+    return attrs
+
+
+def fields_dict(cls):
+    """
+    Return an ordered dictionary of *attrs* attributes for a class, whose
+    keys are the attribute names.
+
+    :param type cls: Class to introspect.
+
+    :raise TypeError: If *cls* is not a class.
+    :raise attrs.exceptions.NotAnAttrsClassError: If *cls* is not an *attrs*
+        class.
+
+    :rtype: dict
+
+    .. versionadded:: 18.1.0
+    """
+    if not isinstance(cls, type):
+        raise TypeError("Passed object must be a class.")
+    attrs = getattr(cls, "__attrs_attrs__", None)
+    if attrs is None:
+        raise NotAnAttrsClassError(f"{cls!r} is not an attrs-decorated class.")
+    return {a.name: a for a in attrs}
+
+
+def validate(inst):
+    """
+    Validate all attributes on *inst* that have a validator.
+
+    Lets all exceptions propagate.
+
+    :param inst: Instance of a class with *attrs* attributes.
+    """
+    if _config._run_validators is False:
+        return
+
+    for a in fields(inst.__class__):
+        v = a.validator
+        if v is not None:
+            v(inst, a, getattr(inst, a.name))
+
+
+def _is_slot_cls(cls):
+    return "__slots__" in cls.__dict__
+
+
+def _is_slot_attr(a_name, base_attr_map):
+    """
+    Check if the attribute name comes from a slot class.
+ """ + return a_name in base_attr_map and _is_slot_cls(base_attr_map[a_name]) + + +def _make_init( + cls, + attrs, + pre_init, + post_init, + frozen, + slots, + cache_hash, + base_attr_map, + is_exc, + cls_on_setattr, + attrs_init, +): + has_cls_on_setattr = ( + cls_on_setattr is not None and cls_on_setattr is not setters.NO_OP + ) + + if frozen and has_cls_on_setattr: + raise ValueError("Frozen classes can't use on_setattr.") + + needs_cached_setattr = cache_hash or frozen + filtered_attrs = [] + attr_dict = {} + for a in attrs: + if not a.init and a.default is NOTHING: + continue + + filtered_attrs.append(a) + attr_dict[a.name] = a + + if a.on_setattr is not None: + if frozen is True: + raise ValueError("Frozen classes can't use on_setattr.") + + needs_cached_setattr = True + elif has_cls_on_setattr and a.on_setattr is not setters.NO_OP: + needs_cached_setattr = True + + unique_filename = _generate_unique_filename(cls, "init") + + script, globs, annotations = _attrs_to_init_script( + filtered_attrs, + frozen, + slots, + pre_init, + post_init, + cache_hash, + base_attr_map, + is_exc, + needs_cached_setattr, + has_cls_on_setattr, + attrs_init, + ) + if cls.__module__ in sys.modules: + # This makes typing.get_type_hints(CLS.__init__) resolve string types. + globs.update(sys.modules[cls.__module__].__dict__) + + globs.update({"NOTHING": NOTHING, "attr_dict": attr_dict}) + + if needs_cached_setattr: + # Save the lookup overhead in __init__ if we need to circumvent + # setattr hooks. + globs["_cached_setattr_get"] = _obj_setattr.__get__ + + init = _make_method( + "__attrs_init__" if attrs_init else "__init__", + script, + unique_filename, + globs, + ) + init.__annotations__ = annotations + + return init + + +def _setattr(attr_name, value_var, has_on_setattr): + """ + Use the cached object.setattr to set *attr_name* to *value_var*. + """ + return f"_setattr('{attr_name}', {value_var})" + + +def _setattr_with_converter(attr_name, value_var, has_on_setattr): + """ + Use the cached object.setattr to set *attr_name* to *value_var*, but run + its converter first. + """ + return "_setattr('%s', %s(%s))" % ( + attr_name, + _init_converter_pat % (attr_name,), + value_var, + ) + + +def _assign(attr_name, value, has_on_setattr): + """ + Unless *attr_name* has an on_setattr hook, use normal assignment. Otherwise + relegate to _setattr. + """ + if has_on_setattr: + return _setattr(attr_name, value, True) + + return f"self.{attr_name} = {value}" + + +def _assign_with_converter(attr_name, value_var, has_on_setattr): + """ + Unless *attr_name* has an on_setattr hook, use normal assignment after + conversion. Otherwise relegate to _setattr_with_converter. + """ + if has_on_setattr: + return _setattr_with_converter(attr_name, value_var, True) + + return "self.%s = %s(%s)" % ( + attr_name, + _init_converter_pat % (attr_name,), + value_var, + ) + + +def _attrs_to_init_script( + attrs, + frozen, + slots, + pre_init, + post_init, + cache_hash, + base_attr_map, + is_exc, + needs_cached_setattr, + has_cls_on_setattr, + attrs_init, +): + """ + Return a script of an initializer for *attrs* and a dict of globals. + + The globals are expected by the generated script. + + If *frozen* is True, we cannot set the attributes directly so we use + a cached ``object.__setattr__``. + """ + lines = [] + if pre_init: + lines.append("self.__attrs_pre_init__()") + + if needs_cached_setattr: + lines.append( + # Circumvent the __setattr__ descriptor to save one lookup per + # assignment. 
+ # Note _setattr will be used again below if cache_hash is True + "_setattr = _cached_setattr_get(self)" + ) + + if frozen is True: + if slots is True: + fmt_setter = _setattr + fmt_setter_with_converter = _setattr_with_converter + else: + # Dict frozen classes assign directly to __dict__. + # But only if the attribute doesn't come from an ancestor slot + # class. + # Note _inst_dict will be used again below if cache_hash is True + lines.append("_inst_dict = self.__dict__") + + def fmt_setter(attr_name, value_var, has_on_setattr): + if _is_slot_attr(attr_name, base_attr_map): + return _setattr(attr_name, value_var, has_on_setattr) + + return f"_inst_dict['{attr_name}'] = {value_var}" + + def fmt_setter_with_converter( + attr_name, value_var, has_on_setattr + ): + if has_on_setattr or _is_slot_attr(attr_name, base_attr_map): + return _setattr_with_converter( + attr_name, value_var, has_on_setattr + ) + + return "_inst_dict['%s'] = %s(%s)" % ( + attr_name, + _init_converter_pat % (attr_name,), + value_var, + ) + + else: + # Not frozen. + fmt_setter = _assign + fmt_setter_with_converter = _assign_with_converter + + args = [] + kw_only_args = [] + attrs_to_validate = [] + + # This is a dictionary of names to validator and converter callables. + # Injecting this into __init__ globals lets us avoid lookups. + names_for_globals = {} + annotations = {"return": None} + + for a in attrs: + if a.validator: + attrs_to_validate.append(a) + + attr_name = a.name + has_on_setattr = a.on_setattr is not None or ( + a.on_setattr is not setters.NO_OP and has_cls_on_setattr + ) + # a.alias is set to maybe-mangled attr_name in _ClassBuilder if not + # explicitly provided + arg_name = a.alias + + has_factory = isinstance(a.default, Factory) + if has_factory and a.default.takes_self: + maybe_self = "self" + else: + maybe_self = "" + + if a.init is False: + if has_factory: + init_factory_name = _init_factory_pat % (a.name,) + if a.converter is not None: + lines.append( + fmt_setter_with_converter( + attr_name, + init_factory_name + f"({maybe_self})", + has_on_setattr, + ) + ) + conv_name = _init_converter_pat % (a.name,) + names_for_globals[conv_name] = a.converter + else: + lines.append( + fmt_setter( + attr_name, + init_factory_name + f"({maybe_self})", + has_on_setattr, + ) + ) + names_for_globals[init_factory_name] = a.default.factory + else: + if a.converter is not None: + lines.append( + fmt_setter_with_converter( + attr_name, + f"attr_dict['{attr_name}'].default", + has_on_setattr, + ) + ) + conv_name = _init_converter_pat % (a.name,) + names_for_globals[conv_name] = a.converter + else: + lines.append( + fmt_setter( + attr_name, + f"attr_dict['{attr_name}'].default", + has_on_setattr, + ) + ) + elif a.default is not NOTHING and not has_factory: + arg = f"{arg_name}=attr_dict['{attr_name}'].default" + if a.kw_only: + kw_only_args.append(arg) + else: + args.append(arg) + + if a.converter is not None: + lines.append( + fmt_setter_with_converter( + attr_name, arg_name, has_on_setattr + ) + ) + names_for_globals[ + _init_converter_pat % (a.name,) + ] = a.converter + else: + lines.append(fmt_setter(attr_name, arg_name, has_on_setattr)) + + elif has_factory: + arg = f"{arg_name}=NOTHING" + if a.kw_only: + kw_only_args.append(arg) + else: + args.append(arg) + lines.append(f"if {arg_name} is not NOTHING:") + + init_factory_name = _init_factory_pat % (a.name,) + if a.converter is not None: + lines.append( + " " + + fmt_setter_with_converter( + attr_name, arg_name, has_on_setattr + ) + ) + lines.append("else:") + 
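+                # At this point the generated __init__ contains, for a field
+                # ``x`` with a factory and a converter, roughly this (a
+                # sketch of the plain, non-frozen case; the real helper names
+                # are derived from _init_converter_pat / _init_factory_pat):
+                #
+                #     if x is not NOTHING:
+                #         self.x = __attr_converter_x(x)
+                #     else:
+                #         self.x = __attr_converter_x(__attr_factory_x())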
+                lines.append(
+                    "    "
+                    + fmt_setter_with_converter(
+                        attr_name,
+                        init_factory_name + "(" + maybe_self + ")",
+                        has_on_setattr,
+                    )
+                )
+                names_for_globals[
+                    _init_converter_pat % (a.name,)
+                ] = a.converter
+            else:
+                lines.append(
+                    "    " + fmt_setter(attr_name, arg_name, has_on_setattr)
+                )
+                lines.append("else:")
+                lines.append(
+                    "    "
+                    + fmt_setter(
+                        attr_name,
+                        init_factory_name + "(" + maybe_self + ")",
+                        has_on_setattr,
+                    )
+                )
+            names_for_globals[init_factory_name] = a.default.factory
+        else:
+            if a.kw_only:
+                kw_only_args.append(arg_name)
+            else:
+                args.append(arg_name)
+
+            if a.converter is not None:
+                lines.append(
+                    fmt_setter_with_converter(
+                        attr_name, arg_name, has_on_setattr
+                    )
+                )
+                names_for_globals[
+                    _init_converter_pat % (a.name,)
+                ] = a.converter
+            else:
+                lines.append(fmt_setter(attr_name, arg_name, has_on_setattr))
+
+        if a.init is True:
+            if a.type is not None and a.converter is None:
+                annotations[arg_name] = a.type
+            elif a.converter is not None:
+                # Try to get the type from the converter.
+                t = _AnnotationExtractor(a.converter).get_first_param_type()
+                if t:
+                    annotations[arg_name] = t
+
+    if attrs_to_validate:  # we can skip this if there are no validators.
+        names_for_globals["_config"] = _config
+        lines.append("if _config._run_validators is True:")
+        for a in attrs_to_validate:
+            val_name = "__attr_validator_" + a.name
+            attr_name = "__attr_" + a.name
+            lines.append(f"    {val_name}(self, {attr_name}, self.{a.name})")
+            names_for_globals[val_name] = a.validator
+            names_for_globals[attr_name] = a
+
+    if post_init:
+        lines.append("self.__attrs_post_init__()")
+
+    # because this is set only after __attrs_post_init__ is called, a crash
+    # will result if post-init tries to access the hash code. This seemed
+    # preferable to setting this beforehand, in which case alteration to
+    # field values during post-init combined with post-init accessing the
+    # hash code would result in silent bugs.
+    if cache_hash:
+        if frozen:
+            if slots:
+                # if frozen and slots, then _setattr defined above
+                init_hash_cache = "_setattr('%s', %s)"
+            else:
+                # if frozen and not slots, then _inst_dict defined above
+                init_hash_cache = "_inst_dict['%s'] = %s"
+        else:
+            init_hash_cache = "self.%s = %s"
+        lines.append(init_hash_cache % (_hash_cache_field, "None"))
+
+    # For exceptions we rely on BaseException.__init__ for proper
+    # initialization.
+    if is_exc:
+        vals = ",".join(f"self.{a.name}" for a in attrs if a.init)
+
+        lines.append(f"BaseException.__init__(self, {vals})")
+
+    args = ", ".join(args)
+    if kw_only_args:
+        args += "%s*, %s" % (
+            ", " if args else "",  # leading comma
+            ", ".join(kw_only_args),  # kw_only args
+        )
+
+    return (
+        "def %s(self, %s):\n    %s\n"
+        % (
+            ("__attrs_init__" if attrs_init else "__init__"),
+            args,
+            "\n    ".join(lines) if lines else "pass",
+        ),
+        names_for_globals,
+        annotations,
+    )
+
+
+def _default_init_alias_for(name: str) -> str:
+    """
+    The default __init__ parameter name for a field.
+
+    This performs private-name adjustment via leading-underscore stripping,
+    and is the default value of Attribute.alias if not provided.
+    """
+
+    return name.lstrip("_")
+
+
+class Attribute:
+    """
+    *Read-only* representation of an attribute.
+
+    .. warning::
+
+        You should never instantiate this class yourself.
+
+    The class has *all* arguments of `attr.ib` (except for ``factory`` which
+    is only syntactic sugar for ``default=Factory(...)``) plus the following:
+
+    - ``name`` (`str`): The name of the attribute.
+ - ``alias`` (`str`): The __init__ parameter name of the attribute, after + any explicit overrides and default private-attribute-name handling. + - ``inherited`` (`bool`): Whether or not that attribute has been inherited + from a base class. + - ``eq_key`` and ``order_key`` (`typing.Callable` or `None`): The callables + that are used for comparing and ordering objects by this attribute, + respectively. These are set by passing a callable to `attr.ib`'s ``eq``, + ``order``, or ``cmp`` arguments. See also :ref:`comparison customization + `. + + Instances of this class are frequently used for introspection purposes + like: + + - `fields` returns a tuple of them. + - Validators get them passed as the first argument. + - The :ref:`field transformer ` hook receives a list of + them. + - The ``alias`` property exposes the __init__ parameter name of the field, + with any overrides and default private-attribute handling applied. + + + .. versionadded:: 20.1.0 *inherited* + .. versionadded:: 20.1.0 *on_setattr* + .. versionchanged:: 20.2.0 *inherited* is not taken into account for + equality checks and hashing anymore. + .. versionadded:: 21.1.0 *eq_key* and *order_key* + .. versionadded:: 22.2.0 *alias* + + For the full version history of the fields, see `attr.ib`. + """ + + __slots__ = ( + "name", + "default", + "validator", + "repr", + "eq", + "eq_key", + "order", + "order_key", + "hash", + "init", + "metadata", + "type", + "converter", + "kw_only", + "inherited", + "on_setattr", + "alias", + ) + + def __init__( + self, + name, + default, + validator, + repr, + cmp, # XXX: unused, remove along with other cmp code. + hash, + init, + inherited, + metadata=None, + type=None, + converter=None, + kw_only=False, + eq=None, + eq_key=None, + order=None, + order_key=None, + on_setattr=None, + alias=None, + ): + eq, eq_key, order, order_key = _determine_attrib_eq_order( + cmp, eq_key or eq, order_key or order, True + ) + + # Cache this descriptor here to speed things up later. + bound_setattr = _obj_setattr.__get__(self) + + # Despite the big red warning, people *do* instantiate `Attribute` + # themselves. + bound_setattr("name", name) + bound_setattr("default", default) + bound_setattr("validator", validator) + bound_setattr("repr", repr) + bound_setattr("eq", eq) + bound_setattr("eq_key", eq_key) + bound_setattr("order", order) + bound_setattr("order_key", order_key) + bound_setattr("hash", hash) + bound_setattr("init", init) + bound_setattr("converter", converter) + bound_setattr( + "metadata", + ( + types.MappingProxyType(dict(metadata)) # Shallow copy + if metadata + else _empty_metadata_singleton + ), + ) + bound_setattr("type", type) + bound_setattr("kw_only", kw_only) + bound_setattr("inherited", inherited) + bound_setattr("on_setattr", on_setattr) + bound_setattr("alias", alias) + + def __setattr__(self, name, value): + raise FrozenInstanceError() + + @classmethod + def from_counting_attr(cls, name, ca, type=None): + # type holds the annotated value. 
deal with conflicts:
+        if type is None:
+            type = ca.type
+        elif ca.type is not None:
+            raise ValueError(
+                "Type annotation and type argument cannot both be present"
+            )
+        inst_dict = {
+            k: getattr(ca, k)
+            for k in Attribute.__slots__
+            if k
+            not in (
+                "name",
+                "validator",
+                "default",
+                "type",
+                "inherited",
+            )  # exclude methods and deprecated alias
+        }
+        return cls(
+            name=name,
+            validator=ca._validator,
+            default=ca._default,
+            type=type,
+            cmp=None,
+            inherited=False,
+            **inst_dict,
+        )
+
+    # Don't use attrs.evolve since fields(Attribute) doesn't work
+    def evolve(self, **changes):
+        """
+        Copy *self* and apply *changes*.
+
+        This works similarly to `attrs.evolve` but that function does not work
+        with `Attribute`.
+
+        It is mainly meant to be used for `transform-fields`.
+
+        .. versionadded:: 20.3.0
+        """
+        new = copy.copy(self)
+
+        new._setattrs(changes.items())
+
+        return new
+
+    # Don't use _add_pickle since fields(Attribute) doesn't work
+    def __getstate__(self):
+        """
+        Play nice with pickle.
+        """
+        return tuple(
+            getattr(self, name) if name != "metadata" else dict(self.metadata)
+            for name in self.__slots__
+        )
+
+    def __setstate__(self, state):
+        """
+        Play nice with pickle.
+        """
+        self._setattrs(zip(self.__slots__, state))
+
+    def _setattrs(self, name_values_pairs):
+        bound_setattr = _obj_setattr.__get__(self)
+        for name, value in name_values_pairs:
+            if name != "metadata":
+                bound_setattr(name, value)
+            else:
+                bound_setattr(
+                    name,
+                    types.MappingProxyType(dict(value))
+                    if value
+                    else _empty_metadata_singleton,
+                )
+
+
+_a = [
+    Attribute(
+        name=name,
+        default=NOTHING,
+        validator=None,
+        repr=True,
+        cmp=None,
+        eq=True,
+        order=False,
+        hash=(name != "metadata"),
+        init=True,
+        inherited=False,
+        alias=_default_init_alias_for(name),
+    )
+    for name in Attribute.__slots__
+]
+
+Attribute = _add_hash(
+    _add_eq(
+        _add_repr(Attribute, attrs=_a),
+        attrs=[a for a in _a if a.name != "inherited"],
+    ),
+    attrs=[a for a in _a if a.hash and a.name != "inherited"],
+)
+
+
+class _CountingAttr:
+    """
+    Intermediate representation of attributes that uses a counter to preserve
+    the order in which the attributes have been defined.
+
+    *Internal* data structure of the attrs library. Running into it is most
+    likely the result of a bug like a forgotten `@attr.s` decorator.
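+
+    For example (illustrative)::
+
+        import attr
+
+        class Oops:              # note: no @attr.s decorator
+            x = attr.ib()
+
+        Oops().x                 # a _CountingAttr instance, not a value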
+ """ + + __slots__ = ( + "counter", + "_default", + "repr", + "eq", + "eq_key", + "order", + "order_key", + "hash", + "init", + "metadata", + "_validator", + "converter", + "type", + "kw_only", + "on_setattr", + "alias", + ) + __attrs_attrs__ = tuple( + Attribute( + name=name, + alias=_default_init_alias_for(name), + default=NOTHING, + validator=None, + repr=True, + cmp=None, + hash=True, + init=True, + kw_only=False, + eq=True, + eq_key=None, + order=False, + order_key=None, + inherited=False, + on_setattr=None, + ) + for name in ( + "counter", + "_default", + "repr", + "eq", + "order", + "hash", + "init", + "on_setattr", + "alias", + ) + ) + ( + Attribute( + name="metadata", + alias="metadata", + default=None, + validator=None, + repr=True, + cmp=None, + hash=False, + init=True, + kw_only=False, + eq=True, + eq_key=None, + order=False, + order_key=None, + inherited=False, + on_setattr=None, + ), + ) + cls_counter = 0 + + def __init__( + self, + default, + validator, + repr, + cmp, + hash, + init, + converter, + metadata, + type, + kw_only, + eq, + eq_key, + order, + order_key, + on_setattr, + alias, + ): + _CountingAttr.cls_counter += 1 + self.counter = _CountingAttr.cls_counter + self._default = default + self._validator = validator + self.converter = converter + self.repr = repr + self.eq = eq + self.eq_key = eq_key + self.order = order + self.order_key = order_key + self.hash = hash + self.init = init + self.metadata = metadata + self.type = type + self.kw_only = kw_only + self.on_setattr = on_setattr + self.alias = alias + + def validator(self, meth): + """ + Decorator that adds *meth* to the list of validators. + + Returns *meth* unchanged. + + .. versionadded:: 17.1.0 + """ + if self._validator is None: + self._validator = meth + else: + self._validator = and_(self._validator, meth) + return meth + + def default(self, meth): + """ + Decorator that allows to set the default for an attribute. + + Returns *meth* unchanged. + + :raises DefaultAlreadySetError: If default has been set before. + + .. versionadded:: 17.1.0 + """ + if self._default is not NOTHING: + raise DefaultAlreadySetError() + + self._default = Factory(meth, takes_self=True) + + return meth + + +_CountingAttr = _add_eq(_add_repr(_CountingAttr)) + + +class Factory: + """ + Stores a factory callable. + + If passed as the default value to `attrs.field`, the factory is used to + generate a new value. + + :param callable factory: A callable that takes either none or exactly one + mandatory positional argument depending on *takes_self*. + :param bool takes_self: Pass the partially initialized instance that is + being initialized as a positional argument. + + .. versionadded:: 17.1.0 *takes_self* + """ + + __slots__ = ("factory", "takes_self") + + def __init__(self, factory, takes_self=False): + self.factory = factory + self.takes_self = takes_self + + def __getstate__(self): + """ + Play nice with pickle. + """ + return tuple(getattr(self, name) for name in self.__slots__) + + def __setstate__(self, state): + """ + Play nice with pickle. 
+ """ + for name, value in zip(self.__slots__, state): + setattr(self, name, value) + + +_f = [ + Attribute( + name=name, + default=NOTHING, + validator=None, + repr=True, + cmp=None, + eq=True, + order=False, + hash=True, + init=True, + inherited=False, + ) + for name in Factory.__slots__ +] + +Factory = _add_hash(_add_eq(_add_repr(Factory, attrs=_f), attrs=_f), attrs=_f) + + +def make_class(name, attrs, bases=(object,), **attributes_arguments): + r""" + A quick way to create a new class called *name* with *attrs*. + + :param str name: The name for the new class. + + :param attrs: A list of names or a dictionary of mappings of names to + `attr.ib`\ s / `attrs.field`\ s. + + The order is deduced from the order of the names or attributes inside + *attrs*. Otherwise the order of the definition of the attributes is + used. + :type attrs: `list` or `dict` + + :param tuple bases: Classes that the new class will subclass. + + :param attributes_arguments: Passed unmodified to `attr.s`. + + :return: A new class with *attrs*. + :rtype: type + + .. versionadded:: 17.1.0 *bases* + .. versionchanged:: 18.1.0 If *attrs* is ordered, the order is retained. + """ + if isinstance(attrs, dict): + cls_dict = attrs + elif isinstance(attrs, (list, tuple)): + cls_dict = {a: attrib() for a in attrs} + else: + raise TypeError("attrs argument must be a dict or a list.") + + pre_init = cls_dict.pop("__attrs_pre_init__", None) + post_init = cls_dict.pop("__attrs_post_init__", None) + user_init = cls_dict.pop("__init__", None) + + body = {} + if pre_init is not None: + body["__attrs_pre_init__"] = pre_init + if post_init is not None: + body["__attrs_post_init__"] = post_init + if user_init is not None: + body["__init__"] = user_init + + type_ = types.new_class(name, bases, {}, lambda ns: ns.update(body)) + + # For pickling to work, the __module__ variable needs to be set to the + # frame where the class is created. Bypass this step in environments where + # sys._getframe is not defined (Jython for example) or sys._getframe is not + # defined for arguments greater than 0 (IronPython). + try: + type_.__module__ = sys._getframe(1).f_globals.get( + "__name__", "__main__" + ) + except (AttributeError, ValueError): + pass + + # We do it here for proper warnings with meaningful stacklevel. + cmp = attributes_arguments.pop("cmp", None) + ( + attributes_arguments["eq"], + attributes_arguments["order"], + ) = _determine_attrs_eq_order( + cmp, + attributes_arguments.get("eq"), + attributes_arguments.get("order"), + True, + ) + + return _attrs(these=cls_dict, **attributes_arguments)(type_) + + +# These are required by within this module so we define them here and merely +# import into .validators / .converters. + + +@attrs(slots=True, hash=True) +class _AndValidator: + """ + Compose many validators to a single one. + """ + + _validators = attrib() + + def __call__(self, inst, attr, value): + for v in self._validators: + v(inst, attr, value) + + +def and_(*validators): + """ + A validator that composes multiple validators into one. + + When called on a value, it runs all wrapped validators. + + :param callables validators: Arbitrary number of validators. + + .. versionadded:: 17.1.0 + """ + vals = [] + for validator in validators: + vals.extend( + validator._validators + if isinstance(validator, _AndValidator) + else [validator] + ) + + return _AndValidator(tuple(vals)) + + +def pipe(*converters): + """ + A converter that composes multiple converters into one. 
+
+    When called on a value, it runs all wrapped converters, returning the
+    *last* value.
+
+    Type annotations will be inferred from the wrapped converters', if
+    they have any.
+
+    :param callables converters: Arbitrary number of converters.
+
+    .. versionadded:: 20.1.0
+    """
+
+    def pipe_converter(val):
+        for converter in converters:
+            val = converter(val)
+
+        return val
+
+    if not converters:
+        # If the converter list is empty, pipe_converter is the identity.
+        A = typing.TypeVar("A")
+        pipe_converter.__annotations__ = {"val": A, "return": A}
+    else:
+        # Get parameter type from first converter.
+        t = _AnnotationExtractor(converters[0]).get_first_param_type()
+        if t:
+            pipe_converter.__annotations__["val"] = t
+
+        # Get return type from last converter.
+        rt = _AnnotationExtractor(converters[-1]).get_return_type()
+        if rt:
+            pipe_converter.__annotations__["return"] = rt
+
+    return pipe_converter
diff --git a/venv/lib/python3.10/site-packages/attr/_next_gen.py b/venv/lib/python3.10/site-packages/attr/_next_gen.py
new file mode 100644
index 0000000..8f7c0b9
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/attr/_next_gen.py
@@ -0,0 +1,232 @@
+# SPDX-License-Identifier: MIT
+
+"""
+These are keyword-only APIs that call `attr.s` and `attr.ib` with different
+default values.
+"""
+
+
+from functools import partial
+
+from . import setters
+from ._funcs import asdict as _asdict
+from ._funcs import astuple as _astuple
+from ._make import (
+    NOTHING,
+    _frozen_setattrs,
+    _ng_default_on_setattr,
+    attrib,
+    attrs,
+)
+from .exceptions import UnannotatedAttributeError
+
+
+def define(
+    maybe_cls=None,
+    *,
+    these=None,
+    repr=None,
+    unsafe_hash=None,
+    hash=None,
+    init=None,
+    slots=True,
+    frozen=False,
+    weakref_slot=True,
+    str=False,
+    auto_attribs=None,
+    kw_only=False,
+    cache_hash=False,
+    auto_exc=True,
+    eq=None,
+    order=False,
+    auto_detect=True,
+    getstate_setstate=None,
+    on_setattr=None,
+    field_transformer=None,
+    match_args=True,
+):
+    r"""
+    Define an *attrs* class.
+
+    Differences to the classic `attr.s` that it uses underneath:
+
+    - Automatically detect whether or not *auto_attribs* should be `True` (c.f.
+      *auto_attribs* parameter).
+    - If *frozen* is `False`, run converters and validators when setting an
+      attribute by default.
+    - *slots=True*
+
+      .. caution::
+
+         Usually this has only upsides and few visible effects in everyday
+         programming. But it *can* lead to some surprising behaviors, so please
+         make sure to read :term:`slotted classes`.
+    - *auto_exc=True*
+    - *auto_detect=True*
+    - *order=False*
+    - Some options that were only relevant on Python 2 or were kept around for
+      backwards-compatibility have been removed.
+
+    Please note that these are all defaults and you can change them as you
+    wish.
+
+    :param Optional[bool] auto_attribs: If set to `True` or `False`, it behaves
+       exactly like `attr.s`. If left `None`, `attr.s` will try to guess:
+
+       1. If any attributes are annotated and no unannotated `attrs.fields`\ s
+          are found, it assumes *auto_attribs=True*.
+       2. Otherwise it assumes *auto_attribs=False* and tries to collect
+          `attrs.fields`\ s.
+
+    For now, please refer to `attr.s` for the rest of the parameters.
+
+    .. versionadded:: 20.1.0
+    .. versionchanged:: 21.3.0 Converters are also run ``on_setattr``.
+    .. versionadded:: 22.2.0
+       *unsafe_hash* as an alias for *hash* (for :pep:`681` compliance).
+ """ + + def do_it(cls, auto_attribs): + return attrs( + maybe_cls=cls, + these=these, + repr=repr, + hash=hash, + unsafe_hash=unsafe_hash, + init=init, + slots=slots, + frozen=frozen, + weakref_slot=weakref_slot, + str=str, + auto_attribs=auto_attribs, + kw_only=kw_only, + cache_hash=cache_hash, + auto_exc=auto_exc, + eq=eq, + order=order, + auto_detect=auto_detect, + collect_by_mro=True, + getstate_setstate=getstate_setstate, + on_setattr=on_setattr, + field_transformer=field_transformer, + match_args=match_args, + ) + + def wrap(cls): + """ + Making this a wrapper ensures this code runs during class creation. + + We also ensure that frozen-ness of classes is inherited. + """ + nonlocal frozen, on_setattr + + had_on_setattr = on_setattr not in (None, setters.NO_OP) + + # By default, mutable classes convert & validate on setattr. + if frozen is False and on_setattr is None: + on_setattr = _ng_default_on_setattr + + # However, if we subclass a frozen class, we inherit the immutability + # and disable on_setattr. + for base_cls in cls.__bases__: + if base_cls.__setattr__ is _frozen_setattrs: + if had_on_setattr: + raise ValueError( + "Frozen classes can't use on_setattr " + "(frozen-ness was inherited)." + ) + + on_setattr = setters.NO_OP + break + + if auto_attribs is not None: + return do_it(cls, auto_attribs) + + try: + return do_it(cls, True) + except UnannotatedAttributeError: + return do_it(cls, False) + + # maybe_cls's type depends on the usage of the decorator. It's a class + # if it's used as `@attrs` but ``None`` if used as `@attrs()`. + if maybe_cls is None: + return wrap + else: + return wrap(maybe_cls) + + +mutable = define +frozen = partial(define, frozen=True, on_setattr=None) + + +def field( + *, + default=NOTHING, + validator=None, + repr=True, + hash=None, + init=True, + metadata=None, + type=None, + converter=None, + factory=None, + kw_only=False, + eq=None, + order=None, + on_setattr=None, + alias=None, +): + """ + Identical to `attr.ib`, except keyword-only and with some arguments + removed. + + .. versionadded:: 23.1.0 + The *type* parameter has been re-added; mostly for + {func}`attrs.make_class`. Please note that type checkers ignore this + metadata. + .. versionadded:: 20.1.0 + """ + return attrib( + default=default, + validator=validator, + repr=repr, + hash=hash, + init=init, + metadata=metadata, + type=type, + converter=converter, + factory=factory, + kw_only=kw_only, + eq=eq, + order=order, + on_setattr=on_setattr, + alias=alias, + ) + + +def asdict(inst, *, recurse=True, filter=None, value_serializer=None): + """ + Same as `attr.asdict`, except that collections types are always retained + and dict is always used as *dict_factory*. + + .. versionadded:: 21.3.0 + """ + return _asdict( + inst=inst, + recurse=recurse, + filter=filter, + value_serializer=value_serializer, + retain_collection_types=True, + ) + + +def astuple(inst, *, recurse=True, filter=None): + """ + Same as `attr.astuple`, except that collections types are always retained + and `tuple` is always used as the *tuple_factory*. + + .. 
versionadded:: 21.3.0 + """ + return _astuple( + inst=inst, recurse=recurse, filter=filter, retain_collection_types=True + ) diff --git a/venv/lib/python3.10/site-packages/attr/_typing_compat.pyi b/venv/lib/python3.10/site-packages/attr/_typing_compat.pyi new file mode 100644 index 0000000..ca7b71e --- /dev/null +++ b/venv/lib/python3.10/site-packages/attr/_typing_compat.pyi @@ -0,0 +1,15 @@ +from typing import Any, ClassVar, Protocol + +# MYPY is a special constant in mypy which works the same way as `TYPE_CHECKING`. +MYPY = False + +if MYPY: + # A protocol to be able to statically accept an attrs class. + class AttrsInstance_(Protocol): + __attrs_attrs__: ClassVar[Any] + +else: + # For type checkers without plug-in support use an empty protocol that + # will (hopefully) be combined into a union. + class AttrsInstance_(Protocol): + pass diff --git a/venv/lib/python3.10/site-packages/attr/_version_info.py b/venv/lib/python3.10/site-packages/attr/_version_info.py new file mode 100644 index 0000000..51a1312 --- /dev/null +++ b/venv/lib/python3.10/site-packages/attr/_version_info.py @@ -0,0 +1,86 @@ +# SPDX-License-Identifier: MIT + + +from functools import total_ordering + +from ._funcs import astuple +from ._make import attrib, attrs + + +@total_ordering +@attrs(eq=False, order=False, slots=True, frozen=True) +class VersionInfo: + """ + A version object that can be compared to tuple of length 1--4: + + >>> attr.VersionInfo(19, 1, 0, "final") <= (19, 2) + True + >>> attr.VersionInfo(19, 1, 0, "final") < (19, 1, 1) + True + >>> vi = attr.VersionInfo(19, 2, 0, "final") + >>> vi < (19, 1, 1) + False + >>> vi < (19,) + False + >>> vi == (19, 2,) + True + >>> vi == (19, 2, 1) + False + + .. versionadded:: 19.2 + """ + + year = attrib(type=int) + minor = attrib(type=int) + micro = attrib(type=int) + releaselevel = attrib(type=str) + + @classmethod + def _from_version_string(cls, s): + """ + Parse *s* and return a _VersionInfo. + """ + v = s.split(".") + if len(v) == 3: + v.append("final") + + return cls( + year=int(v[0]), minor=int(v[1]), micro=int(v[2]), releaselevel=v[3] + ) + + def _ensure_tuple(self, other): + """ + Ensure *other* is a tuple of a valid length. + + Returns a possibly transformed *other* and ourselves as a tuple of + the same length as *other*. + """ + + if self.__class__ is other.__class__: + other = astuple(other) + + if not isinstance(other, tuple): + raise NotImplementedError + + if not (1 <= len(other) <= 4): + raise NotImplementedError + + return astuple(self)[: len(other)], other + + def __eq__(self, other): + try: + us, them = self._ensure_tuple(other) + except NotImplementedError: + return NotImplemented + + return us == them + + def __lt__(self, other): + try: + us, them = self._ensure_tuple(other) + except NotImplementedError: + return NotImplemented + + # Since alphabetically "dev0" < "final" < "post1" < "post2", we don't + # have to do anything special with releaselevel for now. + return us < them diff --git a/venv/lib/python3.10/site-packages/attr/_version_info.pyi b/venv/lib/python3.10/site-packages/attr/_version_info.pyi new file mode 100644 index 0000000..45ced08 --- /dev/null +++ b/venv/lib/python3.10/site-packages/attr/_version_info.pyi @@ -0,0 +1,9 @@ +class VersionInfo: + @property + def year(self) -> int: ... + @property + def minor(self) -> int: ... + @property + def micro(self) -> int: ... + @property + def releaselevel(self) -> str: ... 
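The `_make.py` and `_next_gen.py` hunks above wire up `Factory` defaults, the `@field.default` / `@field.validator` decorators on `_CountingAttr`, and `make_class`. A minimal sketch of how those pieces surface through the public API, assuming `attrs` 23.1.0 is installed (`Point`, `C`, and their fields are illustrative only):

```python
# Sketch only -- not part of the vendored files above.
from attrs import Factory, define, field, make_class


@define
class Point:
    x: int = 0
    tags: list[str] = Factory(list)   # Factory defers the mutable default
    label: str = field(default="origin")

    # _CountingAttr.validator makes this decorator form possible.
    @label.validator
    def _label_not_empty(self, attribute, value):
        if not value:
            raise ValueError("label must be non-empty")


# make_class() builds an equivalent class dynamically from attribute names.
C = make_class("C", ["a", "b"])

print(Point(1, ["corner"], "p1"))  # Point(x=1, tags=['corner'], label='p1')
print(C("foo", "bar"))             # C(a='foo', b='bar')
```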
diff --git a/venv/lib/python3.10/site-packages/attr/converters.py b/venv/lib/python3.10/site-packages/attr/converters.py new file mode 100644 index 0000000..4cada10 --- /dev/null +++ b/venv/lib/python3.10/site-packages/attr/converters.py @@ -0,0 +1,144 @@ +# SPDX-License-Identifier: MIT + +""" +Commonly useful converters. +""" + + +import typing + +from ._compat import _AnnotationExtractor +from ._make import NOTHING, Factory, pipe + + +__all__ = [ + "default_if_none", + "optional", + "pipe", + "to_bool", +] + + +def optional(converter): + """ + A converter that allows an attribute to be optional. An optional attribute + is one which can be set to ``None``. + + Type annotations will be inferred from the wrapped converter's, if it + has any. + + :param callable converter: the converter that is used for non-``None`` + values. + + .. versionadded:: 17.1.0 + """ + + def optional_converter(val): + if val is None: + return None + return converter(val) + + xtr = _AnnotationExtractor(converter) + + t = xtr.get_first_param_type() + if t: + optional_converter.__annotations__["val"] = typing.Optional[t] + + rt = xtr.get_return_type() + if rt: + optional_converter.__annotations__["return"] = typing.Optional[rt] + + return optional_converter + + +def default_if_none(default=NOTHING, factory=None): + """ + A converter that allows to replace ``None`` values by *default* or the + result of *factory*. + + :param default: Value to be used if ``None`` is passed. Passing an instance + of `attrs.Factory` is supported, however the ``takes_self`` option + is *not*. + :param callable factory: A callable that takes no parameters whose result + is used if ``None`` is passed. + + :raises TypeError: If **neither** *default* or *factory* is passed. + :raises TypeError: If **both** *default* and *factory* are passed. + :raises ValueError: If an instance of `attrs.Factory` is passed with + ``takes_self=True``. + + .. versionadded:: 18.2.0 + """ + if default is NOTHING and factory is None: + raise TypeError("Must pass either `default` or `factory`.") + + if default is not NOTHING and factory is not None: + raise TypeError( + "Must pass either `default` or `factory` but not both." + ) + + if factory is not None: + default = Factory(factory) + + if isinstance(default, Factory): + if default.takes_self: + raise ValueError( + "`takes_self` is not supported by default_if_none." + ) + + def default_if_none_converter(val): + if val is not None: + return val + + return default.factory() + + else: + + def default_if_none_converter(val): + if val is not None: + return val + + return default + + return default_if_none_converter + + +def to_bool(val): + """ + Convert "boolean" strings (e.g., from env. vars.) to real booleans. + + Values mapping to :code:`True`: + + - :code:`True` + - :code:`"true"` / :code:`"t"` + - :code:`"yes"` / :code:`"y"` + - :code:`"on"` + - :code:`"1"` + - :code:`1` + + Values mapping to :code:`False`: + + - :code:`False` + - :code:`"false"` / :code:`"f"` + - :code:`"no"` / :code:`"n"` + - :code:`"off"` + - :code:`"0"` + - :code:`0` + + :raises ValueError: for any other value. + + .. 
versionadded:: 21.3.0 + """ + if isinstance(val, str): + val = val.lower() + truthy = {True, "true", "t", "yes", "y", "on", "1", 1} + falsy = {False, "false", "f", "no", "n", "off", "0", 0} + try: + if val in truthy: + return True + if val in falsy: + return False + except TypeError: + # Raised when "val" is not hashable (e.g., lists) + pass + raise ValueError(f"Cannot convert value to bool: {val}") diff --git a/venv/lib/python3.10/site-packages/attr/converters.pyi b/venv/lib/python3.10/site-packages/attr/converters.pyi new file mode 100644 index 0000000..5abb49f --- /dev/null +++ b/venv/lib/python3.10/site-packages/attr/converters.pyi @@ -0,0 +1,13 @@ +from typing import Callable, TypeVar, overload + +from . import _ConverterType + +_T = TypeVar("_T") + +def pipe(*validators: _ConverterType) -> _ConverterType: ... +def optional(converter: _ConverterType) -> _ConverterType: ... +@overload +def default_if_none(default: _T) -> _ConverterType: ... +@overload +def default_if_none(*, factory: Callable[[], _T]) -> _ConverterType: ... +def to_bool(val: str) -> bool: ... diff --git a/venv/lib/python3.10/site-packages/attr/exceptions.py b/venv/lib/python3.10/site-packages/attr/exceptions.py new file mode 100644 index 0000000..2883493 --- /dev/null +++ b/venv/lib/python3.10/site-packages/attr/exceptions.py @@ -0,0 +1,91 @@ +# SPDX-License-Identifier: MIT + + +class FrozenError(AttributeError): + """ + A frozen/immutable instance or attribute have been attempted to be + modified. + + It mirrors the behavior of ``namedtuples`` by using the same error message + and subclassing `AttributeError`. + + .. versionadded:: 20.1.0 + """ + + msg = "can't set attribute" + args = [msg] + + +class FrozenInstanceError(FrozenError): + """ + A frozen instance has been attempted to be modified. + + .. versionadded:: 16.1.0 + """ + + +class FrozenAttributeError(FrozenError): + """ + A frozen attribute has been attempted to be modified. + + .. versionadded:: 20.1.0 + """ + + +class AttrsAttributeNotFoundError(ValueError): + """ + An *attrs* function couldn't find an attribute that the user asked for. + + .. versionadded:: 16.2.0 + """ + + +class NotAnAttrsClassError(ValueError): + """ + A non-*attrs* class has been passed into an *attrs* function. + + .. versionadded:: 16.2.0 + """ + + +class DefaultAlreadySetError(RuntimeError): + """ + A default has been set when defining the field and is attempted to be reset + using the decorator. + + .. versionadded:: 17.1.0 + """ + + +class UnannotatedAttributeError(RuntimeError): + """ + A class with ``auto_attribs=True`` has a field without a type annotation. + + .. versionadded:: 17.3.0 + """ + + +class PythonTooOldError(RuntimeError): + """ + It was attempted to use an *attrs* feature that requires a newer Python + version. + + .. versionadded:: 18.2.0 + """ + + +class NotCallableError(TypeError): + """ + A field requiring a callable has been set with a value that is not + callable. + + .. versionadded:: 19.2.0 + """ + + def __init__(self, msg, value): + super(TypeError, self).__init__(msg, value) + self.msg = msg + self.value = value + + def __str__(self): + return str(self.msg) diff --git a/venv/lib/python3.10/site-packages/attr/exceptions.pyi b/venv/lib/python3.10/site-packages/attr/exceptions.pyi new file mode 100644 index 0000000..f268011 --- /dev/null +++ b/venv/lib/python3.10/site-packages/attr/exceptions.pyi @@ -0,0 +1,17 @@ +from typing import Any + +class FrozenError(AttributeError): + msg: str = ... + +class FrozenInstanceError(FrozenError): ... 
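`converters.py` a few hunks above defines `optional`, `default_if_none`, and `to_bool`, and `pipe` chains them. A small sketch of how they compose, assuming `attrs` 23.1.0 is installed (`Config` and its fields are made up for illustration):

```python
# Sketch only -- not part of the vendored files above.
from attrs import define, field
from attrs.converters import default_if_none, optional, pipe, to_bool


@define
class Config:
    # None -> "off" -> False; strings like "yes"/"0" are normalized to bool.
    verbose: bool = field(
        converter=pipe(default_if_none("off"), to_bool), default=None
    )
    # None stays None; anything else goes through int().
    retries: int | None = field(converter=optional(int), default=None)


print(Config())                            # Config(verbose=False, retries=None)
print(Config(verbose="YES", retries="3"))  # Config(verbose=True, retries=3)
```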
+class FrozenAttributeError(FrozenError): ... +class AttrsAttributeNotFoundError(ValueError): ... +class NotAnAttrsClassError(ValueError): ... +class DefaultAlreadySetError(RuntimeError): ... +class UnannotatedAttributeError(RuntimeError): ... +class PythonTooOldError(RuntimeError): ... + +class NotCallableError(TypeError): + msg: str = ... + value: Any = ... + def __init__(self, msg: str, value: Any) -> None: ... diff --git a/venv/lib/python3.10/site-packages/attr/filters.py b/venv/lib/python3.10/site-packages/attr/filters.py new file mode 100644 index 0000000..a1e40c9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/attr/filters.py @@ -0,0 +1,66 @@ +# SPDX-License-Identifier: MIT + +""" +Commonly useful filters for `attr.asdict`. +""" + +from ._make import Attribute + + +def _split_what(what): + """ + Returns a tuple of `frozenset`s of classes and attributes. + """ + return ( + frozenset(cls for cls in what if isinstance(cls, type)), + frozenset(cls for cls in what if isinstance(cls, str)), + frozenset(cls for cls in what if isinstance(cls, Attribute)), + ) + + +def include(*what): + """ + Include *what*. + + :param what: What to include. + :type what: `list` of classes `type`, field names `str` or + `attrs.Attribute`\\ s + + :rtype: `callable` + + .. versionchanged:: 23.1.0 Accept strings with field names. + """ + cls, names, attrs = _split_what(what) + + def include_(attribute, value): + return ( + value.__class__ in cls + or attribute.name in names + or attribute in attrs + ) + + return include_ + + +def exclude(*what): + """ + Exclude *what*. + + :param what: What to exclude. + :type what: `list` of classes `type`, field names `str` or + `attrs.Attribute`\\ s. + + :rtype: `callable` + + .. versionchanged:: 23.3.0 Accept field name string as input argument + """ + cls, names, attrs = _split_what(what) + + def exclude_(attribute, value): + return not ( + value.__class__ in cls + or attribute.name in names + or attribute in attrs + ) + + return exclude_ diff --git a/venv/lib/python3.10/site-packages/attr/filters.pyi b/venv/lib/python3.10/site-packages/attr/filters.pyi new file mode 100644 index 0000000..8a02fa0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/attr/filters.pyi @@ -0,0 +1,6 @@ +from typing import Any, Union + +from . import Attribute, _FilterType + +def include(*what: Union[type, str, Attribute[Any]]) -> _FilterType[Any]: ... +def exclude(*what: Union[type, str, Attribute[Any]]) -> _FilterType[Any]: ... diff --git a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/jaraco/__init__.py b/venv/lib/python3.10/site-packages/attr/py.typed similarity index 100% rename from venv/lib/python3.10/site-packages/pkg_resources/_vendor/jaraco/__init__.py rename to venv/lib/python3.10/site-packages/attr/py.typed diff --git a/venv/lib/python3.10/site-packages/attr/setters.py b/venv/lib/python3.10/site-packages/attr/setters.py new file mode 100644 index 0000000..12ed675 --- /dev/null +++ b/venv/lib/python3.10/site-packages/attr/setters.py @@ -0,0 +1,73 @@ +# SPDX-License-Identifier: MIT + +""" +Commonly used hooks for on_setattr. +""" + + +from . import _config +from .exceptions import FrozenAttributeError + + +def pipe(*setters): + """ + Run all *setters* and return the return value of the last one. + + .. versionadded:: 20.1.0 + """ + + def wrapped_pipe(instance, attrib, new_value): + rv = new_value + + for setter in setters: + rv = setter(instance, attrib, rv) + + return rv + + return wrapped_pipe + + +def frozen(_, __, ___): + """ + Prevent an attribute to be modified. 
+ + .. versionadded:: 20.1.0 + """ + raise FrozenAttributeError() + + +def validate(instance, attrib, new_value): + """ + Run *attrib*'s validator on *new_value* if it has one. + + .. versionadded:: 20.1.0 + """ + if _config._run_validators is False: + return new_value + + v = attrib.validator + if not v: + return new_value + + v(instance, attrib, new_value) + + return new_value + + +def convert(instance, attrib, new_value): + """ + Run *attrib*'s converter -- if it has one -- on *new_value* and return the + result. + + .. versionadded:: 20.1.0 + """ + c = attrib.converter + if c: + return c(new_value) + + return new_value + + +# Sentinel for disabling class-wide *on_setattr* hooks for certain attributes. +# autodata stopped working, so the docstring is inlined in the API docs. +NO_OP = object() diff --git a/venv/lib/python3.10/site-packages/attr/setters.pyi b/venv/lib/python3.10/site-packages/attr/setters.pyi new file mode 100644 index 0000000..72f7ce4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/attr/setters.pyi @@ -0,0 +1,19 @@ +from typing import Any, NewType, NoReturn, TypeVar + +from . import Attribute, _OnSetAttrType + +_T = TypeVar("_T") + +def frozen( + instance: Any, attribute: Attribute[Any], new_value: Any +) -> NoReturn: ... +def pipe(*setters: _OnSetAttrType) -> _OnSetAttrType: ... +def validate(instance: Any, attribute: Attribute[_T], new_value: _T) -> _T: ... + +# convert is allowed to return Any, because they can be chained using pipe. +def convert( + instance: Any, attribute: Attribute[Any], new_value: Any +) -> Any: ... + +_NoOpType = NewType("_NoOpType", object) +NO_OP: _NoOpType diff --git a/venv/lib/python3.10/site-packages/attr/validators.py b/venv/lib/python3.10/site-packages/attr/validators.py new file mode 100644 index 0000000..1488554 --- /dev/null +++ b/venv/lib/python3.10/site-packages/attr/validators.py @@ -0,0 +1,720 @@ +# SPDX-License-Identifier: MIT + +""" +Commonly useful validators. +""" + + +import operator +import re + +from contextlib import contextmanager +from re import Pattern + +from ._config import get_run_validators, set_run_validators +from ._make import _AndValidator, and_, attrib, attrs +from .converters import default_if_none +from .exceptions import NotCallableError + + +__all__ = [ + "and_", + "deep_iterable", + "deep_mapping", + "disabled", + "ge", + "get_disabled", + "gt", + "in_", + "instance_of", + "is_callable", + "le", + "lt", + "matches_re", + "max_len", + "min_len", + "not_", + "optional", + "provides", + "set_disabled", +] + + +def set_disabled(disabled): + """ + Globally disable or enable running validators. + + By default, they are run. + + :param disabled: If ``True``, disable running all validators. + :type disabled: bool + + .. warning:: + + This function is not thread-safe! + + .. versionadded:: 21.3.0 + """ + set_run_validators(not disabled) + + +def get_disabled(): + """ + Return a bool indicating whether validators are currently disabled or not. + + :return: ``True`` if validators are currently disabled. + :rtype: bool + + .. versionadded:: 21.3.0 + """ + return not get_run_validators() + + +@contextmanager +def disabled(): + """ + Context manager that disables running validators within its context. + + .. warning:: + + This context manager is not thread-safe! + + .. 
versionadded:: 21.3.0
+    """
+    set_run_validators(False)
+    try:
+        yield
+    finally:
+        set_run_validators(True)
+
+
+@attrs(repr=False, slots=True, hash=True)
+class _InstanceOfValidator:
+    type = attrib()
+
+    def __call__(self, inst, attr, value):
+        """
+        We use a callable class to be able to change the ``__repr__``.
+        """
+        if not isinstance(value, self.type):
+            raise TypeError(
+                "'{name}' must be {type!r} (got {value!r} that is a "
+                "{actual!r}).".format(
+                    name=attr.name,
+                    type=self.type,
+                    actual=value.__class__,
+                    value=value,
+                ),
+                attr,
+                self.type,
+                value,
+            )
+
+    def __repr__(self):
+        return "<instance_of validator for type {type!r}>".format(
+            type=self.type
+        )
+
+
+def instance_of(type):
+    """
+    A validator that raises a `TypeError` if the initializer is called
+    with a wrong type for this particular attribute (checks are performed using
+    `isinstance` therefore it's also valid to pass a tuple of types).
+
+    :param type: The type to check for.
+    :type type: type or tuple of type
+
+    :raises TypeError: With a human readable error message, the attribute
+        (of type `attrs.Attribute`), the expected type, and the value it
+        got.
+    """
+    return _InstanceOfValidator(type)
+
+
+@attrs(repr=False, frozen=True, slots=True)
+class _MatchesReValidator:
+    pattern = attrib()
+    match_func = attrib()
+
+    def __call__(self, inst, attr, value):
+        """
+        We use a callable class to be able to change the ``__repr__``.
+        """
+        if not self.match_func(value):
+            raise ValueError(
+                "'{name}' must match regex {pattern!r}"
+                " ({value!r} doesn't)".format(
+                    name=attr.name, pattern=self.pattern.pattern, value=value
+                ),
+                attr,
+                self.pattern,
+                value,
+            )
+
+    def __repr__(self):
+        return "<matches_re validator for pattern {pattern!r}>".format(
+            pattern=self.pattern
+        )
+
+
+def matches_re(regex, flags=0, func=None):
+    r"""
+    A validator that raises `ValueError` if the initializer is called
+    with a string that doesn't match *regex*.
+
+    :param regex: a regex string or precompiled pattern to match against
+    :param int flags: flags that will be passed to the underlying re function
+        (default 0)
+    :param callable func: which underlying `re` function to call. Valid options
+        are `re.fullmatch`, `re.search`, and `re.match`; the default ``None``
+        means `re.fullmatch`. For performance reasons, the pattern is always
+        precompiled using `re.compile`.
+
+    .. versionadded:: 19.2.0
+    .. versionchanged:: 21.3.0 *regex* can be a pre-compiled pattern.
+    """
+    valid_funcs = (re.fullmatch, None, re.search, re.match)
+    if func not in valid_funcs:
+        raise ValueError(
+            "'func' must be one of {}.".format(
+                ", ".join(
+                    sorted(
+                        e and e.__name__ or "None" for e in set(valid_funcs)
+                    )
+                )
+            )
+        )
+
+    if isinstance(regex, Pattern):
+        if flags:
+            raise TypeError(
+                "'flags' can only be used with a string pattern; "
+                "pass flags to re.compile() instead"
+            )
+        pattern = regex
+    else:
+        pattern = re.compile(regex, flags)
+
+    if func is re.match:
+        match_func = pattern.match
+    elif func is re.search:
+        match_func = pattern.search
+    else:
+        match_func = pattern.fullmatch
+
+    return _MatchesReValidator(pattern, match_func)
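A short sketch of the two validators defined above, assuming `attrs` 23.1.0 is installed (`User` is a made-up class):

```python
# Sketch only -- not part of the vendored file above.
from attrs import define, field
from attrs.validators import instance_of, matches_re


@define
class User:
    name: str = field(validator=instance_of(str))
    # matches_re() defaults to re.fullmatch, per the docstring above.
    email: str = field(validator=matches_re(r"[^@]+@[^@]+"))


User("jane", "jane@example.org")  # passes both validators
try:
    User("jane", "not-an-email")
except ValueError as e:
    print(e.args[0])  # 'email' must match regex '[^@]+@[^@]+' ...
```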
+ """ + if not self.interface.providedBy(value): + raise TypeError( + "'{name}' must provide {interface!r} which {value!r} " + "doesn't.".format( + name=attr.name, interface=self.interface, value=value + ), + attr, + self.interface, + value, + ) + + def __repr__(self): + return "".format( + interface=self.interface + ) + + +def provides(interface): + """ + A validator that raises a `TypeError` if the initializer is called + with an object that does not provide the requested *interface* (checks are + performed using ``interface.providedBy(value)`` (see `zope.interface + `_). + + :param interface: The interface to check for. + :type interface: ``zope.interface.Interface`` + + :raises TypeError: With a human readable error message, the attribute + (of type `attrs.Attribute`), the expected interface, and the + value it got. + + .. deprecated:: 23.1.0 + """ + import warnings + + warnings.warn( + "attrs's zope-interface support is deprecated and will be removed in, " + "or after, April 2024.", + DeprecationWarning, + stacklevel=2, + ) + return _ProvidesValidator(interface) + + +@attrs(repr=False, slots=True, hash=True) +class _OptionalValidator: + validator = attrib() + + def __call__(self, inst, attr, value): + if value is None: + return + + self.validator(inst, attr, value) + + def __repr__(self): + return "".format( + what=repr(self.validator) + ) + + +def optional(validator): + """ + A validator that makes an attribute optional. An optional attribute is one + which can be set to ``None`` in addition to satisfying the requirements of + the sub-validator. + + :param Callable | tuple[Callable] | list[Callable] validator: A validator + (or validators) that is used for non-``None`` values. + + .. versionadded:: 15.1.0 + .. versionchanged:: 17.1.0 *validator* can be a list of validators. + .. versionchanged:: 23.1.0 *validator* can also be a tuple of validators. + """ + if isinstance(validator, (list, tuple)): + return _OptionalValidator(_AndValidator(validator)) + + return _OptionalValidator(validator) + + +@attrs(repr=False, slots=True, hash=True) +class _InValidator: + options = attrib() + + def __call__(self, inst, attr, value): + try: + in_options = value in self.options + except TypeError: # e.g. `1 in "abc"` + in_options = False + + if not in_options: + raise ValueError( + "'{name}' must be in {options!r} (got {value!r})".format( + name=attr.name, options=self.options, value=value + ), + attr, + self.options, + value, + ) + + def __repr__(self): + return "".format( + options=self.options + ) + + +def in_(options): + """ + A validator that raises a `ValueError` if the initializer is called + with a value that does not belong in the options provided. The check is + performed using ``value in options``. + + :param options: Allowed options. + :type options: list, tuple, `enum.Enum`, ... + + :raises ValueError: With a human readable error message, the attribute (of + type `attrs.Attribute`), the expected options, and the value it + got. + + .. versionadded:: 17.1.0 + .. versionchanged:: 22.1.0 + The ValueError was incomplete until now and only contained the human + readable error message. Now it contains all the information that has + been promised since 17.1.0. + """ + return _InValidator(options) + + +@attrs(repr=False, slots=False, hash=True) +class _IsCallableValidator: + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. 
+ """ + if not callable(value): + message = ( + "'{name}' must be callable " + "(got {value!r} that is a {actual!r})." + ) + raise NotCallableError( + msg=message.format( + name=attr.name, value=value, actual=value.__class__ + ), + value=value, + ) + + def __repr__(self): + return "" + + +def is_callable(): + """ + A validator that raises a `attrs.exceptions.NotCallableError` if the + initializer is called with a value for this particular attribute + that is not callable. + + .. versionadded:: 19.1.0 + + :raises attrs.exceptions.NotCallableError: With a human readable error + message containing the attribute (`attrs.Attribute`) name, + and the value it got. + """ + return _IsCallableValidator() + + +@attrs(repr=False, slots=True, hash=True) +class _DeepIterable: + member_validator = attrib(validator=is_callable()) + iterable_validator = attrib( + default=None, validator=optional(is_callable()) + ) + + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. + """ + if self.iterable_validator is not None: + self.iterable_validator(inst, attr, value) + + for member in value: + self.member_validator(inst, attr, member) + + def __repr__(self): + iterable_identifier = ( + "" + if self.iterable_validator is None + else f" {self.iterable_validator!r}" + ) + return ( + "" + ).format( + iterable_identifier=iterable_identifier, + member=self.member_validator, + ) + + +def deep_iterable(member_validator, iterable_validator=None): + """ + A validator that performs deep validation of an iterable. + + :param member_validator: Validator(s) to apply to iterable members + :param iterable_validator: Validator to apply to iterable itself + (optional) + + .. versionadded:: 19.1.0 + + :raises TypeError: if any sub-validators fail + """ + if isinstance(member_validator, (list, tuple)): + member_validator = and_(*member_validator) + return _DeepIterable(member_validator, iterable_validator) + + +@attrs(repr=False, slots=True, hash=True) +class _DeepMapping: + key_validator = attrib(validator=is_callable()) + value_validator = attrib(validator=is_callable()) + mapping_validator = attrib(default=None, validator=optional(is_callable())) + + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. + """ + if self.mapping_validator is not None: + self.mapping_validator(inst, attr, value) + + for key in value: + self.key_validator(inst, attr, key) + self.value_validator(inst, attr, value[key]) + + def __repr__(self): + return ( + "" + ).format(key=self.key_validator, value=self.value_validator) + + +def deep_mapping(key_validator, value_validator, mapping_validator=None): + """ + A validator that performs deep validation of a dictionary. + + :param key_validator: Validator to apply to dictionary keys + :param value_validator: Validator to apply to dictionary values + :param mapping_validator: Validator to apply to top-level mapping + attribute (optional) + + .. versionadded:: 19.1.0 + + :raises TypeError: if any sub-validators fail + """ + return _DeepMapping(key_validator, value_validator, mapping_validator) + + +@attrs(repr=False, frozen=True, slots=True) +class _NumberValidator: + bound = attrib() + compare_op = attrib() + compare_func = attrib() + + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. 
+ """ + if not self.compare_func(value, self.bound): + raise ValueError( + "'{name}' must be {op} {bound}: {value}".format( + name=attr.name, + op=self.compare_op, + bound=self.bound, + value=value, + ) + ) + + def __repr__(self): + return "".format( + op=self.compare_op, bound=self.bound + ) + + +def lt(val): + """ + A validator that raises `ValueError` if the initializer is called + with a number larger or equal to *val*. + + :param val: Exclusive upper bound for values + + .. versionadded:: 21.3.0 + """ + return _NumberValidator(val, "<", operator.lt) + + +def le(val): + """ + A validator that raises `ValueError` if the initializer is called + with a number greater than *val*. + + :param val: Inclusive upper bound for values + + .. versionadded:: 21.3.0 + """ + return _NumberValidator(val, "<=", operator.le) + + +def ge(val): + """ + A validator that raises `ValueError` if the initializer is called + with a number smaller than *val*. + + :param val: Inclusive lower bound for values + + .. versionadded:: 21.3.0 + """ + return _NumberValidator(val, ">=", operator.ge) + + +def gt(val): + """ + A validator that raises `ValueError` if the initializer is called + with a number smaller or equal to *val*. + + :param val: Exclusive lower bound for values + + .. versionadded:: 21.3.0 + """ + return _NumberValidator(val, ">", operator.gt) + + +@attrs(repr=False, frozen=True, slots=True) +class _MaxLengthValidator: + max_length = attrib() + + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. + """ + if len(value) > self.max_length: + raise ValueError( + "Length of '{name}' must be <= {max}: {len}".format( + name=attr.name, max=self.max_length, len=len(value) + ) + ) + + def __repr__(self): + return f"" + + +def max_len(length): + """ + A validator that raises `ValueError` if the initializer is called + with a string or iterable that is longer than *length*. + + :param int length: Maximum length of the string or iterable + + .. versionadded:: 21.3.0 + """ + return _MaxLengthValidator(length) + + +@attrs(repr=False, frozen=True, slots=True) +class _MinLengthValidator: + min_length = attrib() + + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. + """ + if len(value) < self.min_length: + raise ValueError( + "Length of '{name}' must be => {min}: {len}".format( + name=attr.name, min=self.min_length, len=len(value) + ) + ) + + def __repr__(self): + return f"" + + +def min_len(length): + """ + A validator that raises `ValueError` if the initializer is called + with a string or iterable that is shorter than *length*. + + :param int length: Minimum length of the string or iterable + + .. versionadded:: 22.1.0 + """ + return _MinLengthValidator(length) + + +@attrs(repr=False, slots=True, hash=True) +class _SubclassOfValidator: + type = attrib() + + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. + """ + if not issubclass(value, self.type): + raise TypeError( + "'{name}' must be a subclass of {type!r} " + "(got {value!r}).".format( + name=attr.name, + type=self.type, + value=value, + ), + attr, + self.type, + value, + ) + + def __repr__(self): + return "".format( + type=self.type + ) + + +def _subclass_of(type): + """ + A validator that raises a `TypeError` if the initializer is called + with a wrong type for this particular attribute (checks are performed using + `issubclass` therefore it's also valid to pass a tuple of types). 
+
+
+@attrs(repr=False, slots=True, hash=True)
+class _SubclassOfValidator:
+    type = attrib()
+
+    def __call__(self, inst, attr, value):
+        """
+        We use a callable class to be able to change the ``__repr__``.
+        """
+        if not issubclass(value, self.type):
+            raise TypeError(
+                "'{name}' must be a subclass of {type!r} "
+                "(got {value!r}).".format(
+                    name=attr.name,
+                    type=self.type,
+                    value=value,
+                ),
+                attr,
+                self.type,
+                value,
+            )
+
+    def __repr__(self):
+        return "<subclass_of validator for type {type!r}>".format(
+            type=self.type
+        )
+
+
+def _subclass_of(type):
+    """
+    A validator that raises a `TypeError` if the initializer is called
+    with a wrong type for this particular attribute (checks are performed using
+    `issubclass` therefore it's also valid to pass a tuple of types).
+
+    :param type: The type to check for.
+    :type type: type or tuple of types
+
+    :raises TypeError: With a human readable error message, the attribute
+        (of type `attrs.Attribute`), the expected type, and the value it
+        got.
+    """
+    return _SubclassOfValidator(type)
+
+
+@attrs(repr=False, slots=True, hash=True)
+class _NotValidator:
+    validator = attrib()
+    msg = attrib(
+        converter=default_if_none(
+            "not_ validator child '{validator!r}' "
+            "did not raise a captured error"
+        )
+    )
+    exc_types = attrib(
+        validator=deep_iterable(
+            member_validator=_subclass_of(Exception),
+            iterable_validator=instance_of(tuple),
+        ),
+    )
+
+    def __call__(self, inst, attr, value):
+        try:
+            self.validator(inst, attr, value)
+        except self.exc_types:
+            pass  # suppress error to invert validity
+        else:
+            raise ValueError(
+                self.msg.format(
+                    validator=self.validator,
+                    exc_types=self.exc_types,
+                ),
+                attr,
+                self.validator,
+                value,
+                self.exc_types,
+            )
+
+    def __repr__(self):
+        return (
+            "<not_ validator wrapping {what!r}, capturing {exc_types!r}>"
+        ).format(
+            what=self.validator,
+            exc_types=self.exc_types,
+        )
+
+
+def not_(validator, *, msg=None, exc_types=(ValueError, TypeError)):
+    """
+    A validator that wraps and logically 'inverts' the validator passed to it.
+    It will raise a `ValueError` if the provided validator *doesn't* raise a
+    `ValueError` or `TypeError` (by default), and will suppress the exception
+    if the provided validator *does*.
+
+    Intended to be used with existing validators to compose logic without
+    needing to create inverted variants, for example, ``not_(in_(...))``.
+
+    :param validator: A validator to be logically inverted.
+    :param msg: Message to raise if validator fails.
+        Formatted with keys ``exc_types`` and ``validator``.
+    :type msg: str
+    :param exc_types: Exception type(s) to capture.
+        Other types raised by child validators will not be intercepted and
+        pass through.
+
+    :raises ValueError: With a human readable error message,
+        the attribute (of type `attrs.Attribute`),
+        the validator that failed to raise an exception,
+        the value it got,
+        and the expected exception types.
+
+    .. versionadded:: 22.2.0
+    """
+    try:
+        exc_types = tuple(exc_types)
+    except TypeError:
+        exc_types = (exc_types,)
+    return _NotValidator(validator, msg, exc_types)
diff --git a/venv/lib/python3.10/site-packages/attr/validators.pyi b/venv/lib/python3.10/site-packages/attr/validators.pyi
new file mode 100644
index 0000000..d194a75
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/attr/validators.pyi
@@ -0,0 +1,88 @@
+from typing import (
+    Any,
+    AnyStr,
+    Callable,
+    Container,
+    ContextManager,
+    Iterable,
+    List,
+    Mapping,
+    Match,
+    Optional,
+    Pattern,
+    Tuple,
+    Type,
+    TypeVar,
+    Union,
+    overload,
+)
+
+from . import _ValidatorType
+from . import _ValidatorArgType
+
+_T = TypeVar("_T")
+_T1 = TypeVar("_T1")
+_T2 = TypeVar("_T2")
+_T3 = TypeVar("_T3")
+_I = TypeVar("_I", bound=Iterable)
+_K = TypeVar("_K")
+_V = TypeVar("_V")
+_M = TypeVar("_M", bound=Mapping)
+
+def set_disabled(run: bool) -> None: ...
+def get_disabled() -> bool: ...
+def disabled() -> ContextManager[None]: ...
+
+# To be more precise on instance_of use some overloads.
+# If there are more than 3 items in the tuple then we fall back to Any
+@overload
+def instance_of(type: Type[_T]) -> _ValidatorType[_T]: ...
+@overload
+def instance_of(type: Tuple[Type[_T]]) -> _ValidatorType[_T]: ...
+@overload
+def instance_of(
+    type: Tuple[Type[_T1], Type[_T2]]
+) -> _ValidatorType[Union[_T1, _T2]]: ...
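`not_()` above inverts an existing validator, e.g. the `not_(in_(...))` pattern its docstring suggests; a sketch assuming `attrs` 23.1.0 is installed (`Handle` is made up):

```python
# Sketch only -- not part of the vendored files above.
from attrs import define, field
from attrs.validators import in_, not_


@define
class Handle:
    # Reject reserved names instead of enumerating the allowed ones.
    name: str = field(validator=not_(in_({"root", "admin"})))


Handle("jane")  # in_() raises ValueError, which not_() suppresses -> valid
try:
    Handle("root")
except ValueError as e:
    print(e.args[0])  # not_ validator child ... did not raise a captured error
```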
+@overload +def instance_of( + type: Tuple[Type[_T1], Type[_T2], Type[_T3]] +) -> _ValidatorType[Union[_T1, _T2, _T3]]: ... +@overload +def instance_of(type: Tuple[type, ...]) -> _ValidatorType[Any]: ... +def provides(interface: Any) -> _ValidatorType[Any]: ... +def optional( + validator: Union[ + _ValidatorType[_T], List[_ValidatorType[_T]], Tuple[_ValidatorType[_T]] + ] +) -> _ValidatorType[Optional[_T]]: ... +def in_(options: Container[_T]) -> _ValidatorType[_T]: ... +def and_(*validators: _ValidatorType[_T]) -> _ValidatorType[_T]: ... +def matches_re( + regex: Union[Pattern[AnyStr], AnyStr], + flags: int = ..., + func: Optional[ + Callable[[AnyStr, AnyStr, int], Optional[Match[AnyStr]]] + ] = ..., +) -> _ValidatorType[AnyStr]: ... +def deep_iterable( + member_validator: _ValidatorArgType[_T], + iterable_validator: Optional[_ValidatorType[_I]] = ..., +) -> _ValidatorType[_I]: ... +def deep_mapping( + key_validator: _ValidatorType[_K], + value_validator: _ValidatorType[_V], + mapping_validator: Optional[_ValidatorType[_M]] = ..., +) -> _ValidatorType[_M]: ... +def is_callable() -> _ValidatorType[_T]: ... +def lt(val: _T) -> _ValidatorType[_T]: ... +def le(val: _T) -> _ValidatorType[_T]: ... +def ge(val: _T) -> _ValidatorType[_T]: ... +def gt(val: _T) -> _ValidatorType[_T]: ... +def max_len(length: int) -> _ValidatorType[_T]: ... +def min_len(length: int) -> _ValidatorType[_T]: ... +def not_( + validator: _ValidatorType[_T], + *, + msg: Optional[str] = None, + exc_types: Union[Type[Exception], Iterable[Type[Exception]]] = ..., +) -> _ValidatorType[_T]: ... diff --git a/venv/lib/python3.10/site-packages/pip-22.3.1.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/attrs-23.1.0.dist-info/INSTALLER similarity index 100% rename from venv/lib/python3.10/site-packages/pip-22.3.1.dist-info/INSTALLER rename to venv/lib/python3.10/site-packages/attrs-23.1.0.dist-info/INSTALLER diff --git a/venv/lib/python3.10/site-packages/attrs-23.1.0.dist-info/METADATA b/venv/lib/python3.10/site-packages/attrs-23.1.0.dist-info/METADATA new file mode 100644 index 0000000..4a986f0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/attrs-23.1.0.dist-info/METADATA @@ -0,0 +1,243 @@ +Metadata-Version: 2.1 +Name: attrs +Version: 23.1.0 +Summary: Classes Without Boilerplate +Project-URL: Documentation, https://www.attrs.org/ +Project-URL: Changelog, https://www.attrs.org/en/stable/changelog.html +Project-URL: Bug Tracker, https://github.com/python-attrs/attrs/issues +Project-URL: Source Code, https://github.com/python-attrs/attrs +Project-URL: Funding, https://github.com/sponsors/hynek +Project-URL: Tidelift, https://tidelift.com/subscription/pkg/pypi-attrs?utm_source=pypi-attrs&utm_medium=pypi +Author-email: Hynek Schlawack +License-Expression: MIT +License-File: LICENSE +Keywords: attribute,boilerplate,class +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Typing :: Typed +Requires-Python: >=3.7 +Requires-Dist: importlib-metadata; python_version < '3.8' +Provides-Extra: cov +Requires-Dist: 
attrs[tests]; extra == 'cov' +Requires-Dist: coverage[toml]>=5.3; extra == 'cov' +Provides-Extra: dev +Requires-Dist: attrs[docs,tests]; extra == 'dev' +Requires-Dist: pre-commit; extra == 'dev' +Provides-Extra: docs +Requires-Dist: furo; extra == 'docs' +Requires-Dist: myst-parser; extra == 'docs' +Requires-Dist: sphinx; extra == 'docs' +Requires-Dist: sphinx-notfound-page; extra == 'docs' +Requires-Dist: sphinxcontrib-towncrier; extra == 'docs' +Requires-Dist: towncrier; extra == 'docs' +Requires-Dist: zope-interface; extra == 'docs' +Provides-Extra: tests +Requires-Dist: attrs[tests-no-zope]; extra == 'tests' +Requires-Dist: zope-interface; extra == 'tests' +Provides-Extra: tests-no-zope +Requires-Dist: cloudpickle; platform_python_implementation == 'CPython' and extra == 'tests-no-zope' +Requires-Dist: hypothesis; extra == 'tests-no-zope' +Requires-Dist: mypy>=1.1.1; platform_python_implementation == 'CPython' and extra == 'tests-no-zope' +Requires-Dist: pympler; extra == 'tests-no-zope' +Requires-Dist: pytest-mypy-plugins; platform_python_implementation == 'CPython' and python_version < '3.11' and extra == 'tests-no-zope' +Requires-Dist: pytest-xdist[psutil]; extra == 'tests-no-zope' +Requires-Dist: pytest>=4.3.0; extra == 'tests-no-zope' +Description-Content-Type: text/markdown + +

+[attrs logo]

+ + +*attrs* is the Python package that will bring back the **joy** of **writing classes** by relieving you from the drudgery of implementing object protocols (aka [dunder methods](https://www.attrs.org/en/latest/glossary.html#term-dunder-methods)). +[Trusted by NASA](https://docs.github.com/en/account-and-profile/setting-up-and-managing-your-github-profile/customizing-your-profile/personalizing-your-profile#list-of-qualifying-repositories-for-mars-2020-helicopter-contributor-achievement) for Mars missions since 2020! + +Its main goal is to help you to write **concise** and **correct** software without slowing down your code. + + +## Sponsors + +*attrs* would not be possible without our [amazing sponsors](https://github.com/sponsors/hynek). +Especially those generously supporting us at the *The Organization* tier and higher: + +

+[sponsor logos]
+
+Please consider joining them to help make attrs’s maintenance more sustainable!

+
+
+## Example
+
+*attrs* gives you a class decorator and a way to declaratively define the attributes on that class:
+
+
+```pycon
+>>> from attrs import asdict, define, make_class, Factory
+
+>>> @define
+... class SomeClass:
+...     a_number: int = 42
+...     list_of_numbers: list[int] = Factory(list)
+...
+...     def hard_math(self, another_number):
+...         return self.a_number + sum(self.list_of_numbers) * another_number
+
+
+>>> sc = SomeClass(1, [1, 2, 3])
+>>> sc
+SomeClass(a_number=1, list_of_numbers=[1, 2, 3])
+
+>>> sc.hard_math(3)
+19
+>>> sc == SomeClass(1, [1, 2, 3])
+True
+>>> sc != SomeClass(2, [3, 2, 1])
+True
+
+>>> asdict(sc)
+{'a_number': 1, 'list_of_numbers': [1, 2, 3]}
+
+>>> SomeClass()
+SomeClass(a_number=42, list_of_numbers=[])
+
+>>> C = make_class("C", ["a", "b"])
+>>> C("foo", "bar")
+C(a='foo', b='bar')
+```
+
+After *declaring* your attributes, *attrs* gives you:
+
+- a concise and explicit overview of the class's attributes,
+- a nice human-readable `__repr__`,
+- equality-checking methods,
+- an initializer,
+- and much more,
+
+*without* writing dull boilerplate code again and again and *without* runtime performance penalties.
+
+**Hate type annotations**!?
+No problem!
+Types are entirely **optional** with *attrs*.
+Simply assign `attrs.field()` to the attributes instead of annotating them with types.
+
+---
+
+This example uses *attrs*'s modern APIs that have been introduced in version 20.1.0, and the *attrs* package import name that has been added in version 21.3.0.
+The classic APIs (`@attr.s`, `attr.ib`, plus their serious-business aliases) and the `attr` package import name will remain **indefinitely**.
+
+Please check out [*On The Core API Names*](https://www.attrs.org/en/latest/names.html) for a more in-depth explanation.
+
+
+## Data Classes
+
+On the tin, *attrs* might remind you of `dataclasses` (and indeed, `dataclasses` [are a descendant](https://hynek.me/articles/import-attrs/) of *attrs*).
+In practice it does a lot more and is more flexible.
+For instance it allows you to define [special handling of NumPy arrays for equality checks](https://www.attrs.org/en/stable/comparison.html#customization), or allows more ways to [plug into the initialization process](https://www.attrs.org/en/stable/init.html#hooking-yourself-into-initialization).
+
+For more details, please refer to our [comparison page](https://www.attrs.org/en/stable/why.html#data-classes).
+
+
+## Project Information
+
+- [**Changelog**](https://www.attrs.org/en/stable/changelog.html)
+- [**Documentation**](https://www.attrs.org/)
+- [**PyPI**](https://pypi.org/project/attrs/)
+- [**Source Code**](https://github.com/python-attrs/attrs)
+- [**Contributing**](https://github.com/python-attrs/attrs/blob/main/.github/CONTRIBUTING.md)
+- [**Third-party Extensions**](https://github.com/python-attrs/attrs/wiki/Extensions-to-attrs)
+- **License**: [MIT](https://www.attrs.org/en/latest/license.html)
+- **Get Help**: please use the `python-attrs` tag on [StackOverflow](https://stackoverflow.com/questions/tagged/python-attrs)
+- **Supported Python Versions**: 3.7 and later
+
+
+### *attrs* for Enterprise
+
+Available as part of the Tidelift Subscription.
+
+The maintainers of *attrs* and thousands of other packages are working with Tidelift to deliver commercial support and maintenance for the open source packages you use to build your applications.
+Save time, reduce risk, and improve code health, while paying the maintainers of the exact packages you use.
+[Learn more.](https://tidelift.com/subscription/pkg/pypi-attrs?utm_source=pypi-attrs&utm_medium=referral&utm_campaign=enterprise&utm_term=repo)
+
+## Release Information
+
+### Backwards-incompatible Changes
+
+- Python 3.6 has been dropped and packaging switched to static package data using [Hatch](https://hatch.pypa.io/latest/).
+  [#993](https://github.com/python-attrs/attrs/issues/993)
+
+
+### Deprecations
+
+- The support for *zope-interface* via the `attrs.validators.provides` validator is now deprecated and will be removed in, or after, April 2024.
+
+  The presence of a C-based package in our development dependencies has caused headaches and we're not under the impression it's used a lot.
+
+  Let us know if you're using it and we might publish it as a separate package.
+  [#1120](https://github.com/python-attrs/attrs/issues/1120)
+
+
+### Changes
+
+- `attrs.filters.exclude()` and `attrs.filters.include()` now support the passing of attribute names as strings.
+  [#1068](https://github.com/python-attrs/attrs/issues/1068)
+- `attrs.has()` and `attrs.fields()` now handle generic classes correctly.
+  [#1079](https://github.com/python-attrs/attrs/issues/1079)
+- Fix frozen exception classes when raised within e.g. `contextlib.contextmanager`, which mutates their `__traceback__` attributes.
+  [#1081](https://github.com/python-attrs/attrs/issues/1081)
+- `@frozen` now works with type checkers that implement [PEP-681](https://peps.python.org/pep-0681/) (ex. [pyright](https://github.com/microsoft/pyright/)).
+  [#1084](https://github.com/python-attrs/attrs/issues/1084)
+- Restored ability to unpickle instances pickled before 22.2.0.
+  [#1085](https://github.com/python-attrs/attrs/issues/1085)
+- `attrs.asdict()`'s and `attrs.astuple()`'s type stubs now accept the `attrs.AttrsInstance` protocol.
+  [#1090](https://github.com/python-attrs/attrs/issues/1090)
+- Fix slots class cellvar updating closure in CPython 3.8+ even when `__code__` introspection is unavailable.
+  [#1092](https://github.com/python-attrs/attrs/issues/1092)
+- `attrs.resolve_types()` can now pass `include_extras` to `typing.get_type_hints()` on Python 3.9+, and does so by default.
+  [#1099](https://github.com/python-attrs/attrs/issues/1099)
+- Added instructions for pull request workflow to `CONTRIBUTING.md`.
+  [#1105](https://github.com/python-attrs/attrs/issues/1105)
+- Added *type* parameter to `attrs.field()` function for use with `attrs.make_class()`.
+
+  Please note that type checkers ignore type metadata passed into `make_class()`, but it can be useful if you're wrapping _attrs_.
+  [#1107](https://github.com/python-attrs/attrs/issues/1107)
+- It is now possible for `attrs.evolve()` (and `attr.evolve()`) to change fields named `inst` if the instance is passed as a positional argument.
+
+  Passing the instance using the `inst` keyword argument is now deprecated and will be removed in, or after, April 2024.
+  [#1117](https://github.com/python-attrs/attrs/issues/1117)
+- `attrs.validators.optional()` now also accepts a tuple of validators (in addition to lists of validators).
+ [#1122](https://github.com/python-attrs/attrs/issues/1122) + + + +--- + +[Full changelog](https://www.attrs.org/en/stable/changelog.html) diff --git a/venv/lib/python3.10/site-packages/attrs-23.1.0.dist-info/RECORD b/venv/lib/python3.10/site-packages/attrs-23.1.0.dist-info/RECORD new file mode 100644 index 0000000..995cedc --- /dev/null +++ b/venv/lib/python3.10/site-packages/attrs-23.1.0.dist-info/RECORD @@ -0,0 +1,55 @@ +attr/__init__.py,sha256=dSRUBxRVTh-dXMrMR_oQ3ZISu2QSfhSZlik03Mjbu30,3241 +attr/__init__.pyi,sha256=rIK-2IakIoehVtqXK5l5rs9_fJNCbnYtKTS3cOAVJD8,17609 +attr/__pycache__/__init__.cpython-310.pyc,, +attr/__pycache__/_cmp.cpython-310.pyc,, +attr/__pycache__/_compat.cpython-310.pyc,, +attr/__pycache__/_config.cpython-310.pyc,, +attr/__pycache__/_funcs.cpython-310.pyc,, +attr/__pycache__/_make.cpython-310.pyc,, +attr/__pycache__/_next_gen.cpython-310.pyc,, +attr/__pycache__/_version_info.cpython-310.pyc,, +attr/__pycache__/converters.cpython-310.pyc,, +attr/__pycache__/exceptions.cpython-310.pyc,, +attr/__pycache__/filters.cpython-310.pyc,, +attr/__pycache__/setters.cpython-310.pyc,, +attr/__pycache__/validators.cpython-310.pyc,, +attr/_cmp.py,sha256=diMUQV-BIg7IjIb6-o1hswtnjrR4qdAUz_tE8gxS96w,4098 +attr/_cmp.pyi,sha256=sGQmOM0w3_K4-X8cTXR7g0Hqr290E8PTObA9JQxWQqc,399 +attr/_compat.py,sha256=d3cpIu60IbKrLywPni17RUEQY7MvkqqKifyzJ5H3zRU,5803 +attr/_config.py,sha256=5W8lgRePuIOWu1ZuqF1899e2CmXGc95-ipwTpF1cEU4,826 +attr/_funcs.py,sha256=YMtzHRSOnFvOVJ7at3E0K95A2lW26HDjby96TMTDbc0,16730 +attr/_make.py,sha256=JIyKV-HRh3IcHi-EvOj2dw6tRoqATlx2kBHFrrxZpk0,96979 +attr/_next_gen.py,sha256=8lB_S5SFgX2KsflksK8Zygk6XDXToQYtIlmgd37I9aY,6271 +attr/_typing_compat.pyi,sha256=XDP54TUn-ZKhD62TOQebmzrwFyomhUCoGRpclb6alRA,469 +attr/_version_info.py,sha256=exSqb3b5E-fMSsgZAlEw9XcLpEgobPORCZpcaEglAM4,2121 +attr/_version_info.pyi,sha256=x_M3L3WuB7r_ULXAWjx959udKQ4HLB8l-hsc1FDGNvk,209 +attr/converters.py,sha256=xfGVSPRgWGcym6N5FZM9fyfvCQePqFyApWeC5BXKvoM,3602 +attr/converters.pyi,sha256=jKlpHBEt6HVKJvgrMFJRrHq8p61GXg4-Nd5RZWKJX7M,406 +attr/exceptions.py,sha256=0ZTyH_mHmI9utwTTbBWrdS_ck5jps9R2M_fYJPXxH_U,1890 +attr/exceptions.pyi,sha256=zZq8bCUnKAy9mDtBEw42ZhPhAUIHoTKedDQInJD883M,539 +attr/filters.py,sha256=9pYvXqdg6mtLvKIIb56oALRMoHFnQTcGCO4EXTc1qyM,1470 +attr/filters.pyi,sha256=0mRCjLKxdcvAo0vD-Cr81HfRXXCp9j_cAXjOoAHtPGM,225 +attr/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +attr/setters.py,sha256=pbCZQ-pE6ZxjDqZfWWUhUFefXtpekIU4qS_YDMLPQ50,1400 +attr/setters.pyi,sha256=pyY8TVNBu8TWhOldv_RxHzmGvdgFQH981db70r0fn5I,567 +attr/validators.py,sha256=C2MQgX7ubL_cs5YzibWa8m0YxdMq5_3Ch3dVIzsLO-Y,20702 +attr/validators.pyi,sha256=167Dl9nt7NUhE9wht1I-buo039qyUT1nEUT_nKjSWr4,2580 +attrs-23.1.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +attrs-23.1.0.dist-info/METADATA,sha256=yglwUXko75Q-IJ6LmPVQ4Y99KJS3CPK0NW8ovXFYsDg,11348 +attrs-23.1.0.dist-info/RECORD,, +attrs-23.1.0.dist-info/WHEEL,sha256=EI2JsGydwUL5GP9t6kzZv7G3HDPi7FuZDDf9In6amRM,87 +attrs-23.1.0.dist-info/licenses/LICENSE,sha256=iCEVyV38KvHutnFPjsbVy8q_Znyv-HKfQkINpj9xTp8,1109 +attrs/__init__.py,sha256=9_5waVbFs7rLqtXZ73tNDrxhezyZ8VZeX4BbvQ3EeJw,1039 +attrs/__init__.pyi,sha256=s_ajQ_U14DOsOz0JbmAKDOi46B3v2PcdO0UAV1MY6Ek,2168 +attrs/__pycache__/__init__.cpython-310.pyc,, +attrs/__pycache__/converters.cpython-310.pyc,, +attrs/__pycache__/exceptions.cpython-310.pyc,, +attrs/__pycache__/filters.cpython-310.pyc,, +attrs/__pycache__/setters.cpython-310.pyc,, +attrs/__pycache__/validators.cpython-310.pyc,, 
+attrs/converters.py,sha256=fCBEdlYWcmI3sCnpUk2pz22GYtXzqTkp6NeOpdI64PY,70 +attrs/exceptions.py,sha256=SlDli6AY77f6ny-H7oy98OkQjsrw-D_supEuErIVYkE,70 +attrs/filters.py,sha256=dc_dNey29kH6KLU1mT2Dakq7tZ3kBfzEGwzOmDzw1F8,67 +attrs/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +attrs/setters.py,sha256=oKw51C72Hh45wTwYvDHJP9kbicxiMhMR4Y5GvdpKdHQ,67 +attrs/validators.py,sha256=4ag1SyVD2Hm3PYKiNG_NOtR_e7f81Hr6GiNl4YvXo4Q,70 diff --git a/venv/lib/python3.10/site-packages/attrs-23.1.0.dist-info/WHEEL b/venv/lib/python3.10/site-packages/attrs-23.1.0.dist-info/WHEEL new file mode 100644 index 0000000..58d0071 --- /dev/null +++ b/venv/lib/python3.10/site-packages/attrs-23.1.0.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: hatchling 1.14.0 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/venv/lib/python3.10/site-packages/attrs-23.1.0.dist-info/licenses/LICENSE b/venv/lib/python3.10/site-packages/attrs-23.1.0.dist-info/licenses/LICENSE new file mode 100644 index 0000000..2bd6453 --- /dev/null +++ b/venv/lib/python3.10/site-packages/attrs-23.1.0.dist-info/licenses/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Hynek Schlawack and the attrs contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/venv/lib/python3.10/site-packages/attrs/__init__.py b/venv/lib/python3.10/site-packages/attrs/__init__.py new file mode 100644 index 0000000..0c24815 --- /dev/null +++ b/venv/lib/python3.10/site-packages/attrs/__init__.py @@ -0,0 +1,65 @@ +# SPDX-License-Identifier: MIT + +from attr import ( + NOTHING, + Attribute, + AttrsInstance, + Factory, + _make_getattr, + assoc, + cmp_using, + define, + evolve, + field, + fields, + fields_dict, + frozen, + has, + make_class, + mutable, + resolve_types, + validate, +) +from attr._next_gen import asdict, astuple + +from . 
import converters, exceptions, filters, setters, validators + + +__all__ = [ + "__author__", + "__copyright__", + "__description__", + "__doc__", + "__email__", + "__license__", + "__title__", + "__url__", + "__version__", + "__version_info__", + "asdict", + "assoc", + "astuple", + "Attribute", + "AttrsInstance", + "cmp_using", + "converters", + "define", + "evolve", + "exceptions", + "Factory", + "field", + "fields_dict", + "fields", + "filters", + "frozen", + "has", + "make_class", + "mutable", + "NOTHING", + "resolve_types", + "setters", + "validate", + "validators", +] + +__getattr__ = _make_getattr(__name__) diff --git a/venv/lib/python3.10/site-packages/attrs/__init__.pyi b/venv/lib/python3.10/site-packages/attrs/__init__.pyi new file mode 100644 index 0000000..9372cfe --- /dev/null +++ b/venv/lib/python3.10/site-packages/attrs/__init__.pyi @@ -0,0 +1,67 @@ +from typing import ( + Any, + Callable, + Dict, + Mapping, + Optional, + Sequence, + Tuple, + Type, +) + +# Because we need to type our own stuff, we have to make everything from +# attr explicitly public too. +from attr import __author__ as __author__ +from attr import __copyright__ as __copyright__ +from attr import __description__ as __description__ +from attr import __email__ as __email__ +from attr import __license__ as __license__ +from attr import __title__ as __title__ +from attr import __url__ as __url__ +from attr import __version__ as __version__ +from attr import __version_info__ as __version_info__ +from attr import _FilterType +from attr import assoc as assoc +from attr import Attribute as Attribute +from attr import AttrsInstance as AttrsInstance +from attr import cmp_using as cmp_using +from attr import converters as converters +from attr import define as define +from attr import evolve as evolve +from attr import exceptions as exceptions +from attr import Factory as Factory +from attr import field as field +from attr import fields as fields +from attr import fields_dict as fields_dict +from attr import filters as filters +from attr import frozen as frozen +from attr import has as has +from attr import make_class as make_class +from attr import mutable as mutable +from attr import NOTHING as NOTHING +from attr import resolve_types as resolve_types +from attr import setters as setters +from attr import validate as validate +from attr import validators as validators + +# TODO: see definition of attr.asdict/astuple +def asdict( + inst: AttrsInstance, + recurse: bool = ..., + filter: Optional[_FilterType[Any]] = ..., + dict_factory: Type[Mapping[Any, Any]] = ..., + retain_collection_types: bool = ..., + value_serializer: Optional[ + Callable[[type, Attribute[Any], Any], Any] + ] = ..., + tuple_keys: bool = ..., +) -> Dict[str, Any]: ... + +# TODO: add support for returning NamedTuple from the mypy plugin +def astuple( + inst: AttrsInstance, + recurse: bool = ..., + filter: Optional[_FilterType[Any]] = ..., + tuple_factory: Type[Sequence[Any]] = ..., + retain_collection_types: bool = ..., +) -> Tuple[Any, ...]: ... 
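The attrs-23.1.0 files above vendor the modern `attrs` namespace, which re-exports the classic `attr` package (note the `define`, `field`, `asdict`, and `astuple` names in the `__init__` and the stub file). A minimal sketch of the API those re-exports provide, assuming only that attrs 23.1.0 is importable:

    import attrs

    # `define` builds a slotted class; `field` declares attributes.
    @attrs.define
    class Point:
        x: int = attrs.field(default=0)
        y: int = attrs.field(default=0)

    p = Point(1, 2)
    print(attrs.asdict(p))   # {'x': 1, 'y': 2}
    print(attrs.astuple(p))  # (1, 2)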
diff --git a/venv/lib/python3.10/site-packages/attrs/converters.py b/venv/lib/python3.10/site-packages/attrs/converters.py new file mode 100644 index 0000000..edfa8d3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/attrs/converters.py @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: MIT + +from attr.converters import * # noqa diff --git a/venv/lib/python3.10/site-packages/attrs/exceptions.py b/venv/lib/python3.10/site-packages/attrs/exceptions.py new file mode 100644 index 0000000..bd9efed --- /dev/null +++ b/venv/lib/python3.10/site-packages/attrs/exceptions.py @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: MIT + +from attr.exceptions import * # noqa diff --git a/venv/lib/python3.10/site-packages/attrs/filters.py b/venv/lib/python3.10/site-packages/attrs/filters.py new file mode 100644 index 0000000..5295900 --- /dev/null +++ b/venv/lib/python3.10/site-packages/attrs/filters.py @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: MIT + +from attr.filters import * # noqa diff --git a/venv/lib/python3.10/site-packages/setuptools-65.5.1.virtualenv b/venv/lib/python3.10/site-packages/attrs/py.typed similarity index 100% rename from venv/lib/python3.10/site-packages/setuptools-65.5.1.virtualenv rename to venv/lib/python3.10/site-packages/attrs/py.typed diff --git a/venv/lib/python3.10/site-packages/attrs/setters.py b/venv/lib/python3.10/site-packages/attrs/setters.py new file mode 100644 index 0000000..9b50770 --- /dev/null +++ b/venv/lib/python3.10/site-packages/attrs/setters.py @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: MIT + +from attr.setters import * # noqa diff --git a/venv/lib/python3.10/site-packages/attrs/validators.py b/venv/lib/python3.10/site-packages/attrs/validators.py new file mode 100644 index 0000000..ab2c9b3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/attrs/validators.py @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: MIT + +from attr.validators import * # noqa diff --git a/venv/lib/python3.10/site-packages/setuptools-65.5.1.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/coverage-7.3.0.dist-info/INSTALLER similarity index 100% rename from venv/lib/python3.10/site-packages/setuptools-65.5.1.dist-info/INSTALLER rename to venv/lib/python3.10/site-packages/coverage-7.3.0.dist-info/INSTALLER diff --git a/venv/lib/python3.10/site-packages/coverage-7.3.0.dist-info/LICENSE.txt b/venv/lib/python3.10/site-packages/coverage-7.3.0.dist-info/LICENSE.txt new file mode 100644 index 0000000..f433b1a --- /dev/null +++ b/venv/lib/python3.10/site-packages/coverage-7.3.0.dist-info/LICENSE.txt @@ -0,0 +1,177 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/venv/lib/python3.10/site-packages/coverage-7.3.0.dist-info/METADATA b/venv/lib/python3.10/site-packages/coverage-7.3.0.dist-info/METADATA new file mode 100644 index 0000000..99a1f74 --- /dev/null +++ b/venv/lib/python3.10/site-packages/coverage-7.3.0.dist-info/METADATA @@ -0,0 +1,195 @@ +Metadata-Version: 2.1 +Name: coverage +Version: 7.3.0 +Summary: Code coverage measurement for Python +Home-page: https://github.com/nedbat/coveragepy +Author: Ned Batchelder and 216 others +Author-email: ned@nedbatchelder.com +License: Apache-2.0 +Project-URL: Documentation, https://coverage.readthedocs.io/en/7.3.0 +Project-URL: Funding, https://tidelift.com/subscription/pkg/pypi-coverage?utm_source=pypi-coverage&utm_medium=referral&utm_campaign=pypi +Project-URL: Issues, https://github.com/nedbat/coveragepy/issues +Project-URL: Mastodon, https://hachyderm.io/@coveragepy +Project-URL: Mastodon (nedbat), https://hachyderm.io/@nedbat +Keywords: code coverage testing +Classifier: Environment :: Console +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Software Development :: Quality Assurance +Classifier: Topic :: Software Development :: Testing +Classifier: Development Status :: 5 - Production/Stable +Requires-Python: >=3.8 +Description-Content-Type: text/x-rst +License-File: LICENSE.txt +Provides-Extra: toml +Requires-Dist: tomli ; (python_full_version <= "3.11.0a6") and extra == 'toml' + +.. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +.. For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +=========== +Coverage.py +=========== + +Code coverage testing for Python. + +.. image:: https://raw.githubusercontent.com/vshymanskyy/StandWithUkraine/main/banner2-direct.svg + :target: https://vshymanskyy.github.io/StandWithUkraine + :alt: Stand with Ukraine + +------------- + +| |kit| |license| |versions| +| |test-status| |quality-status| |docs| |metacov| +| |tidelift| |sponsor| |stars| |mastodon-coveragepy| |mastodon-nedbat| + +Coverage.py measures code coverage, typically during test execution. It uses +the code analysis tools and tracing hooks provided in the Python standard +library to determine which lines are executable, and which have been executed. + +Coverage.py runs on these versions of Python: + +.. PYVERSIONS + +* CPython 3.8 through 3.12.0rc1 +* PyPy3 versions 3.8 through 3.10. + +Documentation is on `Read the Docs`_. Code repository and issue tracker are on +`GitHub`_. + +.. _Read the Docs: https://coverage.readthedocs.io/en/7.3.0/ +.. _GitHub: https://github.com/nedbat/coveragepy + +**New in 7.x:** +dropped support for Python 3.7; +added ``Coverage.collect()`` context manager; +improved data combining; +``[run] exclude_also`` setting; +``report --format=``; +type annotations. 
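The README above lists the 7.x additions, including the `Coverage.collect()` context manager. A minimal sketch of the programmatic API it refers to (the `Coverage` class is re-exported from `coverage/__init__.py` later in this diff; `my_module.main` is a hypothetical function under measurement):

    import coverage

    cov = coverage.Coverage(branch=True)
    cov.start()
    try:
        import my_module  # hypothetical module to measure
        my_module.main()
    finally:
        cov.stop()
        cov.save()

    # On 7.x, `with cov.collect():` can replace the start()/stop() pair.
    cov.report(show_missing=True)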
+ +**New in 6.x:** +dropped support for Python 2.7, 3.5, and 3.6; +write data on SIGTERM; +added support for 3.10 match/case statements. + + +For Enterprise +-------------- + +.. |tideliftlogo| image:: https://nedbatchelder.com/pix/Tidelift_Logo_small.png + :alt: Tidelift + :target: https://tidelift.com/subscription/pkg/pypi-coverage?utm_source=pypi-coverage&utm_medium=referral&utm_campaign=readme + +.. list-table:: + :widths: 10 100 + + * - |tideliftlogo| + - `Available as part of the Tidelift Subscription. <https://tidelift.com/subscription/pkg/pypi-coverage?utm_source=pypi-coverage&utm_medium=referral&utm_campaign=readme>`_ + Coverage and thousands of other packages are working with + Tidelift to deliver one enterprise subscription that covers all of the open + source you use. If you want the flexibility of open source and the confidence + of commercial-grade software, this is for you. + `Learn more. <https://tidelift.com/subscription/pkg/pypi-coverage?utm_source=pypi-coverage&utm_medium=referral&utm_campaign=readme>`_ + + +Getting Started +--------------- + +Looking to run ``coverage`` on your test suite? See the `Quick Start section`_ +of the docs. + +.. _Quick Start section: https://coverage.readthedocs.io/en/7.3.0/#quick-start + + +Change history +-------------- + +The complete history of changes is on the `change history page`_. + +.. _change history page: https://coverage.readthedocs.io/en/7.3.0/changes.html + + +Code of Conduct +--------------- + +Everyone participating in the coverage.py project is expected to treat other +people with respect and to follow the guidelines articulated in the `Python +Community Code of Conduct`_. + +.. _Python Community Code of Conduct: https://www.python.org/psf/codeofconduct/ + + +Contributing +------------ + +Found a bug? Want to help improve the code or documentation? See the +`Contributing section`_ of the docs. + +.. _Contributing section: https://coverage.readthedocs.io/en/7.3.0/contributing.html + + +Security +-------- + +To report a security vulnerability, please use the `Tidelift security +contact`_. Tidelift will coordinate the fix and disclosure. + +.. _Tidelift security contact: https://tidelift.com/security + + +License +------- + +Licensed under the `Apache 2.0 License`_. For details, see `NOTICE.txt`_. + +.. _Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0 +.. _NOTICE.txt: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + + +.. |test-status| image:: https://github.com/nedbat/coveragepy/actions/workflows/testsuite.yml/badge.svg?branch=master&event=push + :target: https://github.com/nedbat/coveragepy/actions/workflows/testsuite.yml + :alt: Test suite status +.. |quality-status| image:: https://github.com/nedbat/coveragepy/actions/workflows/quality.yml/badge.svg?branch=master&event=push + :target: https://github.com/nedbat/coveragepy/actions/workflows/quality.yml + :alt: Quality check status +.. |docs| image:: https://readthedocs.org/projects/coverage/badge/?version=latest&style=flat + :target: https://coverage.readthedocs.io/en/7.3.0/ + :alt: Documentation +.. |kit| image:: https://badge.fury.io/py/coverage.svg + :target: https://pypi.org/project/coverage/ + :alt: PyPI status +.. |versions| image:: https://img.shields.io/pypi/pyversions/coverage.svg?logo=python&logoColor=FBE072 + :target: https://pypi.org/project/coverage/ + :alt: Python versions supported +.. |license| image:: https://img.shields.io/pypi/l/coverage.svg + :target: https://pypi.org/project/coverage/ + :alt: License +.. |metacov| image:: https://img.shields.io/endpoint?url=https://gist.githubusercontent.com/nedbat/8c6980f77988a327348f9b02bbaf67f5/raw/metacov.json + :target: https://nedbat.github.io/coverage-reports/latest.html + :alt: Coverage reports +.. 
|tidelift| image:: https://tidelift.com/badges/package/pypi/coverage + :target: https://tidelift.com/subscription/pkg/pypi-coverage?utm_source=pypi-coverage&utm_medium=referral&utm_campaign=readme + :alt: Tidelift +.. |stars| image:: https://img.shields.io/github/stars/nedbat/coveragepy.svg?logo=github + :target: https://github.com/nedbat/coveragepy/stargazers + :alt: GitHub stars +.. |mastodon-nedbat| image:: https://img.shields.io/badge/dynamic/json?style=flat&labelColor=450657&logo=mastodon&logoColor=ffffff&link=https%3A%2F%2Fhachyderm.io%2F%40nedbat&url=https%3A%2F%2Fhachyderm.io%2Fusers%2Fnedbat%2Ffollowers.json&query=totalItems&label=@nedbat + :target: https://hachyderm.io/@nedbat + :alt: nedbat on Mastodon +.. |mastodon-coveragepy| image:: https://img.shields.io/badge/dynamic/json?style=flat&labelColor=450657&logo=mastodon&logoColor=ffffff&link=https%3A%2F%2Fhachyderm.io%2F%40coveragepy&url=https%3A%2F%2Fhachyderm.io%2Fusers%2Fcoveragepy%2Ffollowers.json&query=totalItems&label=@coveragepy + :target: https://hachyderm.io/@coveragepy + :alt: coveragepy on Mastodon +.. |sponsor| image:: https://img.shields.io/badge/%E2%9D%A4-Sponsor%20me-brightgreen?style=flat&logo=GitHub + :target: https://github.com/sponsors/nedbat + :alt: Sponsor me on GitHub diff --git a/venv/lib/python3.10/site-packages/coverage-7.3.0.dist-info/RECORD b/venv/lib/python3.10/site-packages/coverage-7.3.0.dist-info/RECORD new file mode 100644 index 0000000..fdf1073 --- /dev/null +++ b/venv/lib/python3.10/site-packages/coverage-7.3.0.dist-info/RECORD @@ -0,0 +1,100 @@ +../../../bin/coverage,sha256=jtoHEjX75RbgUlmYclOCuUVa9JqUvosqUHnC_eDkzVk,256 +../../../bin/coverage-3.10,sha256=jtoHEjX75RbgUlmYclOCuUVa9JqUvosqUHnC_eDkzVk,256 +../../../bin/coverage3,sha256=jtoHEjX75RbgUlmYclOCuUVa9JqUvosqUHnC_eDkzVk,256 +coverage-7.3.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +coverage-7.3.0.dist-info/LICENSE.txt,sha256=DVQuDIgE45qn836wDaWnYhSdxoLXgpRRKH4RuTjpRZQ,10174 +coverage-7.3.0.dist-info/METADATA,sha256=Db1WNtYA1j-daIH-uA5VCyXR25GKL4HCwbM4Yafk2wg,8077 +coverage-7.3.0.dist-info/RECORD,, +coverage-7.3.0.dist-info/WHEEL,sha256=mxhpChXwyh4Paep436QatdUhtGeLWNqwnqlHqv4854w,225 +coverage-7.3.0.dist-info/entry_points.txt,sha256=-SeH-nlgTLEWW1cmyqqCQneSw9cKYQOUHBXXYO-OWdY,123 +coverage-7.3.0.dist-info/top_level.txt,sha256=BjhyiIvusb5OJkqCXjRncTF3soKF-mDOby-hxkWwwv0,9 +coverage/__init__.py,sha256=-uTrepS1gHgkWvFe3cBcPBLRFv0iCWSp7-CgB0Bwm-U,1284 +coverage/__main__.py,sha256=IOd5fAsdpJd1t8ZyrkGcFk-eqMd3Sdc2qbhNb8YQBW0,257 +coverage/__pycache__/__init__.cpython-310.pyc,, +coverage/__pycache__/__main__.cpython-310.pyc,, +coverage/__pycache__/annotate.cpython-310.pyc,, +coverage/__pycache__/bytecode.cpython-310.pyc,, +coverage/__pycache__/cmdline.cpython-310.pyc,, +coverage/__pycache__/collector.cpython-310.pyc,, +coverage/__pycache__/config.cpython-310.pyc,, +coverage/__pycache__/context.cpython-310.pyc,, +coverage/__pycache__/control.cpython-310.pyc,, +coverage/__pycache__/data.cpython-310.pyc,, +coverage/__pycache__/debug.cpython-310.pyc,, +coverage/__pycache__/disposition.cpython-310.pyc,, +coverage/__pycache__/env.cpython-310.pyc,, +coverage/__pycache__/exceptions.cpython-310.pyc,, +coverage/__pycache__/execfile.cpython-310.pyc,, +coverage/__pycache__/files.cpython-310.pyc,, +coverage/__pycache__/html.cpython-310.pyc,, +coverage/__pycache__/inorout.cpython-310.pyc,, +coverage/__pycache__/jsonreport.cpython-310.pyc,, +coverage/__pycache__/lcovreport.cpython-310.pyc,, 
+coverage/__pycache__/misc.cpython-310.pyc,, +coverage/__pycache__/multiproc.cpython-310.pyc,, +coverage/__pycache__/numbits.cpython-310.pyc,, +coverage/__pycache__/parser.cpython-310.pyc,, +coverage/__pycache__/phystokens.cpython-310.pyc,, +coverage/__pycache__/plugin.cpython-310.pyc,, +coverage/__pycache__/plugin_support.cpython-310.pyc,, +coverage/__pycache__/python.cpython-310.pyc,, +coverage/__pycache__/pytracer.cpython-310.pyc,, +coverage/__pycache__/report.cpython-310.pyc,, +coverage/__pycache__/report_core.cpython-310.pyc,, +coverage/__pycache__/results.cpython-310.pyc,, +coverage/__pycache__/sqldata.cpython-310.pyc,, +coverage/__pycache__/sqlitedb.cpython-310.pyc,, +coverage/__pycache__/templite.cpython-310.pyc,, +coverage/__pycache__/tomlconfig.cpython-310.pyc,, +coverage/__pycache__/types.cpython-310.pyc,, +coverage/__pycache__/version.cpython-310.pyc,, +coverage/__pycache__/xmlreport.cpython-310.pyc,, +coverage/annotate.py,sha256=yIfVmmRAOFfyMy53DelUH6MStYdoKMWZKcTk0A9rjJI,3758 +coverage/bytecode.py,sha256=m3amujrggn0HwgLn5kjiOsBats7Jhs6uufLhg7f8O1Y,713 +coverage/cmdline.py,sha256=rxkw4ajjh25Odurt2aLg43R1azUftDFUkzO2OTH9FXo,34427 +coverage/collector.py,sha256=VJc9lBFDB7nPoghqlzgoC-MqF_ZrO3OGF5RCdvU9rsw,20558 +coverage/config.py,sha256=YnZA4nCXz0aTArsHDaz1EgKL-mNOv6CFgoBb7vm6O7s,22037 +coverage/context.py,sha256=IDv9gczpF6XD5l-VlDSEunfO9x-Xx3F0QCpRRS2anAA,2483 +coverage/control.py,sha256=u5DMVSAZEqPkrBueeTTwwzvYyXzrGy8m91rM2jauHgE,52303 +coverage/data.py,sha256=eze8anSCtGEMBQOyF_1rKq4s-2pTO5f_ADxKVcyZgmY,7508 +coverage/debug.py,sha256=9D8v7i3qjawi-YJRidgCsnO0U8awBghkwSUGrEXmikQ,17991 +coverage/disposition.py,sha256=PVOcxDbrUOJG0ZdDIC9EqkWWQfB389S5KUeW7dul3aU,1916 +coverage/env.py,sha256=c5yhR3V4LAMKsVE06-7j8TSfms1uwcx6RHSNOl59-1c,5108 +coverage/exceptions.py,sha256=HBjfwbaoz3gxHLg5efKT2-zSoeZMEaqyQA0SPxCHE0c,1362 +coverage/execfile.py,sha256=Sd44KxSzz4sAmCHyVyJkK-vGQfkJeEPrCT0gI9sToQE,12133 +coverage/files.py,sha256=NrMnky5N5nKXnzGsYvH7hQlDtXbcN9I9huXvfRuyx0g,19387 +coverage/fullcoverage/__pycache__/encodings.cpython-310.pyc,, +coverage/fullcoverage/encodings.py,sha256=wPUfMRaz0PXQt57irIxf1pDsw-V_NmoGHtqW1NMS1Rk,2423 +coverage/html.py,sha256=VCT4al0AJrDmFBKpYLg0VXsGFvWJ-L6hM-lAI0XQ-40,23128 +coverage/htmlfiles/coverage_html.js,sha256=uf5vr4EbDhxoMpoA9FHwYrdhB8sfm6Ee7ngtYpSLGn8,21865 +coverage/htmlfiles/favicon_32.png,sha256=vIEA-odDwRvSQ-syWfSwEnWGUWEv2b-Tv4tzTRfwJWE,1732 +coverage/htmlfiles/index.html,sha256=IqBoS7wvCSOAQF_qk7dhdMcSM_bBHr1Q5yTwpH0seP0,5400 +coverage/htmlfiles/keybd_closed.png,sha256=fZv4rmY3DkNJtPQjrFJ5UBOE5DdNof3mdeCZWC7TOoo,9004 +coverage/htmlfiles/keybd_open.png,sha256=SXXQLo9_roBN2LdSTXnW_TyfwiUkYBxt61no6s-o06w,9003 +coverage/htmlfiles/pyfile.html,sha256=46x1lmPrZVqsZ2G9PIPf22JWE7Mbjf-kWAhqpTkIRDM,6434 +coverage/htmlfiles/style.css,sha256=KcwkN3QW5H66ufkhGSYazOrt_aKOss9QzABr2A8Kqi0,12387 +coverage/htmlfiles/style.scss,sha256=8H3sK0wD-ORqAMGtaNtOn-q7SJ7mx7GUkuVU8rsTMXs,17356 +coverage/inorout.py,sha256=m5iXKyYJs9xzJem02IN9jWRQ-rWvXG6K4yfMeSkIJPo,23900 +coverage/jsonreport.py,sha256=DPKNmoSj1NOf2KoKdkhYtyPZgc75jQe7MWFaatrHCO0,4754 +coverage/lcovreport.py,sha256=LeAvfUw1LN-ZSL1yEzJOjMPzVJ9CNSoEJ8m0jXstMDI,4939 +coverage/misc.py,sha256=gd3ZvAV_4gqTl2VoCp0jCmchbwN_mYpF4J6bqizFhUg,12186 +coverage/multiproc.py,sha256=KnrACBtYd6c7GC4VlvESR3WWXfUQD5sKqLkmuDRawZw,3846 +coverage/numbits.py,sha256=z9nFCtTcriZqCUGLzhC3HjAooBn0a6JblrUp-wQa0T8,4669 +coverage/parser.py,sha256=96n4hqkrEVwuKqLJDrYQ4PLbqaFIu5HP4XsXHFy4E0U,56331 
+coverage/phystokens.py,sha256=ZIMGY5TjKM9eLfd1c3GaKmkW3ga42L8fumf-OYl8FlA,7805 +coverage/plugin.py,sha256=BajC366WA-FNd1_ggMKWZ-HQGm6cSoxo1nwrFaTs5IQ,19430 +coverage/plugin_support.py,sha256=XdMiwn8y8D9l6exBNqKMKxmrd3nqtCd3xmdh7FMgYZ8,10351 +coverage/py.typed,sha256=_B1ZXy5hKJZ2Zo3jWSXjqy1SO3rnLdZsUULnKGTplfc,72 +coverage/python.py,sha256=Ha81x4zgwx101N7HJJL79aao4324Kif4Lzc-j-hNy_M,8066 +coverage/pytracer.py,sha256=GXLBl3eFFUKafKIaR-WmgeRhPxSX7U-WA0j8k7nq-FA,14342 +coverage/report.py,sha256=ti86vhqPhZwRT0A0nkJnBltSZfgbmJv0E6_dF18zbzQ,10623 +coverage/report_core.py,sha256=yIDtJYvS67mq_IGnd7rOIRN7dJOWiYLCeLs1DHngw4Y,4077 +coverage/results.py,sha256=rxq_m3XfadHJDBKceDXa-DpFSLHfEHb3Y8cpcbuQLuk,13391 +coverage/sqldata.py,sha256=rK_0UaxiQWzlEVezOJrtJKL3uinW6QKMRCnqiYrPQe0,43333 +coverage/sqlitedb.py,sha256=3BbdanB0ZnAEV0xnAdIJwmoCjswZVfvflfvqiqk4D9U,8927 +coverage/templite.py,sha256=DzwnfIeSxcgFUTQ-cqRapFo7554QaQwe9WlQvoh5L4E,10952 +coverage/tomlconfig.py,sha256=K4V6MmN9gODm8KxUehA8iiagyXStt9U5JRfBLPoH0qA,7569 +coverage/tracer.cpython-310-x86_64-linux-gnu.so,sha256=gSg1TnAUXkqA5r4rv43OdDVgQLFRg7qxYQS1Pm_3TeE,107032 +coverage/types.py,sha256=A9Zu_wuCJ-zK_XCb-_WynqfDOR8CAXlPDXf4xM4_lz8,5367 +coverage/version.py,sha256=mEflI3LoQ6VGU6C2NTEgb-SMeDAVckOvddB6okxX9G8,1431 +coverage/xmlreport.py,sha256=Ly6a4elBIBqLRHwvmu7Rvft2EvgQtNpyv-G5kYfBf5c,9795 diff --git a/venv/lib/python3.10/site-packages/coverage-7.3.0.dist-info/WHEEL b/venv/lib/python3.10/site-packages/coverage-7.3.0.dist-info/WHEEL new file mode 100644 index 0000000..7b8ba7b --- /dev/null +++ b/venv/lib/python3.10/site-packages/coverage-7.3.0.dist-info/WHEEL @@ -0,0 +1,8 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.41.1) +Root-Is-Purelib: false +Tag: cp310-cp310-manylinux_2_5_x86_64 +Tag: cp310-cp310-manylinux1_x86_64 +Tag: cp310-cp310-manylinux_2_17_x86_64 +Tag: cp310-cp310-manylinux2014_x86_64 + diff --git a/venv/lib/python3.10/site-packages/coverage-7.3.0.dist-info/entry_points.txt b/venv/lib/python3.10/site-packages/coverage-7.3.0.dist-info/entry_points.txt new file mode 100644 index 0000000..36708a5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/coverage-7.3.0.dist-info/entry_points.txt @@ -0,0 +1,4 @@ +[console_scripts] +coverage = coverage.cmdline:main +coverage-3.10 = coverage.cmdline:main +coverage3 = coverage.cmdline:main diff --git a/venv/lib/python3.10/site-packages/coverage-7.3.0.dist-info/top_level.txt b/venv/lib/python3.10/site-packages/coverage-7.3.0.dist-info/top_level.txt new file mode 100644 index 0000000..4ebc8ae --- /dev/null +++ b/venv/lib/python3.10/site-packages/coverage-7.3.0.dist-info/top_level.txt @@ -0,0 +1 @@ +coverage diff --git a/venv/lib/python3.10/site-packages/coverage/__init__.py b/venv/lib/python3.10/site-packages/coverage/__init__.py new file mode 100644 index 0000000..e3ed232 --- /dev/null +++ b/venv/lib/python3.10/site-packages/coverage/__init__.py @@ -0,0 +1,40 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +""" +Code coverage measurement for Python. + +Ned Batchelder +https://coverage.readthedocs.io + +""" + +# mypy's convention is that "import as" names are public from the module. +# We import names as themselves to indicate that. Pylint sees it as pointless, +# so disable its warning. 
+# pylint: disable=useless-import-alias + +from coverage.version import ( + __version__ as __version__, + version_info as version_info, +) + +from coverage.control import ( + Coverage as Coverage, + process_startup as process_startup, +) +from coverage.data import CoverageData as CoverageData +from coverage.exceptions import CoverageException as CoverageException +from coverage.plugin import ( + CoveragePlugin as CoveragePlugin, + FileReporter as FileReporter, + FileTracer as FileTracer, +) + +# Backward compatibility. +coverage = Coverage + +# On Windows, we encode and decode deep enough that something goes wrong and +# the encodings.utf_8 module is loaded and then unloaded, I don't know why. +# Adding a reference here prevents it from being unloaded. Yuk. +import encodings.utf_8 # pylint: disable=wrong-import-position, wrong-import-order diff --git a/venv/lib/python3.10/site-packages/coverage/__main__.py b/venv/lib/python3.10/site-packages/coverage/__main__.py new file mode 100644 index 0000000..79aa4e2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/coverage/__main__.py @@ -0,0 +1,8 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""Coverage.py's main entry point.""" + +import sys +from coverage.cmdline import main +sys.exit(main()) diff --git a/venv/lib/python3.10/site-packages/coverage/annotate.py b/venv/lib/python3.10/site-packages/coverage/annotate.py new file mode 100644 index 0000000..2ef89c9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/coverage/annotate.py @@ -0,0 +1,114 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""Source file annotation for coverage.py.""" + +from __future__ import annotations + +import os +import re + +from typing import Iterable, Optional, TYPE_CHECKING + +from coverage.files import flat_rootname +from coverage.misc import ensure_dir, isolate_module +from coverage.plugin import FileReporter +from coverage.report_core import get_analysis_to_report +from coverage.results import Analysis +from coverage.types import TMorf + +if TYPE_CHECKING: + from coverage import Coverage + +os = isolate_module(os) + + +class AnnotateReporter: + """Generate annotated source files showing line coverage. + + This reporter creates annotated copies of the measured source files. Each + .py file is copied as a .py,cover file, with a left-hand margin annotating + each line:: + + > def h(x): + - if 0: #pragma: no cover + - pass + > if x == 1: + ! a = 1 + > else: + > a = 2 + + > h(2) + + Executed lines use ">", lines not executed use "!", lines excluded from + consideration use "-". + + """ + + def __init__(self, coverage: Coverage) -> None: + self.coverage = coverage + self.config = self.coverage.config + self.directory: Optional[str] = None + + blank_re = re.compile(r"\s*(#|$)") + else_re = re.compile(r"\s*else\s*:\s*(#|$)") + + def report(self, morfs: Optional[Iterable[TMorf]], directory: Optional[str] = None) -> None: + """Run the report. + + See `coverage.report()` for arguments. + + """ + self.directory = directory + self.coverage.get_data() + for fr, analysis in get_analysis_to_report(self.coverage, morfs): + self.annotate_file(fr, analysis) + + def annotate_file(self, fr: FileReporter, analysis: Analysis) -> None: + """Annotate a single file. + + `fr` is the FileReporter for the file to annotate. 
+ + """ + statements = sorted(analysis.statements) + missing = sorted(analysis.missing) + excluded = sorted(analysis.excluded) + + if self.directory: + ensure_dir(self.directory) + dest_file = os.path.join(self.directory, flat_rootname(fr.relative_filename())) + if dest_file.endswith("_py"): + dest_file = dest_file[:-3] + ".py" + dest_file += ",cover" + else: + dest_file = fr.filename + ",cover" + + with open(dest_file, "w", encoding="utf-8") as dest: + i = j = 0 + covered = True + source = fr.source() + for lineno, line in enumerate(source.splitlines(True), start=1): + while i < len(statements) and statements[i] < lineno: + i += 1 + while j < len(missing) and missing[j] < lineno: + j += 1 + if i < len(statements) and statements[i] == lineno: + covered = j >= len(missing) or missing[j] > lineno + if self.blank_re.match(line): + dest.write(" ") + elif self.else_re.match(line): + # Special logic for lines containing only "else:". + if j >= len(missing): + dest.write("> ") + elif statements[i] == missing[j]: + dest.write("! ") + else: + dest.write("> ") + elif lineno in excluded: + dest.write("- ") + elif covered: + dest.write("> ") + else: + dest.write("! ") + + dest.write(line) diff --git a/venv/lib/python3.10/site-packages/coverage/bytecode.py b/venv/lib/python3.10/site-packages/coverage/bytecode.py new file mode 100644 index 0000000..2cad4f9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/coverage/bytecode.py @@ -0,0 +1,22 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""Bytecode manipulation for coverage.py""" + +from __future__ import annotations + +from types import CodeType +from typing import Iterator + + +def code_objects(code: CodeType) -> Iterator[CodeType]: + """Iterate over all the code objects in `code`.""" + stack = [code] + while stack: + # We're going to return the code object on the stack, but first + # push its children for later returning. + code = stack.pop() + for c in code.co_consts: + if isinstance(c, CodeType): + stack.append(c) + yield code diff --git a/venv/lib/python3.10/site-packages/coverage/cmdline.py b/venv/lib/python3.10/site-packages/coverage/cmdline.py new file mode 100644 index 0000000..55f6c79 --- /dev/null +++ b/venv/lib/python3.10/site-packages/coverage/cmdline.py @@ -0,0 +1,1012 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""Command-line support for coverage.py.""" + +from __future__ import annotations + +import glob +import optparse # pylint: disable=deprecated-module +import os +import os.path +import shlex +import sys +import textwrap +import traceback + +from typing import cast, Any, List, NoReturn, Optional, Tuple + +import coverage +from coverage import Coverage +from coverage import env +from coverage.collector import HAS_CTRACER +from coverage.config import CoverageConfig +from coverage.control import DEFAULT_DATAFILE +from coverage.data import combinable_files, debug_data_file +from coverage.debug import info_header, short_stack, write_formatted_info +from coverage.exceptions import _BaseCoverageException, _ExceptionDuringRun, NoSource +from coverage.execfile import PyRunner +from coverage.results import Numbers, should_fail_under +from coverage.version import __url__ + +# When adding to this file, alphabetization is important. Look for +# "alphabetize" comments throughout. 
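Before the option definitions below, a brief aside on the `code_objects` helper in bytecode.py above: it walks a compiled module and yields every nested code object using an explicit stack rather than recursion. A minimal usage sketch (the exact set of yielded objects varies by Python version; 3.12, for example, inlines comprehensions):

    import textwrap
    from coverage.bytecode import code_objects

    source = textwrap.dedent("""
        def outer():
            def inner():
                return [x * x for x in range(3)]
            return inner
    """)
    code = compile(source, "<demo>", "exec")
    for co in code_objects(code):
        print(co.co_name)  # e.g. <module>, outer, inner, <listcomp>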
+ +class Opts: + """A namespace class for individual options we'll build parsers from.""" + + # Keep these entries alphabetized (roughly) by the option name as it + # appears on the command line. + + append = optparse.make_option( + "-a", "--append", action="store_true", + help="Append coverage data to .coverage, otherwise it starts clean each time.", + ) + keep = optparse.make_option( + "", "--keep", action="store_true", + help="Keep original coverage files, otherwise they are deleted.", + ) + branch = optparse.make_option( + "", "--branch", action="store_true", + help="Measure branch coverage in addition to statement coverage.", + ) + concurrency = optparse.make_option( + "", "--concurrency", action="store", metavar="LIBS", + help=( + "Properly measure code using a concurrency library. " + + "Valid values are: {}, or a comma-list of them." + ).format(", ".join(sorted(CoverageConfig.CONCURRENCY_CHOICES))), + ) + context = optparse.make_option( + "", "--context", action="store", metavar="LABEL", + help="The context label to record for this coverage run.", + ) + contexts = optparse.make_option( + "", "--contexts", action="store", metavar="REGEX1,REGEX2,...", + help=( + "Only display data from lines covered in the given contexts. " + + "Accepts Python regexes, which must be quoted." + ), + ) + combine_datafile = optparse.make_option( + "", "--data-file", action="store", metavar="DATAFILE", + help=( + "Base name of the data files to operate on. " + + "Defaults to '.coverage'. [env: COVERAGE_FILE]" + ), + ) + input_datafile = optparse.make_option( + "", "--data-file", action="store", metavar="INFILE", + help=( + "Read coverage data for report generation from this file. " + + "Defaults to '.coverage'. [env: COVERAGE_FILE]" + ), + ) + output_datafile = optparse.make_option( + "", "--data-file", action="store", metavar="OUTFILE", + help=( + "Write the recorded coverage data to this file. " + + "Defaults to '.coverage'. [env: COVERAGE_FILE]" + ), + ) + debug = optparse.make_option( + "", "--debug", action="store", metavar="OPTS", + help="Debug options, separated by commas. [env: COVERAGE_DEBUG]", + ) + directory = optparse.make_option( + "-d", "--directory", action="store", metavar="DIR", + help="Write the output files to DIR.", + ) + fail_under = optparse.make_option( + "", "--fail-under", action="store", metavar="MIN", type="float", + help="Exit with a status of 2 if the total coverage is less than MIN.", + ) + format = optparse.make_option( + "", "--format", action="store", metavar="FORMAT", + help="Output format, either text (default), markdown, or total.", + ) + help = optparse.make_option( + "-h", "--help", action="store_true", + help="Get help on this command.", + ) + ignore_errors = optparse.make_option( + "-i", "--ignore-errors", action="store_true", + help="Ignore errors while reading source files.", + ) + include = optparse.make_option( + "", "--include", action="store", metavar="PAT1,PAT2,...", + help=( + "Include only files whose paths match one of these patterns. " + + "Accepts shell-style wildcards, which must be quoted." + ), + ) + pylib = optparse.make_option( + "-L", "--pylib", action="store_true", + help=( + "Measure coverage even inside the Python installed library, " + + "which isn't done by default." 
+ ), + ) + show_missing = optparse.make_option( + "-m", "--show-missing", action="store_true", + help="Show line numbers of statements in each module that weren't executed.", + ) + module = optparse.make_option( + "-m", "--module", action="store_true", + help=( + "<pyfile> is an importable Python module, not a script path, " + + "to be run as 'python -m' would run it." + ), + ) + omit = optparse.make_option( + "", "--omit", action="store", metavar="PAT1,PAT2,...", + help=( + "Omit files whose paths match one of these patterns. " + + "Accepts shell-style wildcards, which must be quoted." + ), + ) + output_xml = optparse.make_option( + "-o", "", action="store", dest="outfile", metavar="OUTFILE", + help="Write the XML report to this file. Defaults to 'coverage.xml'", + ) + output_json = optparse.make_option( + "-o", "", action="store", dest="outfile", metavar="OUTFILE", + help="Write the JSON report to this file. Defaults to 'coverage.json'", + ) + output_lcov = optparse.make_option( + "-o", "", action="store", dest="outfile", metavar="OUTFILE", + help="Write the LCOV report to this file. Defaults to 'coverage.lcov'", + ) + json_pretty_print = optparse.make_option( + "", "--pretty-print", action="store_true", + help="Format the JSON for human readers.", + ) + parallel_mode = optparse.make_option( + "-p", "--parallel-mode", action="store_true", + help=( + "Append the machine name, process id and random number to the " + + "data file name to simplify collecting data from " + + "many processes." + ), + ) + precision = optparse.make_option( + "", "--precision", action="store", metavar="N", type=int, + help=( + "Number of digits after the decimal point to display for " + + "reported coverage percentages." + ), + ) + quiet = optparse.make_option( + "-q", "--quiet", action="store_true", + help="Don't print messages about what is happening.", + ) + rcfile = optparse.make_option( + "", "--rcfile", action="store", + help=( + "Specify configuration file. " + + "By default '.coveragerc', 'setup.cfg', 'tox.ini', and " + + "'pyproject.toml' are tried. [env: COVERAGE_RCFILE]" + ), + ) + show_contexts = optparse.make_option( + "--show-contexts", action="store_true", + help="Show contexts for covered lines.", + ) + skip_covered = optparse.make_option( + "--skip-covered", action="store_true", + help="Skip files with 100% coverage.", + ) + no_skip_covered = optparse.make_option( + "--no-skip-covered", action="store_false", dest="skip_covered", + help="Disable --skip-covered.", + ) + skip_empty = optparse.make_option( + "--skip-empty", action="store_true", + help="Skip files with no code.", + ) + sort = optparse.make_option( + "--sort", action="store", metavar="COLUMN", + help=( + "Sort the report by the named column: name, stmts, miss, branch, brpart, or cover. " + + "Default is name." + ), + ) + source = optparse.make_option( + "", "--source", action="store", metavar="SRC1,SRC2,...", + help="A list of directories or importable names of code to measure.", + ) + timid = optparse.make_option( + "", "--timid", action="store_true", + help=( + "Use a simpler but slower trace method. Try this if you get " + + "seemingly impossible results!" + ), + ) + title = optparse.make_option( + "", "--title", action="store", metavar="TITLE", + help="A text string to use as the title on the HTML.", + ) + version = optparse.make_option( + "", "--version", action="store_true", + help="Display version information and exit.", + ) + + +class CoverageOptionParser(optparse.OptionParser): + """Base OptionParser for coverage.py. 
+ + Problems don't exit the program. + Defaults are initialized for all options. + + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + kwargs["add_help_option"] = False + super().__init__(*args, **kwargs) + self.set_defaults( + # Keep these arguments alphabetized by their names. + action=None, + append=None, + branch=None, + concurrency=None, + context=None, + contexts=None, + data_file=None, + debug=None, + directory=None, + fail_under=None, + format=None, + help=None, + ignore_errors=None, + include=None, + keep=None, + module=None, + omit=None, + parallel_mode=None, + precision=None, + pylib=None, + quiet=None, + rcfile=True, + show_contexts=None, + show_missing=None, + skip_covered=None, + skip_empty=None, + sort=None, + source=None, + timid=None, + title=None, + version=None, + ) + + self.disable_interspersed_args() + + class OptionParserError(Exception): + """Used to stop the optparse error handler ending the process.""" + pass + + def parse_args_ok(self, args: List[str]) -> Tuple[bool, Optional[optparse.Values], List[str]]: + """Call optparse.parse_args, but return a triple: + + (ok, options, args) + + """ + try: + options, args = super().parse_args(args) + except self.OptionParserError: + return False, None, [] + return True, options, args + + def error(self, msg: str) -> NoReturn: + """Override optparse.error so sys.exit doesn't get called.""" + show_help(msg) + raise self.OptionParserError + + +class GlobalOptionParser(CoverageOptionParser): + """Command-line parser for coverage.py global option arguments.""" + + def __init__(self) -> None: + super().__init__() + + self.add_options([ + Opts.help, + Opts.version, + ]) + + +class CmdOptionParser(CoverageOptionParser): + """Parse one of the new-style commands for coverage.py.""" + + def __init__( + self, + action: str, + options: List[optparse.Option], + description: str, + usage: Optional[str] = None, + ): + """Create an OptionParser for a coverage.py command. + + `action` is the slug to put into `options.action`. + `options` is a list of Option's for the command. + `description` is the description of the command, for the help text. + `usage` is the usage string to display in help. + + """ + if usage: + usage = "%prog " + usage + super().__init__( + usage=usage, + description=description, + ) + self.set_defaults(action=action) + self.add_options(options) + self.cmd = action + + def __eq__(self, other: str) -> bool: # type: ignore[override] + # A convenience equality, so that I can put strings in unit test + # results, and they will compare equal to objects. + return (other == f"<CmdOptionParser:{self.cmd}>") + + __hash__ = None # type: ignore[assignment] + + def get_prog_name(self) -> str: + """Override of an undocumented function in optparse.OptionParser.""" + program_name = super().get_prog_name() + + # Include the sub-command for this parser as part of the command. + return f"{program_name} {self.cmd}" + +# In lists of Opts, keep them alphabetized by the option names as they appear +# on the command line, since these lists determine the order of the options in +# the help output. +# +# In COMMANDS, keep the keys (command names) alphabetized. 
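The parser classes above convert optparse's exit-on-error behavior into a testable (ok, options, args) triple: `error()` raises `OptionParserError` instead of letting optparse call sys.exit(), and `parse_args_ok()` catches it. A standalone sketch of the same pattern, independent of coverage's internals:

    import optparse

    class NonExitingParser(optparse.OptionParser):
        """Parser whose errors surface as a flag, not a process exit."""

        class ParserError(Exception):
            pass

        def error(self, msg):
            # optparse calls error() on bad input; raise instead of exiting.
            raise self.ParserError(msg)

        def parse_args_ok(self, args):
            try:
                options, rest = self.parse_args(args)
            except self.ParserError:
                return False, None, []
            return True, options, rest

    parser = NonExitingParser()
    parser.add_option("--branch", action="store_true")
    ok, options, rest = parser.parse_args_ok(["--branch", "prog.py"])
    print(ok, options.branch, rest)  # True True ['prog.py']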
+ +GLOBAL_ARGS = [ + Opts.debug, + Opts.help, + Opts.rcfile, +] + +COMMANDS = { + "annotate": CmdOptionParser( + "annotate", + [ + Opts.directory, + Opts.input_datafile, + Opts.ignore_errors, + Opts.include, + Opts.omit, + ] + GLOBAL_ARGS, + usage="[options] [modules]", + description=( + "Make annotated copies of the given files, marking statements that are executed " + + "with > and statements that are missed with !." + ), + ), + + "combine": CmdOptionParser( + "combine", + [ + Opts.append, + Opts.combine_datafile, + Opts.keep, + Opts.quiet, + ] + GLOBAL_ARGS, + usage="[options] <path1> <path2> ... <pathN>", + description=( + "Combine data from multiple coverage files. " + + "The combined results are written to a single " + + "file representing the union of the data. The positional " + + "arguments are data files or directories containing data files. " + + "If no paths are provided, data files in the default data file's " + + "directory are combined." + ), + ), + + "debug": CmdOptionParser( + "debug", GLOBAL_ARGS, + usage="<topic>", + description=( + "Display information about the internals of coverage.py, " + + "for diagnosing problems. " + + "Topics are: " + + "'data' to show a summary of the collected data; " + + "'sys' to show installation information; " + + "'config' to show the configuration; " + + "'premain' to show what is calling coverage; " + + "'pybehave' to show internal flags describing Python behavior." + ), + ), + + "erase": CmdOptionParser( + "erase", + [ + Opts.combine_datafile + ] + GLOBAL_ARGS, + description="Erase previously collected coverage data.", + ), + + "help": CmdOptionParser( + "help", GLOBAL_ARGS, + usage="[command]", + description="Describe how to use coverage.py", + ), + + "html": CmdOptionParser( + "html", + [ + Opts.contexts, + Opts.directory, + Opts.input_datafile, + Opts.fail_under, + Opts.ignore_errors, + Opts.include, + Opts.omit, + Opts.precision, + Opts.quiet, + Opts.show_contexts, + Opts.skip_covered, + Opts.no_skip_covered, + Opts.skip_empty, + Opts.title, + ] + GLOBAL_ARGS, + usage="[options] [modules]", + description=( + "Create an HTML report of the coverage of the files. " + + "Each file gets its own page, with the source decorated to show " + + "executed, excluded, and missed lines." 
+ ), + ), + + "json": CmdOptionParser( + "json", + [ + Opts.contexts, + Opts.input_datafile, + Opts.fail_under, + Opts.ignore_errors, + Opts.include, + Opts.omit, + Opts.output_json, + Opts.json_pretty_print, + Opts.quiet, + Opts.show_contexts, + ] + GLOBAL_ARGS, + usage="[options] [modules]", + description="Generate a JSON report of coverage results.", + ), + + "lcov": CmdOptionParser( + "lcov", + [ + Opts.input_datafile, + Opts.fail_under, + Opts.ignore_errors, + Opts.include, + Opts.output_lcov, + Opts.omit, + Opts.quiet, + ] + GLOBAL_ARGS, + usage="[options] [modules]", + description="Generate an LCOV report of coverage results.", + ), + + "report": CmdOptionParser( + "report", + [ + Opts.contexts, + Opts.input_datafile, + Opts.fail_under, + Opts.format, + Opts.ignore_errors, + Opts.include, + Opts.omit, + Opts.precision, + Opts.sort, + Opts.show_missing, + Opts.skip_covered, + Opts.no_skip_covered, + Opts.skip_empty, + ] + GLOBAL_ARGS, + usage="[options] [modules]", + description="Report coverage statistics on modules.", + ), + + "run": CmdOptionParser( + "run", + [ + Opts.append, + Opts.branch, + Opts.concurrency, + Opts.context, + Opts.output_datafile, + Opts.include, + Opts.module, + Opts.omit, + Opts.pylib, + Opts.parallel_mode, + Opts.source, + Opts.timid, + ] + GLOBAL_ARGS, + usage="[options] <pyfile> [program options]", + description="Run a Python program, measuring code execution.", + ), + + "xml": CmdOptionParser( + "xml", + [ + Opts.input_datafile, + Opts.fail_under, + Opts.ignore_errors, + Opts.include, + Opts.omit, + Opts.output_xml, + Opts.quiet, + Opts.skip_empty, + ] + GLOBAL_ARGS, + usage="[options] [modules]", + description="Generate an XML report of coverage results.", + ), +} + + +def show_help( + error: Optional[str] = None, + topic: Optional[str] = None, + parser: Optional[optparse.OptionParser] = None, +) -> None: + """Display an error message, or the named topic.""" + assert error or topic or parser + + program_path = sys.argv[0] + if program_path.endswith(os.path.sep + "__main__.py"): + # The path is the main module of a package; get that path instead. + program_path = os.path.dirname(program_path) + program_name = os.path.basename(program_path) + if env.WINDOWS: + # entry_points={"console_scripts":...} on Windows makes files + # called coverage.exe, coverage3.exe, and coverage-3.5.exe. These + # invoke coverage-script.py, coverage3-script.py, and + # coverage-3.5-script.py. argv[0] is the .py file, but we want to + # get back to the original form. 
+ auto_suffix = "-script.py" + if program_name.endswith(auto_suffix): + program_name = program_name[:-len(auto_suffix)] + + help_params = dict(coverage.__dict__) + help_params["__url__"] = __url__ + help_params["program_name"] = program_name + if HAS_CTRACER: + help_params["extension_modifier"] = "with C extension" + else: + help_params["extension_modifier"] = "without C extension" + + if error: + print(error, file=sys.stderr) + print(f"Use '{program_name} help' for help.", file=sys.stderr) + elif parser: + print(parser.format_help().strip()) + print() + else: + assert topic is not None + help_msg = textwrap.dedent(HELP_TOPICS.get(topic, "")).strip() + if help_msg: + print(help_msg.format(**help_params)) + else: + print(f"Don't know topic {topic!r}") + print("Full documentation is at {__url__}".format(**help_params)) + + +OK, ERR, FAIL_UNDER = 0, 1, 2 + + +class CoverageScript: + """The command-line interface to coverage.py.""" + + def __init__(self) -> None: + self.global_option = False + self.coverage: Coverage + + def command_line(self, argv: List[str]) -> int: + """The bulk of the command line interface to coverage.py. + + `argv` is the argument list to process. + + Returns 0 if all is well, 1 if something went wrong. + + """ + # Collect the command-line options. + if not argv: + show_help(topic="minimum_help") + return OK + + # The command syntax we parse depends on the first argument. Global + # switch syntax always starts with an option. + parser: Optional[optparse.OptionParser] + self.global_option = argv[0].startswith("-") + if self.global_option: + parser = GlobalOptionParser() + else: + parser = COMMANDS.get(argv[0]) + if not parser: + show_help(f"Unknown command: {argv[0]!r}") + return ERR + argv = argv[1:] + + ok, options, args = parser.parse_args_ok(argv) + if not ok: + return ERR + assert options is not None + + # Handle help and version. + if self.do_help(options, args, parser): + return OK + + # Listify the list options. + source = unshell_list(options.source) + omit = unshell_list(options.omit) + include = unshell_list(options.include) + debug = unshell_list(options.debug) + contexts = unshell_list(options.contexts) + + if options.concurrency is not None: + concurrency = options.concurrency.split(",") + else: + concurrency = None + + # Do something. + self.coverage = Coverage( + data_file=options.data_file or DEFAULT_DATAFILE, + data_suffix=options.parallel_mode, + cover_pylib=options.pylib, + timid=options.timid, + branch=options.branch, + config_file=options.rcfile, + source=source, + omit=omit, + include=include, + debug=debug, + concurrency=concurrency, + check_preimported=True, + context=options.context, + messages=not options.quiet, + ) + + if options.action == "debug": + return self.do_debug(args) + + elif options.action == "erase": + self.coverage.erase() + return OK + + elif options.action == "run": + return self.do_run(options, args) + + elif options.action == "combine": + if options.append: + self.coverage.load() + data_paths = args or None + self.coverage.combine(data_paths, strict=True, keep=bool(options.keep)) + self.coverage.save() + return OK + + # Remaining actions are reporting, with some common options. + report_args = dict( + morfs=unglob_args(args), + ignore_errors=options.ignore_errors, + omit=omit, + include=include, + contexts=contexts, + ) + + # We need to be able to import from the current directory, because + # plugins may try to, for example, to read Django settings. 
+ sys.path.insert(0, "") + + self.coverage.load() + + total = None + if options.action == "report": + total = self.coverage.report( + precision=options.precision, + show_missing=options.show_missing, + skip_covered=options.skip_covered, + skip_empty=options.skip_empty, + sort=options.sort, + output_format=options.format, + **report_args + ) + elif options.action == "annotate": + self.coverage.annotate(directory=options.directory, **report_args) + elif options.action == "html": + total = self.coverage.html_report( + directory=options.directory, + precision=options.precision, + skip_covered=options.skip_covered, + skip_empty=options.skip_empty, + show_contexts=options.show_contexts, + title=options.title, + **report_args + ) + elif options.action == "xml": + total = self.coverage.xml_report( + outfile=options.outfile, + skip_empty=options.skip_empty, + **report_args + ) + elif options.action == "json": + total = self.coverage.json_report( + outfile=options.outfile, + pretty_print=options.pretty_print, + show_contexts=options.show_contexts, + **report_args + ) + elif options.action == "lcov": + total = self.coverage.lcov_report( + outfile=options.outfile, + **report_args + ) + else: + # There are no other possible actions. + raise AssertionError + + if total is not None: + # Apply the command line fail-under options, and then use the config + # value, so we can get fail_under from the config file. + if options.fail_under is not None: + self.coverage.set_option("report:fail_under", options.fail_under) + if options.precision is not None: + self.coverage.set_option("report:precision", options.precision) + + fail_under = cast(float, self.coverage.get_option("report:fail_under")) + precision = cast(int, self.coverage.get_option("report:precision")) + if should_fail_under(total, fail_under, precision): + msg = "total of {total} is less than fail-under={fail_under:.{p}f}".format( + total=Numbers(precision=precision).display_covered(total), + fail_under=fail_under, + p=precision, + ) + print("Coverage failure:", msg) + return FAIL_UNDER + + return OK + + def do_help( + self, + options: optparse.Values, + args: List[str], + parser: optparse.OptionParser, + ) -> bool: + """Deal with help requests. + + Return True if it handled the request, False if not. + + """ + # Handle help. + if options.help: + if self.global_option: + show_help(topic="help") + else: + show_help(parser=parser) + return True + + if options.action == "help": + if args: + for a in args: + parser_maybe = COMMANDS.get(a) + if parser_maybe is not None: + show_help(parser=parser_maybe) + else: + show_help(topic=a) + else: + show_help(topic="help") + return True + + # Handle version. + if options.version: + show_help(topic="version") + return True + + return False + + def do_run(self, options: optparse.Values, args: List[str]) -> int: + """Implementation of 'coverage run'.""" + + if not args: + if options.module: + # Specified -m with nothing else. 
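+                # e.g. a bare "coverage run -m" with no module name ends here.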
+ show_help("No module specified for -m") + return ERR + command_line = cast(str, self.coverage.get_option("run:command_line")) + if command_line is not None: + args = shlex.split(command_line) + if args and args[0] in {"-m", "--module"}: + options.module = True + args = args[1:] + if not args: + show_help("Nothing to do.") + return ERR + + if options.append and self.coverage.get_option("run:parallel"): + show_help("Can't append to data files in parallel mode.") + return ERR + + if options.concurrency == "multiprocessing": + # Can't set other run-affecting command line options with + # multiprocessing. + for opt_name in ["branch", "include", "omit", "pylib", "source", "timid"]: + # As it happens, all of these options have no default, meaning + # they will be None if they have not been specified. + if getattr(options, opt_name) is not None: + show_help( + "Options affecting multiprocessing must only be specified " + + "in a configuration file.\n" + + f"Remove --{opt_name} from the command line." + ) + return ERR + + os.environ["COVERAGE_RUN"] = "true" + + runner = PyRunner(args, as_module=bool(options.module)) + runner.prepare() + + if options.append: + self.coverage.load() + + # Run the script. + self.coverage.start() + code_ran = True + try: + runner.run() + except NoSource: + code_ran = False + raise + finally: + self.coverage.stop() + if code_ran: + self.coverage.save() + + return OK + + def do_debug(self, args: List[str]) -> int: + """Implementation of 'coverage debug'.""" + + if not args: + show_help("What information would you like: config, data, sys, premain, pybehave?") + return ERR + if args[1:]: + show_help("Only one topic at a time, please") + return ERR + + if args[0] == "sys": + write_formatted_info(print, "sys", self.coverage.sys_info()) + elif args[0] == "data": + print(info_header("data")) + data_file = self.coverage.config.data_file + debug_data_file(data_file) + for filename in combinable_files(data_file): + print("-----") + debug_data_file(filename) + elif args[0] == "config": + write_formatted_info(print, "config", self.coverage.config.debug_info()) + elif args[0] == "premain": + print(info_header("premain")) + print(short_stack()) + elif args[0] == "pybehave": + write_formatted_info(print, "pybehave", env.debug_info()) + else: + show_help(f"Don't know what you mean by {args[0]!r}") + return ERR + + return OK + + +def unshell_list(s: str) -> Optional[List[str]]: + """Turn a command-line argument into a list.""" + if not s: + return None + if env.WINDOWS: + # When running coverage.py as coverage.exe, some of the behavior + # of the shell is emulated: wildcards are expanded into a list of + # file names. So you have to single-quote patterns on the command + # line, but (not) helpfully, the single quotes are included in the + # argument, so we have to strip them off here. + s = s.strip("'") + return s.split(",") + + +def unglob_args(args: List[str]) -> List[str]: + """Interpret shell wildcards for platforms that need it.""" + if env.WINDOWS: + globbed = [] + for arg in args: + if "?" in arg or "*" in arg: + globbed.extend(glob.glob(arg)) + else: + globbed.append(arg) + args = globbed + return args + + +HELP_TOPICS = { + "help": """\ + Coverage.py, version {__version__} {extension_modifier} + Measure, collect, and report on code coverage in Python programs. + + usage: {program_name} [options] [args] + + Commands: + annotate Annotate source files with execution information. + combine Combine a number of data files. 
+ debug Display information about the internals of coverage.py + erase Erase previously collected coverage data. + help Get help on using coverage.py. + html Create an HTML report. + json Create a JSON report of coverage results. + lcov Create an LCOV report of coverage results. + report Report coverage stats on modules. + run Run a Python program and measure code execution. + xml Create an XML report of coverage results. + + Use "{program_name} help " for detailed help on any command. + """, + + "minimum_help": ( + "Code coverage for Python, version {__version__} {extension_modifier}. " + + "Use '{program_name} help' for help." + ), + + "version": "Coverage.py, version {__version__} {extension_modifier}", +} + + +def main(argv: Optional[List[str]] = None) -> Optional[int]: + """The main entry point to coverage.py. + + This is installed as the script entry point. + + """ + if argv is None: + argv = sys.argv[1:] + try: + status = CoverageScript().command_line(argv) + except _ExceptionDuringRun as err: + # An exception was caught while running the product code. The + # sys.exc_info() return tuple is packed into an _ExceptionDuringRun + # exception. + traceback.print_exception(*err.args) # pylint: disable=no-value-for-parameter + status = ERR + except _BaseCoverageException as err: + # A controlled error inside coverage.py: print the message to the user. + msg = err.args[0] + print(msg) + status = ERR + except SystemExit as err: + # The user called `sys.exit()`. Exit with their argument, if any. + if err.args: + status = err.args[0] + else: + status = None + return status + +# Profiling using ox_profile. Install it from GitHub: +# pip install git+https://github.com/emin63/ox_profile.git +# +# $set_env.py: COVERAGE_PROFILE - Set to use ox_profile. +_profile = os.environ.get("COVERAGE_PROFILE", "") +if _profile: # pragma: debugging + from ox_profile.core.launchers import SimpleLauncher # pylint: disable=import-error + original_main = main + + def main( # pylint: disable=function-redefined + argv: Optional[List[str]] = None, + ) -> Optional[int]: + """A wrapper around main that profiles.""" + profiler = SimpleLauncher.launch() + try: + return original_main(argv) + finally: + data, _ = profiler.query(re_filter="coverage", max_records=100) + print(profiler.show(query=data, limit=100, sep="", col="")) + profiler.cancel() diff --git a/venv/lib/python3.10/site-packages/coverage/collector.py b/venv/lib/python3.10/site-packages/coverage/collector.py new file mode 100644 index 0000000..ca7f5d9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/coverage/collector.py @@ -0,0 +1,508 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""Raw data collector for coverage.py.""" + +from __future__ import annotations + +import functools +import os +import sys + +from types import FrameType +from typing import ( + cast, Any, Callable, Dict, List, Mapping, Optional, Set, Tuple, Type, TypeVar, +) + +from coverage import env +from coverage.config import CoverageConfig +from coverage.data import CoverageData +from coverage.debug import short_stack +from coverage.disposition import FileDisposition +from coverage.exceptions import ConfigError +from coverage.misc import human_sorted_items, isolate_module +from coverage.plugin import CoveragePlugin +from coverage.pytracer import PyTracer +from coverage.types import ( + TArc, TFileDisposition, TLineNo, TTraceData, TTraceFn, TTracer, TWarnFn, +) + +os = 
isolate_module(os) + + +try: + # Use the C extension code when we can, for speed. + from coverage.tracer import CTracer, CFileDisposition + HAS_CTRACER = True +except ImportError: + # Couldn't import the C extension, maybe it isn't built. + if os.getenv('COVERAGE_TEST_TRACER') == 'c': # pragma: part covered + # During testing, we use the COVERAGE_TEST_TRACER environment variable + # to indicate that we've fiddled with the environment to test this + # fallback code. If we thought we had a C tracer, but couldn't import + # it, then exit quickly and clearly instead of dribbling confusing + # errors. I'm using sys.exit here instead of an exception because an + # exception here causes all sorts of other noise in unittest. + sys.stderr.write("*** COVERAGE_TEST_TRACER is 'c' but can't import CTracer!\n") + sys.exit(1) + HAS_CTRACER = False + +T = TypeVar("T") + +class Collector: + """Collects trace data. + + Creates a Tracer object for each thread, since they track stack + information. Each Tracer points to the same shared data, contributing + traced data points. + + When the Collector is started, it creates a Tracer for the current thread, + and installs a function to create Tracers for each new thread started. + When the Collector is stopped, all active Tracers are stopped. + + Threads started while the Collector is stopped will never have Tracers + associated with them. + + """ + + # The stack of active Collectors. Collectors are added here when started, + # and popped when stopped. Collectors on the stack are paused when not + # the top, and resumed when they become the top again. + _collectors: List[Collector] = [] + + # The concurrency settings we support here. + LIGHT_THREADS = {"greenlet", "eventlet", "gevent"} + + def __init__( + self, + should_trace: Callable[[str, FrameType], TFileDisposition], + check_include: Callable[[str, FrameType], bool], + should_start_context: Optional[Callable[[FrameType], Optional[str]]], + file_mapper: Callable[[str], str], + timid: bool, + branch: bool, + warn: TWarnFn, + concurrency: List[str], + ) -> None: + """Create a collector. + + `should_trace` is a function, taking a file name and a frame, and + returning a `coverage.FileDisposition object`. + + `check_include` is a function taking a file name and a frame. It returns + a boolean: True if the file should be traced, False if not. + + `should_start_context` is a function taking a frame, and returning a + string. If the frame should be the start of a new context, the string + is the new context. If the frame should not be the start of a new + context, return None. + + `file_mapper` is a function taking a filename, and returning a Unicode + filename. The result is the name that will be recorded in the data + file. + + If `timid` is true, then a slower simpler trace function will be + used. This is important for some environments where manipulation of + tracing functions make the faster more sophisticated trace function not + operate properly. + + If `branch` is true, then branches will be measured. This involves + collecting data on which statements followed each other (arcs). Use + `get_arc_data` to get the arc data. + + `warn` is a warning function, taking a single string message argument + and an optional slug argument which will be a string or None, to be + used if a warning needs to be issued. + + `concurrency` is a list of strings indicating the concurrency libraries + in use. Valid values are "greenlet", "eventlet", "gevent", or "thread" + (the default). 
"thread" can be combined with one of the other three. + Other values are ignored. + + """ + self.should_trace = should_trace + self.check_include = check_include + self.should_start_context = should_start_context + self.file_mapper = file_mapper + self.branch = branch + self.warn = warn + self.concurrency = concurrency + assert isinstance(self.concurrency, list), f"Expected a list: {self.concurrency!r}" + + self.covdata: CoverageData + self.threading = None + self.static_context: Optional[str] = None + + self.origin = short_stack() + + self.concur_id_func = None + + self._trace_class: Type[TTracer] + self.file_disposition_class: Type[TFileDisposition] + + use_ctracer = False + if HAS_CTRACER and not timid: + use_ctracer = True + + #if HAS_CTRACER and self._trace_class is CTracer: + if use_ctracer: + self._trace_class = CTracer + self.file_disposition_class = CFileDisposition + self.supports_plugins = True + self.packed_arcs = True + else: + self._trace_class = PyTracer + self.file_disposition_class = FileDisposition + self.supports_plugins = False + self.packed_arcs = False + + # We can handle a few concurrency options here, but only one at a time. + concurrencies = set(self.concurrency) + unknown = concurrencies - CoverageConfig.CONCURRENCY_CHOICES + if unknown: + show = ", ".join(sorted(unknown)) + raise ConfigError(f"Unknown concurrency choices: {show}") + light_threads = concurrencies & self.LIGHT_THREADS + if len(light_threads) > 1: + show = ", ".join(sorted(light_threads)) + raise ConfigError(f"Conflicting concurrency settings: {show}") + do_threading = False + + tried = "nothing" # to satisfy pylint + try: + if "greenlet" in concurrencies: + tried = "greenlet" + import greenlet + self.concur_id_func = greenlet.getcurrent + elif "eventlet" in concurrencies: + tried = "eventlet" + import eventlet.greenthread # pylint: disable=import-error,useless-suppression + self.concur_id_func = eventlet.greenthread.getcurrent + elif "gevent" in concurrencies: + tried = "gevent" + import gevent # pylint: disable=import-error,useless-suppression + self.concur_id_func = gevent.getcurrent + + if "thread" in concurrencies: + do_threading = True + except ImportError as ex: + msg = f"Couldn't trace with concurrency={tried}, the module isn't installed." + raise ConfigError(msg) from ex + + if self.concur_id_func and not hasattr(self._trace_class, "concur_id_func"): + raise ConfigError( + "Can't support concurrency={} with {}, only threads are supported.".format( + tried, self.tracer_name(), + ) + ) + + if do_threading or not concurrencies: + # It's important to import threading only if we need it. If + # it's imported early, and the program being measured uses + # gevent, then gevent's monkey-patching won't work properly. + import threading + self.threading = threading + + self.reset() + + def __repr__(self) -> str: + return f"" + + def use_data(self, covdata: CoverageData, context: Optional[str]) -> None: + """Use `covdata` for recording data.""" + self.covdata = covdata + self.static_context = context + self.covdata.set_context(self.static_context) + + def tracer_name(self) -> str: + """Return the class name of the tracer we're using.""" + return self._trace_class.__name__ + + def _clear_data(self) -> None: + """Clear out existing data, but stay ready for more collection.""" + # We used to use self.data.clear(), but that would remove filename + # keys and data values that were still in use higher up the stack + # when we are called as part of switch_context. 
+ for d in self.data.values(): + d.clear() + + for tracer in self.tracers: + tracer.reset_activity() + + def reset(self) -> None: + """Clear collected data, and prepare to collect more.""" + # The trace data we are collecting. + self.data: TTraceData = {} + + # A dictionary mapping file names to file tracer plugin names that will + # handle them. + self.file_tracers: Dict[str, str] = {} + + self.disabled_plugins: Set[str] = set() + + # The .should_trace_cache attribute is a cache from file names to + # coverage.FileDisposition objects, or None. When a file is first + # considered for tracing, a FileDisposition is obtained from + # Coverage.should_trace. Its .trace attribute indicates whether the + # file should be traced or not. If it should be, a plugin with dynamic + # file names can decide not to trace it based on the dynamic file name + # being excluded by the inclusion rules, in which case the + # FileDisposition will be replaced by None in the cache. + if env.PYPY: + import __pypy__ # pylint: disable=import-error + # Alex Gaynor said: + # should_trace_cache is a strictly growing key: once a key is in + # it, it never changes. Further, the keys used to access it are + # generally constant, given sufficient context. That is to say, at + # any given point _trace() is called, pypy is able to know the key. + # This is because the key is determined by the physical source code + # line, and that's invariant with the call site. + # + # This property of a dict with immutable keys, combined with + # call-site-constant keys is a match for PyPy's module dict, + # which is optimized for such workloads. + # + # This gives a 20% benefit on the workload described at + # https://bitbucket.org/pypy/pypy/issue/1871/10x-slower-than-cpython-under-coverage + self.should_trace_cache = __pypy__.newdict("module") + else: + self.should_trace_cache = {} + + # Our active Tracers. + self.tracers: List[TTracer] = [] + + self._clear_data() + + def _start_tracer(self) -> TTraceFn: + """Start a new Tracer object, and store it in self.tracers.""" + tracer = self._trace_class() + tracer.data = self.data + tracer.trace_arcs = self.branch + tracer.should_trace = self.should_trace + tracer.should_trace_cache = self.should_trace_cache + tracer.warn = self.warn + + if hasattr(tracer, 'concur_id_func'): + tracer.concur_id_func = self.concur_id_func + if hasattr(tracer, 'file_tracers'): + tracer.file_tracers = self.file_tracers + if hasattr(tracer, 'threading'): + tracer.threading = self.threading + if hasattr(tracer, 'check_include'): + tracer.check_include = self.check_include + if hasattr(tracer, 'should_start_context'): + tracer.should_start_context = self.should_start_context + if hasattr(tracer, 'switch_context'): + tracer.switch_context = self.switch_context + if hasattr(tracer, 'disable_plugin'): + tracer.disable_plugin = self.disable_plugin + + fn = tracer.start() + self.tracers.append(tracer) + + return fn + + # The trace function has to be set individually on each thread before + # execution begins. Ironically, the only support the threading module has + # for running code before the thread main is the tracing function. So we + # install this as a trace function, and the first time it's called, it does + # the real trace installation. 
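+    # Assumed sequence, for illustration:
+    #   1. start() calls threading.settrace(self._installation_trace)
+    #   2. a newly started thread invokes _installation_trace once
+    #   3. which removes itself via sys.settrace(None) and installs the
+    #      real tracer from _start_tracer()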
+ # + # New in 3.12: threading.settrace_all_threads: https://github.com/python/cpython/pull/96681 + + def _installation_trace(self, frame: FrameType, event: str, arg: Any) -> Optional[TTraceFn]: + """Called on new threads, installs the real tracer.""" + # Remove ourselves as the trace function. + sys.settrace(None) + # Install the real tracer. + fn: Optional[TTraceFn] = self._start_tracer() + # Invoke the real trace function with the current event, to be sure + # not to lose an event. + if fn: + fn = fn(frame, event, arg) + # Return the new trace function to continue tracing in this scope. + return fn + + def start(self) -> None: + """Start collecting trace information.""" + if self._collectors: + self._collectors[-1].pause() + + self.tracers = [] + + # Check to see whether we had a fullcoverage tracer installed. If so, + # get the stack frames it stashed away for us. + traces0: List[Tuple[Tuple[FrameType, str, Any], TLineNo]] = [] + fn0 = sys.gettrace() + if fn0: + tracer0 = getattr(fn0, '__self__', None) + if tracer0: + traces0 = getattr(tracer0, 'traces', []) + + try: + # Install the tracer on this thread. + fn = self._start_tracer() + except: + if self._collectors: + self._collectors[-1].resume() + raise + + # If _start_tracer succeeded, then we add ourselves to the global + # stack of collectors. + self._collectors.append(self) + + # Replay all the events from fullcoverage into the new trace function. + for (frame, event, arg), lineno in traces0: + try: + fn(frame, event, arg, lineno=lineno) + except TypeError as ex: + raise RuntimeError("fullcoverage must be run with the C trace function.") from ex + + # Install our installation tracer in threading, to jump-start other + # threads. + if self.threading: + self.threading.settrace(self._installation_trace) + + def stop(self) -> None: + """Stop collecting trace information.""" + assert self._collectors + if self._collectors[-1] is not self: + print("self._collectors:") + for c in self._collectors: + print(f" {c!r}\n{c.origin}") + assert self._collectors[-1] is self, ( + f"Expected current collector to be {self!r}, but it's {self._collectors[-1]!r}" + ) + + self.pause() + + # Remove this Collector from the stack, and resume the one underneath + # (if any). + self._collectors.pop() + if self._collectors: + self._collectors[-1].resume() + + def pause(self) -> None: + """Pause tracing, but be prepared to `resume`.""" + for tracer in self.tracers: + tracer.stop() + stats = tracer.get_stats() + if stats: + print("\nCoverage.py tracer stats:") + for k, v in human_sorted_items(stats.items()): + print(f"{k:>20}: {v}") + if self.threading: + self.threading.settrace(None) + + def resume(self) -> None: + """Resume tracing after a `pause`.""" + for tracer in self.tracers: + tracer.start() + if self.threading: + self.threading.settrace(self._installation_trace) + else: + self._start_tracer() + + def _activity(self) -> bool: + """Has any activity been traced? + + Returns a boolean, True if any trace function was invoked. 
+ + """ + return any(tracer.activity() for tracer in self.tracers) + + def switch_context(self, new_context: Optional[str]) -> None: + """Switch to a new dynamic context.""" + context: Optional[str] + self.flush_data() + if self.static_context: + context = self.static_context + if new_context: + context += "|" + new_context + else: + context = new_context + self.covdata.set_context(context) + + def disable_plugin(self, disposition: TFileDisposition) -> None: + """Disable the plugin mentioned in `disposition`.""" + file_tracer = disposition.file_tracer + assert file_tracer is not None + plugin = file_tracer._coverage_plugin + plugin_name = plugin._coverage_plugin_name + self.warn(f"Disabling plug-in {plugin_name!r} due to previous exception") + plugin._coverage_enabled = False + disposition.trace = False + + @functools.lru_cache(maxsize=None) # pylint: disable=method-cache-max-size-none + def cached_mapped_file(self, filename: str) -> str: + """A locally cached version of file names mapped through file_mapper.""" + return self.file_mapper(filename) + + def mapped_file_dict(self, d: Mapping[str, T]) -> Dict[str, T]: + """Return a dict like d, but with keys modified by file_mapper.""" + # The call to list(items()) ensures that the GIL protects the dictionary + # iterator against concurrent modifications by tracers running + # in other threads. We try three times in case of concurrent + # access, hoping to get a clean copy. + runtime_err = None + for _ in range(3): # pragma: part covered + try: + items = list(d.items()) + except RuntimeError as ex: # pragma: cant happen + runtime_err = ex + else: + break + else: # pragma: cant happen + assert isinstance(runtime_err, Exception) + raise runtime_err + + return {self.cached_mapped_file(k): v for k, v in items if v} + + def plugin_was_disabled(self, plugin: CoveragePlugin) -> None: + """Record that `plugin` was disabled during the run.""" + self.disabled_plugins.add(plugin._coverage_plugin_name) + + def flush_data(self) -> bool: + """Save the collected data to our associated `CoverageData`. + + Data may have also been saved along the way. This forces the + last of the data to be saved. + + Returns True if there was data to save, False if not. + """ + if not self._activity(): + return False + + if self.branch: + if self.packed_arcs: + # Unpack the line number pairs packed into integers. See + # tracer.c:CTracer_record_pair for the C code that creates + # these packed ints. 
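+                # Worked example of the unpacking below: the arc (5, -1) is
+                # packed as 5 | (1 << 20) | (1 << 41); the low 20 bits give
+                # l1 = 5, bits 20-39 give l2 = 1, and bit 41 flips l2 to -1.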
+ arc_data: Dict[str, List[TArc]] = {} + packed_data = cast(Dict[str, Set[int]], self.data) + for fname, packeds in packed_data.items(): + tuples = [] + for packed in packeds: + l1 = packed & 0xFFFFF + l2 = (packed & (0xFFFFF << 20)) >> 20 + if packed & (1 << 40): + l1 *= -1 + if packed & (1 << 41): + l2 *= -1 + tuples.append((l1, l2)) + arc_data[fname] = tuples + else: + arc_data = cast(Dict[str, List[TArc]], self.data) + self.covdata.add_arcs(self.mapped_file_dict(arc_data)) + else: + line_data = cast(Dict[str, Set[int]], self.data) + self.covdata.add_lines(self.mapped_file_dict(line_data)) + + file_tracers = { + k: v for k, v in self.file_tracers.items() + if v not in self.disabled_plugins + } + self.covdata.add_file_tracers(self.mapped_file_dict(file_tracers)) + + self._clear_data() + return True diff --git a/venv/lib/python3.10/site-packages/coverage/config.py b/venv/lib/python3.10/site-packages/coverage/config.py new file mode 100644 index 0000000..0b0cab3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/coverage/config.py @@ -0,0 +1,619 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""Config file for coverage.py""" + +from __future__ import annotations + +import collections +import configparser +import copy +import os +import os.path +import re + +from typing import ( + Any, Callable, Dict, Iterable, List, Optional, Tuple, Union, +) + +from coverage.exceptions import ConfigError +from coverage.misc import isolate_module, human_sorted_items, substitute_variables +from coverage.tomlconfig import TomlConfigParser, TomlDecodeError +from coverage.types import ( + TConfigurable, TConfigSectionIn, TConfigValueIn, TConfigSectionOut, + TConfigValueOut, TPluginConfig, +) + +os = isolate_module(os) + + +class HandyConfigParser(configparser.ConfigParser): + """Our specialization of ConfigParser.""" + + def __init__(self, our_file: bool) -> None: + """Create the HandyConfigParser. + + `our_file` is True if this config file is specifically for coverage, + False if we are examining another config file (tox.ini, setup.cfg) + for possible settings. 
+ """ + + super().__init__(interpolation=None) + self.section_prefixes = ["coverage:"] + if our_file: + self.section_prefixes.append("") + + def read( # type: ignore[override] + self, + filenames: Iterable[str], + encoding_unused: Optional[str] = None, + ) -> List[str]: + """Read a file name as UTF-8 configuration data.""" + return super().read(filenames, encoding="utf-8") + + def real_section(self, section: str) -> Optional[str]: + """Get the actual name of a section.""" + for section_prefix in self.section_prefixes: + real_section = section_prefix + section + has = super().has_section(real_section) + if has: + return real_section + return None + + def has_option(self, section: str, option: str) -> bool: + real_section = self.real_section(section) + if real_section is not None: + return super().has_option(real_section, option) + return False + + def has_section(self, section: str) -> bool: + return bool(self.real_section(section)) + + def options(self, section: str) -> List[str]: + real_section = self.real_section(section) + if real_section is not None: + return super().options(real_section) + raise ConfigError(f"No section: {section!r}") + + def get_section(self, section: str) -> TConfigSectionOut: + """Get the contents of a section, as a dictionary.""" + d: Dict[str, TConfigValueOut] = {} + for opt in self.options(section): + d[opt] = self.get(section, opt) + return d + + def get(self, section: str, option: str, *args: Any, **kwargs: Any) -> str: # type: ignore + """Get a value, replacing environment variables also. + + The arguments are the same as `ConfigParser.get`, but in the found + value, ``$WORD`` or ``${WORD}`` are replaced by the value of the + environment variable ``WORD``. + + Returns the finished value. + + """ + for section_prefix in self.section_prefixes: + real_section = section_prefix + section + if super().has_option(real_section, option): + break + else: + raise ConfigError(f"No option {option!r} in section: {section!r}") + + v: str = super().get(real_section, option, *args, **kwargs) + v = substitute_variables(v, os.environ) + return v + + def getlist(self, section: str, option: str) -> List[str]: + """Read a list of strings. + + The value of `section` and `option` is treated as a comma- and newline- + separated list of strings. Each value is stripped of white space. + + Returns the list of strings. + + """ + value_list = self.get(section, option) + values = [] + for value_line in value_list.split("\n"): + for value in value_line.split(","): + value = value.strip() + if value: + values.append(value) + return values + + def getregexlist(self, section: str, option: str) -> List[str]: + """Read a list of full-line regexes. + + The value of `section` and `option` is treated as a newline-separated + list of regexes. Each value is stripped of white space. + + Returns the list of strings. + + """ + line_list = self.get(section, option) + value_list = [] + for value in line_list.splitlines(): + value = value.strip() + try: + re.compile(value) + except re.error as e: + raise ConfigError( + f"Invalid [{section}].{option} value {value!r}: {e}" + ) from e + if value: + value_list.append(value) + return value_list + + +TConfigParser = Union[HandyConfigParser, TomlConfigParser] + + +# The default line exclusion regexes. +DEFAULT_EXCLUDE = [ + r"#\s*(pragma|PRAGMA)[:\s]?\s*(no|NO)\s*(cover|COVER)", +] + +# The default partial branch regexes, to be modified by the user. 
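+# Lines such as "# pragma: no branch" or "#PRAGMA NO BRANCH" match the
+# default pattern below.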
+DEFAULT_PARTIAL = [ + r"#\s*(pragma|PRAGMA)[:\s]?\s*(no|NO)\s*(branch|BRANCH)", +] + +# The default partial branch regexes, based on Python semantics. +# These are any Python branching constructs that can't actually execute all +# their branches. +DEFAULT_PARTIAL_ALWAYS = [ + "while (True|1|False|0):", + "if (True|1|False|0):", +] + + +class CoverageConfig(TConfigurable, TPluginConfig): + """Coverage.py configuration. + + The attributes of this class are the various settings that control the + operation of coverage.py. + + """ + # pylint: disable=too-many-instance-attributes + + def __init__(self) -> None: + """Initialize the configuration attributes to their defaults.""" + # Metadata about the config. + # We tried to read these config files. + self.attempted_config_files: List[str] = [] + # We did read these config files, but maybe didn't find any content for us. + self.config_files_read: List[str] = [] + # The file that gave us our configuration. + self.config_file: Optional[str] = None + self._config_contents: Optional[bytes] = None + + # Defaults for [run] and [report] + self._include = None + self._omit = None + + # Defaults for [run] + self.branch = False + self.command_line: Optional[str] = None + self.concurrency: List[str] = [] + self.context: Optional[str] = None + self.cover_pylib = False + self.data_file = ".coverage" + self.debug: List[str] = [] + self.debug_file: Optional[str] = None + self.disable_warnings: List[str] = [] + self.dynamic_context: Optional[str] = None + self.parallel = False + self.plugins: List[str] = [] + self.relative_files = False + self.run_include: List[str] = [] + self.run_omit: List[str] = [] + self.sigterm = False + self.source: Optional[List[str]] = None + self.source_pkgs: List[str] = [] + self.timid = False + self._crash: Optional[str] = None + + # Defaults for [report] + self.exclude_list = DEFAULT_EXCLUDE[:] + self.exclude_also: List[str] = [] + self.fail_under = 0.0 + self.format: Optional[str] = None + self.ignore_errors = False + self.include_namespace_packages = False + self.report_include: Optional[List[str]] = None + self.report_omit: Optional[List[str]] = None + self.partial_always_list = DEFAULT_PARTIAL_ALWAYS[:] + self.partial_list = DEFAULT_PARTIAL[:] + self.precision = 0 + self.report_contexts: Optional[List[str]] = None + self.show_missing = False + self.skip_covered = False + self.skip_empty = False + self.sort: Optional[str] = None + + # Defaults for [html] + self.extra_css: Optional[str] = None + self.html_dir = "htmlcov" + self.html_skip_covered: Optional[bool] = None + self.html_skip_empty: Optional[bool] = None + self.html_title = "Coverage report" + self.show_contexts = False + + # Defaults for [xml] + self.xml_output = "coverage.xml" + self.xml_package_depth = 99 + + # Defaults for [json] + self.json_output = "coverage.json" + self.json_pretty_print = False + self.json_show_contexts = False + + # Defaults for [lcov] + self.lcov_output = "coverage.lcov" + + # Defaults for [paths] + self.paths: Dict[str, List[str]] = {} + + # Options for plugins + self.plugin_options: Dict[str, TConfigSectionOut] = {} + + MUST_BE_LIST = { + "debug", "concurrency", "plugins", + "report_omit", "report_include", + "run_omit", "run_include", + } + + def from_args(self, **kwargs: TConfigValueIn) -> None: + """Read config values from `kwargs`.""" + for k, v in kwargs.items(): + if v is not None: + if k in self.MUST_BE_LIST and isinstance(v, str): + v = [v] + setattr(self, k, v) + + def from_file(self, filename: str, warn: Callable[[str], None], 
our_file: bool) -> bool: + """Read configuration from a .rc file. + + `filename` is a file name to read. + + `our_file` is True if this config file is specifically for coverage, + False if we are examining another config file (tox.ini, setup.cfg) + for possible settings. + + Returns True or False, whether the file could be read, and it had some + coverage.py settings in it. + + """ + _, ext = os.path.splitext(filename) + cp: TConfigParser + if ext == ".toml": + cp = TomlConfigParser(our_file) + else: + cp = HandyConfigParser(our_file) + + self.attempted_config_files.append(filename) + + try: + files_read = cp.read(filename) + except (configparser.Error, TomlDecodeError) as err: + raise ConfigError(f"Couldn't read config file {filename}: {err}") from err + if not files_read: + return False + + self.config_files_read.extend(map(os.path.abspath, files_read)) + + any_set = False + try: + for option_spec in self.CONFIG_FILE_OPTIONS: + was_set = self._set_attr_from_config_option(cp, *option_spec) + if was_set: + any_set = True + except ValueError as err: + raise ConfigError(f"Couldn't read config file {filename}: {err}") from err + + # Check that there are no unrecognized options. + all_options = collections.defaultdict(set) + for option_spec in self.CONFIG_FILE_OPTIONS: + section, option = option_spec[1].split(":") + all_options[section].add(option) + + for section, options in all_options.items(): + real_section = cp.real_section(section) + if real_section: + for unknown in set(cp.options(section)) - options: + warn( + "Unrecognized option '[{}] {}=' in config file {}".format( + real_section, unknown, filename + ) + ) + + # [paths] is special + if cp.has_section("paths"): + for option in cp.options("paths"): + self.paths[option] = cp.getlist("paths", option) + any_set = True + + # plugins can have options + for plugin in self.plugins: + if cp.has_section(plugin): + self.plugin_options[plugin] = cp.get_section(plugin) + any_set = True + + # Was this file used as a config file? If it's specifically our file, + # then it was used. If we're piggybacking on someone else's file, + # then it was only used if we found some settings in it. + if our_file: + used = True + else: + used = any_set + + if used: + self.config_file = os.path.abspath(filename) + with open(filename, "rb") as f: + self._config_contents = f.read() + + return used + + def copy(self) -> CoverageConfig: + """Return a copy of the configuration.""" + return copy.deepcopy(self) + + CONCURRENCY_CHOICES = {"thread", "gevent", "greenlet", "eventlet", "multiprocessing"} + + CONFIG_FILE_OPTIONS = [ + # These are *args for _set_attr_from_config_option: + # (attr, where, type_="") + # + # attr is the attribute to set on the CoverageConfig object. + # where is the section:name to read from the configuration file. + # type_ is the optional type to apply, by using .getTYPE to read the + # configuration value from the file. 
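+        # For example, ("branch", "run:branch", "boolean") means: read the
+        # "branch" option of the [run] section with getboolean() and store
+        # the result on self.branch.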
+ + # [run] + ("branch", "run:branch", "boolean"), + ("command_line", "run:command_line"), + ("concurrency", "run:concurrency", "list"), + ("context", "run:context"), + ("cover_pylib", "run:cover_pylib", "boolean"), + ("data_file", "run:data_file"), + ("debug", "run:debug", "list"), + ("debug_file", "run:debug_file"), + ("disable_warnings", "run:disable_warnings", "list"), + ("dynamic_context", "run:dynamic_context"), + ("parallel", "run:parallel", "boolean"), + ("plugins", "run:plugins", "list"), + ("relative_files", "run:relative_files", "boolean"), + ("run_include", "run:include", "list"), + ("run_omit", "run:omit", "list"), + ("sigterm", "run:sigterm", "boolean"), + ("source", "run:source", "list"), + ("source_pkgs", "run:source_pkgs", "list"), + ("timid", "run:timid", "boolean"), + ("_crash", "run:_crash"), + + # [report] + ("exclude_list", "report:exclude_lines", "regexlist"), + ("exclude_also", "report:exclude_also", "regexlist"), + ("fail_under", "report:fail_under", "float"), + ("format", "report:format", "boolean"), + ("ignore_errors", "report:ignore_errors", "boolean"), + ("include_namespace_packages", "report:include_namespace_packages", "boolean"), + ("partial_always_list", "report:partial_branches_always", "regexlist"), + ("partial_list", "report:partial_branches", "regexlist"), + ("precision", "report:precision", "int"), + ("report_contexts", "report:contexts", "list"), + ("report_include", "report:include", "list"), + ("report_omit", "report:omit", "list"), + ("show_missing", "report:show_missing", "boolean"), + ("skip_covered", "report:skip_covered", "boolean"), + ("skip_empty", "report:skip_empty", "boolean"), + ("sort", "report:sort"), + + # [html] + ("extra_css", "html:extra_css"), + ("html_dir", "html:directory"), + ("html_skip_covered", "html:skip_covered", "boolean"), + ("html_skip_empty", "html:skip_empty", "boolean"), + ("html_title", "html:title"), + ("show_contexts", "html:show_contexts", "boolean"), + + # [xml] + ("xml_output", "xml:output"), + ("xml_package_depth", "xml:package_depth", "int"), + + # [json] + ("json_output", "json:output"), + ("json_pretty_print", "json:pretty_print", "boolean"), + ("json_show_contexts", "json:show_contexts", "boolean"), + + # [lcov] + ("lcov_output", "lcov:output"), + ] + + def _set_attr_from_config_option( + self, + cp: TConfigParser, + attr: str, + where: str, + type_: str = "", + ) -> bool: + """Set an attribute on self if it exists in the ConfigParser. + + Returns True if the attribute was set. + + """ + section, option = where.split(":") + if cp.has_option(section, option): + method = getattr(cp, "get" + type_) + setattr(self, attr, method(section, option)) + return True + return False + + def get_plugin_options(self, plugin: str) -> TConfigSectionOut: + """Get a dictionary of options for the plugin named `plugin`.""" + return self.plugin_options.get(plugin, {}) + + def set_option(self, option_name: str, value: Union[TConfigValueIn, TConfigSectionIn]) -> None: + """Set an option in the configuration. + + `option_name` is a colon-separated string indicating the section and + option name. For example, the ``branch`` option in the ``[run]`` + section of the config file would be indicated with `"run:branch"`. + + `value` is the new value for the option. + + """ + # Special-cased options. + if option_name == "paths": + self.paths = value # type: ignore[assignment] + return + + # Check all the hard-coded options. 
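+        # Sketch: set_option("run:branch", True) matches the spec whose
+        # `where` is "run:branch" and runs setattr(self, "branch", True).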
+ for option_spec in self.CONFIG_FILE_OPTIONS: + attr, where = option_spec[:2] + if where == option_name: + setattr(self, attr, value) + return + + # See if it's a plugin option. + plugin_name, _, key = option_name.partition(":") + if key and plugin_name in self.plugins: + self.plugin_options.setdefault(plugin_name, {})[key] = value # type: ignore[index] + return + + # If we get here, we didn't find the option. + raise ConfigError(f"No such option: {option_name!r}") + + def get_option(self, option_name: str) -> Optional[TConfigValueOut]: + """Get an option from the configuration. + + `option_name` is a colon-separated string indicating the section and + option name. For example, the ``branch`` option in the ``[run]`` + section of the config file would be indicated with `"run:branch"`. + + Returns the value of the option. + + """ + # Special-cased options. + if option_name == "paths": + return self.paths # type: ignore[return-value] + + # Check all the hard-coded options. + for option_spec in self.CONFIG_FILE_OPTIONS: + attr, where = option_spec[:2] + if where == option_name: + return getattr(self, attr) # type: ignore[no-any-return] + + # See if it's a plugin option. + plugin_name, _, key = option_name.partition(":") + if key and plugin_name in self.plugins: + return self.plugin_options.get(plugin_name, {}).get(key) + + # If we get here, we didn't find the option. + raise ConfigError(f"No such option: {option_name!r}") + + def post_process_file(self, path: str) -> str: + """Make final adjustments to a file path to make it usable.""" + return os.path.expanduser(path) + + def post_process(self) -> None: + """Make final adjustments to settings to make them usable.""" + self.data_file = self.post_process_file(self.data_file) + self.html_dir = self.post_process_file(self.html_dir) + self.xml_output = self.post_process_file(self.xml_output) + self.paths = dict( + (k, [self.post_process_file(f) for f in v]) + for k, v in self.paths.items() + ) + self.exclude_list += self.exclude_also + + def debug_info(self) -> List[Tuple[str, Any]]: + """Make a list of (name, value) pairs for writing debug info.""" + return human_sorted_items( + (k, v) for k, v in self.__dict__.items() if not k.startswith("_") + ) + + +def config_files_to_try(config_file: Union[bool, str]) -> List[Tuple[str, bool, bool]]: + """What config files should we try to read? + + Returns a list of tuples: + (filename, is_our_file, was_file_specified) + """ + + # Some API users were specifying ".coveragerc" to mean the same as + # True, so make it so. + if config_file == ".coveragerc": + config_file = True + specified_file = (config_file is not True) + if not specified_file: + # No file was specified. Check COVERAGE_RCFILE. + rcfile = os.environ.get("COVERAGE_RCFILE") + if rcfile: + config_file = rcfile + specified_file = True + if not specified_file: + # Still no file specified. Default to .coveragerc + config_file = ".coveragerc" + assert isinstance(config_file, str) + files_to_try = [ + (config_file, True, specified_file), + ("setup.cfg", False, False), + ("tox.ini", False, False), + ("pyproject.toml", False, False), + ] + return files_to_try + + +def read_coverage_config( + config_file: Union[bool, str], + warn: Callable[[str], None], + **kwargs: TConfigValueIn, +) -> CoverageConfig: + """Read the coverage.py configuration. + + Arguments: + config_file: a boolean or string, see the `Coverage` class for the + tricky details. + warn: a function to issue warnings. 
+ all others: keyword arguments from the `Coverage` class, used for + setting values in the configuration. + + Returns: + config: + config is a CoverageConfig object read from the appropriate + configuration file. + + """ + # Build the configuration from a number of sources: + # 1) defaults: + config = CoverageConfig() + + # 2) from a file: + if config_file: + files_to_try = config_files_to_try(config_file) + + for fname, our_file, specified_file in files_to_try: + config_read = config.from_file(fname, warn, our_file=our_file) + if config_read: + break + if specified_file: + raise ConfigError(f"Couldn't read {fname!r} as a config file") + + # $set_env.py: COVERAGE_DEBUG - Options for --debug. + # 3) from environment variables: + env_data_file = os.environ.get("COVERAGE_FILE") + if env_data_file: + config.data_file = env_data_file + debugs = os.environ.get("COVERAGE_DEBUG") + if debugs: + config.debug.extend(d.strip() for d in debugs.split(",")) + + # 4) from constructor arguments: + config.from_args(**kwargs) + + # Once all the config has been collected, there's a little post-processing + # to do. + config.post_process() + + return config diff --git a/venv/lib/python3.10/site-packages/coverage/context.py b/venv/lib/python3.10/site-packages/coverage/context.py new file mode 100644 index 0000000..20a5c92 --- /dev/null +++ b/venv/lib/python3.10/site-packages/coverage/context.py @@ -0,0 +1,72 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""Determine contexts for coverage.py""" + +from __future__ import annotations + +from types import FrameType +from typing import cast, Callable, Optional, Sequence + + +def combine_context_switchers( + context_switchers: Sequence[Callable[[FrameType], Optional[str]]], +) -> Optional[Callable[[FrameType], Optional[str]]]: + """Create a single context switcher from multiple switchers. + + `context_switchers` is a list of functions that take a frame as an + argument and return a string to use as the new context label. + + Returns a function that composites `context_switchers` functions, or None + if `context_switchers` is an empty list. + + When invoked, the combined switcher calls `context_switchers` one-by-one + until a string is returned. The combined switcher returns None if all + `context_switchers` return None. + """ + if not context_switchers: + return None + + if len(context_switchers) == 1: + return context_switchers[0] + + def should_start_context(frame: FrameType) -> Optional[str]: + """The combiner for multiple context switchers.""" + for switcher in context_switchers: + new_context = switcher(frame) + if new_context is not None: + return new_context + return None + + return should_start_context + + +def should_start_context_test_function(frame: FrameType) -> Optional[str]: + """Is this frame calling a test_* function?""" + co_name = frame.f_code.co_name + if co_name.startswith("test") or co_name == "runTest": + return qualname_from_frame(frame) + return None + + +def qualname_from_frame(frame: FrameType) -> Optional[str]: + """Get a qualified name for the code running in `frame`.""" + co = frame.f_code + fname = co.co_name + method = None + if co.co_argcount and co.co_varnames[0] == "self": + self = frame.f_locals.get("self", None) + method = getattr(self, fname, None) + + if method is None: + func = frame.f_globals.get(fname) + if func is None: + return None + return cast(str, func.__module__ + "." 
+ fname) + + func = getattr(method, "__func__", None) + if func is None: + cls = self.__class__ + return cast(str, cls.__module__ + "." + cls.__name__ + "." + fname) + + return cast(str, func.__module__ + "." + func.__qualname__) diff --git a/venv/lib/python3.10/site-packages/coverage/control.py b/venv/lib/python3.10/site-packages/coverage/control.py new file mode 100644 index 0000000..6196cb6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/coverage/control.py @@ -0,0 +1,1406 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""Core control stuff for coverage.py.""" + +from __future__ import annotations + +import atexit +import collections +import contextlib +import os +import os.path +import platform +import signal +import sys +import threading +import time +import warnings + +from types import FrameType +from typing import ( + cast, + Any, Callable, Dict, IO, Iterable, Iterator, List, Optional, Tuple, Union, +) + +from coverage import env +from coverage.annotate import AnnotateReporter +from coverage.collector import Collector, HAS_CTRACER +from coverage.config import CoverageConfig, read_coverage_config +from coverage.context import should_start_context_test_function, combine_context_switchers +from coverage.data import CoverageData, combine_parallel_data +from coverage.debug import ( + DebugControl, NoDebugging, short_stack, write_formatted_info, relevant_environment_display +) +from coverage.disposition import disposition_debug_msg +from coverage.exceptions import ConfigError, CoverageException, CoverageWarning, PluginError +from coverage.files import PathAliases, abs_file, relative_filename, set_relative_directory +from coverage.html import HtmlReporter +from coverage.inorout import InOrOut +from coverage.jsonreport import JsonReporter +from coverage.lcovreport import LcovReporter +from coverage.misc import bool_or_none, join_regex +from coverage.misc import DefaultValue, ensure_dir_for_file, isolate_module +from coverage.multiproc import patch_multiprocessing +from coverage.plugin import FileReporter +from coverage.plugin_support import Plugins +from coverage.python import PythonFileReporter +from coverage.report import SummaryReporter +from coverage.report_core import render_report +from coverage.results import Analysis +from coverage.types import ( + FilePath, TConfigurable, TConfigSectionIn, TConfigValueIn, TConfigValueOut, + TFileDisposition, TLineNo, TMorf, +) +from coverage.xmlreport import XmlReporter + +os = isolate_module(os) + +@contextlib.contextmanager +def override_config(cov: Coverage, **kwargs: TConfigValueIn) -> Iterator[None]: + """Temporarily tweak the configuration of `cov`. + + The arguments are applied to `cov.config` with the `from_args` method. + At the end of the with-statement, the old configuration is restored. + """ + original_config = cov.config + cov.config = cov.config.copy() + try: + cov.config.from_args(**kwargs) + yield + finally: + cov.config = original_config + + +DEFAULT_DATAFILE = DefaultValue("MISSING") +_DEFAULT_DATAFILE = DEFAULT_DATAFILE # Just in case, for backwards compatibility + +class Coverage(TConfigurable): + """Programmatic access to coverage.py. + + To use:: + + from coverage import Coverage + + cov = Coverage() + cov.start() + #.. call your code .. + cov.stop() + cov.html_report(directory="covhtml") + + A context manager is available to do the same thing:: + + cov = Coverage() + with cov.collect(): + #.. 
call your code .. + cov.html_report(directory="covhtml") + + Note: in keeping with Python custom, names starting with underscore are + not part of the public API. They might stop working at any point. Please + limit yourself to documented methods to avoid problems. + + Methods can raise any of the exceptions described in :ref:`api_exceptions`. + + """ + + # The stack of started Coverage instances. + _instances: List[Coverage] = [] + + @classmethod + def current(cls) -> Optional[Coverage]: + """Get the latest started `Coverage` instance, if any. + + Returns: a `Coverage` instance, or None. + + .. versionadded:: 5.0 + + """ + if cls._instances: + return cls._instances[-1] + else: + return None + + def __init__( # pylint: disable=too-many-arguments + self, + data_file: Optional[Union[FilePath, DefaultValue]] = DEFAULT_DATAFILE, + data_suffix: Optional[Union[str, bool]] = None, + cover_pylib: Optional[bool] = None, + auto_data: bool = False, + timid: Optional[bool] = None, + branch: Optional[bool] = None, + config_file: Union[FilePath, bool] = True, + source: Optional[Iterable[str]] = None, + source_pkgs: Optional[Iterable[str]] = None, + omit: Optional[Union[str, Iterable[str]]] = None, + include: Optional[Union[str, Iterable[str]]] = None, + debug: Optional[Iterable[str]] = None, + concurrency: Optional[Union[str, Iterable[str]]] = None, + check_preimported: bool = False, + context: Optional[str] = None, + messages: bool = False, + ) -> None: + """ + Many of these arguments duplicate and override values that can be + provided in a configuration file. Parameters that are missing here + will use values from the config file. + + `data_file` is the base name of the data file to use. The config value + defaults to ".coverage". None can be provided to prevent writing a data + file. `data_suffix` is appended (with a dot) to `data_file` to create + the final file name. If `data_suffix` is simply True, then a suffix is + created with the machine and process identity included. + + `cover_pylib` is a boolean determining whether Python code installed + with the Python interpreter is measured. This includes the Python + standard library and any packages installed with the interpreter. + + If `auto_data` is true, then any existing data file will be read when + coverage measurement starts, and data will be saved automatically when + measurement stops. + + If `timid` is true, then a slower and simpler trace function will be + used. This is important for some environments where manipulation of + tracing functions breaks the faster trace function. + + If `branch` is true, then branch coverage will be measured in addition + to the usual statement coverage. + + `config_file` determines what configuration file to read: + + * If it is ".coveragerc", it is interpreted as if it were True, + for backward compatibility. + + * If it is a string, it is the name of the file to read. If the + file can't be read, it is an error. + + * If it is True, then a few standard files names are tried + (".coveragerc", "setup.cfg", "tox.ini"). It is not an error for + these files to not be found. + + * If it is False, then no configuration file is read. + + `source` is a list of file paths or package names. Only code located + in the trees indicated by the file paths or package names will be + measured. + + `source_pkgs` is a list of package names. It works the same as + `source`, but can be used to name packages where the name can also be + interpreted as a file path. + + `include` and `omit` are lists of file name patterns. 
Files that match + `include` will be measured, files that match `omit` will not. Each + will also accept a single string argument. + + `debug` is a list of strings indicating what debugging information is + desired. + + `concurrency` is a string indicating the concurrency library being used + in the measured code. Without this, coverage.py will get incorrect + results if these libraries are in use. Valid strings are "greenlet", + "eventlet", "gevent", "multiprocessing", or "thread" (the default). + This can also be a list of these strings. + + If `check_preimported` is true, then when coverage is started, the + already-imported files will be checked to see if they should be + measured by coverage. Importing measured files before coverage is + started can mean that code is missed. + + `context` is a string to use as the :ref:`static context + ` label for collected data. + + If `messages` is true, some messages will be printed to stdout + indicating what is happening. + + .. versionadded:: 4.0 + The `concurrency` parameter. + + .. versionadded:: 4.2 + The `concurrency` parameter can now be a list of strings. + + .. versionadded:: 5.0 + The `check_preimported` and `context` parameters. + + .. versionadded:: 5.3 + The `source_pkgs` parameter. + + .. versionadded:: 6.0 + The `messages` parameter. + + """ + # Start self.config as a usable default configuration. It will soon be + # replaced with the real configuration. + self.config = CoverageConfig() + + # data_file=None means no disk file at all. data_file missing means + # use the value from the config file. + self._no_disk = data_file is None + if isinstance(data_file, DefaultValue): + data_file = None + if data_file is not None: + data_file = os.fspath(data_file) + + # This is injectable by tests. + self._debug_file: Optional[IO[str]] = None + + self._auto_load = self._auto_save = auto_data + self._data_suffix_specified = data_suffix + + # Is it ok for no data to be collected? + self._warn_no_data = True + self._warn_unimported_source = True + self._warn_preimported_source = check_preimported + self._no_warn_slugs: List[str] = [] + self._messages = messages + + # A record of all the warnings that have been issued. + self._warnings: List[str] = [] + + # Other instance attributes, set with placebos or placeholders. + # More useful objects will be created later. + self._debug: DebugControl = NoDebugging() + self._inorout: Optional[InOrOut] = None + self._plugins: Plugins = Plugins() + self._data: Optional[CoverageData] = None + self._collector: Optional[Collector] = None + + self._file_mapper: Callable[[str], str] = abs_file + self._data_suffix = self._run_suffix = None + self._exclude_re: Dict[str, str] = {} + self._old_sigterm: Optional[Callable[[int, Optional[FrameType]], Any]] = None + + # State machine variables: + # Have we initialized everything? + self._inited = False + self._inited_for_start = False + # Have we started collecting and not stopped it? + self._started = False + # Should we write the debug output? + self._should_write_debug = True + + # Build our configuration from a number of sources. 
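+        # read_coverage_config (see config.py above) layers sources in this
+        # order: built-in defaults, then a config file, then COVERAGE_*
+        # environment variables, then the arguments passed here.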
+ if not isinstance(config_file, bool): + config_file = os.fspath(config_file) + self.config = read_coverage_config( + config_file=config_file, + warn=self._warn, + data_file=data_file, + cover_pylib=cover_pylib, + timid=timid, + branch=branch, + parallel=bool_or_none(data_suffix), + source=source, + source_pkgs=source_pkgs, + run_omit=omit, + run_include=include, + debug=debug, + report_omit=omit, + report_include=include, + concurrency=concurrency, + context=context, + ) + + # If we have sub-process measurement happening automatically, then we + # want any explicit creation of a Coverage object to mean, this process + # is already coverage-aware, so don't auto-measure it. By now, the + # auto-creation of a Coverage object has already happened. But we can + # find it and tell it not to save its data. + if not env.METACOV: + _prevent_sub_process_measurement() + + def _init(self) -> None: + """Set all the initial state. + + This is called by the public methods to initialize state. This lets us + construct a :class:`Coverage` object, then tweak its state before this + function is called. + + """ + if self._inited: + return + + self._inited = True + + # Create and configure the debugging controller. + self._debug = DebugControl(self.config.debug, self._debug_file, self.config.debug_file) + + if "multiprocessing" in (self.config.concurrency or ()): + # Multi-processing uses parallel for the subprocesses, so also use + # it for the main process. + self.config.parallel = True + + # _exclude_re is a dict that maps exclusion list names to compiled regexes. + self._exclude_re = {} + + set_relative_directory() + if self.config.relative_files: + self._file_mapper = relative_filename + + # Load plugins + self._plugins = Plugins.load_plugins(self.config.plugins, self.config, self._debug) + + # Run configuring plugins. + for plugin in self._plugins.configurers: + # We need an object with set_option and get_option. Either self or + # self.config will do. Choosing randomly stops people from doing + # other things with those objects, against the public API. Yes, + # this is a bit childish. :) + plugin.configure([self, self.config][int(time.time()) % 2]) + + def _post_init(self) -> None: + """Stuff to do after everything is initialized.""" + if self._should_write_debug: + self._should_write_debug = False + self._write_startup_debug() + + # "[run] _crash" will raise an exception if the value is close by in + # the call stack, for testing error handling. 
+ if self.config._crash and self.config._crash in short_stack(limit=4): + raise RuntimeError(f"Crashing because called by {self.config._crash}") + + def _write_startup_debug(self) -> None: + """Write out debug info at startup if needed.""" + wrote_any = False + with self._debug.without_callers(): + if self._debug.should("config"): + config_info = self.config.debug_info() + write_formatted_info(self._debug.write, "config", config_info) + wrote_any = True + + if self._debug.should("sys"): + write_formatted_info(self._debug.write, "sys", self.sys_info()) + for plugin in self._plugins: + header = "sys: " + plugin._coverage_plugin_name + info = plugin.sys_info() + write_formatted_info(self._debug.write, header, info) + wrote_any = True + + if self._debug.should("pybehave"): + write_formatted_info(self._debug.write, "pybehave", env.debug_info()) + wrote_any = True + + if wrote_any: + write_formatted_info(self._debug.write, "end", ()) + + def _should_trace(self, filename: str, frame: FrameType) -> TFileDisposition: + """Decide whether to trace execution in `filename`. + + Calls `_should_trace_internal`, and returns the FileDisposition. + + """ + assert self._inorout is not None + disp = self._inorout.should_trace(filename, frame) + if self._debug.should("trace"): + self._debug.write(disposition_debug_msg(disp)) + return disp + + def _check_include_omit_etc(self, filename: str, frame: FrameType) -> bool: + """Check a file name against the include/omit/etc. rules, verbosely. + + Returns a boolean: True if the file should be traced, False if not. + + """ + assert self._inorout is not None + reason = self._inorout.check_include_omit_etc(filename, frame) + if self._debug.should("trace"): + if not reason: + msg = f"Including {filename!r}" + else: + msg = f"Not including {filename!r}: {reason}" + self._debug.write(msg) + + return not reason + + def _warn(self, msg: str, slug: Optional[str] = None, once: bool = False) -> None: + """Use `msg` as a warning. + + For warning suppression, use `slug` as the shorthand. + + If `once` is true, only show this warning once (determined by the + slug.) + + """ + if not self._no_warn_slugs: + self._no_warn_slugs = list(self.config.disable_warnings) + + if slug in self._no_warn_slugs: + # Don't issue the warning + return + + self._warnings.append(msg) + if slug: + msg = f"{msg} ({slug})" + if self._debug.should("pid"): + msg = f"[{os.getpid()}] {msg}" + warnings.warn(msg, category=CoverageWarning, stacklevel=2) + + if once: + assert slug is not None + self._no_warn_slugs.append(slug) + + def _message(self, msg: str) -> None: + """Write a message to the user, if configured to do so.""" + if self._messages: + print(msg) + + def get_option(self, option_name: str) -> Optional[TConfigValueOut]: + """Get an option from the configuration. + + `option_name` is a colon-separated string indicating the section and + option name. For example, the ``branch`` option in the ``[run]`` + section of the config file would be indicated with ``"run:branch"``. + + Returns the value of the option. The type depends on the option + selected. + + As a special case, an `option_name` of ``"paths"`` will return a + dictionary with the entire ``[paths]`` section value. + + .. versionadded:: 4.0 + + """ + return self.config.get_option(option_name) + + def set_option(self, option_name: str, value: Union[TConfigValueIn, TConfigSectionIn]) -> None: + """Set an option in the configuration. + + `option_name` is a colon-separated string indicating the section and + option name.
For example, the ``branch`` option in the ``[run]`` + section of the config file would be indicated with ``"run:branch"``. + + `value` is the new value for the option. This should be an + appropriate Python value. For example, use True for booleans, not the + string ``"True"``. + + As an example, calling: + + .. code-block:: python + + cov.set_option("run:branch", True) + + has the same effect as this configuration file: + + .. code-block:: ini + + [run] + branch = True + + As a special case, an `option_name` of ``"paths"`` will replace the + entire ``[paths]`` section. The value should be a dictionary. + + .. versionadded:: 4.0 + + """ + self.config.set_option(option_name, value) + + def load(self) -> None: + """Load previously-collected coverage data from the data file.""" + self._init() + if self._collector is not None: + self._collector.reset() + should_skip = self.config.parallel and not os.path.exists(self.config.data_file) + if not should_skip: + self._init_data(suffix=None) + self._post_init() + if not should_skip: + assert self._data is not None + self._data.read() + + def _init_for_start(self) -> None: + """Initialization for start()""" + # Construct the collector. + concurrency: List[str] = self.config.concurrency or [] + if "multiprocessing" in concurrency: + if self.config.config_file is None: + raise ConfigError("multiprocessing requires a configuration file") + patch_multiprocessing(rcfile=self.config.config_file) + + dycon = self.config.dynamic_context + if not dycon or dycon == "none": + context_switchers = [] + elif dycon == "test_function": + context_switchers = [should_start_context_test_function] + else: + raise ConfigError(f"Don't understand dynamic_context setting: {dycon!r}") + + context_switchers.extend( + plugin.dynamic_context for plugin in self._plugins.context_switchers + ) + + should_start_context = combine_context_switchers(context_switchers) + + self._collector = Collector( + should_trace=self._should_trace, + check_include=self._check_include_omit_etc, + should_start_context=should_start_context, + file_mapper=self._file_mapper, + timid=self.config.timid, + branch=self.config.branch, + warn=self._warn, + concurrency=concurrency, + ) + + suffix = self._data_suffix_specified + if suffix: + if not isinstance(suffix, str): + # if data_suffix=True, use .machinename.pid.random + suffix = True + elif self.config.parallel: + if suffix is None: + suffix = True + elif not isinstance(suffix, str): + suffix = bool(suffix) + else: + suffix = None + + self._init_data(suffix) + + assert self._data is not None + self._collector.use_data(self._data, self.config.context) + + # Early warning if we aren't going to be able to support plugins. + if self._plugins.file_tracers and not self._collector.supports_plugins: + self._warn( + "Plugin file tracers ({}) aren't supported with {}".format( + ", ".join( + plugin._coverage_plugin_name + for plugin in self._plugins.file_tracers + ), + self._collector.tracer_name(), + ) + ) + for plugin in self._plugins.file_tracers: + plugin._coverage_enabled = False + + # Create the file classifying substructure. + self._inorout = InOrOut( + config=self.config, + warn=self._warn, + debug=(self._debug if self._debug.should("trace") else None), + include_namespace_packages=self.config.include_namespace_packages, + ) + self._inorout.plugins = self._plugins + self._inorout.disp_class = self._collector.file_disposition_class + + # It's useful to write debug info after initing for start. 
+ self._should_write_debug = True + + # Register our clean-up handlers. + atexit.register(self._atexit) + if self.config.sigterm: + is_main = (threading.current_thread() == threading.main_thread()) + if is_main and not env.WINDOWS: + # The Python docs seem to imply that SIGTERM works uniformly even + # on Windows, but that's not my experience, and this agrees: + # https://stackoverflow.com/questions/35772001/x/35792192#35792192 + self._old_sigterm = signal.signal( # type: ignore[assignment] + signal.SIGTERM, self._on_sigterm, + ) + + def _init_data(self, suffix: Optional[Union[str, bool]]) -> None: + """Create a data file if we don't have one yet.""" + if self._data is None: + # Create the data file. We do this at construction time so that the + # data file will be written into the directory where the process + # started rather than wherever the process eventually chdir'd to. + ensure_dir_for_file(self.config.data_file) + self._data = CoverageData( + basename=self.config.data_file, + suffix=suffix, + warn=self._warn, + debug=self._debug, + no_disk=self._no_disk, + ) + + def start(self) -> None: + """Start measuring code coverage. + + Coverage measurement is only collected in functions called after + :meth:`start` is invoked. Statements in the same scope as + :meth:`start` won't be measured. + + Once you invoke :meth:`start`, you must also call :meth:`stop` + eventually, or your process might not shut down cleanly. + + The :meth:`collect` method is a context manager to handle both + starting and stopping collection. + + """ + self._init() + if not self._inited_for_start: + self._inited_for_start = True + self._init_for_start() + self._post_init() + + assert self._collector is not None + assert self._inorout is not None + + # Issue warnings for possible problems. + self._inorout.warn_conflicting_settings() + + # See if we think some code that would eventually be measured has + # already been imported. + if self._warn_preimported_source: + self._inorout.warn_already_imported_files() + + if self._auto_load: + self.load() + + self._collector.start() + self._started = True + self._instances.append(self) + + def stop(self) -> None: + """Stop measuring code coverage.""" + if self._instances: + if self._instances[-1] is self: + self._instances.pop() + if self._started: + assert self._collector is not None + self._collector.stop() + self._started = False + + @contextlib.contextmanager + def collect(self) -> Iterator[None]: + """A context manager to start/stop coverage measurement collection. + + .. versionadded:: 7.3 + + """ + self.start() + try: + yield + finally: + self.stop() + + def _atexit(self, event: str = "atexit") -> None: + """Clean up on process shutdown.""" + if self._debug.should("process"): + self._debug.write(f"{event}: pid: {os.getpid()}, instance: {self!r}") + if self._started: + self.stop() + if self._auto_save or event == "sigterm": + self.save() + + def _on_sigterm(self, signum_unused: int, frame_unused: Optional[FrameType]) -> None: + """A handler for signal.SIGTERM.""" + self._atexit("sigterm") + # Statements after here won't be seen by metacov because we just wrote + # the data, and are about to kill the process. + signal.signal(signal.SIGTERM, self._old_sigterm) # pragma: not covered + os.kill(os.getpid(), signal.SIGTERM) # pragma: not covered + + def erase(self) -> None: + """Erase previously collected coverage data. + + This removes the in-memory data collected in this session as well as + discarding the data file. 
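+
+        For example, a minimal sketch of starting over (an illustrative
+        example, not part of the upstream docstring):
+
+        .. code-block:: python
+
+            import coverage
+
+            cov = coverage.Coverage()
+            cov.erase()             # drop in-memory data and the data file
+            with cov.collect():     # measure a fresh run
+                pass                # .. call your code ..
+            cov.save()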
+ + """ + self._init() + self._post_init() + if self._collector is not None: + self._collector.reset() + self._init_data(suffix=None) + assert self._data is not None + self._data.erase(parallel=self.config.parallel) + self._data = None + self._inited_for_start = False + + def switch_context(self, new_context: str) -> None: + """Switch to a new dynamic context. + + `new_context` is a string to use as the :ref:`dynamic context + <dynamic_contexts>` label for collected data. If a :ref:`static + context <static_contexts>` is in use, the static and dynamic context + labels will be joined together with a pipe character. + + Coverage collection must be started already. + + .. versionadded:: 5.0 + + """ + if not self._started: # pragma: part started + raise CoverageException("Cannot switch context, coverage is not started") + + assert self._collector is not None + if self._collector.should_start_context: + self._warn("Conflicting dynamic contexts", slug="dynamic-conflict", once=True) + + self._collector.switch_context(new_context) + + def clear_exclude(self, which: str = "exclude") -> None: + """Clear the exclude list.""" + self._init() + setattr(self.config, which + "_list", []) + self._exclude_regex_stale() + + def exclude(self, regex: str, which: str = "exclude") -> None: + """Exclude source lines from execution consideration. + + A number of lists of regular expressions are maintained. Each list + selects lines that are treated differently during reporting. + + `which` determines which list is modified. The "exclude" list selects + lines that are not considered executable at all. The "partial" list + indicates lines with branches that are not taken. + + `regex` is a regular expression. The regex is added to the specified + list. If any of the regexes in the list is found in a line, the line + is marked for special treatment during reporting. + + """ + self._init() + excl_list = getattr(self.config, which + "_list") + excl_list.append(regex) + self._exclude_regex_stale() + + def _exclude_regex_stale(self) -> None: + """Drop all the compiled exclusion regexes; a list was modified.""" + self._exclude_re.clear() + + def _exclude_regex(self, which: str) -> str: + """Return a regex string for the given exclusion list.""" + if which not in self._exclude_re: + excl_list = getattr(self.config, which + "_list") + self._exclude_re[which] = join_regex(excl_list) + return self._exclude_re[which] + + def get_exclude_list(self, which: str = "exclude") -> List[str]: + """Return a list of excluded regex strings. + + `which` indicates which list is desired. See :meth:`exclude` for the + lists that are available, and their meaning. + + """ + self._init() + return cast(List[str], getattr(self.config, which + "_list")) + + def save(self) -> None: + """Save the collected coverage data to the data file.""" + data = self.get_data() + data.write() + + def _make_aliases(self) -> PathAliases: + """Create a PathAliases from our configuration.""" + aliases = PathAliases( + debugfn=(self._debug.write if self._debug.should("pathmap") else None), + relative=self.config.relative_files, + ) + for paths in self.config.paths.values(): + result = paths[0] + for pattern in paths[1:]: + aliases.add(pattern, result) + return aliases + + def combine( + self, + data_paths: Optional[Iterable[str]] = None, + strict: bool = False, + keep: bool = False + ) -> None: + """Combine together a number of similarly-named coverage data files.
+ + All coverage data files whose name starts with `data_file` (from the + coverage() constructor) will be read, and combined together into the + current measurements. + + `data_paths` is a list of files or directories from which data should + be combined. If no list is passed, then the data files from the + directory indicated by the current data file (probably the current + directory) will be combined. + + If `strict` is true, then it is an error to attempt to combine when + there are no data files to combine. + + If `keep` is true, then original input data files won't be deleted. + + .. versionadded:: 4.0 + The `data_paths` parameter. + + .. versionadded:: 4.3 + The `strict` parameter. + + .. versionadded:: 5.5 + The `keep` parameter. + """ + self._init() + self._init_data(suffix=None) + self._post_init() + self.get_data() + + assert self._data is not None + combine_parallel_data( + self._data, + aliases=self._make_aliases(), + data_paths=data_paths, + strict=strict, + keep=keep, + message=self._message, + ) + + def get_data(self) -> CoverageData: + """Get the collected data. + + Also warn about various problems collecting data. + + Returns a :class:`coverage.CoverageData`, the collected coverage data. + + .. versionadded:: 4.0 + + """ + self._init() + self._init_data(suffix=None) + self._post_init() + + if self._collector is not None: + for plugin in self._plugins: + if not plugin._coverage_enabled: + self._collector.plugin_was_disabled(plugin) + + if self._collector.flush_data(): + self._post_save_work() + + assert self._data is not None + return self._data + + def _post_save_work(self) -> None: + """After saving data, look for warnings, post-work, etc. + + Warn about things that should have happened but didn't. + Look for un-executed files. + + """ + assert self._data is not None + assert self._inorout is not None + + # If there are still entries in the source_pkgs_unmatched list, + # then we never encountered those packages. + if self._warn_unimported_source: + self._inorout.warn_unimported_source() + + # Find out if we got any data. + if not self._data and self._warn_no_data: + self._warn("No data was collected.", slug="no-data-collected") + + # Touch all the files that could have executed, so that we can + # mark completely un-executed files as 0% covered. + file_paths = collections.defaultdict(list) + for file_path, plugin_name in self._inorout.find_possibly_unexecuted_files(): + file_path = self._file_mapper(file_path) + file_paths[plugin_name].append(file_path) + for plugin_name, paths in file_paths.items(): + self._data.touch_files(paths, plugin_name) + + # Backward compatibility with version 1. + def analysis(self, morf: TMorf) -> Tuple[str, List[TLineNo], List[TLineNo], str]: + """Like `analysis2` but doesn't return excluded line numbers.""" + f, s, _, m, mf = self.analysis2(morf) + return f, s, m, mf + + def analysis2( + self, + morf: TMorf, + ) -> Tuple[str, List[TLineNo], List[TLineNo], List[TLineNo], str]: + """Analyze a module. + + `morf` is a module or a file name. It will be analyzed to determine + its coverage statistics. The return value is a 5-tuple: + + * The file name for the module. + * A list of line numbers of executable statements. + * A list of line numbers of excluded statements. + * A list of line numbers of statements not run (missing from + execution). + * A readable formatted string of the missing line numbers. + + The analysis uses the source file itself and the current measured + coverage data.
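+
+        For example (an illustrative sketch, not from the upstream docstring;
+        "mymodule.py" stands in for a file that was measured):
+
+        .. code-block:: python
+
+            import coverage
+
+            cov = coverage.Coverage()
+            cov.load()    # read previously-collected data
+            fname, statements, excluded, missing, fmt = cov.analysis2("mymodule.py")
+            print(f"{fname}: {len(missing)} missing lines: {fmt}")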
+ + """ + analysis = self._analyze(morf) + return ( + analysis.filename, + sorted(analysis.statements), + sorted(analysis.excluded), + sorted(analysis.missing), + analysis.missing_formatted(), + ) + + def _analyze(self, it: Union[FileReporter, TMorf]) -> Analysis: + """Analyze a single morf or code unit. + + Returns an `Analysis` object. + + """ + # All reporting comes through here, so do reporting initialization. + self._init() + self._post_init() + + data = self.get_data() + if isinstance(it, FileReporter): + fr = it + else: + fr = self._get_file_reporter(it) + + return Analysis(data, self.config.precision, fr, self._file_mapper) + + def _get_file_reporter(self, morf: TMorf) -> FileReporter: + """Get a FileReporter for a module or file name.""" + assert self._data is not None + plugin = None + file_reporter: Union[str, FileReporter] = "python" + + if isinstance(morf, str): + mapped_morf = self._file_mapper(morf) + plugin_name = self._data.file_tracer(mapped_morf) + if plugin_name: + plugin = self._plugins.get(plugin_name) + + if plugin: + file_reporter = plugin.file_reporter(mapped_morf) + if file_reporter is None: + raise PluginError( + "Plugin {!r} did not provide a file reporter for {!r}.".format( + plugin._coverage_plugin_name, morf + ) + ) + + if file_reporter == "python": + file_reporter = PythonFileReporter(morf, self) + + assert isinstance(file_reporter, FileReporter) + return file_reporter + + def _get_file_reporters(self, morfs: Optional[Iterable[TMorf]] = None) -> List[FileReporter]: + """Get a list of FileReporters for a list of modules or file names. + + For each module or file name in `morfs`, find a FileReporter. Return + the list of FileReporters. + + If `morfs` is a single module or file name, this returns a list of one + FileReporter. If `morfs` is empty or None, then the list of all files + measured is used to find the FileReporters. + + """ + assert self._data is not None + if not morfs: + morfs = self._data.measured_files() + + # Be sure we have a collection. + if not isinstance(morfs, (list, tuple, set)): + morfs = [morfs] # type: ignore[list-item] + + file_reporters = [self._get_file_reporter(morf) for morf in morfs] + return file_reporters + + def _prepare_data_for_reporting(self) -> None: + """Re-map data before reporting, to get implicit "combine" behavior.""" + if self.config.paths: + mapped_data = CoverageData(warn=self._warn, debug=self._debug, no_disk=True) + if self._data is not None: + mapped_data.update(self._data, aliases=self._make_aliases()) + self._data = mapped_data + + def report( + self, + morfs: Optional[Iterable[TMorf]] = None, + show_missing: Optional[bool] = None, + ignore_errors: Optional[bool] = None, + file: Optional[IO[str]] = None, + omit: Optional[Union[str, List[str]]] = None, + include: Optional[Union[str, List[str]]] = None, + skip_covered: Optional[bool] = None, + contexts: Optional[List[str]] = None, + skip_empty: Optional[bool] = None, + precision: Optional[int] = None, + sort: Optional[str] = None, + output_format: Optional[str] = None, + ) -> float: + """Write a textual summary report to `file`. + + Each module in `morfs` is listed, with counts of statements, executed + statements, missing statements, and a list of lines missed. + + If `show_missing` is true, then details of which lines or branches are + missing will be included in the report. If `ignore_errors` is true, + then a failure while reporting a single file will not stop the entire + report. + + `file` is a file-like object, suitable for writing. 
+ + `output_format` determines the format, either "text" (the default), + "markdown", or "total". + + `include` is a list of file name patterns. Files that match will be + included in the report. Files matching `omit` will not be included in + the report. + + If `skip_covered` is true, don't report on files with 100% coverage. + + If `skip_empty` is true, don't report on empty files (those that have + no statements). + + `contexts` is a list of regular expression strings. Only data from + :ref:`dynamic contexts <dynamic_contexts>` that match one of those + expressions (using :func:`re.search <python:re.search>`) will be + included in the report. + + `precision` is the number of digits to display after the decimal + point for percentages. + + All of the arguments default to the settings read from the + :ref:`configuration file <config>`. + + Returns a float, the total percentage covered. + + .. versionadded:: 4.0 + The `skip_covered` parameter. + + .. versionadded:: 5.0 + The `contexts` and `skip_empty` parameters. + + .. versionadded:: 5.2 + The `precision` parameter. + + .. versionadded:: 7.0 + The `output_format` parameter. + + """ + self._prepare_data_for_reporting() + with override_config( + self, + ignore_errors=ignore_errors, + report_omit=omit, + report_include=include, + show_missing=show_missing, + skip_covered=skip_covered, + report_contexts=contexts, + skip_empty=skip_empty, + precision=precision, + sort=sort, + format=output_format, + ): + reporter = SummaryReporter(self) + return reporter.report(morfs, outfile=file) + + def annotate( + self, + morfs: Optional[Iterable[TMorf]] = None, + directory: Optional[str] = None, + ignore_errors: Optional[bool] = None, + omit: Optional[Union[str, List[str]]] = None, + include: Optional[Union[str, List[str]]] = None, + contexts: Optional[List[str]] = None, + ) -> None: + """Annotate a list of modules. + + .. note:: + + This method has been obsoleted by more modern reporting tools, + including the :meth:`html_report` method. It will be removed in a + future version. + + Each module in `morfs` is annotated. The source is written to a new + file, named with a ",cover" suffix, with each line prefixed with a + marker to indicate the coverage of the line. Covered lines have ">", + excluded lines have "-", and missing lines have "!". + + See :meth:`report` for other arguments. + + """ + print("The annotate command will be removed in a future version.") + print("Get in touch if you still use it: ned@nedbatchelder.com") + + self._prepare_data_for_reporting() + with override_config( + self, + ignore_errors=ignore_errors, + report_omit=omit, + report_include=include, + report_contexts=contexts, + ): + reporter = AnnotateReporter(self) + reporter.report(morfs, directory=directory) + + def html_report( + self, + morfs: Optional[Iterable[TMorf]] = None, + directory: Optional[str] = None, + ignore_errors: Optional[bool] = None, + omit: Optional[Union[str, List[str]]] = None, + include: Optional[Union[str, List[str]]] = None, + extra_css: Optional[str] = None, + title: Optional[str] = None, + skip_covered: Optional[bool] = None, + show_contexts: Optional[bool] = None, + contexts: Optional[List[str]] = None, + skip_empty: Optional[bool] = None, + precision: Optional[int] = None, + ) -> float: + """Generate an HTML report. + + The HTML is written to `directory`. The file "index.html" is the + overview starting point, with links to more detailed pages for + individual modules. + + `extra_css` is a path to a file of other CSS to apply on the page. + It will be copied into the HTML directory.
+ + `title` is a text string (not HTML) to use as the title of the HTML + report. + + See :meth:`report` for other arguments. + + Returns a float, the total percentage covered. + + .. note:: + + The HTML report files are generated incrementally based on the + source files and coverage results. If you modify the report files, + the changes will not be considered. You should be careful about + changing the files in the report folder. + + """ + self._prepare_data_for_reporting() + with override_config( + self, + ignore_errors=ignore_errors, + report_omit=omit, + report_include=include, + html_dir=directory, + extra_css=extra_css, + html_title=title, + html_skip_covered=skip_covered, + show_contexts=show_contexts, + report_contexts=contexts, + html_skip_empty=skip_empty, + precision=precision, + ): + reporter = HtmlReporter(self) + ret = reporter.report(morfs) + return ret + + def xml_report( + self, + morfs: Optional[Iterable[TMorf]] = None, + outfile: Optional[str] = None, + ignore_errors: Optional[bool] = None, + omit: Optional[Union[str, List[str]]] = None, + include: Optional[Union[str, List[str]]] = None, + contexts: Optional[List[str]] = None, + skip_empty: Optional[bool] = None, + ) -> float: + """Generate an XML report of coverage results. + + The report is compatible with Cobertura reports. + + Each module in `morfs` is included in the report. `outfile` is the + path to write the file to, "-" will write to stdout. + + See :meth:`report` for other arguments. + + Returns a float, the total percentage covered. + + """ + self._prepare_data_for_reporting() + with override_config( + self, + ignore_errors=ignore_errors, + report_omit=omit, + report_include=include, + xml_output=outfile, + report_contexts=contexts, + skip_empty=skip_empty, + ): + return render_report(self.config.xml_output, XmlReporter(self), morfs, self._message) + + def json_report( + self, + morfs: Optional[Iterable[TMorf]] = None, + outfile: Optional[str] = None, + ignore_errors: Optional[bool] = None, + omit: Optional[Union[str, List[str]]] = None, + include: Optional[Union[str, List[str]]] = None, + contexts: Optional[List[str]] = None, + pretty_print: Optional[bool] = None, + show_contexts: Optional[bool] = None, + ) -> float: + """Generate a JSON report of coverage results. + + Each module in `morfs` is included in the report. `outfile` is the + path to write the file to, "-" will write to stdout. + + `pretty_print` is a boolean, whether to pretty-print the JSON output or not. + + See :meth:`report` for other arguments. + + Returns a float, the total percentage covered. + + .. versionadded:: 5.0 + + """ + self._prepare_data_for_reporting() + with override_config( + self, + ignore_errors=ignore_errors, + report_omit=omit, + report_include=include, + json_output=outfile, + report_contexts=contexts, + json_pretty_print=pretty_print, + json_show_contexts=show_contexts, + ): + return render_report(self.config.json_output, JsonReporter(self), morfs, self._message) + + def lcov_report( + self, + morfs: Optional[Iterable[TMorf]] = None, + outfile: Optional[str] = None, + ignore_errors: Optional[bool] = None, + omit: Optional[Union[str, List[str]]] = None, + include: Optional[Union[str, List[str]]] = None, + contexts: Optional[List[str]] = None, + ) -> float: + """Generate an LCOV report of coverage results. + + Each module in `morfs` is included in the report. `outfile` is the + path to write the file to, "-" will write to stdout. + + See :meth:`report` for other arguments. + + .. 
versionadded:: 6.3 + """ + self._prepare_data_for_reporting() + with override_config( + self, + ignore_errors=ignore_errors, + report_omit=omit, + report_include=include, + lcov_output=outfile, + report_contexts=contexts, + ): + return render_report(self.config.lcov_output, LcovReporter(self), morfs, self._message) + + def sys_info(self) -> Iterable[Tuple[str, Any]]: + """Return a list of (key, value) pairs showing internal information.""" + + import coverage as covmod + + self._init() + self._post_init() + + def plugin_info(plugins: List[Any]) -> List[str]: + """Make an entry for the sys_info from a list of plug-ins.""" + entries = [] + for plugin in plugins: + entry = plugin._coverage_plugin_name + if not plugin._coverage_enabled: + entry += " (disabled)" + entries.append(entry) + return entries + + info = [ + ("coverage_version", covmod.__version__), + ("coverage_module", covmod.__file__), + ("tracer", self._collector.tracer_name() if self._collector is not None else "-none-"), + ("CTracer", "available" if HAS_CTRACER else "unavailable"), + ("plugins.file_tracers", plugin_info(self._plugins.file_tracers)), + ("plugins.configurers", plugin_info(self._plugins.configurers)), + ("plugins.context_switchers", plugin_info(self._plugins.context_switchers)), + ("configs_attempted", self.config.attempted_config_files), + ("configs_read", self.config.config_files_read), + ("config_file", self.config.config_file), + ("config_contents", + repr(self.config._config_contents) if self.config._config_contents else "-none-" + ), + ("data_file", self._data.data_filename() if self._data is not None else "-none-"), + ("python", sys.version.replace("\n", "")), + ("platform", platform.platform()), + ("implementation", platform.python_implementation()), + ("executable", sys.executable), + ("def_encoding", sys.getdefaultencoding()), + ("fs_encoding", sys.getfilesystemencoding()), + ("pid", os.getpid()), + ("cwd", os.getcwd()), + ("path", sys.path), + ("environment", [f"{k} = {v}" for k, v in relevant_environment_display(os.environ)]), + ("command_line", " ".join(getattr(sys, "argv", ["-none-"]))), + ] + + if self._inorout is not None: + info.extend(self._inorout.sys_info()) + + info.extend(CoverageData.sys_info()) + + return info + + +# Mega debugging... +# $set_env.py: COVERAGE_DEBUG_CALLS - Lots and lots of output about calls to Coverage. +if int(os.environ.get("COVERAGE_DEBUG_CALLS", 0)): # pragma: debugging + from coverage.debug import decorate_methods, show_calls + + Coverage = decorate_methods( # type: ignore[misc] + show_calls(show_args=True), + butnot=["get_data"] + )(Coverage) + + +def process_startup() -> Optional[Coverage]: + """Call this at Python start-up to perhaps measure coverage. + + If the environment variable COVERAGE_PROCESS_START is defined, coverage + measurement is started. The value of the variable is the config file + to use. + + There are two ways to configure your Python installation to invoke this + function when Python starts: + + #. Create or append to sitecustomize.py to add these lines:: + + import coverage + coverage.process_startup() + + #. Create a .pth file in your Python installation containing:: + + import coverage; coverage.process_startup() + + Returns the :class:`Coverage` instance that was started, or None if it was + not started by this call. + + """ + cps = os.environ.get("COVERAGE_PROCESS_START") + if not cps: + # No request for coverage, nothing to do. + return None + + # This function can be called more than once in a process. 
This happens + # because some virtualenv configurations make the same directory visible + # twice in sys.path. This means that the .pth file will be found twice, + # and executed twice, executing this function twice. We set a global + # flag (an attribute on this function) to indicate that coverage.py has + # already been started, so we can avoid doing it twice. + # + # https://github.com/nedbat/coveragepy/issues/340 has more details. + + if hasattr(process_startup, "coverage"): + # We've annotated this function before, so we must have already + # started coverage.py in this process. Nothing to do. + return None + + cov = Coverage(config_file=cps) + process_startup.coverage = cov # type: ignore[attr-defined] + cov._warn_no_data = False + cov._warn_unimported_source = False + cov._warn_preimported_source = False + cov._auto_save = True + cov.start() + + return cov + + +def _prevent_sub_process_measurement() -> None: + """Stop any subprocess auto-measurement from writing data.""" + auto_created_coverage = getattr(process_startup, "coverage", None) + if auto_created_coverage is not None: + auto_created_coverage._auto_save = False diff --git a/venv/lib/python3.10/site-packages/coverage/data.py b/venv/lib/python3.10/site-packages/coverage/data.py new file mode 100644 index 0000000..c196ac7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/coverage/data.py @@ -0,0 +1,213 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""Coverage data for coverage.py. + +This file had the 4.x JSON data support, which is now gone. This file still +has storage-agnostic helpers, and is kept to avoid changing too many imports. +CoverageData is now defined in sqldata.py, and imported here to keep the +imports working. + +""" + +from __future__ import annotations + +import glob +import hashlib +import os.path + +from typing import Callable, Dict, Iterable, List, Optional + +from coverage.exceptions import CoverageException, NoDataError +from coverage.files import PathAliases +from coverage.misc import Hasher, file_be_gone, human_sorted, plural +from coverage.sqldata import CoverageData + + +def line_counts(data: CoverageData, fullpath: bool = False) -> Dict[str, int]: + """Return a dict summarizing the line coverage data. + + Keys are based on the file names, and values are the number of executed + lines. If `fullpath` is true, then the keys are the full pathnames of + the files, otherwise they are the basenames of the files. + + Returns a dict mapping file names to counts of lines. + + """ + summ = {} + filename_fn: Callable[[str], str] + if fullpath: + # pylint: disable=unnecessary-lambda-assignment + filename_fn = lambda f: f + else: + filename_fn = os.path.basename + for filename in data.measured_files(): + lines = data.lines(filename) + assert lines is not None + summ[filename_fn(filename)] = len(lines) + return summ + + +def add_data_to_hash(data: CoverageData, filename: str, hasher: Hasher) -> None: + """Contribute `filename`'s data to the `hasher`. + + `hasher` is a `coverage.misc.Hasher` instance to be updated with + the file's data. It should only get the results data, not the run + data. 
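+
+    For example (an illustrative sketch, not from the upstream docstring;
+    "mymodule.py" is hypothetical and a ".coverage" data file is assumed to
+    exist in the current directory):
+
+    .. code-block:: python
+
+        from coverage.misc import Hasher
+        from coverage.sqldata import CoverageData
+
+        data = CoverageData()
+        data.read()
+        hasher = Hasher()
+        add_data_to_hash(data, "mymodule.py", hasher)
+        print(hasher.hexdigest())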
+ + """ + if data.has_arcs(): + hasher.update(sorted(data.arcs(filename) or [])) + else: + hasher.update(sorted_lines(data, filename)) + hasher.update(data.file_tracer(filename)) + + +def combinable_files(data_file: str, data_paths: Optional[Iterable[str]] = None) -> List[str]: + """Make a list of data files to be combined. + + `data_file` is a path to a data file. `data_paths` is a list of files or + directories of files. + + Returns a list of absolute file paths. + """ + data_dir, local = os.path.split(os.path.abspath(data_file)) + + data_paths = data_paths or [data_dir] + files_to_combine = [] + for p in data_paths: + if os.path.isfile(p): + files_to_combine.append(os.path.abspath(p)) + elif os.path.isdir(p): + pattern = glob.escape(os.path.join(os.path.abspath(p), local)) +".*" + files_to_combine.extend(glob.glob(pattern)) + else: + raise NoDataError(f"Couldn't combine from non-existent path '{p}'") + return files_to_combine + + +def combine_parallel_data( + data: CoverageData, + aliases: Optional[PathAliases] = None, + data_paths: Optional[Iterable[str]] = None, + strict: bool = False, + keep: bool = False, + message: Optional[Callable[[str], None]] = None, +) -> None: + """Combine a number of data files together. + + `data` is a CoverageData. + + Treat `data.filename` as a file prefix, and combine the data from all + of the data files starting with that prefix plus a dot. + + If `aliases` is provided, it's a `PathAliases` object that is used to + re-map paths to match the local machine's. + + If `data_paths` is provided, it is a list of directories or files to + combine. Directories are searched for files that start with + `data.filename` plus dot as a prefix, and those files are combined. + + If `data_paths` is not provided, then the directory portion of + `data.filename` is used as the directory to search for data files. + + Unless `keep` is True every data file found and combined is then deleted + from disk. If a file cannot be read, a warning will be issued, and the + file will not be deleted. + + If `strict` is true, and no files are found to combine, an error is + raised. + + `message` is a function to use for printing messages to the user. + + """ + files_to_combine = combinable_files(data.base_filename(), data_paths) + + if strict and not files_to_combine: + raise NoDataError("No data to combine") + + file_hashes = set() + combined_any = False + + for f in files_to_combine: + if f == data.data_filename(): + # Sometimes we are combining into a file which is one of the + # parallel files. Skip that file. + if data._debug.should("dataio"): + data._debug.write(f"Skipping combining ourself: {f!r}") + continue + + try: + rel_file_name = os.path.relpath(f) + except ValueError: + # ValueError can be raised under Windows when os.getcwd() returns a + # folder from a different drive than the drive of f, in which case + # we print the original value of f instead of its relative path + rel_file_name = f + + with open(f, "rb") as fobj: + hasher = hashlib.new("sha3_256") + hasher.update(fobj.read()) + sha = hasher.digest() + combine_this_one = sha not in file_hashes + + delete_this_one = not keep + if combine_this_one: + if data._debug.should("dataio"): + data._debug.write(f"Combining data file {f!r}") + file_hashes.add(sha) + try: + new_data = CoverageData(f, debug=data._debug) + new_data.read() + except CoverageException as exc: + if data._warn: + # The CoverageException has the file name in it, so just + # use the message as the warning. 
+ data._warn(str(exc)) + if message: + message(f"Couldn't combine data file {rel_file_name}: {exc}") + delete_this_one = False + else: + data.update(new_data, aliases=aliases) + combined_any = True + if message: + message(f"Combined data file {rel_file_name}") + else: + if message: + message(f"Skipping duplicate data {rel_file_name}") + + if delete_this_one: + if data._debug.should("dataio"): + data._debug.write(f"Deleting data file {f!r}") + file_be_gone(f) + + if strict and not combined_any: + raise NoDataError("No usable data files") + + +def debug_data_file(filename: str) -> None: + """Implementation of 'coverage debug data'.""" + data = CoverageData(filename) + filename = data.data_filename() + print(f"path: {filename}") + if not os.path.exists(filename): + print("No data collected: file doesn't exist") + return + data.read() + print(f"has_arcs: {data.has_arcs()!r}") + summary = line_counts(data, fullpath=True) + filenames = human_sorted(summary.keys()) + nfiles = len(filenames) + print(f"{nfiles} file{plural(nfiles)}:") + for f in filenames: + line = f"{f}: {summary[f]} line{plural(summary[f])}" + plugin = data.file_tracer(f) + if plugin: + line += f" [{plugin}]" + print(line) + + +def sorted_lines(data: CoverageData, filename: str) -> List[int]: + """Get the sorted lines for a file, for tests.""" + lines = data.lines(filename) + return sorted(lines or []) diff --git a/venv/lib/python3.10/site-packages/coverage/debug.py b/venv/lib/python3.10/site-packages/coverage/debug.py new file mode 100644 index 0000000..c6eb23e --- /dev/null +++ b/venv/lib/python3.10/site-packages/coverage/debug.py @@ -0,0 +1,530 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""Control of and utilities for debugging.""" + +from __future__ import annotations + +import contextlib +import functools +import inspect +import io +import itertools +import os +import pprint +import re +import reprlib +import sys +import traceback +import types +import _thread + +from typing import ( + cast, + Any, Callable, IO, Iterable, Iterator, Mapping, Optional, List, Tuple, +) + +from coverage.misc import human_sorted_items, isolate_module +from coverage.types import TWritable + +os = isolate_module(os) + + +# When debugging, it can be helpful to force some options, especially when +# debugging the configuration mechanisms you usually use to control debugging! +# This is a list of forced debugging options. 
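+# For example, while debugging one might temporarily hard-code (an
+# illustrative sketch, not part of the shipped source; the file path is
+# hypothetical):
+#
+#     FORCED_DEBUG = ["config", "sys"]
+#     FORCED_DEBUG_FILE = "/tmp/coverage-debug.txt"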
+FORCED_DEBUG: List[str] = [] +FORCED_DEBUG_FILE = None + + +class DebugControl: + """Control and output for debugging.""" + + show_repr_attr = False # For auto_repr + + def __init__( + self, + options: Iterable[str], + output: Optional[IO[str]], + file_name: Optional[str] = None, + ) -> None: + """Configure the options and output file for debugging.""" + self.options = list(options) + FORCED_DEBUG + self.suppress_callers = False + + filters = [] + if self.should("pid"): + filters.append(add_pid_and_tid) + self.output = DebugOutputFile.get_one( + output, + file_name=file_name, + show_process=self.should("process"), + filters=filters, + ) + self.raw_output = self.output.outfile + + def __repr__(self) -> str: + return f"<DebugControl options={self.options!r} raw_output={self.raw_output!r}>" + + def should(self, option: str) -> bool: + """Decide whether to output debug information in category `option`.""" + if option == "callers" and self.suppress_callers: + return False + return (option in self.options) + + @contextlib.contextmanager + def without_callers(self) -> Iterator[None]: + """A context manager to prevent call stacks from being logged.""" + old = self.suppress_callers + self.suppress_callers = True + try: + yield + finally: + self.suppress_callers = old + + def write(self, msg: str) -> None: + """Write a line of debug output. + + `msg` is the line to write. A newline will be appended. + + """ + self.output.write(msg+"\n") + if self.should("self"): + caller_self = inspect.stack()[1][0].f_locals.get("self") + if caller_self is not None: + self.output.write(f"self: {caller_self!r}\n") + if self.should("callers"): + dump_stack_frames(out=self.output, skip=1) + self.output.flush() + + +class DebugControlString(DebugControl): + """A `DebugControl` that writes to a StringIO, for testing.""" + def __init__(self, options: Iterable[str]) -> None: + super().__init__(options, io.StringIO()) + + def get_output(self) -> str: + """Get the output text from the `DebugControl`.""" + return cast(str, self.raw_output.getvalue()) # type: ignore[union-attr] + + +class NoDebugging(DebugControl): + """A replacement for DebugControl that will never try to do anything.""" + def __init__(self) -> None: + # pylint: disable=super-init-not-called + ... + + def should(self, option: str) -> bool: + """Should we write debug messages? Never.""" + return False + + def write(self, msg: str) -> None: + """This will never be called.""" + raise AssertionError("NoDebugging.write should never be called.") + + +def info_header(label: str) -> str: + """Make a nice header string.""" + return "--{:-<60s}".format(" "+label+" ") + + +def info_formatter(info: Iterable[Tuple[str, Any]]) -> Iterator[str]: + """Produce a sequence of formatted lines from info. + + `info` is a sequence of pairs (label, data). The produced lines are + nicely formatted, ready to print. + + """ + info = list(info) + if not info: + return + label_len = 30 + assert all(len(l) < label_len for l, _ in info) + for label, data in info: + if data == []: + data = "-none-" + if isinstance(data, tuple) and len(repr(tuple(data))) < 30: + # Convert to tuple to scrub namedtuples. + yield "%*s: %r" % (label_len, label, tuple(data)) + elif isinstance(data, (list, set, tuple)): + prefix = "%*s:" % (label_len, label) + for e in data: + yield "%*s %s" % (label_len+1, prefix, e) + prefix = "" + else: + yield "%*s: %s" % (label_len, label, data) + + +def write_formatted_info( + write: Callable[[str], None], + header: str, + info: Iterable[Tuple[str, Any]], +) -> None: + """Write a sequence of (label,data) pairs nicely.
+ + `write` is a function write(str) that accepts each line of output. + `header` is a string to start the section. `info` is a sequence of + (label, data) pairs, where label is a str, and data can be a single + value, or a list/set/tuple. + + """ + write(info_header(header)) + for line in info_formatter(info): + write(f" {line}") + + +def exc_one_line(exc: Exception) -> str: + """Get a one-line summary of an exception, including class name and message.""" + lines = traceback.format_exception_only(type(exc), exc) + return "|".join(l.rstrip() for l in lines) + + +def short_stack(limit: Optional[int] = None, skip: int = 0) -> str: + """Return a string summarizing the call stack. + + The string is multi-line, with one line per stack frame. Each line shows + the function name, the file name, and the line number: + + ... + start_import_stop : /Users/ned/coverage/trunk/tests/coveragetest.py @95 + import_local_file : /Users/ned/coverage/trunk/tests/coveragetest.py @81 + import_local_file : /Users/ned/coverage/trunk/coverage/backward.py @159 + ... + + `limit` is the number of frames to include, defaulting to all of them. + + `skip` is the number of frames to skip, so that debugging functions can + call this and not be included in the result. + + """ + stack = inspect.stack()[limit:skip:-1] + return "\n".join("%30s : %s:%d" % (t[3], t[1], t[2]) for t in stack) + + +def dump_stack_frames( + limit: Optional[int] = None, + out: Optional[TWritable] = None, + skip: int = 0 +) -> None: + """Print a summary of the stack to stdout, or someplace else.""" + fout = out or sys.stdout + fout.write(short_stack(limit=limit, skip=skip+1)) + fout.write("\n") + + +def clipped_repr(text: str, numchars: int = 50) -> str: + """`repr(text)`, but limited to `numchars`.""" + r = reprlib.Repr() + r.maxstring = numchars + return r.repr(text) + + +def short_id(id64: int) -> int: + """Given a 64-bit id, make a shorter 16-bit one.""" + id16 = 0 + for offset in range(0, 64, 16): + id16 ^= id64 >> offset + return id16 & 0xFFFF + + +def add_pid_and_tid(text: str) -> str: + """A filter to add pid and tid to debug messages.""" + # Thread ids are useful, but too long. Make a shorter one. + tid = f"{short_id(_thread.get_ident()):04x}" + text = f"{os.getpid():5d}.{tid}: {text}" + return text + + +AUTO_REPR_IGNORE = {"$coverage.object_id"} + +def auto_repr(self: Any) -> str: + """A function implementing an automatic __repr__ for debugging.""" + show_attrs = ( + (k, v) for k, v in self.__dict__.items() + if getattr(v, "show_repr_attr", True) + and not callable(v) + and k not in AUTO_REPR_IGNORE + ) + return "<{klass} @0x{id:x} {attrs}>".format( + klass=self.__class__.__name__, + id=id(self), + attrs=" ".join(f"{k}={v!r}" for k, v in show_attrs), + ) + + +def simplify(v: Any) -> Any: # pragma: debugging + """Turn things which are nearly dict/list/etc into dict/list/etc.""" + if isinstance(v, dict): + return {k:simplify(vv) for k, vv in v.items()} + elif isinstance(v, (list, tuple)): + return type(v)(simplify(vv) for vv in v) + elif hasattr(v, "__dict__"): + return simplify({"."+k: v for k, v in v.__dict__.items()}) + else: + return v + + +def pp(v: Any) -> None: # pragma: debugging + """Debug helper to pretty-print data, including SimpleNamespace objects.""" + # Might not be needed in 3.9+ + pprint.pprint(simplify(v)) + + +def filter_text(text: str, filters: Iterable[Callable[[str], str]]) -> str: + """Run `text` through a series of filters. + + `filters` is a list of functions. Each takes a string and returns a + string. 
Each is run in turn. + + Returns: the final string that results after all of the filters have + run. + + """ + clean_text = text.rstrip() + ending = text[len(clean_text):] + text = clean_text + for fn in filters: + lines = [] + for line in text.splitlines(): + lines.extend(fn(line).splitlines()) + text = "\n".join(lines) + return text + ending + + +class CwdTracker: + """A class to add cwd info to debug messages.""" + def __init__(self) -> None: + self.cwd: Optional[str] = None + + def filter(self, text: str) -> str: + """Add a cwd message for each new cwd.""" + cwd = os.getcwd() + if cwd != self.cwd: + text = f"cwd is now {cwd!r}\n" + text + self.cwd = cwd + return text + + +class DebugOutputFile: + """A file-like object that includes pid and cwd information.""" + def __init__( + self, + outfile: Optional[IO[str]], + show_process: bool, + filters: Iterable[Callable[[str], str]], + ): + self.outfile = outfile + self.show_process = show_process + self.filters = list(filters) + + if self.show_process: + self.filters.insert(0, CwdTracker().filter) + self.write(f"New process: executable: {sys.executable!r}\n") + self.write("New process: cmd: {!r}\n".format(getattr(sys, "argv", None))) + if hasattr(os, "getppid"): + self.write(f"New process: pid: {os.getpid()!r}, parent pid: {os.getppid()!r}\n") + + @classmethod + def get_one( + cls, + fileobj: Optional[IO[str]] = None, + file_name: Optional[str] = None, + show_process: bool = True, + filters: Iterable[Callable[[str], str]] = (), + interim: bool = False, + ) -> DebugOutputFile: + """Get a DebugOutputFile. + + If `fileobj` is provided, then a new DebugOutputFile is made with it. + + If `fileobj` isn't provided, then a file is chosen (`file_name` if + provided, or COVERAGE_DEBUG_FILE, or stderr), and a process-wide + singleton DebugOutputFile is made. + + `show_process` controls whether the debug file adds process-level + information, and filters is a list of other message filters to apply. + + `filters` are the text filters to apply to the stream to annotate with + pids, etc. + + If `interim` is true, then a future `get_one` can replace this one. + + """ + if fileobj is not None: + # Make DebugOutputFile around the fileobj passed. + return cls(fileobj, show_process, filters) + + the_one, is_interim = cls._get_singleton_data() + if the_one is None or is_interim: + if file_name is not None: + fileobj = open(file_name, "a", encoding="utf-8") + else: + file_name = os.environ.get("COVERAGE_DEBUG_FILE", FORCED_DEBUG_FILE) + if file_name in ("stdout", "stderr"): + fileobj = getattr(sys, file_name) + elif file_name: + fileobj = open(file_name, "a", encoding="utf-8") + else: + fileobj = sys.stderr + the_one = cls(fileobj, show_process, filters) + cls._set_singleton_data(the_one, interim) + return the_one + + # Because of the way igor.py deletes and re-imports modules, + # this class can be defined more than once. But we really want + # a process-wide singleton. So stash it in sys.modules instead of + # on a class attribute. Yes, this is aggressively gross. 
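+    # An illustrative sketch of the stash-in-sys.modules pattern (hypothetical
+    # names, not part of this class):
+    #
+    #     import sys, types
+    #     mod = types.ModuleType("$myapp.singleton")
+    #     mod.the_one_and_is_interim = (the_object, False)
+    #     sys.modules["$myapp.singleton"] = mod
+    #     # Even a re-imported copy of the class finds the same object:
+    #     sys.modules["$myapp.singleton"].the_one_and_is_interim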
+ + SYS_MOD_NAME = "$coverage.debug.DebugOutputFile.the_one" + SINGLETON_ATTR = "the_one_and_is_interim" + + @classmethod + def _set_singleton_data(cls, the_one: DebugOutputFile, interim: bool) -> None: + """Set the one DebugOutputFile to rule them all.""" + singleton_module = types.ModuleType(cls.SYS_MOD_NAME) + setattr(singleton_module, cls.SINGLETON_ATTR, (the_one, interim)) + sys.modules[cls.SYS_MOD_NAME] = singleton_module + + @classmethod + def _get_singleton_data(cls) -> Tuple[Optional[DebugOutputFile], bool]: + """Get the one DebugOutputFile.""" + singleton_module = sys.modules.get(cls.SYS_MOD_NAME) + return getattr(singleton_module, cls.SINGLETON_ATTR, (None, True)) + + @classmethod + def _del_singleton_data(cls) -> None: + """Delete the one DebugOutputFile, just for tests to use.""" + if cls.SYS_MOD_NAME in sys.modules: + del sys.modules[cls.SYS_MOD_NAME] + + def write(self, text: str) -> None: + """Just like file.write, but filter through all our filters.""" + assert self.outfile is not None + self.outfile.write(filter_text(text, self.filters)) + self.outfile.flush() + + def flush(self) -> None: + """Flush our file.""" + assert self.outfile is not None + self.outfile.flush() + + +def log(msg: str, stack: bool = False) -> None: # pragma: debugging + """Write a log message as forcefully as possible.""" + out = DebugOutputFile.get_one(interim=True) + out.write(msg+"\n") + if stack: + dump_stack_frames(out=out, skip=1) + + +def decorate_methods( + decorator: Callable[..., Any], + butnot: Iterable[str] = (), + private: bool = False, +) -> Callable[..., Any]: # pragma: debugging + """A class decorator to apply a decorator to methods.""" + def _decorator(cls): # type: ignore[no-untyped-def] + for name, meth in inspect.getmembers(cls, inspect.isroutine): + if name not in cls.__dict__: + continue + if name != "__init__": + if not private and name.startswith("_"): + continue + if name in butnot: + continue + setattr(cls, name, decorator(meth)) + return cls + return _decorator + + +def break_in_pudb(func: Callable[..., Any]) -> Callable[..., Any]: # pragma: debugging + """A function decorator to stop in the debugger for each call.""" + @functools.wraps(func) + def _wrapper(*args: Any, **kwargs: Any) -> Any: + import pudb + sys.stdout = sys.__stdout__ + pudb.set_trace() + return func(*args, **kwargs) + return _wrapper + + +OBJ_IDS = itertools.count() +CALLS = itertools.count() +OBJ_ID_ATTR = "$coverage.object_id" + +def show_calls( + show_args: bool = True, + show_stack: bool = False, + show_return: bool = False, +) -> Callable[..., Any]: # pragma: debugging + """A method decorator to debug-log each call to the function.""" + def _decorator(func: Callable[..., Any]) -> Callable[..., Any]: + @functools.wraps(func) + def _wrapper(self: Any, *args: Any, **kwargs: Any) -> Any: + oid = getattr(self, OBJ_ID_ATTR, None) + if oid is None: + oid = f"{os.getpid():08d} {next(OBJ_IDS):04d}" + setattr(self, OBJ_ID_ATTR, oid) + extra = "" + if show_args: + eargs = ", ".join(map(repr, args)) + ekwargs = ", ".join("{}={!r}".format(*item) for item in kwargs.items()) + extra += "(" + extra += eargs + if eargs and ekwargs: + extra += ", " + extra += ekwargs + extra += ")" + if show_stack: + extra += " @ " + extra += "; ".join(_clean_stack_line(l) for l in short_stack().splitlines()) + callid = next(CALLS) + msg = f"{oid} {callid:04d} {func.__name__}{extra}\n" + DebugOutputFile.get_one(interim=True).write(msg) + ret = func(self, *args, **kwargs) + if show_return: + msg = f"{oid} {callid:04d} {func.__name__} 
return {ret!r}\n" + DebugOutputFile.get_one(interim=True).write(msg) + return ret + return _wrapper + return _decorator + + +def _clean_stack_line(s: str) -> str: # pragma: debugging + """Simplify some paths in a stack trace, for compactness.""" + s = s.strip() + s = s.replace(os.path.dirname(__file__) + "/", "") + s = s.replace(os.path.dirname(os.__file__) + "/", "") + s = s.replace(sys.prefix + "/", "") + return s + + +def relevant_environment_display(env: Mapping[str, str]) -> List[Tuple[str, str]]: + """Filter environment variables for a debug display. + + Select variables to display (with COV or PY in the name, or HOME, TEMP, or + TMP), and also cloak sensitive values with asterisks. + + Arguments: + env: a dict of environment variable names and values. + + Returns: + A list of pairs (name, value) to show. + + """ + slugs = {"COV", "PY"} + include = {"HOME", "TEMP", "TMP"} + cloak = {"API", "TOKEN", "KEY", "SECRET", "PASS", "SIGNATURE"} + + to_show = [] + for name, val in env.items(): + keep = False + if name in include: + keep = True + elif any(slug in name for slug in slugs): + keep = True + if keep: + if any(slug in name for slug in cloak): + val = re.sub(r"\w", "*", val) + to_show.append((name, val)) + return human_sorted_items(to_show) diff --git a/venv/lib/python3.10/site-packages/coverage/disposition.py b/venv/lib/python3.10/site-packages/coverage/disposition.py new file mode 100644 index 0000000..3cc6c8d --- /dev/null +++ b/venv/lib/python3.10/site-packages/coverage/disposition.py @@ -0,0 +1,58 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""Simple value objects for tracking what to do with files.""" + +from __future__ import annotations + +from typing import Optional, Type, TYPE_CHECKING + +from coverage.types import TFileDisposition + +if TYPE_CHECKING: + from coverage.plugin import FileTracer + + +class FileDisposition: + """A simple value type for recording what to do with a file.""" + + original_filename: str + canonical_filename: str + source_filename: Optional[str] + trace: bool + reason: str + file_tracer: Optional[FileTracer] + has_dynamic_filename: bool + + def __repr__(self) -> str: + return f"" + + +# FileDisposition "methods": FileDisposition is a pure value object, so it can +# be implemented in either C or Python. Acting on them is done with these +# functions. 
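A small sketch of how the value-object helpers defined just below are meant to be used, assuming the vendored import path `coverage.disposition`; the file path and reason are invented. (Note that `FileDisposition.__repr__` above appears to have lost its f-string body in this diff; upstream it returns an angle-bracketed summary of the filename and trace state.)

```python
from coverage.disposition import (
    FileDisposition,
    disposition_init,
    disposition_debug_msg,
)

# disposition_init() fills in the defaults; callers then flip the fields
# they care about before acting on the disposition.
disp = disposition_init(FileDisposition, "src/app.py")  # invented path
disp.reason = "not in the measured source"
print(disposition_debug_msg(disp))
# -> Not tracing 'src/app.py': not in the measured source
```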
+ +def disposition_init(cls: Type[TFileDisposition], original_filename: str) -> TFileDisposition: + """Construct and initialize a new FileDisposition object.""" + disp = cls() + disp.original_filename = original_filename + disp.canonical_filename = original_filename + disp.source_filename = None + disp.trace = False + disp.reason = "" + disp.file_tracer = None + disp.has_dynamic_filename = False + return disp + + +def disposition_debug_msg(disp: TFileDisposition) -> str: + """Make a nice debug message of what the FileDisposition is doing.""" + if disp.trace: + msg = f"Tracing {disp.original_filename!r}" + if disp.original_filename != disp.source_filename: + msg += f" as {disp.source_filename!r}" + if disp.file_tracer: + msg += f": will be traced by {disp.file_tracer!r}" + else: + msg = f"Not tracing {disp.original_filename!r}: {disp.reason}" + return msg diff --git a/venv/lib/python3.10/site-packages/coverage/env.py b/venv/lib/python3.10/site-packages/coverage/env.py new file mode 100644 index 0000000..7ed129b --- /dev/null +++ b/venv/lib/python3.10/site-packages/coverage/env.py @@ -0,0 +1,140 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""Determine facts about the environment.""" + +from __future__ import annotations + +import os +import platform +import sys + +from typing import Any, Iterable, Tuple + +# debug_info() at the bottom wants to show all the globals, but not imports. +# Grab the global names here to know which names to not show. Nothing defined +# above this line will be in the output. +_UNINTERESTING_GLOBALS = list(globals()) +# These names also shouldn't be shown. +_UNINTERESTING_GLOBALS += ["PYBEHAVIOR", "debug_info"] + +# Operating systems. +WINDOWS = sys.platform == "win32" +LINUX = sys.platform.startswith("linux") +OSX = sys.platform == "darwin" + +# Python implementations. +CPYTHON = (platform.python_implementation() == "CPython") +PYPY = (platform.python_implementation() == "PyPy") + +# Python versions. We amend version_info with one more value, a zero if an +# official version, or 1 if built from source beyond an official version. +PYVERSION = sys.version_info + (int(platform.python_version()[-1] == "+"),) + +if PYPY: + PYPYVERSION = sys.pypy_version_info # type: ignore[attr-defined] + +# Python behavior. +class PYBEHAVIOR: + """Flags indicating this Python's behavior.""" + + # Does Python conform to PEP626, Precise line numbers for debugging and other tools. + # https://www.python.org/dev/peps/pep-0626 + pep626 = (PYVERSION > (3, 10, 0, "alpha", 4)) + + # Is "if __debug__" optimized away? + optimize_if_debug = not pep626 + + # Is "if not __debug__" optimized away? The exact details have changed + # across versions. + if pep626: + optimize_if_not_debug = 1 + elif PYPY: + if PYVERSION >= (3, 9): + optimize_if_not_debug = 2 + else: + optimize_if_not_debug = 3 + else: + optimize_if_not_debug = 2 + + # 3.7 changed how functions with only docstrings are numbered. + docstring_only_function = (not PYPY) and ((3, 7, 0, "beta", 5) <= PYVERSION <= (3, 10)) + + # When a break/continue/return statement in a try block jumps to a finally + # block, does the finally block do the break/continue/return (pre-3.8), or + # does the finally jump back to the break/continue/return (3.8) to do the + # work? + finally_jumps_back = ((3, 8) <= PYVERSION < (3, 10)) + + # CPython 3.11 now jumps to the decorator line again while executing + # the decorator. 
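The `PYVERSION` tuple above is built so that plain tuple comparisons work against pre-release markers; a standalone sketch of the same construction:

```python
import platform
import sys

# sys.version_info extended with 1 for "+" (built-from-source) versions,
# exactly as env.py does above.
PYVERSION = sys.version_info + (int(platform.python_version()[-1] == "+"),)
print(PYVERSION > (3, 10, 0, "alpha", 4))  # the PEP 626 check used above
```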
+ trace_decorator_line_again = (CPYTHON and PYVERSION > (3, 11, 0, "alpha", 3, 0)) + + # CPython 3.9a1 made sys.argv[0] and other reported files absolute paths. + report_absolute_files = ( + (CPYTHON or (PYPY and PYPYVERSION >= (7, 3, 10))) + and PYVERSION >= (3, 9) + ) + + # Lines after break/continue/return/raise are no longer compiled into the + # bytecode. They used to be marked as missing, now they aren't executable. + omit_after_jump = ( + pep626 + or (PYPY and PYVERSION >= (3, 9) and PYPYVERSION >= (7, 3, 12)) + ) + + # PyPy has always omitted statements after return. + omit_after_return = omit_after_jump or PYPY + + # Optimize away unreachable try-else clauses. + optimize_unreachable_try_else = pep626 + + # Modules used to have firstlineno equal to the line number of the first + # real line of code. Now they always start at 1. + module_firstline_1 = pep626 + + # Are "if 0:" lines (and similar) kept in the compiled code? + keep_constant_test = pep626 + + # When leaving a with-block, do we visit the with-line again for the exit? + exit_through_with = (PYVERSION >= (3, 10, 0, "beta")) + + # Match-case construct. + match_case = (PYVERSION >= (3, 10)) + + # Some words are keywords in some places, identifiers in other places. + soft_keywords = (PYVERSION >= (3, 10)) + + # Modules start with a line numbered zero. This means empty modules have + # only a 0-number line, which is ignored, giving a truly empty module. + empty_is_empty = (PYVERSION >= (3, 11, 0, "beta", 4)) + + # Are comprehensions inlined (new) or compiled as called functions (old)? + # Changed in https://github.com/python/cpython/pull/101441 + comprehensions_are_functions = (PYVERSION <= (3, 12, 0, "alpha", 7, 0)) + +# Coverage.py specifics. + +# Are we using the C-implemented trace function? +C_TRACER = os.getenv("COVERAGE_TEST_TRACER", "c") == "c" + +# Are we coverage-measuring ourselves? +METACOV = os.getenv("COVERAGE_COVERAGE", "") != "" + +# Are we running our test suite? +# Even when running tests, you can use COVERAGE_TESTING=0 to disable the +# test-specific behavior like AST checking. 
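A hypothetical call site showing how these behavior flags and environment switches are consumed elsewhere in coverage.py; only attribute names visible above are used:

```python
from coverage import env

# feature-gate on Python behavior rather than raw version numbers
if env.PYBEHAVIOR.exit_through_with:
    print("leaving a with-block revisits the with line")
print("C tracer selected:", env.C_TRACER)
print("measuring coverage.py itself:", env.METACOV)
```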
+TESTING = os.getenv("COVERAGE_TESTING", "") == "True" + + +def debug_info() -> Iterable[Tuple[str, Any]]: + """Return a list of (name, value) pairs for printing debug information.""" + info = [ + (name, value) for name, value in globals().items() + if not name.startswith("_") and name not in _UNINTERESTING_GLOBALS + ] + info += [ + (name, value) for name, value in PYBEHAVIOR.__dict__.items() + if not name.startswith("_") + ] + return sorted(info) diff --git a/venv/lib/python3.10/site-packages/coverage/exceptions.py b/venv/lib/python3.10/site-packages/coverage/exceptions.py new file mode 100644 index 0000000..43dc004 --- /dev/null +++ b/venv/lib/python3.10/site-packages/coverage/exceptions.py @@ -0,0 +1,62 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""Exceptions coverage.py can raise.""" + + +class _BaseCoverageException(Exception): + """The base-base of all Coverage exceptions.""" + pass + + +class CoverageException(_BaseCoverageException): + """The base class of all exceptions raised by Coverage.py.""" + pass + + +class ConfigError(_BaseCoverageException): + """A problem with a config file, or a value in one.""" + pass + + +class DataError(CoverageException): + """An error in using a data file.""" + pass + +class NoDataError(CoverageException): + """We didn't have data to work with.""" + pass + + +class NoSource(CoverageException): + """We couldn't find the source for a module.""" + pass + + +class NoCode(NoSource): + """We couldn't find any code at all.""" + pass + + +class NotPython(CoverageException): + """A source file turned out not to be parsable Python.""" + pass + + +class PluginError(CoverageException): + """A plugin misbehaved.""" + pass + + +class _ExceptionDuringRun(CoverageException): + """An exception happened while running customer code. + + Construct it with three arguments, the values from `sys.exc_info`. + + """ + pass + + +class CoverageWarning(Warning): + """A warning from Coverage.py.""" + pass diff --git a/venv/lib/python3.10/site-packages/coverage/execfile.py b/venv/lib/python3.10/site-packages/coverage/execfile.py new file mode 100644 index 0000000..aac4d30 --- /dev/null +++ b/venv/lib/python3.10/site-packages/coverage/execfile.py @@ -0,0 +1,327 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""Execute files of Python code.""" + +from __future__ import annotations + +import importlib.machinery +import importlib.util +import inspect +import marshal +import os +import struct +import sys + +from importlib.machinery import ModuleSpec +from types import CodeType, ModuleType +from typing import Any, List, Optional, Tuple + +from coverage import env +from coverage.exceptions import CoverageException, _ExceptionDuringRun, NoCode, NoSource +from coverage.files import canonical_filename, python_reported_file +from coverage.misc import isolate_module +from coverage.python import get_python_source + +os = isolate_module(os) + + +PYC_MAGIC_NUMBER = importlib.util.MAGIC_NUMBER + +class DummyLoader: + """A shim for the pep302 __loader__, emulating pkgutil.ImpLoader. + + Currently only implements the .fullname attribute + """ + def __init__(self, fullname: str, *_args: Any) -> None: + self.fullname = fullname + + +def find_module( + modulename: str, +) -> Tuple[Optional[str], str, ModuleSpec]: + """Find the module named `modulename`. 
+ + Returns the file path of the module, the name of the enclosing + package, and the spec. + """ + try: + spec = importlib.util.find_spec(modulename) + except ImportError as err: + raise NoSource(str(err)) from err + if not spec: + raise NoSource(f"No module named {modulename!r}") + pathname = spec.origin + packagename = spec.name + if spec.submodule_search_locations: + mod_main = modulename + ".__main__" + spec = importlib.util.find_spec(mod_main) + if not spec: + raise NoSource( + f"No module named {mod_main}; " + + f"{modulename!r} is a package and cannot be directly executed" + ) + pathname = spec.origin + packagename = spec.name + packagename = packagename.rpartition(".")[0] + return pathname, packagename, spec + + +class PyRunner: + """Multi-stage execution of Python code. + + This is meant to emulate real Python execution as closely as possible. + + """ + def __init__(self, args: List[str], as_module: bool = False) -> None: + self.args = args + self.as_module = as_module + + self.arg0 = args[0] + self.package: Optional[str] = None + self.modulename: Optional[str] = None + self.pathname: Optional[str] = None + self.loader: Optional[DummyLoader] = None + self.spec: Optional[ModuleSpec] = None + + def prepare(self) -> None: + """Set sys.path properly. + + This needs to happen before any importing, and without importing anything. + """ + path0: Optional[str] + if self.as_module: + path0 = os.getcwd() + elif os.path.isdir(self.arg0): + # Running a directory means running the __main__.py file in that + # directory. + path0 = self.arg0 + else: + path0 = os.path.abspath(os.path.dirname(self.arg0)) + + if os.path.isdir(sys.path[0]): + # sys.path fakery. If we are being run as a command, then sys.path[0] + # is the directory of the "coverage" script. If this is so, replace + # sys.path[0] with the directory of the file we're running, or the + # current directory when running modules. If it isn't so, then we + # don't know what's going on, and just leave it alone. + top_file = inspect.stack()[-1][0].f_code.co_filename + sys_path_0_abs = os.path.abspath(sys.path[0]) + top_file_dir_abs = os.path.abspath(os.path.dirname(top_file)) + sys_path_0_abs = canonical_filename(sys_path_0_abs) + top_file_dir_abs = canonical_filename(top_file_dir_abs) + if sys_path_0_abs != top_file_dir_abs: + path0 = None + + else: + # sys.path[0] is a file. Is the next entry the directory containing + # that file? + if sys.path[1] == os.path.dirname(sys.path[0]): + # Can it be right to always remove that? + del sys.path[1] + + if path0 is not None: + sys.path[0] = python_reported_file(path0) + + def _prepare2(self) -> None: + """Do more preparation to run Python code. + + Includes finding the module to run and adjusting sys.argv[0]. + This method is allowed to import code. + + """ + if self.as_module: + self.modulename = self.arg0 + pathname, self.package, self.spec = find_module(self.modulename) + if self.spec is not None: + self.modulename = self.spec.name + self.loader = DummyLoader(self.modulename) + assert pathname is not None + self.pathname = os.path.abspath(pathname) + self.args[0] = self.arg0 = self.pathname + elif os.path.isdir(self.arg0): + # Running a directory means running the __main__.py file in that + # directory. + for ext in [".py", ".pyc", ".pyo"]: + try_filename = os.path.join(self.arg0, "__main__" + ext) + # 3.8.10 changed how files are reported when running a + # directory. But I'm not sure how far this change is going to + # spread, so I'll just hard-code it here for now. 
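A quick sketch of `find_module()` above, using a stdlib module as the target; the import path assumes the vendored layout:

```python
from coverage.execfile import find_module

pathname, package, spec = find_module("json.tool")
print(pathname)  # .../json/tool.py, from spec.origin
print(package)   # "json", the enclosing package name
```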
+ if env.PYVERSION >= (3, 8, 10): + try_filename = os.path.abspath(try_filename) + if os.path.exists(try_filename): + self.arg0 = try_filename + break + else: + raise NoSource(f"Can't find '__main__' module in '{self.arg0}'") + + # Make a spec. I don't know if this is the right way to do it. + try_filename = python_reported_file(try_filename) + self.spec = importlib.machinery.ModuleSpec("__main__", None, origin=try_filename) + self.spec.has_location = True + self.package = "" + self.loader = DummyLoader("__main__") + else: + self.loader = DummyLoader("__main__") + + self.arg0 = python_reported_file(self.arg0) + + def run(self) -> None: + """Run the Python code!""" + + self._prepare2() + + # Create a module to serve as __main__ + main_mod = ModuleType("__main__") + + from_pyc = self.arg0.endswith((".pyc", ".pyo")) + main_mod.__file__ = self.arg0 + if from_pyc: + main_mod.__file__ = main_mod.__file__[:-1] + if self.package is not None: + main_mod.__package__ = self.package + main_mod.__loader__ = self.loader # type: ignore[assignment] + if self.spec is not None: + main_mod.__spec__ = self.spec + + main_mod.__builtins__ = sys.modules["builtins"] # type: ignore[attr-defined] + + sys.modules["__main__"] = main_mod + + # Set sys.argv properly. + sys.argv = self.args + + try: + # Make a code object somehow. + if from_pyc: + code = make_code_from_pyc(self.arg0) + else: + code = make_code_from_py(self.arg0) + except CoverageException: + raise + except Exception as exc: + msg = f"Couldn't run '{self.arg0}' as Python code: {exc.__class__.__name__}: {exc}" + raise CoverageException(msg) from exc + + # Execute the code object. + # Return to the original directory in case the test code exits in + # a non-existent directory. + cwd = os.getcwd() + try: + exec(code, main_mod.__dict__) + except SystemExit: # pylint: disable=try-except-raise + # The user called sys.exit(). Just pass it along to the upper + # layers, where it will be handled. + raise + except Exception: + # Something went wrong while executing the user code. + # Get the exc_info, and pack them into an exception that we can + # throw up to the outer loop. We peel one layer off the traceback + # so that the coverage.py code doesn't appear in the final printed + # traceback. + typ, err, tb = sys.exc_info() + assert typ is not None + assert err is not None + assert tb is not None + + # PyPy3 weirdness. If I don't access __context__, then somehow it + # is non-None when the exception is reported at the upper layer, + # and a nested exception is shown to the user. This getattr fixes + # it somehow? https://bitbucket.org/pypy/pypy/issue/1903 + getattr(err, "__context__", None) + + # Call the excepthook. + try: + assert err.__traceback__ is not None + err.__traceback__ = err.__traceback__.tb_next + sys.excepthook(typ, err, tb.tb_next) + except SystemExit: # pylint: disable=try-except-raise + raise + except Exception as exc: + # Getting the output right in the case of excepthook + # shenanigans is kind of involved. 
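`PyRunner` is normally driven through the two helpers defined later in this file; a sketch mirroring `python prog.py --verbose`, where the script name and flag are invented:

```python
from coverage.execfile import run_python_file

# prepare() fixes sys.path and sys.argv; run() compiles the file and
# execs it as __main__.
run_python_file(["prog.py", "--verbose"])
```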
+ sys.stderr.write("Error in sys.excepthook:\n") + typ2, err2, tb2 = sys.exc_info() + assert typ2 is not None + assert err2 is not None + assert tb2 is not None + err2.__suppress_context__ = True + assert err2.__traceback__ is not None + err2.__traceback__ = err2.__traceback__.tb_next + sys.__excepthook__(typ2, err2, tb2.tb_next) + sys.stderr.write("\nOriginal exception was:\n") + raise _ExceptionDuringRun(typ, err, tb.tb_next) from exc + else: + sys.exit(1) + finally: + os.chdir(cwd) + + +def run_python_module(args: List[str]) -> None: + """Run a Python module, as though with ``python -m name args...``. + + `args` is the argument array to present as sys.argv, including the first + element naming the module being executed. + + This is a helper for tests, to encapsulate how to use PyRunner. + + """ + runner = PyRunner(args, as_module=True) + runner.prepare() + runner.run() + + +def run_python_file(args: List[str]) -> None: + """Run a Python file as if it were the main program on the command line. + + `args` is the argument array to present as sys.argv, including the first + element naming the file being executed. `package` is the name of the + enclosing package, if any. + + This is a helper for tests, to encapsulate how to use PyRunner. + + """ + runner = PyRunner(args, as_module=False) + runner.prepare() + runner.run() + + +def make_code_from_py(filename: str) -> CodeType: + """Get source from `filename` and make a code object of it.""" + # Open the source file. + try: + source = get_python_source(filename) + except (OSError, NoSource) as exc: + raise NoSource(f"No file to run: '{filename}'") from exc + + return compile(source, filename, "exec", dont_inherit=True) + + +def make_code_from_pyc(filename: str) -> CodeType: + """Get a code object from a .pyc file.""" + try: + fpyc = open(filename, "rb") + except OSError as exc: + raise NoCode(f"No file to run: '{filename}'") from exc + + with fpyc: + # First four bytes are a version-specific magic number. It has to + # match or we won't run the file. + magic = fpyc.read(4) + if magic != PYC_MAGIC_NUMBER: + raise NoCode(f"Bad magic number in .pyc file: {magic!r} != {PYC_MAGIC_NUMBER!r}") + + flags = struct.unpack(" None: + """Set the directory that `relative_filename` will be relative to.""" + global RELATIVE_DIR, CANONICAL_FILENAME_CACHE + + # The current directory + abs_curdir = abs_file(os.curdir) + if not abs_curdir.endswith(os.sep): + # Suffix with separator only if not at the system root + abs_curdir = abs_curdir + os.sep + + # The absolute path to our current directory. + RELATIVE_DIR = os.path.normcase(abs_curdir) + + # Cache of results of calling the canonical_filename() method, to + # avoid duplicating work. + CANONICAL_FILENAME_CACHE = {} + + +def relative_directory() -> str: + """Return the directory that `relative_filename` is relative to.""" + return RELATIVE_DIR + + +def relative_filename(filename: str) -> str: + """Return the relative form of `filename`. + + The file name will be relative to the current directory when the + `set_relative_directory` was called. + + """ + fnorm = os.path.normcase(filename) + if fnorm.startswith(RELATIVE_DIR): + filename = filename[len(RELATIVE_DIR):] + return filename + + +def canonical_filename(filename: str) -> str: + """Return a canonical file name for `filename`. + + An absolute path with no redundant components and normalized case. 
+ + """ + if filename not in CANONICAL_FILENAME_CACHE: + cf = filename + if not os.path.isabs(filename): + for path in [os.curdir] + sys.path: + if path is None: + continue # type: ignore[unreachable] + f = os.path.join(path, filename) + try: + exists = os.path.exists(f) + except UnicodeError: + exists = False + if exists: + cf = f + break + cf = abs_file(cf) + CANONICAL_FILENAME_CACHE[filename] = cf + return CANONICAL_FILENAME_CACHE[filename] + + +MAX_FLAT = 100 + +def flat_rootname(filename: str) -> str: + """A base for a flat file name to correspond to this file. + + Useful for writing files about the code where you want all the files in + the same directory, but need to differentiate same-named files from + different directories. + + For example, the file a/b/c.py will return 'd_86bbcbe134d28fd2_c_py' + + """ + dirname, basename = ntpath.split(filename) + if dirname: + fp = hashlib.new("sha3_256", dirname.encode("UTF-8")).hexdigest()[:16] + prefix = f"d_{fp}_" + else: + prefix = "" + return prefix + basename.replace(".", "_") + + +if env.WINDOWS: + + _ACTUAL_PATH_CACHE: Dict[str, str] = {} + _ACTUAL_PATH_LIST_CACHE: Dict[str, List[str]] = {} + + def actual_path(path: str) -> str: + """Get the actual path of `path`, including the correct case.""" + if path in _ACTUAL_PATH_CACHE: + return _ACTUAL_PATH_CACHE[path] + + head, tail = os.path.split(path) + if not tail: + # This means head is the drive spec: normalize it. + actpath = head.upper() + elif not head: + actpath = tail + else: + head = actual_path(head) + if head in _ACTUAL_PATH_LIST_CACHE: + files = _ACTUAL_PATH_LIST_CACHE[head] + else: + try: + files = os.listdir(head) + except Exception: + # This will raise OSError, or this bizarre TypeError: + # https://bugs.python.org/issue1776160 + files = [] + _ACTUAL_PATH_LIST_CACHE[head] = files + normtail = os.path.normcase(tail) + for f in files: + if os.path.normcase(f) == normtail: + tail = f + break + actpath = os.path.join(head, tail) + _ACTUAL_PATH_CACHE[path] = actpath + return actpath + +else: + def actual_path(path: str) -> str: + """The actual path for non-Windows platforms.""" + return path + + +def abs_file(path: str) -> str: + """Return the absolute normalized form of `path`.""" + return actual_path(os.path.abspath(os.path.realpath(path))) + + +def zip_location(filename: str) -> Optional[Tuple[str, str]]: + """Split a filename into a zipfile / inner name pair. + + Only return a pair if the zipfile exists. No check is made if the inner + name is in the zipfile. + + """ + for ext in [".zip", ".whl", ".egg", ".pex"]: + zipbase, extension, inner = filename.partition(ext + sep(filename)) + if extension: + zipfile = zipbase + ext + if os.path.exists(zipfile): + return zipfile, inner + return None + + +def source_exists(path: str) -> bool: + """Determine if a source file path exists.""" + if os.path.exists(path): + return True + + if zip_location(path): + # If zip_location returns anything, then it's a zipfile that + # exists. That's good enough for us. + return True + + return False + + +def python_reported_file(filename: str) -> str: + """Return the string as Python would describe this file name.""" + if env.PYBEHAVIOR.report_absolute_files: + filename = os.path.abspath(filename) + return filename + + +def isabs_anywhere(filename: str) -> bool: + """Is `filename` an absolute path on any OS?""" + return ntpath.isabs(filename) or posixpath.isabs(filename) + + +def prep_patterns(patterns: Iterable[str]) -> List[str]: + """Prepare the file patterns for use in a `GlobMatcher`. 
+ + If a pattern starts with a wildcard, it is used as a pattern + as-is. If it does not start with a wildcard, then it is made + absolute with the current directory. + + If `patterns` is None, an empty list is returned. + + """ + prepped = [] + for p in patterns or []: + prepped.append(p) + if not p.startswith(("*", "?")): + prepped.append(abs_file(p)) + return prepped + + +class TreeMatcher: + """A matcher for files in a tree. + + Construct with a list of paths, either files or directories. Paths match + with the `match` method if they are one of the files, or if they are + somewhere in a subtree rooted at one of the directories. + + """ + def __init__(self, paths: Iterable[str], name: str = "unknown") -> None: + self.original_paths: List[str] = human_sorted(paths) + #self.paths = list(map(os.path.normcase, paths)) + self.paths = [os.path.normcase(p) for p in paths] + self.name = name + + def __repr__(self) -> str: + return f"" + + def info(self) -> List[str]: + """A list of strings for displaying when dumping state.""" + return self.original_paths + + def match(self, fpath: str) -> bool: + """Does `fpath` indicate a file in one of our trees?""" + fpath = os.path.normcase(fpath) + for p in self.paths: + if fpath.startswith(p): + if fpath == p: + # This is the same file! + return True + if fpath[len(p)] == os.sep: + # This is a file in the directory + return True + return False + + +class ModuleMatcher: + """A matcher for modules in a tree.""" + def __init__(self, module_names: Iterable[str], name:str = "unknown") -> None: + self.modules = list(module_names) + self.name = name + + def __repr__(self) -> str: + return f"" + + def info(self) -> List[str]: + """A list of strings for displaying when dumping state.""" + return self.modules + + def match(self, module_name: str) -> bool: + """Does `module_name` indicate a module in one of our packages?""" + if not module_name: + return False + + for m in self.modules: + if module_name.startswith(m): + if module_name == m: + return True + if module_name[len(m)] == ".": + # This is a module in the package + return True + + return False + + +class GlobMatcher: + """A matcher for files by file name pattern.""" + def __init__(self, pats: Iterable[str], name: str = "unknown") -> None: + self.pats = list(pats) + self.re = globs_to_regex(self.pats, case_insensitive=env.WINDOWS) + self.name = name + + def __repr__(self) -> str: + return f"" + + def info(self) -> List[str]: + """A list of strings for displaying when dumping state.""" + return self.pats + + def match(self, fpath: str) -> bool: + """Does `fpath` match one of our file name patterns?""" + return self.re.match(fpath) is not None + + +def sep(s: str) -> str: + """Find the path separator used in this string, or os.sep if none.""" + sep_match = re.search(r"[\\/]", s) + if sep_match: + the_sep = sep_match[0] + else: + the_sep = os.sep + return the_sep + + +# Tokenizer for _glob_to_regex. +# None as a sub means disallowed. +G2RX_TOKENS = [(re.compile(rx), sub) for rx, sub in [ + (r"\*\*\*+", None), # Can't have *** + (r"[^/]+\*\*+", None), # Can't have x** + (r"\*\*+[^/]+", None), # Can't have **x + (r"\*\*/\*\*", None), # Can't have **/** + (r"^\*+/", r"(.*[/\\\\])?"), # ^*/ matches any prefix-slash, or nothing. + (r"/\*+$", r"[/\\\\].*"), # /*$ matches any slash-suffix. + (r"\*\*/", r"(.*[/\\\\])?"), # **/ matches any subdirs, including none + (r"/", r"[/\\\\]"), # / matches either slash or backslash + (r"\*", r"[^/\\\\]*"), # * matches any number of non slash-likes + (r"\?", r"[^/\\\\]"), # ? 
matches one non slash-like + (r"\[.*?\]", r"\g<0>"), # [a-f] matches [a-f] + (r"[a-zA-Z0-9_-]+", r"\g<0>"), # word chars match themselves + (r"[\[\]]", None), # Can't have single square brackets + (r".", r"\\\g<0>"), # Anything else is escaped to be safe +]] + +def _glob_to_regex(pattern: str) -> str: + """Convert a file-path glob pattern into a regex.""" + # Turn all backslashes into slashes to simplify the tokenizer. + pattern = pattern.replace("\\", "/") + if "/" not in pattern: + pattern = "**/" + pattern + path_rx = [] + pos = 0 + while pos < len(pattern): + for rx, sub in G2RX_TOKENS: # pragma: always breaks + m = rx.match(pattern, pos=pos) + if m: + if sub is None: + raise ConfigError(f"File pattern can't include {m[0]!r}") + path_rx.append(m.expand(sub)) + pos = m.end() + break + return "".join(path_rx) + + +def globs_to_regex( + patterns: Iterable[str], + case_insensitive: bool = False, + partial: bool = False, +) -> re.Pattern[str]: + """Convert glob patterns to a compiled regex that matches any of them. + + Slashes are always converted to match either slash or backslash, for + Windows support, even when running elsewhere. + + If the pattern has no slash or backslash, then it is interpreted as + matching a file name anywhere it appears in the tree. Otherwise, the glob + pattern must match the whole file path. + + If `partial` is true, then the pattern will match if the target string + starts with the pattern. Otherwise, it must match the entire string. + + Returns: a compiled regex object. Use the .match method to compare target + strings. + + """ + flags = 0 + if case_insensitive: + flags |= re.IGNORECASE + rx = join_regex(map(_glob_to_regex, patterns)) + if not partial: + rx = rf"(?:{rx})\Z" + compiled = re.compile(rx, flags=flags) + return compiled + + +class PathAliases: + """A collection of aliases for paths. + + When combining data files from remote machines, often the paths to source + code are different, for example, due to OS differences, or because of + serialized checkouts on continuous integration machines. + + A `PathAliases` object tracks a list of pattern/result pairs, and can + map a path through those aliases to produce a unified path. + + """ + def __init__( + self, + debugfn: Optional[Callable[[str], None]] = None, + relative: bool = False, + ) -> None: + # A list of (original_pattern, regex, result) + self.aliases: List[Tuple[str, re.Pattern[str], str]] = [] + self.debugfn = debugfn or (lambda msg: 0) + self.relative = relative + self.pprinted = False + + def pprint(self) -> None: + """Dump the important parts of the PathAliases, for debugging.""" + self.debugfn(f"Aliases (relative={self.relative}):") + for original_pattern, regex, result in self.aliases: + self.debugfn(f" Rule: {original_pattern!r} -> {result!r} using regex {regex.pattern!r}") + + def add(self, pattern: str, result: str) -> None: + """Add the `pattern`/`result` pair to the list of aliases. + + `pattern` is an `glob`-style pattern. `result` is a simple + string. When mapping paths, if a path starts with a match against + `pattern`, then that match is replaced with `result`. This models + isomorphic source trees being rooted at different places on two + different machines. + + `pattern` can't end with a wildcard component, since that would + match an entire tree, and not just its root. + + """ + original_pattern = pattern + pattern_sep = sep(pattern) + + if len(pattern) > 1: + pattern = pattern.rstrip(r"\/") + + # The pattern can't end with a wildcard component. 
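A sketch of the glob matching built from these tokens, via `globs_to_regex()` above; the paths are invented. (The `__repr__` bodies of `TreeMatcher`, `ModuleMatcher`, and `GlobMatcher` earlier appear to have lost their angle-bracketed f-strings in this diff; upstream each returns a short summary of the matcher's name and patterns.)

```python
from coverage.files import globs_to_regex

rx = globs_to_regex(["*/tests/*.py"], case_insensitive=True)
print(bool(rx.match("proj/tests/test_app.py")))    # True
print(bool(rx.match("proj\\tests\\TEST_APP.PY")))  # True: slash style and case normalized
```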
+ if pattern.endswith("*"): + raise ConfigError("Pattern must not end with wildcards.") + + # The pattern is meant to match a file path. Let's make it absolute + # unless it already is, or is meant to match any prefix. + if not self.relative: + if not pattern.startswith("*") and not isabs_anywhere(pattern + pattern_sep): + pattern = abs_file(pattern) + if not pattern.endswith(pattern_sep): + pattern += pattern_sep + + # Make a regex from the pattern. + regex = globs_to_regex([pattern], case_insensitive=True, partial=True) + + # Normalize the result: it must end with a path separator. + result_sep = sep(result) + result = result.rstrip(r"\/") + result_sep + self.aliases.append((original_pattern, regex, result)) + + def map(self, path: str, exists:Callable[[str], bool] = source_exists) -> str: + """Map `path` through the aliases. + + `path` is checked against all of the patterns. The first pattern to + match is used to replace the root of the path with the result root. + Only one pattern is ever used. If no patterns match, `path` is + returned unchanged. + + The separator style in the result is made to match that of the result + in the alias. + + `exists` is a function to determine if the resulting path actually + exists. + + Returns the mapped path. If a mapping has happened, this is a + canonical path. If no mapping has happened, it is the original value + of `path` unchanged. + + """ + if not self.pprinted: + self.pprint() + self.pprinted = True + + for original_pattern, regex, result in self.aliases: + m = regex.match(path) + if m: + new = path.replace(m[0], result) + new = new.replace(sep(path), sep(result)) + if not self.relative: + new = canonical_filename(new) + dot_start = result.startswith(("./", ".\\")) and len(result) > 2 + if new.startswith(("./", ".\\")) and not dot_start: + new = new[2:] + if not exists(new): + self.debugfn( + f"Rule {original_pattern!r} changed {path!r} to {new!r} " + + "which doesn't exist, continuing" + ) + continue + self.debugfn( + f"Matched path {path!r} to rule {original_pattern!r} -> {result!r}, " + + f"producing {new!r}" + ) + return new + + # If we get here, no pattern matched. + + if self.relative and not isabs_anywhere(path): + # Auto-generate a pattern to implicitly match relative files + parts = re.split(r"[/\\]", path) + if len(parts) > 1: + dir1 = parts[0] + pattern = f"*/{dir1}" + regex_pat = rf"^(.*[\\/])?{re.escape(dir1)}[\\/]" + result = f"{dir1}{os.sep}" + # Only add a new pattern if we don't already have this pattern. + if not any(p == pattern for p, _, _ in self.aliases): + self.debugfn( + f"Generating rule: {pattern!r} -> {result!r} using regex {regex_pat!r}" + ) + self.aliases.append((pattern, re.compile(regex_pat), result)) + return self.map(path, exists=exists) + + self.debugfn(f"No rules match, path {path!r} is unchanged") + return path + + +def find_python_files(dirname: str, include_namespace_packages: bool) -> Iterable[str]: + """Yield all of the importable Python files in `dirname`, recursively. + + To be importable, the files have to be in a directory with a __init__.py, + except for `dirname` itself, which isn't required to have one. The + assumption is that `dirname` was specified directly, so the user knows + best, but sub-directories are checked for a __init__.py to be sure we only + find the importable files. + + If `include_namespace_packages` is True, then the check for __init__.py + files is skipped. 
+ + Files with strange characters are skipped, since they couldn't have been + imported, and are probably editor side-files. + + """ + for i, (dirpath, dirnames, filenames) in enumerate(os.walk(dirname)): + if not include_namespace_packages: + if i > 0 and "__init__.py" not in filenames: + # If a directory doesn't have __init__.py, then it isn't + # importable and neither are its files + del dirnames[:] + continue + for filename in filenames: + # We're only interested in files that look like reasonable Python + # files: Must end with .py or .pyw, and must not have certain funny + # characters that probably mean they are editor junk. + if re.match(r"^[^.#~!$@%^&*()+=,]+\.pyw?$", filename): + yield os.path.join(dirpath, filename) + + +# Globally set the relative directory. +set_relative_directory() diff --git a/venv/lib/python3.10/site-packages/coverage/fullcoverage/encodings.py b/venv/lib/python3.10/site-packages/coverage/fullcoverage/encodings.py new file mode 100644 index 0000000..73bd564 --- /dev/null +++ b/venv/lib/python3.10/site-packages/coverage/fullcoverage/encodings.py @@ -0,0 +1,57 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""Imposter encodings module that installs a coverage-style tracer. + +This is NOT the encodings module; it is an imposter that sets up tracing +instrumentation and then replaces itself with the real encodings module. + +If the directory that holds this file is placed first in the PYTHONPATH when +using "coverage" to run Python's tests, then this file will become the very +first module imported by the internals of Python 3. It installs a +coverage.py-compatible trace function that can watch Standard Library modules +execute from the very earliest stages of Python's own boot process. This fixes +a problem with coverage.py - that it starts too late to trace the coverage of +many of the most fundamental modules in the Standard Library. + +DO NOT import other modules into here, it will interfere with the goal of this +code executing before all imports. This is why this file isn't type-checked. + +""" + +import sys + +class FullCoverageTracer: + def __init__(self): + # `traces` is a list of trace events. Frames are tricky: the same + # frame object is used for a whole scope, with new line numbers + # written into it. So in one scope, all the frame objects are the + # same object, and will eventually all will point to the last line + # executed. So we keep the line numbers alongside the frames. + # The list looks like: + # + # traces = [ + # ((frame, event, arg), lineno), ... + # ] + # + self.traces = [] + + def fullcoverage_trace(self, *args): + frame, event, arg = args + if frame.f_lineno is not None: + # https://bugs.python.org/issue46911 + self.traces.append((args, frame.f_lineno)) + return self.fullcoverage_trace + +sys.settrace(FullCoverageTracer().fullcoverage_trace) + +# Remove our own directory from sys.path; remove ourselves from +# sys.modules; and re-import "encodings", which will be the real package +# this time. Note that the delete from sys.modules dictionary has to +# happen last, since all of the symbols in this module will become None +# at that exact moment, including "sys". 
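A short sketch of `find_python_files()` defined above; `src` is a placeholder directory name:

```python
from coverage.files import find_python_files

# only .py/.pyw files reachable through __init__.py packages are yielded
# (unless namespace packages are allowed)
for path in find_python_files("src", include_namespace_packages=False):
    print(path)
```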
+ +parentdir = max(filter(__file__.startswith, sys.path), key=len) +sys.path.remove(parentdir) +del sys.modules['encodings'] +import encodings diff --git a/venv/lib/python3.10/site-packages/coverage/html.py b/venv/lib/python3.10/site-packages/coverage/html.py new file mode 100644 index 0000000..7b827a7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/coverage/html.py @@ -0,0 +1,650 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""HTML reporting for coverage.py.""" + +from __future__ import annotations + +import collections +import datetime +import functools +import json +import os +import re +import shutil +import string + +from dataclasses import dataclass +from typing import Any, Dict, Iterable, List, Optional, Tuple, TYPE_CHECKING, cast + +import coverage +from coverage.data import CoverageData, add_data_to_hash +from coverage.exceptions import NoDataError +from coverage.files import flat_rootname +from coverage.misc import ensure_dir, file_be_gone, Hasher, isolate_module, format_local_datetime +from coverage.misc import human_sorted, plural, stdout_link +from coverage.report_core import get_analysis_to_report +from coverage.results import Analysis, Numbers +from coverage.templite import Templite +from coverage.types import TLineNo, TMorf +from coverage.version import __url__ + + +if TYPE_CHECKING: + # To avoid circular imports: + from coverage import Coverage + from coverage.plugins import FileReporter + + # To be able to use 3.8 typing features, and still run on 3.7: + from typing import TypedDict + + class IndexInfoDict(TypedDict): + """Information for each file, to render the index file.""" + nums: Numbers + html_filename: str + relative_filename: str + + class FileInfoDict(TypedDict): + """Summary of the information from last rendering, to avoid duplicate work.""" + hash: str + index: IndexInfoDict + + +os = isolate_module(os) + + +def data_filename(fname: str) -> str: + """Return the path to an "htmlfiles" data file of ours. 
+ """ + static_dir = os.path.join(os.path.dirname(__file__), "htmlfiles") + static_filename = os.path.join(static_dir, fname) + return static_filename + + +def read_data(fname: str) -> str: + """Return the contents of a data file of ours.""" + with open(data_filename(fname)) as data_file: + return data_file.read() + + +def write_html(fname: str, html: str) -> None: + """Write `html` to `fname`, properly encoded.""" + html = re.sub(r"(\A\s+)|(\s+$)", "", html, flags=re.MULTILINE) + "\n" + with open(fname, "wb") as fout: + fout.write(html.encode("ascii", "xmlcharrefreplace")) + + +@dataclass +class LineData: + """The data for each source line of HTML output.""" + tokens: List[Tuple[str, str]] + number: TLineNo + category: str + statement: bool + contexts: List[str] + contexts_label: str + context_list: List[str] + short_annotations: List[str] + long_annotations: List[str] + html: str = "" + context_str: Optional[str] = None + annotate: Optional[str] = None + annotate_long: Optional[str] = None + css_class: str = "" + + +@dataclass +class FileData: + """The data for each source file of HTML output.""" + relative_filename: str + nums: Numbers + lines: List[LineData] + + +class HtmlDataGeneration: + """Generate structured data to be turned into HTML reports.""" + + EMPTY = "(empty)" + + def __init__(self, cov: Coverage) -> None: + self.coverage = cov + self.config = self.coverage.config + data = self.coverage.get_data() + self.has_arcs = data.has_arcs() + if self.config.show_contexts: + if data.measured_contexts() == {""}: + self.coverage._warn("No contexts were measured") + data.set_query_contexts(self.config.report_contexts) + + def data_for_file(self, fr: FileReporter, analysis: Analysis) -> FileData: + """Produce the data needed for one file's report.""" + if self.has_arcs: + missing_branch_arcs = analysis.missing_branch_arcs() + arcs_executed = analysis.arcs_executed() + + if self.config.show_contexts: + contexts_by_lineno = analysis.data.contexts_by_lineno(analysis.filename) + + lines = [] + + for lineno, tokens in enumerate(fr.source_token_lines(), start=1): + # Figure out how to mark this line. 
+ category = "" + short_annotations = [] + long_annotations = [] + + if lineno in analysis.excluded: + category = "exc" + elif lineno in analysis.missing: + category = "mis" + elif self.has_arcs and lineno in missing_branch_arcs: + category = "par" + for b in missing_branch_arcs[lineno]: + if b < 0: + short_annotations.append("exit") + else: + short_annotations.append(str(b)) + long_annotations.append(fr.missing_arc_description(lineno, b, arcs_executed)) + elif lineno in analysis.statements: + category = "run" + + contexts = [] + contexts_label = "" + context_list = [] + if category and self.config.show_contexts: + contexts = human_sorted(c or self.EMPTY for c in contexts_by_lineno.get(lineno, ())) + if contexts == [self.EMPTY]: + contexts_label = self.EMPTY + else: + contexts_label = f"{len(contexts)} ctx" + context_list = contexts + + lines.append(LineData( + tokens=tokens, + number=lineno, + category=category, + statement=(lineno in analysis.statements), + contexts=contexts, + contexts_label=contexts_label, + context_list=context_list, + short_annotations=short_annotations, + long_annotations=long_annotations, + )) + + file_data = FileData( + relative_filename=fr.relative_filename(), + nums=analysis.numbers, + lines=lines, + ) + + return file_data + + +class FileToReport: + """A file we're considering reporting.""" + def __init__(self, fr: FileReporter, analysis: Analysis) -> None: + self.fr = fr + self.analysis = analysis + self.rootname = flat_rootname(fr.relative_filename()) + self.html_filename = self.rootname + ".html" + + +HTML_SAFE = string.ascii_letters + string.digits + "!#$%'()*+,-./:;=?@[]^_`{|}~" + +@functools.lru_cache(maxsize=None) +def encode_int(n: int) -> str: + """Create a short HTML-safe string from an integer, using HTML_SAFE.""" + if n == 0: + return HTML_SAFE[0] + + r = [] + while n: + n, t = divmod(n, len(HTML_SAFE)) + r.append(HTML_SAFE[t]) + return "".join(r) + + +class HtmlReporter: + """HTML reporting.""" + + # These files will be copied from the htmlfiles directory to the output + # directory. + STATIC_FILES = [ + "style.css", + "coverage_html.js", + "keybd_closed.png", + "keybd_open.png", + "favicon_32.png", + ] + + def __init__(self, cov: Coverage) -> None: + self.coverage = cov + self.config = self.coverage.config + self.directory = self.config.html_dir + + self.skip_covered = self.config.html_skip_covered + if self.skip_covered is None: + self.skip_covered = self.config.skip_covered + self.skip_empty = self.config.html_skip_empty + if self.skip_empty is None: + self.skip_empty = self.config.skip_empty + self.skipped_covered_count = 0 + self.skipped_empty_count = 0 + + title = self.config.html_title + + self.extra_css: Optional[str] + if self.config.extra_css: + self.extra_css = os.path.basename(self.config.extra_css) + else: + self.extra_css = None + + self.data = self.coverage.get_data() + self.has_arcs = self.data.has_arcs() + + self.file_summaries: List[IndexInfoDict] = [] + self.all_files_nums: List[Numbers] = [] + self.incr = IncrementalChecker(self.directory) + self.datagen = HtmlDataGeneration(self.coverage) + self.totals = Numbers(precision=self.config.precision) + self.directory_was_empty = False + self.first_fr = None + self.final_fr = None + + self.template_globals = { + # Functions available in the templates. + "escape": escape, + "pair": pair, + "len": len, + + # Constants for this report. 
+ "__url__": __url__, + "__version__": coverage.__version__, + "title": title, + "time_stamp": format_local_datetime(datetime.datetime.now()), + "extra_css": self.extra_css, + "has_arcs": self.has_arcs, + "show_contexts": self.config.show_contexts, + + # Constants for all reports. + # These css classes determine which lines are highlighted by default. + "category": { + "exc": "exc show_exc", + "mis": "mis show_mis", + "par": "par run show_par", + "run": "run", + }, + } + self.pyfile_html_source = read_data("pyfile.html") + self.source_tmpl = Templite(self.pyfile_html_source, self.template_globals) + + def report(self, morfs: Optional[Iterable[TMorf]]) -> float: + """Generate an HTML report for `morfs`. + + `morfs` is a list of modules or file names. + + """ + # Read the status data and check that this run used the same + # global data as the last run. + self.incr.read() + self.incr.check_global_data(self.config, self.pyfile_html_source) + + # Process all the files. For each page we need to supply a link + # to the next and previous page. + files_to_report = [] + + for fr, analysis in get_analysis_to_report(self.coverage, morfs): + ftr = FileToReport(fr, analysis) + should = self.should_report_file(ftr) + if should: + files_to_report.append(ftr) + else: + file_be_gone(os.path.join(self.directory, ftr.html_filename)) + + for i, ftr in enumerate(files_to_report): + if i == 0: + prev_html = "index.html" + else: + prev_html = files_to_report[i - 1].html_filename + if i == len(files_to_report) - 1: + next_html = "index.html" + else: + next_html = files_to_report[i + 1].html_filename + self.write_html_file(ftr, prev_html, next_html) + + if not self.all_files_nums: + raise NoDataError("No data to report.") + + self.totals = cast(Numbers, sum(self.all_files_nums)) + + # Write the index file. + if files_to_report: + first_html = files_to_report[0].html_filename + final_html = files_to_report[-1].html_filename + else: + first_html = final_html = "index.html" + self.index_file(first_html, final_html) + + self.make_local_static_report_files() + return self.totals.n_statements and self.totals.pc_covered + + def make_directory(self) -> None: + """Make sure our htmlcov directory exists.""" + ensure_dir(self.directory) + if not os.listdir(self.directory): + self.directory_was_empty = True + + def make_local_static_report_files(self) -> None: + """Make local instances of static files for HTML report.""" + # The files we provide must always be copied. + for static in self.STATIC_FILES: + shutil.copyfile(data_filename(static), os.path.join(self.directory, static)) + + # Only write the .gitignore file if the directory was originally empty. + # .gitignore can't be copied from the source tree because it would + # prevent the static files from being checked in. + if self.directory_was_empty: + with open(os.path.join(self.directory, ".gitignore"), "w") as fgi: + fgi.write("# Created by coverage.py\n*\n") + + # The user may have extra CSS they want copied. + if self.extra_css: + assert self.config.extra_css is not None + shutil.copyfile(self.config.extra_css, os.path.join(self.directory, self.extra_css)) + + def should_report_file(self, ftr: FileToReport) -> bool: + """Determine if we'll report this file.""" + # Get the numbers for this file. + nums = ftr.analysis.numbers + self.all_files_nums.append(nums) + + if self.skip_covered: + # Don't report on 100% files. 
+ no_missing_lines = (nums.n_missing == 0) + no_missing_branches = (nums.n_partial_branches == 0) + if no_missing_lines and no_missing_branches: + # If there's an existing file, remove it. + self.skipped_covered_count += 1 + return False + + if self.skip_empty: + # Don't report on empty files. + if nums.n_statements == 0: + self.skipped_empty_count += 1 + return False + + return True + + def write_html_file(self, ftr: FileToReport, prev_html: str, next_html: str) -> None: + """Generate an HTML file for one source file.""" + self.make_directory() + + # Find out if the file on disk is already correct. + if self.incr.can_skip_file(self.data, ftr.fr, ftr.rootname): + self.file_summaries.append(self.incr.index_info(ftr.rootname)) + return + + # Write the HTML page for this file. + file_data = self.datagen.data_for_file(ftr.fr, ftr.analysis) + + contexts = collections.Counter(c for cline in file_data.lines for c in cline.contexts) + context_codes = {y: i for (i, y) in enumerate(x[0] for x in contexts.most_common())} + if context_codes: + contexts_json = json.dumps( + {encode_int(v): k for (k, v) in context_codes.items()}, + indent=2, + ) + else: + contexts_json = None + + for ldata in file_data.lines: + # Build the HTML for the line. + html_parts = [] + for tok_type, tok_text in ldata.tokens: + if tok_type == "ws": + html_parts.append(escape(tok_text)) + else: + tok_html = escape(tok_text) or " " + html_parts.append(f'{tok_html}') + ldata.html = "".join(html_parts) + if ldata.context_list: + encoded_contexts = [ + encode_int(context_codes[c_context]) for c_context in ldata.context_list + ] + code_width = max(len(ec) for ec in encoded_contexts) + ldata.context_str = ( + str(code_width) + + "".join(ec.ljust(code_width) for ec in encoded_contexts) + ) + else: + ldata.context_str = "" + + if ldata.short_annotations: + # 202F is NARROW NO-BREAK SPACE. + # 219B is RIGHTWARDS ARROW WITH STROKE. + ldata.annotate = ",   ".join( + f"{ldata.number} ↛ {d}" + for d in ldata.short_annotations + ) + else: + ldata.annotate = None + + if ldata.long_annotations: + longs = ldata.long_annotations + if len(longs) == 1: + ldata.annotate_long = longs[0] + else: + ldata.annotate_long = "{:d} missed branches: {}".format( + len(longs), + ", ".join( + f"{num:d}) {ann_long}" + for num, ann_long in enumerate(longs, start=1) + ), + ) + else: + ldata.annotate_long = None + + css_classes = [] + if ldata.category: + css_classes.append( + self.template_globals["category"][ldata.category] # type: ignore[index] + ) + ldata.css_class = " ".join(css_classes) or "pln" + + html_path = os.path.join(self.directory, ftr.html_filename) + html = self.source_tmpl.render({ + **file_data.__dict__, + "contexts_json": contexts_json, + "prev_html": prev_html, + "next_html": next_html, + }) + write_html(html_path, html) + + # Save this file's information for the index file. + index_info: IndexInfoDict = { + "nums": ftr.analysis.numbers, + "html_filename": ftr.html_filename, + "relative_filename": ftr.fr.relative_filename(), + } + self.file_summaries.append(index_info) + self.incr.set_index_info(ftr.rootname, index_info) + + def index_file(self, first_html: str, final_html: str) -> None: + """Write the index.html file for this report.""" + self.make_directory() + index_tmpl = Templite(read_data("index.html"), self.template_globals) + + skipped_covered_msg = skipped_empty_msg = "" + if self.skipped_covered_count: + n = self.skipped_covered_count + skipped_covered_msg = f"{n} file{plural(n)} skipped due to complete coverage." 
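The token loop above appears to have lost its markup in this diff (the f-string wraps `tok_html` in nothing); upstream wraps each non-whitespace token in a span keyed by token type. A hedged sketch of the presumed form, reusing helpers from this file:

```python
from coverage.html import encode_int, escape

tok_type, tok_text = "nam", "a<b"
tok_html = escape(tok_text) or "&nbsp;"  # the "&nbsp;" entity shows as a bare space above
print(f'<span class="{tok_type}">{tok_html}</span>')  # presumed upstream markup
print(encode_int(42))  # context labels are packed with this compact HTML-safe encoding
```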
+ if self.skipped_empty_count: + n = self.skipped_empty_count + skipped_empty_msg = f"{n} empty file{plural(n)} skipped." + + html = index_tmpl.render({ + "files": self.file_summaries, + "totals": self.totals, + "skipped_covered_msg": skipped_covered_msg, + "skipped_empty_msg": skipped_empty_msg, + "first_html": first_html, + "final_html": final_html, + }) + + index_file = os.path.join(self.directory, "index.html") + write_html(index_file, html) + + print_href = stdout_link(index_file, f"file://{os.path.abspath(index_file)}") + self.coverage._message(f"Wrote HTML report to {print_href}") + + # Write the latest hashes for next time. + self.incr.write() + + +class IncrementalChecker: + """Logic and data to support incremental reporting.""" + + STATUS_FILE = "status.json" + STATUS_FORMAT = 2 + + # The data looks like: + # + # { + # "format": 2, + # "globals": "540ee119c15d52a68a53fe6f0897346d", + # "version": "4.0a1", + # "files": { + # "cogapp___init__": { + # "hash": "e45581a5b48f879f301c0f30bf77a50c", + # "index": { + # "html_filename": "cogapp___init__.html", + # "relative_filename": "cogapp/__init__", + # "nums": [ 1, 14, 0, 0, 0, 0, 0 ] + # } + # }, + # ... + # "cogapp_whiteutils": { + # "hash": "8504bb427fc488c4176809ded0277d51", + # "index": { + # "html_filename": "cogapp_whiteutils.html", + # "relative_filename": "cogapp/whiteutils", + # "nums": [ 1, 59, 0, 1, 28, 2, 2 ] + # } + # } + # } + # } + + def __init__(self, directory: str) -> None: + self.directory = directory + self.reset() + + def reset(self) -> None: + """Initialize to empty. Causes all files to be reported.""" + self.globals = "" + self.files: Dict[str, FileInfoDict] = {} + + def read(self) -> None: + """Read the information we stored last time.""" + usable = False + try: + status_file = os.path.join(self.directory, self.STATUS_FILE) + with open(status_file) as fstatus: + status = json.load(fstatus) + except (OSError, ValueError): + usable = False + else: + usable = True + if status["format"] != self.STATUS_FORMAT: + usable = False + elif status["version"] != coverage.__version__: + usable = False + + if usable: + self.files = {} + for filename, fileinfo in status["files"].items(): + fileinfo["index"]["nums"] = Numbers(*fileinfo["index"]["nums"]) + self.files[filename] = fileinfo + self.globals = status["globals"] + else: + self.reset() + + def write(self) -> None: + """Write the current status.""" + status_file = os.path.join(self.directory, self.STATUS_FILE) + files = {} + for filename, fileinfo in self.files.items(): + index = fileinfo["index"] + index["nums"] = index["nums"].init_args() # type: ignore[typeddict-item] + files[filename] = fileinfo + + status = { + "format": self.STATUS_FORMAT, + "version": coverage.__version__, + "globals": self.globals, + "files": files, + } + with open(status_file, "w") as fout: + json.dump(status, fout, separators=(",", ":")) + + def check_global_data(self, *data: Any) -> None: + """Check the global data that can affect incremental reporting.""" + m = Hasher() + for d in data: + m.update(d) + these_globals = m.hexdigest() + if self.globals != these_globals: + self.reset() + self.globals = these_globals + + def can_skip_file(self, data: CoverageData, fr: FileReporter, rootname: str) -> bool: + """Can we skip reporting this file? + + `data` is a CoverageData object, `fr` is a `FileReporter`, and + `rootname` is the name being used for the file. 
+        """
+        m = Hasher()
+        m.update(fr.source().encode("utf-8"))
+        add_data_to_hash(data, fr.filename, m)
+        this_hash = m.hexdigest()
+
+        that_hash = self.file_hash(rootname)
+
+        if this_hash == that_hash:
+            # Nothing has changed to require the file to be reported again.
+            return True
+        else:
+            self.set_file_hash(rootname, this_hash)
+            return False
+
+    def file_hash(self, fname: str) -> str:
+        """Get the hash of `fname`'s contents."""
+        return self.files.get(fname, {}).get("hash", "")    # type: ignore[call-overload]
+
+    def set_file_hash(self, fname: str, val: str) -> None:
+        """Set the hash of `fname`'s contents."""
+        self.files.setdefault(fname, {})["hash"] = val      # type: ignore[typeddict-item]
+
+    def index_info(self, fname: str) -> IndexInfoDict:
+        """Get the information for index.html for `fname`."""
+        return self.files.get(fname, {}).get("index", {})   # type: ignore
+
+    def set_index_info(self, fname: str, info: IndexInfoDict) -> None:
+        """Set the information for index.html for `fname`."""
+        self.files.setdefault(fname, {})["index"] = info    # type: ignore[typeddict-item]
+
+
+# Helpers for templates and generating HTML
+
+def escape(t: str) -> str:
+    """HTML-escape the text in `t`.
+
+    This is only suitable for HTML text, not attributes.
+
+    """
+    # Convert HTML special chars into HTML entities.
+    return t.replace("&", "&amp;").replace("<", "&lt;")
+
+
+def pair(ratio: Tuple[int, int]) -> str:
+    """Format a pair of numbers so JavaScript can read them in an attribute."""
+    return "%s %s" % ratio
diff --git a/venv/lib/python3.10/site-packages/coverage/htmlfiles/coverage_html.js b/venv/lib/python3.10/site-packages/coverage/htmlfiles/coverage_html.js
new file mode 100644
index 0000000..5934882
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/coverage/htmlfiles/coverage_html.js
@@ -0,0 +1,624 @@
+// Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+// For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
+
+// Coverage.py HTML report browser code.
+/*jslint browser: true, sloppy: true, vars: true, plusplus: true, maxerr: 50, indent: 4 */ +/*global coverage: true, document, window, $ */ + +coverage = {}; + +// General helpers +function debounce(callback, wait) { + let timeoutId = null; + return function(...args) { + clearTimeout(timeoutId); + timeoutId = setTimeout(() => { + callback.apply(this, args); + }, wait); + }; +}; + +function checkVisible(element) { + const rect = element.getBoundingClientRect(); + const viewBottom = Math.max(document.documentElement.clientHeight, window.innerHeight); + const viewTop = 30; + return !(rect.bottom < viewTop || rect.top >= viewBottom); +} + +function on_click(sel, fn) { + const elt = document.querySelector(sel); + if (elt) { + elt.addEventListener("click", fn); + } +} + +// Helpers for table sorting +function getCellValue(row, column = 0) { + const cell = row.cells[column] // nosemgrep: eslint.detect-object-injection + if (cell.childElementCount == 1) { + const child = cell.firstElementChild + if (child instanceof HTMLTimeElement && child.dateTime) { + return child.dateTime + } else if (child instanceof HTMLDataElement && child.value) { + return child.value + } + } + return cell.innerText || cell.textContent; +} + +function rowComparator(rowA, rowB, column = 0) { + let valueA = getCellValue(rowA, column); + let valueB = getCellValue(rowB, column); + if (!isNaN(valueA) && !isNaN(valueB)) { + return valueA - valueB + } + return valueA.localeCompare(valueB, undefined, {numeric: true}); +} + +function sortColumn(th) { + // Get the current sorting direction of the selected header, + // clear state on other headers and then set the new sorting direction + const currentSortOrder = th.getAttribute("aria-sort"); + [...th.parentElement.cells].forEach(header => header.setAttribute("aria-sort", "none")); + if (currentSortOrder === "none") { + th.setAttribute("aria-sort", th.dataset.defaultSortOrder || "ascending"); + } else { + th.setAttribute("aria-sort", currentSortOrder === "ascending" ? "descending" : "ascending"); + } + + const column = [...th.parentElement.cells].indexOf(th) + + // Sort all rows and afterwards append them in order to move them in the DOM + Array.from(th.closest("table").querySelectorAll("tbody tr")) + .sort((rowA, rowB) => rowComparator(rowA, rowB, column) * (th.getAttribute("aria-sort") === "ascending" ? 1 : -1)) + .forEach(tr => tr.parentElement.appendChild(tr) ); +} + +// Find all the elements with data-shortcut attribute, and use them to assign a shortcut key. +coverage.assign_shortkeys = function () { + document.querySelectorAll("[data-shortcut]").forEach(element => { + document.addEventListener("keypress", event => { + if (event.target.tagName.toLowerCase() === "input") { + return; // ignore keypress from search filter + } + if (event.key === element.dataset.shortcut) { + element.click(); + } + }); + }); +}; + +// Create the events for the filter box. +coverage.wire_up_filter = function () { + // Cache elements. + const table = document.querySelector("table.index"); + const table_body_rows = table.querySelectorAll("tbody tr"); + const no_rows = document.getElementById("no_rows"); + + // Observe filter keyevents. 
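// A usage sketch for the debounce() helper above (assumed names, not part
// of the original file): rapid calls collapse into the last one, so the
// filter handler below runs once per pause in typing rather than on every
// keystroke.
//
//     const search = debounce(value => console.log("filter:", value), 250);
//     search("a"); search("ab"); search("abc");   // logs "filter: abc" once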
+ document.getElementById("filter").addEventListener("input", debounce(event => { + // Keep running total of each metric, first index contains number of shown rows + const totals = new Array(table.rows[0].cells.length).fill(0); + // Accumulate the percentage as fraction + totals[totals.length - 1] = { "numer": 0, "denom": 0 }; // nosemgrep: eslint.detect-object-injection + + // Hide / show elements. + table_body_rows.forEach(row => { + if (!row.cells[0].textContent.includes(event.target.value)) { + // hide + row.classList.add("hidden"); + return; + } + + // show + row.classList.remove("hidden"); + totals[0]++; + + for (let column = 1; column < totals.length; column++) { + // Accumulate dynamic totals + cell = row.cells[column] // nosemgrep: eslint.detect-object-injection + if (column === totals.length - 1) { + // Last column contains percentage + const [numer, denom] = cell.dataset.ratio.split(" "); + totals[column]["numer"] += parseInt(numer, 10); // nosemgrep: eslint.detect-object-injection + totals[column]["denom"] += parseInt(denom, 10); // nosemgrep: eslint.detect-object-injection + } else { + totals[column] += parseInt(cell.textContent, 10); // nosemgrep: eslint.detect-object-injection + } + } + }); + + // Show placeholder if no rows will be displayed. + if (!totals[0]) { + // Show placeholder, hide table. + no_rows.style.display = "block"; + table.style.display = "none"; + return; + } + + // Hide placeholder, show table. + no_rows.style.display = null; + table.style.display = null; + + const footer = table.tFoot.rows[0]; + // Calculate new dynamic sum values based on visible rows. + for (let column = 1; column < totals.length; column++) { + // Get footer cell element. + const cell = footer.cells[column]; // nosemgrep: eslint.detect-object-injection + + // Set value into dynamic footer cell element. + if (column === totals.length - 1) { + // Percentage column uses the numerator and denominator, + // and adapts to the number of decimal places. + const match = /\.([0-9]+)/.exec(cell.textContent); + const places = match ? match[1].length : 0; + const { numer, denom } = totals[column]; // nosemgrep: eslint.detect-object-injection + cell.dataset.ratio = `${numer} ${denom}`; + // Check denom to prevent NaN if filtered files contain no statements + cell.textContent = denom + ? `${(numer * 100 / denom).toFixed(places)}%` + : `${(100).toFixed(places)}%`; + } else { + cell.textContent = totals[column]; // nosemgrep: eslint.detect-object-injection + } + } + })); + + // Trigger change event on setup, to force filter on page refresh + // (filter value may still be present). + document.getElementById("filter").dispatchEvent(new Event("input")); +}; + +coverage.INDEX_SORT_STORAGE = "COVERAGE_INDEX_SORT_2"; + +// Loaded on index.html +coverage.index_ready = function () { + coverage.assign_shortkeys(); + coverage.wire_up_filter(); + document.querySelectorAll("[data-sortable] th[aria-sort]").forEach( + th => th.addEventListener("click", e => sortColumn(e.target)) + ); + + // Look for a localStorage item containing previous sort settings: + const stored_list = localStorage.getItem(coverage.INDEX_SORT_STORAGE); + + if (stored_list) { + const {column, direction} = JSON.parse(stored_list); + const th = document.querySelector("[data-sortable]").tHead.rows[0].cells[column]; // nosemgrep: eslint.detect-object-injection + th.setAttribute("aria-sort", direction === "ascending" ? 
"descending" : "ascending"); + th.click() + } + + // Watch for page unload events so we can save the final sort settings: + window.addEventListener("unload", function () { + const th = document.querySelector('[data-sortable] th[aria-sort="ascending"], [data-sortable] [aria-sort="descending"]'); + if (!th) { + return; + } + localStorage.setItem(coverage.INDEX_SORT_STORAGE, JSON.stringify({ + column: [...th.parentElement.cells].indexOf(th), + direction: th.getAttribute("aria-sort"), + })); + }); + + on_click(".button_prev_file", coverage.to_prev_file); + on_click(".button_next_file", coverage.to_next_file); + + on_click(".button_show_hide_help", coverage.show_hide_help); +}; + +// -- pyfile stuff -- + +coverage.LINE_FILTERS_STORAGE = "COVERAGE_LINE_FILTERS"; + +coverage.pyfile_ready = function () { + // If we're directed to a particular line number, highlight the line. + var frag = location.hash; + if (frag.length > 2 && frag[1] === "t") { + document.querySelector(frag).closest(".n").classList.add("highlight"); + coverage.set_sel(parseInt(frag.substr(2), 10)); + } else { + coverage.set_sel(0); + } + + on_click(".button_toggle_run", coverage.toggle_lines); + on_click(".button_toggle_mis", coverage.toggle_lines); + on_click(".button_toggle_exc", coverage.toggle_lines); + on_click(".button_toggle_par", coverage.toggle_lines); + + on_click(".button_next_chunk", coverage.to_next_chunk_nicely); + on_click(".button_prev_chunk", coverage.to_prev_chunk_nicely); + on_click(".button_top_of_page", coverage.to_top); + on_click(".button_first_chunk", coverage.to_first_chunk); + + on_click(".button_prev_file", coverage.to_prev_file); + on_click(".button_next_file", coverage.to_next_file); + on_click(".button_to_index", coverage.to_index); + + on_click(".button_show_hide_help", coverage.show_hide_help); + + coverage.filters = undefined; + try { + coverage.filters = localStorage.getItem(coverage.LINE_FILTERS_STORAGE); + } catch(err) {} + + if (coverage.filters) { + coverage.filters = JSON.parse(coverage.filters); + } + else { + coverage.filters = {run: false, exc: true, mis: true, par: true}; + } + + for (cls in coverage.filters) { + coverage.set_line_visibilty(cls, coverage.filters[cls]); // nosemgrep: eslint.detect-object-injection + } + + coverage.assign_shortkeys(); + coverage.init_scroll_markers(); + coverage.wire_up_sticky_header(); + + document.querySelectorAll("[id^=ctxs]").forEach( + cbox => cbox.addEventListener("click", coverage.expand_contexts) + ); + + // Rebuild scroll markers when the window height changes. + window.addEventListener("resize", coverage.build_scroll_markers); +}; + +coverage.toggle_lines = function (event) { + const btn = event.target.closest("button"); + const category = btn.value + const show = !btn.classList.contains("show_" + category); + coverage.set_line_visibilty(category, show); + coverage.build_scroll_markers(); + coverage.filters[category] = show; + try { + localStorage.setItem(coverage.LINE_FILTERS_STORAGE, JSON.stringify(coverage.filters)); + } catch(err) {} +}; + +coverage.set_line_visibilty = function (category, should_show) { + const cls = "show_" + category; + const btn = document.querySelector(".button_toggle_" + category); + if (btn) { + if (should_show) { + document.querySelectorAll("#source ." + category).forEach(e => e.classList.add(cls)); + btn.classList.add(cls); + } + else { + document.querySelectorAll("#source ." + category).forEach(e => e.classList.remove(cls)); + btn.classList.remove(cls); + } + } +}; + +// Return the nth line div. 
+coverage.line_elt = function (n) { + return document.getElementById("t" + n)?.closest("p"); +}; + +// Set the selection. b and e are line numbers. +coverage.set_sel = function (b, e) { + // The first line selected. + coverage.sel_begin = b; + // The next line not selected. + coverage.sel_end = (e === undefined) ? b+1 : e; +}; + +coverage.to_top = function () { + coverage.set_sel(0, 1); + coverage.scroll_window(0); +}; + +coverage.to_first_chunk = function () { + coverage.set_sel(0, 1); + coverage.to_next_chunk(); +}; + +coverage.to_prev_file = function () { + window.location = document.getElementById("prevFileLink").href; +} + +coverage.to_next_file = function () { + window.location = document.getElementById("nextFileLink").href; +} + +coverage.to_index = function () { + location.href = document.getElementById("indexLink").href; +} + +coverage.show_hide_help = function () { + const helpCheck = document.getElementById("help_panel_state") + helpCheck.checked = !helpCheck.checked; +} + +// Return a string indicating what kind of chunk this line belongs to, +// or null if not a chunk. +coverage.chunk_indicator = function (line_elt) { + const classes = line_elt?.className; + if (!classes) { + return null; + } + const match = classes.match(/\bshow_\w+\b/); + if (!match) { + return null; + } + return match[0]; +}; + +coverage.to_next_chunk = function () { + const c = coverage; + + // Find the start of the next colored chunk. + var probe = c.sel_end; + var chunk_indicator, probe_line; + while (true) { + probe_line = c.line_elt(probe); + if (!probe_line) { + return; + } + chunk_indicator = c.chunk_indicator(probe_line); + if (chunk_indicator) { + break; + } + probe++; + } + + // There's a next chunk, `probe` points to it. + var begin = probe; + + // Find the end of this chunk. + var next_indicator = chunk_indicator; + while (next_indicator === chunk_indicator) { + probe++; + probe_line = c.line_elt(probe); + next_indicator = c.chunk_indicator(probe_line); + } + c.set_sel(begin, probe); + c.show_selection(); +}; + +coverage.to_prev_chunk = function () { + const c = coverage; + + // Find the end of the prev colored chunk. + var probe = c.sel_begin-1; + var probe_line = c.line_elt(probe); + if (!probe_line) { + return; + } + var chunk_indicator = c.chunk_indicator(probe_line); + while (probe > 1 && !chunk_indicator) { + probe--; + probe_line = c.line_elt(probe); + if (!probe_line) { + return; + } + chunk_indicator = c.chunk_indicator(probe_line); + } + + // There's a prev chunk, `probe` points to its last line. + var end = probe+1; + + // Find the beginning of this chunk. + var prev_indicator = chunk_indicator; + while (prev_indicator === chunk_indicator) { + probe--; + if (probe <= 0) { + return; + } + probe_line = c.line_elt(probe); + prev_indicator = c.chunk_indicator(probe_line); + } + c.set_sel(probe+1, end); + c.show_selection(); +}; + +// Returns 0, 1, or 2: how many of the two ends of the selection are on +// the screen right now? +coverage.selection_ends_on_screen = function () { + if (coverage.sel_begin === 0) { + return 0; + } + + const begin = coverage.line_elt(coverage.sel_begin); + const end = coverage.line_elt(coverage.sel_end-1); + + return ( + (checkVisible(begin) ? 1 : 0) + + (checkVisible(end) ? 1 : 0) + ); +}; + +coverage.to_next_chunk_nicely = function () { + if (coverage.selection_ends_on_screen() === 0) { + // The selection is entirely off the screen: + // Set the top line on the screen as selection. 
+
+        // This will select the top-left of the viewport
+        // As this is most likely the span with the line number we take the parent
+        const line = document.elementFromPoint(0, 0).parentElement;
+        if (line.parentElement !== document.getElementById("source")) {
+            // The element is not a source line but the header or similar
+            coverage.select_line_or_chunk(1);
+        } else {
+            // We extract the line number from the id
+            coverage.select_line_or_chunk(parseInt(line.id.substring(1), 10));
+        }
+    }
+    coverage.to_next_chunk();
+};
+
+coverage.to_prev_chunk_nicely = function () {
+    if (coverage.selection_ends_on_screen() === 0) {
+        // The selection is entirely off the screen:
+        // Set the lowest line on the screen as selection.
+
+        // This will select the bottom-left of the viewport
+        // As this is most likely the span with the line number we take the parent
+        // (note elementFromPoint takes (x, y), so x is 0 for the left edge)
+        const line = document.elementFromPoint(0, document.documentElement.clientHeight-1).parentElement;
+        if (line.parentElement !== document.getElementById("source")) {
+            // The element is not a source line but the header or similar
+            coverage.select_line_or_chunk(coverage.lines_len);
+        } else {
+            // We extract the line number from the id
+            coverage.select_line_or_chunk(parseInt(line.id.substring(1), 10));
+        }
+    }
+    coverage.to_prev_chunk();
+};
+
+// Select line number lineno, or if it is in a colored chunk, select the
+// entire chunk
+coverage.select_line_or_chunk = function (lineno) {
+    var c = coverage;
+    var probe_line = c.line_elt(lineno);
+    if (!probe_line) {
+        return;
+    }
+    var the_indicator = c.chunk_indicator(probe_line);
+    if (the_indicator) {
+        // The line is in a highlighted chunk.
+        // Search backward for the first line.
+        var probe = lineno;
+        var indicator = the_indicator;
+        while (probe > 0 && indicator === the_indicator) {
+            probe--;
+            probe_line = c.line_elt(probe);
+            if (!probe_line) {
+                break;
+            }
+            indicator = c.chunk_indicator(probe_line);
+        }
+        var begin = probe + 1;
+
+        // Search forward for the last line.
+        probe = lineno;
+        indicator = the_indicator;
+        while (indicator === the_indicator) {
+            probe++;
+            probe_line = c.line_elt(probe);
+            indicator = c.chunk_indicator(probe_line);
+        }
+
+        coverage.set_sel(begin, probe);
+    }
+    else {
+        coverage.set_sel(lineno);
+    }
+};
+
+coverage.show_selection = function () {
+    // Highlight the lines in the chunk
+    document.querySelectorAll("#source .highlight").forEach(e => e.classList.remove("highlight"));
+    for (let probe = coverage.sel_begin; probe < coverage.sel_end; probe++) {
+        coverage.line_elt(probe).querySelector(".n").classList.add("highlight");
+    }
+
+    coverage.scroll_to_selection();
+};
+
+coverage.scroll_to_selection = function () {
+    // Scroll the page if the chunk isn't fully visible.
+    if (coverage.selection_ends_on_screen() < 2) {
+        const element = coverage.line_elt(coverage.sel_begin);
+        coverage.scroll_window(element.offsetTop - 60);
+    }
+};
+
+coverage.scroll_window = function (to_pos) {
+    window.scroll({top: to_pos, behavior: "smooth"});
+};
+
+coverage.init_scroll_markers = function () {
+    // Init some variables
+    coverage.lines_len = document.querySelectorAll("#source > p").length;
+
+    // Build html
+    coverage.build_scroll_markers();
+};
+
+coverage.build_scroll_markers = function () {
+    const temp_scroll_marker = document.getElementById("scroll_marker")
+    if (temp_scroll_marker) temp_scroll_marker.remove();
+    // Don't build markers if the window has no scroll bar.
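// Worked example with assumed numbers (not part of the original file):
// the geometry below maps document coordinates onto the fixed scroll
// gutter. marker_scale is window.innerHeight / document.body.scrollHeight,
// so with an 800px-tall window over an 8000px-tall page the scale is
// 800 / 8000 = 0.1, and a line at offsetTop 2400px draws its marker at
// Math.floor(2400 * 0.1) = 240px from the top of the gutter.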
+    if (document.body.scrollHeight <= window.innerHeight) {
+        return;
+    }
+
+    const marker_scale = window.innerHeight / document.body.scrollHeight;
+    const line_height = Math.min(Math.max(3, window.innerHeight / coverage.lines_len), 10);
+
+    let previous_line = -99, last_mark, last_top;
+
+    const scroll_marker = document.createElement("div");
+    scroll_marker.id = "scroll_marker";
+    document.getElementById("source").querySelectorAll(
+        "p.show_run, p.show_mis, p.show_exc, p.show_par"
+    ).forEach(element => {
+        const line_top = Math.floor(element.offsetTop * marker_scale);
+        const line_number = parseInt(element.querySelector(".n a").id.substr(1));
+
+        if (line_number === previous_line + 1) {
+            // If this solid missed block just make previous mark higher.
+            last_mark.style.height = `${line_top + line_height - last_top}px`;
+        } else {
+            // Add colored line in scroll_marker block.
+            last_mark = document.createElement("div");
+            last_mark.id = `m${line_number}`;
+            last_mark.classList.add("marker");
+            last_mark.style.height = `${line_height}px`;
+            last_mark.style.top = `${line_top}px`;
+            scroll_marker.append(last_mark);
+            last_top = line_top;
+        }
+
+        previous_line = line_number;
+    });
+
+    // Append last to prevent layout calculation
+    document.body.append(scroll_marker);
+};
+
+coverage.wire_up_sticky_header = function () {
+    const header = document.querySelector("header");
+    const header_bottom = (
+        header.querySelector(".content h2").getBoundingClientRect().top -
+        header.getBoundingClientRect().top
+    );
+
+    function updateHeader() {
+        if (window.scrollY > header_bottom) {
+            header.classList.add("sticky");
+        } else {
+            header.classList.remove("sticky");
+        }
+    }
+
+    window.addEventListener("scroll", updateHeader);
+    updateHeader();
+};
+
+coverage.expand_contexts = function (e) {
+    var ctxs = e.target.parentNode.querySelector(".ctxs");
+
+    if (!ctxs.classList.contains("expanded")) {
+        var ctxs_text = ctxs.textContent;
+        var width = Number(ctxs_text[0]);
+        ctxs.textContent = "";
+        for (var i = 1; i < ctxs_text.length; i += width) {
+            let key = ctxs_text.substring(i, i + width).trim();
+            ctxs.appendChild(document.createTextNode(contexts[key]));
+            ctxs.appendChild(document.createElement("br"));
+        }
+        ctxs.classList.add("expanded");
+    }
+};
+
+document.addEventListener("DOMContentLoaded", () => {
+    if (document.body.classList.contains("indexfile")) {
+        coverage.index_ready();
+    } else {
+        coverage.pyfile_ready();
+    }
+});
diff --git a/venv/lib/python3.10/site-packages/coverage/htmlfiles/favicon_32.png b/venv/lib/python3.10/site-packages/coverage/htmlfiles/favicon_32.png
new file mode 100644
index 0000000..8649f04
Binary files /dev/null and b/venv/lib/python3.10/site-packages/coverage/htmlfiles/favicon_32.png differ
diff --git a/venv/lib/python3.10/site-packages/coverage/htmlfiles/index.html b/venv/lib/python3.10/site-packages/coverage/htmlfiles/index.html
new file mode 100644
index 0000000..bde46ea
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/coverage/htmlfiles/index.html
@@ -0,0 +1,142 @@
+{# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 #}
+{# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt #}
+<!DOCTYPE html>
+<html>
+<head>
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
+    <title>{{ title|escape }}</title>
+    <link rel="icon" sizes="32x32" href="favicon_32.png">
+    <link rel="stylesheet" href="style.css" type="text/css">
+    {% if extra_css %}
+        <link rel="stylesheet" href="{{ extra_css }}" type="text/css">
+    {% endif %}
+    <script type="text/javascript" src="coverage_html.js" defer></script>
+</head>
+<body class="indexfile">
+    <header>
+        <div class="content">
+            <h1>{{ title|escape }}:
+                <span class="pc_cov">{{totals.pc_covered_str}}%</span>
+            </h1>

+ + + +
+ +
+ +

+            <p class="text">
+                <a class="nav" href="https://coverage.readthedocs.io">coverage.py v{{__version__}}</a>,
+                created at {{ time_stamp }}
+            </p>

+
+
+ +
+        <table class="index" data-sortable>
+            <thead>
+                {# The title="" attr doesn't work in Safari. #}
+                <tr class="tablehead" title="Click to sort">
+                    <th class="name left" aria-sort="none" data-shortcut="n">Module</th>
+                    <th aria-sort="none" data-shortcut="s">statements</th>
+                    <th aria-sort="none" data-shortcut="m">missing</th>
+                    <th aria-sort="none" data-shortcut="x">excluded</th>
+                    {% if has_arcs %}
+                    <th aria-sort="none" data-shortcut="b">branches</th>
+                    <th aria-sort="none" data-shortcut="p">partial</th>
+                    {% endif %}
+                    <th class="right" aria-sort="none" data-shortcut="c">coverage</th>
+                </tr>
+            </thead>
+            <tbody>
+                {% for file in files %}
+                <tr class="file">
+                    <td class="name left"><a href="{{file.html_filename}}">{{file.relative_filename}}</a></td>
+                    <td>{{file.nums.n_statements}}</td>
+                    <td>{{file.nums.n_missing}}</td>
+                    <td>{{file.nums.n_excluded}}</td>
+                    {% if has_arcs %}
+                    <td>{{file.nums.n_branches}}</td>
+                    <td>{{file.nums.n_partial_branches}}</td>
+                    {% endif %}
+                    <td class="right" data-ratio="{{file.nums.ratio_covered|pair}}">{{file.nums.pc_covered_str}}%</td>
+                </tr>
+                {% endfor %}
+            </tbody>
+            <tfoot>
+                <tr class="total">
+                    <td class="name left">Total</td>
+                    <td>{{totals.n_statements}}</td>
+                    <td>{{totals.n_missing}}</td>
+                    <td>{{totals.n_excluded}}</td>
+                    {% if has_arcs %}
+                    <td>{{totals.n_branches}}</td>
+                    <td>{{totals.n_partial_branches}}</td>
+                    {% endif %}
+                    <td class="right" data-ratio="{{totals.ratio_covered|pair}}">{{totals.pc_covered_str}}%</td>
+                </tr>
+            </tfoot>
+        </table>
+        <p id="no_rows">
+            No items found using the specified filter.
+        </p>
+        {% if skipped_covered_msg %}
+            <p>{{ skipped_covered_msg }}</p>
+        {% endif %}
+        {% if skipped_empty_msg %}
+            <p>{{ skipped_empty_msg }}</p>
+        {% endif %}
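One detail worth noting in the rows above: pair() in html.py writes each coverage ratio into data-ratio as "numer denom", and the filter code in coverage_html.js re-aggregates those pairs over the visible rows to rebuild the footer percentage. A small sketch of that aggregation (cell lookup simplified; names assumed, not the library's API):

    // Combine data-ratio="numer denom" attributes into one percentage;
    // e.g. rows carrying "3 4" and "1 4" combine to 4/8 = 50.0%.
    function combinedPercent(cells) {
        let numer = 0, denom = 0;
        for (const cell of cells) {
            const [n, d] = cell.dataset.ratio.split(" ");
            numer += parseInt(n, 10);
            denom += parseInt(d, 10);
        }
        return denom ? (numer * 100 / denom).toFixed(1) + "%" : "100.0%";
    }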
+ + + + + diff --git a/venv/lib/python3.10/site-packages/coverage/htmlfiles/keybd_closed.png b/venv/lib/python3.10/site-packages/coverage/htmlfiles/keybd_closed.png new file mode 100644 index 0000000..ba119c4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/coverage/htmlfiles/keybd_closed.png differ diff --git a/venv/lib/python3.10/site-packages/coverage/htmlfiles/keybd_open.png b/venv/lib/python3.10/site-packages/coverage/htmlfiles/keybd_open.png new file mode 100644 index 0000000..a8bac6c Binary files /dev/null and b/venv/lib/python3.10/site-packages/coverage/htmlfiles/keybd_open.png differ diff --git a/venv/lib/python3.10/site-packages/coverage/htmlfiles/pyfile.html b/venv/lib/python3.10/site-packages/coverage/htmlfiles/pyfile.html new file mode 100644 index 0000000..bc8fa69 --- /dev/null +++ b/venv/lib/python3.10/site-packages/coverage/htmlfiles/pyfile.html @@ -0,0 +1,149 @@ +{# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 #} +{# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt #} + + + + + + Coverage for {{relative_filename|escape}}: {{nums.pc_covered_str}}% + + + {% if extra_css %} + + {% endif %} + + {% if contexts_json %} + + {% endif %} + + + + + +
+ +
+ +
+ {% for line in lines -%} + {% joined %} +

+ {{line.number}} + {{line.html}}  + {% if line.context_list %} + + {% endif %} + {# Things that should float right in the line. #} + + {% if line.annotate %} + {{line.annotate}} + {{line.annotate_long}} + {% endif %} + {% if line.contexts %} + + {% endif %} + + {# Things that should appear below the line. #} + {% if line.context_str %} + {{ line.context_str }} + {% endif %} +

+ {% endjoined %} + {% endfor %} +
+ + + + + diff --git a/venv/lib/python3.10/site-packages/coverage/htmlfiles/style.css b/venv/lib/python3.10/site-packages/coverage/htmlfiles/style.css new file mode 100644 index 0000000..11b24c4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/coverage/htmlfiles/style.css @@ -0,0 +1,309 @@ +@charset "UTF-8"; +/* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */ +/* For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt */ +/* Don't edit this .css file. Edit the .scss file instead! */ +html, body, h1, h2, h3, p, table, td, th { margin: 0; padding: 0; border: 0; font-weight: inherit; font-style: inherit; font-size: 100%; font-family: inherit; vertical-align: baseline; } + +body { font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; font-size: 1em; background: #fff; color: #000; } + +@media (prefers-color-scheme: dark) { body { background: #1e1e1e; } } + +@media (prefers-color-scheme: dark) { body { color: #eee; } } + +html > body { font-size: 16px; } + +a:active, a:focus { outline: 2px dashed #007acc; } + +p { font-size: .875em; line-height: 1.4em; } + +table { border-collapse: collapse; } + +td { vertical-align: top; } + +table tr.hidden { display: none !important; } + +p#no_rows { display: none; font-size: 1.2em; } + +a.nav { text-decoration: none; color: inherit; } + +a.nav:hover { text-decoration: underline; color: inherit; } + +.hidden { display: none; } + +header { background: #f8f8f8; width: 100%; z-index: 2; border-bottom: 1px solid #ccc; } + +@media (prefers-color-scheme: dark) { header { background: black; } } + +@media (prefers-color-scheme: dark) { header { border-color: #333; } } + +header .content { padding: 1rem 3.5rem; } + +header h2 { margin-top: .5em; font-size: 1em; } + +header p.text { margin: .5em 0 -.5em; color: #666; font-style: italic; } + +@media (prefers-color-scheme: dark) { header p.text { color: #aaa; } } + +header.sticky { position: fixed; left: 0; right: 0; height: 2.5em; } + +header.sticky .text { display: none; } + +header.sticky h1, header.sticky h2 { font-size: 1em; margin-top: 0; display: inline-block; } + +header.sticky .content { padding: 0.5rem 3.5rem; } + +header.sticky .content p { font-size: 1em; } + +header.sticky ~ #source { padding-top: 6.5em; } + +main { position: relative; z-index: 1; } + +footer { margin: 1rem 3.5rem; } + +footer .content { padding: 0; color: #666; font-style: italic; } + +@media (prefers-color-scheme: dark) { footer .content { color: #aaa; } } + +#index { margin: 1rem 0 0 3.5rem; } + +h1 { font-size: 1.25em; display: inline-block; } + +#filter_container { float: right; margin: 0 2em 0 0; } + +#filter_container input { width: 10em; padding: 0.2em 0.5em; border: 2px solid #ccc; background: #fff; color: #000; } + +@media (prefers-color-scheme: dark) { #filter_container input { border-color: #444; } } + +@media (prefers-color-scheme: dark) { #filter_container input { background: #1e1e1e; } } + +@media (prefers-color-scheme: dark) { #filter_container input { color: #eee; } } + +#filter_container input:focus { border-color: #007acc; } + +header button { font-family: inherit; font-size: inherit; border: 1px solid; border-radius: .2em; color: inherit; padding: .1em .5em; margin: 1px calc(.1em + 1px); cursor: pointer; border-color: #ccc; } + +@media (prefers-color-scheme: dark) { header button { border-color: #444; } } + +header button:active, header button:focus { outline: 2px dashed #007acc; } + +header button.run { 
background: #eeffee; } + +@media (prefers-color-scheme: dark) { header button.run { background: #373d29; } } + +header button.run.show_run { background: #dfd; border: 2px solid #00dd00; margin: 0 .1em; } + +@media (prefers-color-scheme: dark) { header button.run.show_run { background: #373d29; } } + +header button.mis { background: #ffeeee; } + +@media (prefers-color-scheme: dark) { header button.mis { background: #4b1818; } } + +header button.mis.show_mis { background: #fdd; border: 2px solid #ff0000; margin: 0 .1em; } + +@media (prefers-color-scheme: dark) { header button.mis.show_mis { background: #4b1818; } } + +header button.exc { background: #f7f7f7; } + +@media (prefers-color-scheme: dark) { header button.exc { background: #333; } } + +header button.exc.show_exc { background: #eee; border: 2px solid #808080; margin: 0 .1em; } + +@media (prefers-color-scheme: dark) { header button.exc.show_exc { background: #333; } } + +header button.par { background: #ffffd5; } + +@media (prefers-color-scheme: dark) { header button.par { background: #650; } } + +header button.par.show_par { background: #ffa; border: 2px solid #bbbb00; margin: 0 .1em; } + +@media (prefers-color-scheme: dark) { header button.par.show_par { background: #650; } } + +#help_panel, #source p .annotate.long { display: none; position: absolute; z-index: 999; background: #ffffcc; border: 1px solid #888; border-radius: .2em; color: #333; padding: .25em .5em; } + +#source p .annotate.long { white-space: normal; float: right; top: 1.75em; right: 1em; height: auto; } + +#help_panel_wrapper { float: right; position: relative; } + +#keyboard_icon { margin: 5px; } + +#help_panel_state { display: none; } + +#help_panel { top: 25px; right: 0; padding: .75em; border: 1px solid #883; color: #333; } + +#help_panel .keyhelp p { margin-top: .75em; } + +#help_panel .legend { font-style: italic; margin-bottom: 1em; } + +.indexfile #help_panel { width: 25em; } + +.pyfile #help_panel { width: 18em; } + +#help_panel_state:checked ~ #help_panel { display: block; } + +kbd { border: 1px solid black; border-color: #888 #333 #333 #888; padding: .1em .35em; font-family: SFMono-Regular, Menlo, Monaco, Consolas, monospace; font-weight: bold; background: #eee; border-radius: 3px; } + +#source { padding: 1em 0 1em 3.5rem; font-family: SFMono-Regular, Menlo, Monaco, Consolas, monospace; } + +#source p { position: relative; white-space: pre; } + +#source p * { box-sizing: border-box; } + +#source p .n { float: left; text-align: right; width: 3.5rem; box-sizing: border-box; margin-left: -3.5rem; padding-right: 1em; color: #999; } + +@media (prefers-color-scheme: dark) { #source p .n { color: #777; } } + +#source p .n.highlight { background: #ffdd00; } + +#source p .n a { margin-top: -4em; padding-top: 4em; text-decoration: none; color: #999; } + +@media (prefers-color-scheme: dark) { #source p .n a { color: #777; } } + +#source p .n a:hover { text-decoration: underline; color: #999; } + +@media (prefers-color-scheme: dark) { #source p .n a:hover { color: #777; } } + +#source p .t { display: inline-block; width: 100%; box-sizing: border-box; margin-left: -.5em; padding-left: 0.3em; border-left: 0.2em solid #fff; } + +@media (prefers-color-scheme: dark) { #source p .t { border-color: #1e1e1e; } } + +#source p .t:hover { background: #f2f2f2; } + +@media (prefers-color-scheme: dark) { #source p .t:hover { background: #282828; } } + +#source p .t:hover ~ .r .annotate.long { display: block; } + +#source p .t .com { color: #008000; font-style: italic; line-height: 
1px; } + +@media (prefers-color-scheme: dark) { #source p .t .com { color: #6a9955; } } + +#source p .t .key { font-weight: bold; line-height: 1px; } + +#source p .t .str { color: #0451a5; } + +@media (prefers-color-scheme: dark) { #source p .t .str { color: #9cdcfe; } } + +#source p.mis .t { border-left: 0.2em solid #ff0000; } + +#source p.mis.show_mis .t { background: #fdd; } + +@media (prefers-color-scheme: dark) { #source p.mis.show_mis .t { background: #4b1818; } } + +#source p.mis.show_mis .t:hover { background: #f2d2d2; } + +@media (prefers-color-scheme: dark) { #source p.mis.show_mis .t:hover { background: #532323; } } + +#source p.run .t { border-left: 0.2em solid #00dd00; } + +#source p.run.show_run .t { background: #dfd; } + +@media (prefers-color-scheme: dark) { #source p.run.show_run .t { background: #373d29; } } + +#source p.run.show_run .t:hover { background: #d2f2d2; } + +@media (prefers-color-scheme: dark) { #source p.run.show_run .t:hover { background: #404633; } } + +#source p.exc .t { border-left: 0.2em solid #808080; } + +#source p.exc.show_exc .t { background: #eee; } + +@media (prefers-color-scheme: dark) { #source p.exc.show_exc .t { background: #333; } } + +#source p.exc.show_exc .t:hover { background: #e2e2e2; } + +@media (prefers-color-scheme: dark) { #source p.exc.show_exc .t:hover { background: #3c3c3c; } } + +#source p.par .t { border-left: 0.2em solid #bbbb00; } + +#source p.par.show_par .t { background: #ffa; } + +@media (prefers-color-scheme: dark) { #source p.par.show_par .t { background: #650; } } + +#source p.par.show_par .t:hover { background: #f2f2a2; } + +@media (prefers-color-scheme: dark) { #source p.par.show_par .t:hover { background: #6d5d0c; } } + +#source p .r { position: absolute; top: 0; right: 2.5em; font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; } + +#source p .annotate { font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; color: #666; padding-right: .5em; } + +@media (prefers-color-scheme: dark) { #source p .annotate { color: #ddd; } } + +#source p .annotate.short:hover ~ .long { display: block; } + +#source p .annotate.long { width: 30em; right: 2.5em; } + +#source p input { display: none; } + +#source p input ~ .r label.ctx { cursor: pointer; border-radius: .25em; } + +#source p input ~ .r label.ctx::before { content: "▶ "; } + +#source p input ~ .r label.ctx:hover { background: #e8f4ff; color: #666; } + +@media (prefers-color-scheme: dark) { #source p input ~ .r label.ctx:hover { background: #0f3a42; } } + +@media (prefers-color-scheme: dark) { #source p input ~ .r label.ctx:hover { color: #aaa; } } + +#source p input:checked ~ .r label.ctx { background: #d0e8ff; color: #666; border-radius: .75em .75em 0 0; padding: 0 .5em; margin: -.25em 0; } + +@media (prefers-color-scheme: dark) { #source p input:checked ~ .r label.ctx { background: #056; } } + +@media (prefers-color-scheme: dark) { #source p input:checked ~ .r label.ctx { color: #aaa; } } + +#source p input:checked ~ .r label.ctx::before { content: "▼ "; } + +#source p input:checked ~ .ctxs { padding: .25em .5em; overflow-y: scroll; max-height: 10.5em; } + +#source p label.ctx { color: #999; display: inline-block; padding: 0 .5em; font-size: .8333em; } + +@media (prefers-color-scheme: dark) { #source p label.ctx { color: #777; } } + +#source p .ctxs { display: block; max-height: 0; overflow-y: hidden; transition: all .2s; padding: 0 .5em; font-family: 
-apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; white-space: nowrap; background: #d0e8ff; border-radius: .25em; margin-right: 1.75em; text-align: right; } + +@media (prefers-color-scheme: dark) { #source p .ctxs { background: #056; } } + +#index { font-family: SFMono-Regular, Menlo, Monaco, Consolas, monospace; font-size: 0.875em; } + +#index table.index { margin-left: -.5em; } + +#index td, #index th { text-align: right; width: 5em; padding: .25em .5em; border-bottom: 1px solid #eee; } + +@media (prefers-color-scheme: dark) { #index td, #index th { border-color: #333; } } + +#index td.name, #index th.name { text-align: left; width: auto; } + +#index th { font-style: italic; color: #333; cursor: pointer; } + +@media (prefers-color-scheme: dark) { #index th { color: #ddd; } } + +#index th:hover { background: #eee; } + +@media (prefers-color-scheme: dark) { #index th:hover { background: #333; } } + +#index th[aria-sort="ascending"], #index th[aria-sort="descending"] { white-space: nowrap; background: #eee; padding-left: .5em; } + +@media (prefers-color-scheme: dark) { #index th[aria-sort="ascending"], #index th[aria-sort="descending"] { background: #333; } } + +#index th[aria-sort="ascending"]::after { font-family: sans-serif; content: " ↑"; } + +#index th[aria-sort="descending"]::after { font-family: sans-serif; content: " ↓"; } + +#index td.name a { text-decoration: none; color: inherit; } + +#index tr.total td, #index tr.total_dynamic td { font-weight: bold; border-top: 1px solid #ccc; border-bottom: none; } + +#index tr.file:hover { background: #eee; } + +@media (prefers-color-scheme: dark) { #index tr.file:hover { background: #333; } } + +#index tr.file:hover td.name { text-decoration: underline; color: inherit; } + +#scroll_marker { position: fixed; z-index: 3; right: 0; top: 0; width: 16px; height: 100%; background: #fff; border-left: 1px solid #eee; will-change: transform; } + +@media (prefers-color-scheme: dark) { #scroll_marker { background: #1e1e1e; } } + +@media (prefers-color-scheme: dark) { #scroll_marker { border-color: #333; } } + +#scroll_marker .marker { background: #ccc; position: absolute; min-height: 3px; width: 100%; } + +@media (prefers-color-scheme: dark) { #scroll_marker .marker { background: #444; } } diff --git a/venv/lib/python3.10/site-packages/coverage/htmlfiles/style.scss b/venv/lib/python3.10/site-packages/coverage/htmlfiles/style.scss new file mode 100644 index 0000000..b146515 --- /dev/null +++ b/venv/lib/python3.10/site-packages/coverage/htmlfiles/style.scss @@ -0,0 +1,716 @@ +/* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */ +/* For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt */ + +// CSS styles for coverage.py HTML reports. + +// When you edit this file, you need to run "make css" to get the CSS file +// generated, and then check in both the .scss and the .css files. + +// When working on the file, this command is useful: +// sass --watch --style=compact --sourcemap=none --no-cache coverage/htmlfiles/style.scss:htmlcov/style.css +// +// OR you can process sass purely in python with `pip install pysass`, then: +// pysassc --style=compact coverage/htmlfiles/style.scss coverage/htmlfiles/style.css + +// Ignore this comment, it's for the CSS output file: +/* Don't edit this .css file. Edit the .scss file instead! 
*/ + +// Dimensions +$left-gutter: 3.5rem; + +// +// Declare colors and variables +// + +$font-normal: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; +$font-code: SFMono-Regular, Menlo, Monaco, Consolas, monospace; + +$off-button-lighten: 50%; +$hover-dark-amt: 95%; + +$focus-color: #007acc; + +$mis-color: #ff0000; +$run-color: #00dd00; +$exc-color: #808080; +$par-color: #bbbb00; + +$light-bg: #fff; +$light-fg: #000; +$light-gray1: #f8f8f8; +$light-gray2: #eee; +$light-gray3: #ccc; +$light-gray4: #999; +$light-gray5: #666; +$light-gray6: #333; +$light-pln-bg: $light-bg; +$light-mis-bg: #fdd; +$light-run-bg: #dfd; +$light-exc-bg: $light-gray2; +$light-par-bg: #ffa; +$light-token-com: #008000; +$light-token-str: #0451a5; +$light-context-bg-color: #d0e8ff; + +$dark-bg: #1e1e1e; +$dark-fg: #eee; +$dark-gray1: #222; +$dark-gray2: #333; +$dark-gray3: #444; +$dark-gray4: #777; +$dark-gray5: #aaa; +$dark-gray6: #ddd; +$dark-pln-bg: $dark-bg; +$dark-mis-bg: #4b1818; +$dark-run-bg: #373d29; +$dark-exc-bg: $dark-gray2; +$dark-par-bg: #650; +$dark-token-com: #6a9955; +$dark-token-str: #9cdcfe; +$dark-context-bg-color: #056; + +// +// Mixins and utilities +// + +@mixin background-dark($color) { + @media (prefers-color-scheme: dark) { + background: $color; + } +} +@mixin color-dark($color) { + @media (prefers-color-scheme: dark) { + color: $color; + } +} +@mixin border-color-dark($color) { + @media (prefers-color-scheme: dark) { + border-color: $color; + } +} + +// Add visual outline to navigable elements on focus improve accessibility. +@mixin focus-border { + &:active, &:focus { + outline: 2px dashed $focus-color; + } +} + +// Page-wide styles +html, body, h1, h2, h3, p, table, td, th { + margin: 0; + padding: 0; + border: 0; + font-weight: inherit; + font-style: inherit; + font-size: 100%; + font-family: inherit; + vertical-align: baseline; +} + +// Set baseline grid to 16 pt. 
+body { + font-family: $font-normal; + font-size: 1em; + background: $light-bg; + color: $light-fg; + @include background-dark($dark-bg); + @include color-dark($dark-fg); +} + +html>body { + font-size: 16px; +} + +a { + @include focus-border; +} + +p { + font-size: .875em; + line-height: 1.4em; +} + +table { + border-collapse: collapse; +} +td { + vertical-align: top; +} +table tr.hidden { + display: none !important; +} + +p#no_rows { + display: none; + font-size: 1.2em; +} + +a.nav { + text-decoration: none; + color: inherit; + + &:hover { + text-decoration: underline; + color: inherit; + } +} + +.hidden { + display: none; +} + +// Page structure +header { + background: $light-gray1; + @include background-dark(black); + width: 100%; + z-index: 2; + border-bottom: 1px solid $light-gray3; + @include border-color-dark($dark-gray2); + + .content { + padding: 1rem $left-gutter; + } + + h2 { + margin-top: .5em; + font-size: 1em; + } + + p.text { + margin: .5em 0 -.5em; + color: $light-gray5; + @include color-dark($dark-gray5); + font-style: italic; + } + + &.sticky { + position: fixed; + left: 0; + right: 0; + height: 2.5em; + + .text { + display: none; + } + + h1, h2 { + font-size: 1em; + margin-top: 0; + display: inline-block; + } + + .content { + padding: .5rem $left-gutter; + p { + font-size: 1em; + } + } + + & ~ #source { + padding-top: 6.5em; + } + } +} + +main { + position: relative; + z-index: 1; +} + +footer { + margin: 1rem $left-gutter; + + .content { + padding: 0; + color: $light-gray5; + @include color-dark($dark-gray5); + font-style: italic; + } +} + +#index { + margin: 1rem 0 0 $left-gutter; +} + +// Header styles + +h1 { + font-size: 1.25em; + display: inline-block; +} + +#filter_container { + float: right; + margin: 0 2em 0 0; + + input { + width: 10em; + padding: 0.2em 0.5em; + border: 2px solid $light-gray3; + background: $light-bg; + color: $light-fg; + @include border-color-dark($dark-gray3); + @include background-dark($dark-bg); + @include color-dark($dark-fg); + &:focus { + border-color: $focus-color; + } + } +} + +header button { + font-family: inherit; + font-size: inherit; + border: 1px solid; + border-radius: .2em; + color: inherit; + padding: .1em .5em; + margin: 1px calc(.1em + 1px); + cursor: pointer; + border-color: $light-gray3; + @include border-color-dark($dark-gray3); + @include focus-border; + + &.run { + background: mix($light-run-bg, $light-bg, $off-button-lighten); + @include background-dark($dark-run-bg); + &.show_run { + background: $light-run-bg; + @include background-dark($dark-run-bg); + border: 2px solid $run-color; + margin: 0 .1em; + } + } + &.mis { + background: mix($light-mis-bg, $light-bg, $off-button-lighten); + @include background-dark($dark-mis-bg); + &.show_mis { + background: $light-mis-bg; + @include background-dark($dark-mis-bg); + border: 2px solid $mis-color; + margin: 0 .1em; + } + } + &.exc { + background: mix($light-exc-bg, $light-bg, $off-button-lighten); + @include background-dark($dark-exc-bg); + &.show_exc { + background: $light-exc-bg; + @include background-dark($dark-exc-bg); + border: 2px solid $exc-color; + margin: 0 .1em; + } + } + &.par { + background: mix($light-par-bg, $light-bg, $off-button-lighten); + @include background-dark($dark-par-bg); + &.show_par { + background: $light-par-bg; + @include background-dark($dark-par-bg); + border: 2px solid $par-color; + margin: 0 .1em; + } + } +} + +// Yellow post-it things. 
+%popup { + display: none; + position: absolute; + z-index: 999; + background: #ffffcc; + border: 1px solid #888; + border-radius: .2em; + color: #333; + padding: .25em .5em; +} + +// Yellow post-it's in the text listings. +%in-text-popup { + @extend %popup; + white-space: normal; + float: right; + top: 1.75em; + right: 1em; + height: auto; +} + +// Help panel +#help_panel_wrapper { + float: right; + position: relative; +} + +#keyboard_icon { + margin: 5px; +} + +#help_panel_state { + display: none; +} + +#help_panel { + @extend %popup; + top: 25px; + right: 0; + padding: .75em; + border: 1px solid #883; + + color: #333; + + .keyhelp p { + margin-top: .75em; + } + + .legend { + font-style: italic; + margin-bottom: 1em; + } + + .indexfile & { + width: 25em; + } + + .pyfile & { + width: 18em; + } + + #help_panel_state:checked ~ & { + display: block; + } +} + +kbd { + border: 1px solid black; + border-color: #888 #333 #333 #888; + padding: .1em .35em; + font-family: $font-code; + font-weight: bold; + background: #eee; + border-radius: 3px; +} + +// Source file styles + +// The slim bar at the left edge of the source lines, colored by coverage. +$border-indicator-width: .2em; + +#source { + padding: 1em 0 1em $left-gutter; + font-family: $font-code; + + p { + // position relative makes position:absolute pop-ups appear in the right place. + position: relative; + white-space: pre; + + * { + box-sizing: border-box; + } + + .n { + float: left; + text-align: right; + width: $left-gutter; + box-sizing: border-box; + margin-left: -$left-gutter; + padding-right: 1em; + color: $light-gray4; + @include color-dark($dark-gray4); + + &.highlight { + background: #ffdd00; + } + + a { + // These two lines make anchors to the line scroll the line to be + // visible beneath the fixed-position header. 
+ margin-top: -4em; + padding-top: 4em; + + text-decoration: none; + color: $light-gray4; + @include color-dark($dark-gray4); + &:hover { + text-decoration: underline; + color: $light-gray4; + @include color-dark($dark-gray4); + } + } + } + + .t { + display: inline-block; + width: 100%; + box-sizing: border-box; + margin-left: -.5em; + padding-left: .5em - $border-indicator-width; + border-left: $border-indicator-width solid $light-bg; + @include border-color-dark($dark-bg); + + &:hover { + background: mix($light-pln-bg, $light-fg, $hover-dark-amt); + @include background-dark(mix($dark-pln-bg, $dark-fg, $hover-dark-amt)); + + & ~ .r .annotate.long { + display: block; + } + } + + // Syntax coloring + .com { + color: $light-token-com; + @include color-dark($dark-token-com); + font-style: italic; + line-height: 1px; + } + .key { + font-weight: bold; + line-height: 1px; + } + .str { + color: $light-token-str; + @include color-dark($dark-token-str); + } + } + + &.mis { + .t { + border-left: $border-indicator-width solid $mis-color; + } + + &.show_mis .t { + background: $light-mis-bg; + @include background-dark($dark-mis-bg); + + &:hover { + background: mix($light-mis-bg, $light-fg, $hover-dark-amt); + @include background-dark(mix($dark-mis-bg, $dark-fg, $hover-dark-amt)); + } + } + } + + &.run { + .t { + border-left: $border-indicator-width solid $run-color; + } + + &.show_run .t { + background: $light-run-bg; + @include background-dark($dark-run-bg); + + &:hover { + background: mix($light-run-bg, $light-fg, $hover-dark-amt); + @include background-dark(mix($dark-run-bg, $dark-fg, $hover-dark-amt)); + } + } + } + + &.exc { + .t { + border-left: $border-indicator-width solid $exc-color; + } + + &.show_exc .t { + background: $light-exc-bg; + @include background-dark($dark-exc-bg); + + &:hover { + background: mix($light-exc-bg, $light-fg, $hover-dark-amt); + @include background-dark(mix($dark-exc-bg, $dark-fg, $hover-dark-amt)); + } + } + } + + &.par { + .t { + border-left: $border-indicator-width solid $par-color; + } + + &.show_par .t { + background: $light-par-bg; + @include background-dark($dark-par-bg); + + &:hover { + background: mix($light-par-bg, $light-fg, $hover-dark-amt); + @include background-dark(mix($dark-par-bg, $dark-fg, $hover-dark-amt)); + } + } + + } + + .r { + position: absolute; + top: 0; + right: 2.5em; + font-family: $font-normal; + } + + .annotate { + font-family: $font-normal; + color: $light-gray5; + @include color-dark($dark-gray6); + padding-right: .5em; + + &.short:hover ~ .long { + display: block; + } + + &.long { + @extend %in-text-popup; + width: 30em; + right: 2.5em; + } + } + + input { + display: none; + + & ~ .r label.ctx { + cursor: pointer; + border-radius: .25em; + &::before { + content: "▶ "; + } + &:hover { + background: mix($light-context-bg-color, $light-bg, $off-button-lighten); + @include background-dark(mix($dark-context-bg-color, $dark-bg, $off-button-lighten)); + color: $light-gray5; + @include color-dark($dark-gray5); + } + } + + &:checked ~ .r label.ctx { + background: $light-context-bg-color; + @include background-dark($dark-context-bg-color); + color: $light-gray5; + @include color-dark($dark-gray5); + border-radius: .75em .75em 0 0; + padding: 0 .5em; + margin: -.25em 0; + &::before { + content: "▼ "; + } + } + + &:checked ~ .ctxs { + padding: .25em .5em; + overflow-y: scroll; + max-height: 10.5em; + } + } + + label.ctx { + color: $light-gray4; + @include color-dark($dark-gray4); + display: inline-block; + padding: 0 .5em; + font-size: .8333em; // 
10/12 + } + + .ctxs { + display: block; + max-height: 0; + overflow-y: hidden; + transition: all .2s; + padding: 0 .5em; + font-family: $font-normal; + white-space: nowrap; + background: $light-context-bg-color; + @include background-dark($dark-context-bg-color); + border-radius: .25em; + margin-right: 1.75em; + text-align: right; + } + } +} + + +// index styles +#index { + font-family: $font-code; + font-size: 0.875em; + + table.index { + margin-left: -.5em; + } + td, th { + text-align: right; + width: 5em; + padding: .25em .5em; + border-bottom: 1px solid $light-gray2; + @include border-color-dark($dark-gray2); + &.name { + text-align: left; + width: auto; + } + } + th { + font-style: italic; + color: $light-gray6; + @include color-dark($dark-gray6); + cursor: pointer; + &:hover { + background: $light-gray2; + @include background-dark($dark-gray2); + } + &[aria-sort="ascending"], &[aria-sort="descending"] { + white-space: nowrap; + background: $light-gray2; + @include background-dark($dark-gray2); + padding-left: .5em; + } + &[aria-sort="ascending"]::after { + font-family: sans-serif; + content: " ↑"; + } + &[aria-sort="descending"]::after { + font-family: sans-serif; + content: " ↓"; + } + } + td.name a { + text-decoration: none; + color: inherit; + } + + tr.total td, + tr.total_dynamic td { + font-weight: bold; + border-top: 1px solid #ccc; + border-bottom: none; + } + tr.file:hover { + background: $light-gray2; + @include background-dark($dark-gray2); + td.name { + text-decoration: underline; + color: inherit; + } + } +} + +// scroll marker styles +#scroll_marker { + position: fixed; + z-index: 3; + right: 0; + top: 0; + width: 16px; + height: 100%; + background: $light-bg; + border-left: 1px solid $light-gray2; + @include background-dark($dark-bg); + @include border-color-dark($dark-gray2); + will-change: transform; // for faster scrolling of fixed element in Chrome + + .marker { + background: $light-gray3; + @include background-dark($dark-gray3); + position: absolute; + min-height: 3px; + width: 100%; + } +} diff --git a/venv/lib/python3.10/site-packages/coverage/inorout.py b/venv/lib/python3.10/site-packages/coverage/inorout.py new file mode 100644 index 0000000..d2dbdcd --- /dev/null +++ b/venv/lib/python3.10/site-packages/coverage/inorout.py @@ -0,0 +1,596 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""Determining whether files are being measured/reported or not.""" + +from __future__ import annotations + +import importlib.util +import inspect +import itertools +import os +import platform +import re +import sys +import sysconfig +import traceback + +from types import FrameType, ModuleType +from typing import ( + cast, Any, Iterable, List, Optional, Set, Tuple, Type, TYPE_CHECKING, +) + +from coverage import env +from coverage.disposition import FileDisposition, disposition_init +from coverage.exceptions import CoverageException, PluginError +from coverage.files import TreeMatcher, GlobMatcher, ModuleMatcher +from coverage.files import prep_patterns, find_python_files, canonical_filename +from coverage.misc import sys_modules_saved +from coverage.python import source_for_file, source_for_morf +from coverage.types import TFileDisposition, TMorf, TWarnFn, TDebugCtl + +if TYPE_CHECKING: + from coverage.config import CoverageConfig + from coverage.plugin_support import Plugins + + +# Pypy has some unusual stuff in the "stdlib". 
Consider those locations +# when deciding where the stdlib is. These modules are not used for anything, +# they are modules importable from the pypy lib directories, so that we can +# find those directories. +modules_we_happen_to_have: List[ModuleType] = [ + inspect, itertools, os, platform, re, sysconfig, traceback, +] + +if env.PYPY: + try: + import _structseq + modules_we_happen_to_have.append(_structseq) + except ImportError: + pass + + try: + import _pypy_irc_topic + modules_we_happen_to_have.append(_pypy_irc_topic) + except ImportError: + pass + + +def canonical_path(morf: TMorf, directory: bool = False) -> str: + """Return the canonical path of the module or file `morf`. + + If the module is a package, then return its directory. If it is a + module, then return its file, unless `directory` is True, in which + case return its enclosing directory. + + """ + morf_path = canonical_filename(source_for_morf(morf)) + if morf_path.endswith("__init__.py") or directory: + morf_path = os.path.split(morf_path)[0] + return morf_path + + +def name_for_module(filename: str, frame: Optional[FrameType]) -> str: + """Get the name of the module for a filename and frame. + + For configurability's sake, we allow __main__ modules to be matched by + their importable name. + + If loaded via runpy (aka -m), we can usually recover the "original" + full dotted module name, otherwise, we resort to interpreting the + file name to get the module's name. In the case that the module name + can't be determined, None is returned. + + """ + module_globals = frame.f_globals if frame is not None else {} + dunder_name: str = module_globals.get("__name__", None) + + if isinstance(dunder_name, str) and dunder_name != "__main__": + # This is the usual case: an imported module. + return dunder_name + + loader = module_globals.get("__loader__", None) + for attrname in ("fullname", "name"): # attribute renamed in py3.2 + if hasattr(loader, attrname): + fullname = getattr(loader, attrname) + else: + continue + + if isinstance(fullname, str) and fullname != "__main__": + # Module loaded via: runpy -m + return fullname + + # Script as first argument to Python command line. + inspectedname = inspect.getmodulename(filename) + if inspectedname is not None: + return inspectedname + else: + return dunder_name + + +def module_is_namespace(mod: ModuleType) -> bool: + """Is the module object `mod` a PEP420 namespace module?""" + return hasattr(mod, "__path__") and getattr(mod, "__file__", None) is None + + +def module_has_file(mod: ModuleType) -> bool: + """Does the module object `mod` have an existing __file__ ?""" + mod__file__ = getattr(mod, "__file__", None) + if mod__file__ is None: + return False + return os.path.exists(mod__file__) + + +def file_and_path_for_module(modulename: str) -> Tuple[Optional[str], List[str]]: + """Find the file and search path for `modulename`. + + Returns: + filename: The filename of the module, or None. + path: A list (possibly empty) of directories to find submodules in. + + """ + filename = None + path = [] + try: + spec = importlib.util.find_spec(modulename) + except Exception: + pass + else: + if spec is not None: + filename = spec.origin + path = list(spec.submodule_search_locations or ()) + return filename, path + + +def add_stdlib_paths(paths: Set[str]) -> None: + """Add paths where the stdlib can be found to the set `paths`.""" + # Look at where some standard modules are located. That's the + # indication for "installed with the interpreter". 
In some + # environments (virtualenv, for example), these modules may be + # spread across a few locations. Look at all the candidate modules + # we've imported, and take all the different ones. + for m in modules_we_happen_to_have: + if hasattr(m, "__file__"): + paths.add(canonical_path(m, directory=True)) + + +def add_third_party_paths(paths: Set[str]) -> None: + """Add locations for third-party packages to the set `paths`.""" + # Get the paths that sysconfig knows about. + scheme_names = set(sysconfig.get_scheme_names()) + + for scheme in scheme_names: + # https://foss.heptapod.net/pypy/pypy/-/issues/3433 + better_scheme = "pypy_posix" if scheme == "pypy" else scheme + if os.name in better_scheme.split("_"): + config_paths = sysconfig.get_paths(scheme) + for path_name in ["platlib", "purelib", "scripts"]: + paths.add(config_paths[path_name]) + + +def add_coverage_paths(paths: Set[str]) -> None: + """Add paths where coverage.py code can be found to the set `paths`.""" + cover_path = canonical_path(__file__, directory=True) + paths.add(cover_path) + if env.TESTING: + # Don't include our own test code. + paths.add(os.path.join(cover_path, "tests")) + + +class InOrOut: + """Machinery for determining what files to measure.""" + + def __init__( + self, + config: CoverageConfig, + warn: TWarnFn, + debug: Optional[TDebugCtl], + include_namespace_packages: bool, + ) -> None: + self.warn = warn + self.debug = debug + self.include_namespace_packages = include_namespace_packages + + self.source: List[str] = [] + self.source_pkgs: List[str] = [] + self.source_pkgs.extend(config.source_pkgs) + for src in config.source or []: + if os.path.isdir(src): + self.source.append(canonical_filename(src)) + else: + self.source_pkgs.append(src) + self.source_pkgs_unmatched = self.source_pkgs[:] + + self.include = prep_patterns(config.run_include) + self.omit = prep_patterns(config.run_omit) + + # The directories for files considered "installed with the interpreter". + self.pylib_paths: Set[str] = set() + if not config.cover_pylib: + add_stdlib_paths(self.pylib_paths) + + # To avoid tracing the coverage.py code itself, we skip anything + # located where we are. + self.cover_paths: Set[str] = set() + add_coverage_paths(self.cover_paths) + + # Find where third-party packages are installed. + self.third_paths: Set[str] = set() + add_third_party_paths(self.third_paths) + + def _debug(msg: str) -> None: + if self.debug: + self.debug.write(msg) + + # The matchers for should_trace. 
+ + # Generally useful information + _debug("sys.path:" + "".join(f"\n {p}" for p in sys.path)) + + # Create the matchers we need for should_trace + self.source_match = None + self.source_pkgs_match = None + self.pylib_match = None + self.include_match = self.omit_match = None + + if self.source or self.source_pkgs: + against = [] + if self.source: + self.source_match = TreeMatcher(self.source, "source") + against.append(f"trees {self.source_match!r}") + if self.source_pkgs: + self.source_pkgs_match = ModuleMatcher(self.source_pkgs, "source_pkgs") + against.append(f"modules {self.source_pkgs_match!r}") + _debug("Source matching against " + " and ".join(against)) + else: + if self.pylib_paths: + self.pylib_match = TreeMatcher(self.pylib_paths, "pylib") + _debug(f"Python stdlib matching: {self.pylib_match!r}") + if self.include: + self.include_match = GlobMatcher(self.include, "include") + _debug(f"Include matching: {self.include_match!r}") + if self.omit: + self.omit_match = GlobMatcher(self.omit, "omit") + _debug(f"Omit matching: {self.omit_match!r}") + + self.cover_match = TreeMatcher(self.cover_paths, "coverage") + _debug(f"Coverage code matching: {self.cover_match!r}") + + self.third_match = TreeMatcher(self.third_paths, "third") + _debug(f"Third-party lib matching: {self.third_match!r}") + + # Check if the source we want to measure has been installed as a + # third-party package. + # Is the source inside a third-party area? + self.source_in_third_paths = set() + with sys_modules_saved(): + for pkg in self.source_pkgs: + try: + modfile, path = file_and_path_for_module(pkg) + _debug(f"Imported source package {pkg!r} as {modfile!r}") + except CoverageException as exc: + _debug(f"Couldn't import source package {pkg!r}: {exc}") + continue + if modfile: + if self.third_match.match(modfile): + _debug( + f"Source in third-party: source_pkg {pkg!r} at {modfile!r}" + ) + self.source_in_third_paths.add(canonical_path(source_for_file(modfile))) + else: + for pathdir in path: + if self.third_match.match(pathdir): + _debug( + f"Source in third-party: {pkg!r} path directory at {pathdir!r}" + ) + self.source_in_third_paths.add(pathdir) + + for src in self.source: + if self.third_match.match(src): + _debug(f"Source in third-party: source directory {src!r}") + self.source_in_third_paths.add(src) + self.source_in_third_match = TreeMatcher(self.source_in_third_paths, "source_in_third") + _debug(f"Source in third-party matching: {self.source_in_third_match}") + + self.plugins: Plugins + self.disp_class: Type[TFileDisposition] = FileDisposition + + def should_trace(self, filename: str, frame: Optional[FrameType] = None) -> TFileDisposition: + """Decide whether to trace execution in `filename`, with a reason. + + This function is called from the trace function. As each new file name + is encountered, this function determines whether it is traced or not. + + Returns a FileDisposition object. + + """ + original_filename = filename + disp = disposition_init(self.disp_class, filename) + + def nope(disp: TFileDisposition, reason: str) -> TFileDisposition: + """Simple helper to make it easy to return NO.""" + disp.trace = False + disp.reason = reason + return disp + + if original_filename.startswith("<"): + return nope(disp, "original file name is not real") + + if frame is not None: + # Compiled Python files have two file names: frame.f_code.co_filename is + # the file name at the time the .pyc was compiled. The second name is + # __file__, which is where the .pyc was actually loaded from. 
Since
+            # .pyc files can be moved after compilation (for example, by being
+            # installed), we look for __file__ in the frame and prefer it to the
+            # co_filename value.
+            dunder_file = frame.f_globals and frame.f_globals.get("__file__")
+            if dunder_file:
+                filename = source_for_file(dunder_file)
+                if original_filename and not original_filename.startswith("<"):
+                    orig = os.path.basename(original_filename)
+                    if orig != os.path.basename(filename):
+                        # Files shouldn't be renamed when moved. This happens when
+                        # exec'ing code. If it seems like something is wrong with
+                        # the frame's file name, then just use the original.
+                        filename = original_filename
+
+        if not filename:
+            # Empty string is pretty useless.
+            return nope(disp, "empty string isn't a file name")
+
+        if filename.startswith("memory:"):
+            return nope(disp, "memory isn't traceable")
+
+        if filename.startswith("<"):
+            # Lots of non-file execution is represented with artificial
+            # file names like "<string>", "<stdin>", or
+            # "<doctest ...>". Don't ever trace these executions, since we
+            # can't do anything with the data later anyway.
+            return nope(disp, "file name is not real")
+
+        canonical = canonical_filename(filename)
+        disp.canonical_filename = canonical
+
+        # Try the plugins, see if they have an opinion about the file.
+        plugin = None
+        for plugin in self.plugins.file_tracers:
+            if not plugin._coverage_enabled:
+                continue
+
+            try:
+                file_tracer = plugin.file_tracer(canonical)
+                if file_tracer is not None:
+                    file_tracer._coverage_plugin = plugin
+                    disp.trace = True
+                    disp.file_tracer = file_tracer
+                    if file_tracer.has_dynamic_source_filename():
+                        disp.has_dynamic_filename = True
+                    else:
+                        disp.source_filename = canonical_filename(
+                            file_tracer.source_filename()
+                        )
+                    break
+            except Exception:
+                plugin_name = plugin._coverage_plugin_name
+                tb = traceback.format_exc()
+                self.warn(f"Disabling plug-in {plugin_name!r} due to an exception:\n{tb}")
+                plugin._coverage_enabled = False
+                continue
+        else:
+            # No plugin wanted it: it's Python.
+            disp.trace = True
+            disp.source_filename = canonical
+
+        if not disp.has_dynamic_filename:
+            if not disp.source_filename:
+                raise PluginError(
+                    f"Plugin {plugin!r} didn't set source_filename for '{disp.original_filename}'"
+                )
+            reason = self.check_include_omit_etc(disp.source_filename, frame)
+            if reason:
+                nope(disp, reason)
+
+        return disp
+
+    def check_include_omit_etc(self, filename: str, frame: Optional[FrameType]) -> Optional[str]:
+        """Check a file name against the include, omit, etc, rules.
+
+        Returns a string or None. String means, don't trace, and is the reason
+        why. None means no reason found to not trace.
+
+        """
+        modulename = name_for_module(filename, frame)
+
+        # If the user specified source or include, then that's authoritative
+        # about the outer bound of what to measure and we don't have to apply
+        # any canned exclusions. If they didn't, then we have to exclude the
+        # stdlib and coverage.py directories.
+ if self.source_match or self.source_pkgs_match: + extra = "" + ok = False + if self.source_pkgs_match: + if self.source_pkgs_match.match(modulename): + ok = True + if modulename in self.source_pkgs_unmatched: + self.source_pkgs_unmatched.remove(modulename) + else: + extra = f"module {modulename!r} " + if not ok and self.source_match: + if self.source_match.match(filename): + ok = True + if not ok: + return extra + "falls outside the --source spec" + if self.third_match.match(filename) and not self.source_in_third_match.match(filename): + return "inside --source, but is third-party" + elif self.include_match: + if not self.include_match.match(filename): + return "falls outside the --include trees" + else: + # We exclude the coverage.py code itself, since a little of it + # will be measured otherwise. + if self.cover_match.match(filename): + return "is part of coverage.py" + + # If we aren't supposed to trace installed code, then check if this + # is near the Python standard library and skip it if so. + if self.pylib_match and self.pylib_match.match(filename): + return "is in the stdlib" + + # Exclude anything in the third-party installation areas. + if self.third_match.match(filename): + return "is a third-party module" + + # Check the file against the omit pattern. + if self.omit_match and self.omit_match.match(filename): + return "is inside an --omit pattern" + + # No point tracing a file we can't later write to SQLite. + try: + filename.encode("utf-8") + except UnicodeEncodeError: + return "non-encodable filename" + + # No reason found to skip this file. + return None + + def warn_conflicting_settings(self) -> None: + """Warn if there are settings that conflict.""" + if self.include: + if self.source or self.source_pkgs: + self.warn("--include is ignored because --source is set", slug="include-ignored") + + def warn_already_imported_files(self) -> None: + """Warn if files have already been imported that we will be measuring.""" + if self.include or self.source or self.source_pkgs: + warned = set() + for mod in list(sys.modules.values()): + filename = getattr(mod, "__file__", None) + if filename is None: + continue + if filename in warned: + continue + + if len(getattr(mod, "__path__", ())) > 1: + # A namespace package, which confuses this code, so ignore it. + continue + + disp = self.should_trace(filename) + if disp.has_dynamic_filename: + # A plugin with dynamic filenames: the Python file + # shouldn't cause a warning, since it won't be the subject + # of tracing anyway. + continue + if disp.trace: + msg = f"Already imported a file that will be measured: {filename}" + self.warn(msg, slug="already-imported") + warned.add(filename) + elif self.debug and self.debug.should("trace"): + self.debug.write( + "Didn't trace already imported file {!r}: {}".format( + disp.original_filename, disp.reason + ) + ) + + def warn_unimported_source(self) -> None: + """Warn about source packages that were of interest, but never traced.""" + for pkg in self.source_pkgs_unmatched: + self._warn_about_unmeasured_code(pkg) + + def _warn_about_unmeasured_code(self, pkg: str) -> None: + """Warn about a package or module that we never traced. + + `pkg` is a string, the name of the package or module. + + """ + mod = sys.modules.get(pkg) + if mod is None: + self.warn(f"Module {pkg} was never imported.", slug="module-not-imported") + return + + if module_is_namespace(mod): + # A namespace package. It's OK for this not to have been traced, + # since there is no code directly in it. 
+ return + + if not module_has_file(mod): + self.warn(f"Module {pkg} has no Python source.", slug="module-not-python") + return + + # The module was in sys.modules, and seems like a module with code, but + # we never measured it. I guess that means it was imported before + # coverage even started. + msg = f"Module {pkg} was previously imported, but not measured" + self.warn(msg, slug="module-not-measured") + + def find_possibly_unexecuted_files(self) -> Iterable[Tuple[str, Optional[str]]]: + """Find files in the areas of interest that might be untraced. + + Yields pairs: file path, and responsible plug-in name. + """ + for pkg in self.source_pkgs: + if (pkg not in sys.modules or + not module_has_file(sys.modules[pkg])): + continue + pkg_file = source_for_file(cast(str, sys.modules[pkg].__file__)) + yield from self._find_executable_files(canonical_path(pkg_file)) + + for src in self.source: + yield from self._find_executable_files(src) + + def _find_plugin_files(self, src_dir: str) -> Iterable[Tuple[str, str]]: + """Get executable files from the plugins.""" + for plugin in self.plugins.file_tracers: + for x_file in plugin.find_executable_files(src_dir): + yield x_file, plugin._coverage_plugin_name + + def _find_executable_files(self, src_dir: str) -> Iterable[Tuple[str, Optional[str]]]: + """Find executable files in `src_dir`. + + Search for files in `src_dir` that can be executed because they + are probably importable. Don't include ones that have been omitted + by the configuration. + + Yield the file path, and the plugin name that handles the file. + + """ + py_files = ( + (py_file, None) for py_file in + find_python_files(src_dir, self.include_namespace_packages) + ) + plugin_files = self._find_plugin_files(src_dir) + + for file_path, plugin_name in itertools.chain(py_files, plugin_files): + file_path = canonical_filename(file_path) + if self.omit_match and self.omit_match.match(file_path): + # Turns out this file was omitted, so don't pull it back + # in as un-executed. + continue + yield file_path, plugin_name + + def sys_info(self) -> Iterable[Tuple[str, Any]]: + """Our information for Coverage.sys_info. + + Returns a list of (key, value) pairs. 
+ """ + info = [ + ("coverage_paths", self.cover_paths), + ("stdlib_paths", self.pylib_paths), + ("third_party_paths", self.third_paths), + ("source_in_third_party_paths", self.source_in_third_paths), + ] + + matcher_names = [ + "source_match", "source_pkgs_match", + "include_match", "omit_match", + "cover_match", "pylib_match", "third_match", "source_in_third_match", + ] + + for matcher_name in matcher_names: + matcher = getattr(self, matcher_name) + if matcher: + matcher_info = matcher.info() + else: + matcher_info = "-none-" + info.append((matcher_name, matcher_info)) + + return info diff --git a/venv/lib/python3.10/site-packages/coverage/jsonreport.py b/venv/lib/python3.10/site-packages/coverage/jsonreport.py new file mode 100644 index 0000000..9780e26 --- /dev/null +++ b/venv/lib/python3.10/site-packages/coverage/jsonreport.py @@ -0,0 +1,129 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""Json reporting for coverage.py""" + +from __future__ import annotations + +import datetime +import json +import sys + +from typing import Any, Dict, IO, Iterable, List, Optional, Tuple, TYPE_CHECKING + +from coverage import __version__ +from coverage.report_core import get_analysis_to_report +from coverage.results import Analysis, Numbers +from coverage.types import TMorf, TLineNo + +if TYPE_CHECKING: + from coverage import Coverage + from coverage.data import CoverageData + + +class JsonReporter: + """A reporter for writing JSON coverage results.""" + + report_type = "JSON report" + + def __init__(self, coverage: Coverage) -> None: + self.coverage = coverage + self.config = self.coverage.config + self.total = Numbers(self.config.precision) + self.report_data: Dict[str, Any] = {} + + def report(self, morfs: Optional[Iterable[TMorf]], outfile: IO[str]) -> float: + """Generate a json report for `morfs`. + + `morfs` is a list of modules or file names. + + `outfile` is a file object to write the json to. 
+ + """ + outfile = outfile or sys.stdout + coverage_data = self.coverage.get_data() + coverage_data.set_query_contexts(self.config.report_contexts) + self.report_data["meta"] = { + "version": __version__, + "timestamp": datetime.datetime.now().isoformat(), + "branch_coverage": coverage_data.has_arcs(), + "show_contexts": self.config.json_show_contexts, + } + + measured_files = {} + for file_reporter, analysis in get_analysis_to_report(self.coverage, morfs): + measured_files[file_reporter.relative_filename()] = self.report_one_file( + coverage_data, + analysis + ) + + self.report_data["files"] = measured_files + + self.report_data["totals"] = { + "covered_lines": self.total.n_executed, + "num_statements": self.total.n_statements, + "percent_covered": self.total.pc_covered, + "percent_covered_display": self.total.pc_covered_str, + "missing_lines": self.total.n_missing, + "excluded_lines": self.total.n_excluded, + } + + if coverage_data.has_arcs(): + self.report_data["totals"].update({ + "num_branches": self.total.n_branches, + "num_partial_branches": self.total.n_partial_branches, + "covered_branches": self.total.n_executed_branches, + "missing_branches": self.total.n_missing_branches, + }) + + json.dump( + self.report_data, + outfile, + indent=(4 if self.config.json_pretty_print else None), + ) + + return self.total.n_statements and self.total.pc_covered + + def report_one_file(self, coverage_data: CoverageData, analysis: Analysis) -> Dict[str, Any]: + """Extract the relevant report data for a single file.""" + nums = analysis.numbers + self.total += nums + summary = { + "covered_lines": nums.n_executed, + "num_statements": nums.n_statements, + "percent_covered": nums.pc_covered, + "percent_covered_display": nums.pc_covered_str, + "missing_lines": nums.n_missing, + "excluded_lines": nums.n_excluded, + } + reported_file = { + "executed_lines": sorted(analysis.executed), + "summary": summary, + "missing_lines": sorted(analysis.missing), + "excluded_lines": sorted(analysis.excluded), + } + if self.config.json_show_contexts: + reported_file["contexts"] = analysis.data.contexts_by_lineno(analysis.filename) + if coverage_data.has_arcs(): + summary.update({ + "num_branches": nums.n_branches, + "num_partial_branches": nums.n_partial_branches, + "covered_branches": nums.n_executed_branches, + "missing_branches": nums.n_missing_branches, + }) + reported_file["executed_branches"] = list( + _convert_branch_arcs(analysis.executed_branch_arcs()) + ) + reported_file["missing_branches"] = list( + _convert_branch_arcs(analysis.missing_branch_arcs()) + ) + return reported_file + + +def _convert_branch_arcs( + branch_arcs: Dict[TLineNo, List[TLineNo]], +) -> Iterable[Tuple[TLineNo, TLineNo]]: + """Convert branch arcs to a list of two-element tuples.""" + for source, targets in branch_arcs.items(): + for target in targets: + yield source, target diff --git a/venv/lib/python3.10/site-packages/coverage/lcovreport.py b/venv/lib/python3.10/site-packages/coverage/lcovreport.py new file mode 100644 index 0000000..3da164d --- /dev/null +++ b/venv/lib/python3.10/site-packages/coverage/lcovreport.py @@ -0,0 +1,126 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""LCOV reporting for coverage.py.""" + +from __future__ import annotations + +import base64 +import hashlib +import sys + +from typing import IO, Iterable, Optional, TYPE_CHECKING + +from coverage.plugin import FileReporter +from 
coverage.report_core import get_analysis_to_report +from coverage.results import Analysis, Numbers +from coverage.types import TMorf + +if TYPE_CHECKING: + from coverage import Coverage + + +def line_hash(line: str) -> str: + """Produce a hash of a source line for use in the LCOV file.""" + hashed = hashlib.md5(line.encode("utf-8")).digest() + return base64.b64encode(hashed).decode("ascii").rstrip("=") + + +class LcovReporter: + """A reporter for writing LCOV coverage reports.""" + + report_type = "LCOV report" + + def __init__(self, coverage: Coverage) -> None: + self.coverage = coverage + self.total = Numbers(self.coverage.config.precision) + + def report(self, morfs: Optional[Iterable[TMorf]], outfile: IO[str]) -> float: + """Renders the full lcov report. + + `morfs` is a list of modules or filenames + + outfile is the file object to write the file into. + """ + + self.coverage.get_data() + outfile = outfile or sys.stdout + + for fr, analysis in get_analysis_to_report(self.coverage, morfs): + self.get_lcov(fr, analysis, outfile) + + return self.total.n_statements and self.total.pc_covered + + def get_lcov(self, fr: FileReporter, analysis: Analysis, outfile: IO[str]) -> None: + """Produces the lcov data for a single file. + + This currently supports both line and branch coverage, + however function coverage is not supported. + """ + self.total += analysis.numbers + + outfile.write("TN:\n") + outfile.write(f"SF:{fr.relative_filename()}\n") + source_lines = fr.source().splitlines() + + for covered in sorted(analysis.executed): + # Note: Coverage.py currently only supports checking *if* a line + # has been executed, not how many times, so we set this to 1 for + # nice output even if it's technically incorrect. + + # The lines below calculate a 64-bit encoded md5 hash of the line + # corresponding to the DA lines in the lcov file, for either case + # of the line being covered or missed in coverage.py. The final two + # characters of the encoding ("==") are removed from the hash to + # allow genhtml to run on the resulting lcov file. + if source_lines: + if covered-1 >= len(source_lines): + break + line = source_lines[covered-1] + else: + line = "" + outfile.write(f"DA:{covered},1,{line_hash(line)}\n") + + for missed in sorted(analysis.missing): + assert source_lines + line = source_lines[missed-1] + outfile.write(f"DA:{missed},0,{line_hash(line)}\n") + + outfile.write(f"LF:{analysis.numbers.n_statements}\n") + outfile.write(f"LH:{analysis.numbers.n_executed}\n") + + # More information dense branch coverage data. + missing_arcs = analysis.missing_branch_arcs() + executed_arcs = analysis.executed_branch_arcs() + for block_number, block_line_number in enumerate( + sorted(analysis.branch_stats().keys()) + ): + for branch_number, line_number in enumerate( + sorted(missing_arcs[block_line_number]) + ): + # The exit branches have a negative line number, + # this will not produce valid lcov. Setting + # the line number of the exit branch to 0 will allow + # for valid lcov, while preserving the data. + line_number = max(line_number, 0) + outfile.write(f"BRDA:{line_number},{block_number},{branch_number},-\n") + + # The start value below allows for the block number to be + # preserved between these two for loops (stopping the loop from + # resetting the value of the block number to 0). 
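The block/branch numbering this comment describes is easier to see in isolation. A toy sketch with made-up arc data (plain dicts standing in for the real Analysis results):

    missing = {10: [12], 20: [-1, 22]}   # branch line -> missed arc targets
    executed = {10: [11], 20: [21]}      # branch line -> taken arc targets

    for block_number, line in enumerate(sorted(missing)):
        for branch_number, target in enumerate(sorted(missing[line])):
            print(f"BRDA:{max(target, 0)},{block_number},{branch_number},-")
        # start= continues the branch numbering where the missed arcs left off.
        for branch_number, target in enumerate(
            sorted(executed[line]), start=len(missing[line])
        ):
            print(f"BRDA:{max(target, 0)},{block_number},{branch_number},1")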
+ for branch_number, line_number in enumerate( + sorted(executed_arcs[block_line_number]), + start=len(missing_arcs[block_line_number]), + ): + line_number = max(line_number, 0) + outfile.write(f"BRDA:{line_number},{block_number},{branch_number},1\n") + + # Summary of the branch coverage. + if analysis.has_arcs(): + branch_stats = analysis.branch_stats() + brf = sum(t for t, k in branch_stats.values()) + brh = brf - sum(t - k for t, k in branch_stats.values()) + outfile.write(f"BRF:{brf}\n") + outfile.write(f"BRH:{brh}\n") + + outfile.write("end_of_record\n") diff --git a/venv/lib/python3.10/site-packages/coverage/misc.py b/venv/lib/python3.10/site-packages/coverage/misc.py new file mode 100644 index 0000000..8f9d4f1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/coverage/misc.py @@ -0,0 +1,400 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""Miscellaneous stuff for coverage.py.""" + +from __future__ import annotations + +import contextlib +import datetime +import errno +import hashlib +import importlib +import importlib.util +import inspect +import locale +import os +import os.path +import re +import sys +import types + +from types import ModuleType +from typing import ( + Any, Callable, Dict, IO, Iterable, Iterator, List, Mapping, NoReturn, Optional, + Sequence, Tuple, TypeVar, Union, +) + +from coverage import env +from coverage.exceptions import CoverageException +from coverage.types import TArc + +# In 6.0, the exceptions moved from misc.py to exceptions.py. But a number of +# other packages were importing the exceptions from misc, so import them here. +# pylint: disable=unused-wildcard-import +from coverage.exceptions import * # pylint: disable=wildcard-import + +ISOLATED_MODULES: Dict[ModuleType, ModuleType] = {} + + +def isolate_module(mod: ModuleType) -> ModuleType: + """Copy a module so that we are isolated from aggressive mocking. + + If a test suite mocks os.path.exists (for example), and then we need to use + it during the test, everything will get tangled up if we use their mock. + Making a copy of the module when we import it will isolate coverage.py from + those complications. + """ + if mod not in ISOLATED_MODULES: + new_mod = types.ModuleType(mod.__name__) + ISOLATED_MODULES[mod] = new_mod + for name in dir(mod): + value = getattr(mod, name) + if isinstance(value, types.ModuleType): + value = isolate_module(value) + setattr(new_mod, name, value) + return ISOLATED_MODULES[mod] + +os = isolate_module(os) + + +class SysModuleSaver: + """Saves the contents of sys.modules, and removes new modules later.""" + def __init__(self) -> None: + self.old_modules = set(sys.modules) + + def restore(self) -> None: + """Remove any modules imported since this object started.""" + new_modules = set(sys.modules) - self.old_modules + for m in new_modules: + del sys.modules[m] + + +@contextlib.contextmanager +def sys_modules_saved() -> Iterator[None]: + """A context manager to remove any modules imported during a block.""" + saver = SysModuleSaver() + try: + yield + finally: + saver.restore() + + +def import_third_party(modname: str) -> Tuple[ModuleType, bool]: + """Import a third-party module we need, but might not be installed. + + This also cleans out the module after the import, so that coverage won't + appear to have imported it. This lets the third party use coverage for + their own tests. + + Arguments: + modname (str): the name of the module to import. 
+ + Returns: + The imported module, and a boolean indicating if the module could be imported. + + If the boolean is False, the module returned is not the one you want: don't use it. + + """ + with sys_modules_saved(): + try: + return importlib.import_module(modname), True + except ImportError: + return sys, False + + +def nice_pair(pair: TArc) -> str: + """Make a nice string representation of a pair of numbers. + + If the numbers are equal, just return the number, otherwise return the pair + with a dash between them, indicating the range. + + """ + start, end = pair + if start == end: + return "%d" % start + else: + return "%d-%d" % (start, end) + + +TSelf = TypeVar("TSelf") +TRetVal = TypeVar("TRetVal") + +def expensive(fn: Callable[[TSelf], TRetVal]) -> Callable[[TSelf], TRetVal]: + """A decorator to indicate that a method shouldn't be called more than once. + + Normally, this does nothing. During testing, this raises an exception if + called more than once. + + """ + if env.TESTING: + attr = "_once_" + fn.__name__ + + def _wrapper(self: TSelf) -> TRetVal: + if hasattr(self, attr): + raise AssertionError(f"Shouldn't have called {fn.__name__} more than once") + setattr(self, attr, True) + return fn(self) + return _wrapper + else: + return fn # pragma: not testing + + +def bool_or_none(b: Any) -> Optional[bool]: + """Return bool(b), but preserve None.""" + if b is None: + return None + else: + return bool(b) + + +def join_regex(regexes: Iterable[str]) -> str: + """Combine a series of regex strings into one that matches any of them.""" + regexes = list(regexes) + if len(regexes) == 1: + return regexes[0] + else: + return "|".join(f"(?:{r})" for r in regexes) + + +def file_be_gone(path: str) -> None: + """Remove a file, and don't get annoyed if it doesn't exist.""" + try: + os.remove(path) + except OSError as e: + if e.errno != errno.ENOENT: + raise + + +def ensure_dir(directory: str) -> None: + """Make sure the directory exists. + + If `directory` is None or empty, do nothing. 
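The effect of sys_modules_saved above is easiest to see with a module that is not yet loaded. A small demo (assuming colorsys has not already been imported elsewhere):

    import sys
    from coverage.misc import sys_modules_saved

    with sys_modules_saved():
        import colorsys                  # loaded inside the block
        assert "colorsys" in sys.modules
    print("colorsys" in sys.modules)     # False: the import was rolled back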
+ """ + if directory: + os.makedirs(directory, exist_ok=True) + + +def ensure_dir_for_file(path: str) -> None: + """Make sure the directory for the path exists.""" + ensure_dir(os.path.dirname(path)) + + +def output_encoding(outfile: Optional[IO[str]] = None) -> str: + """Determine the encoding to use for output written to `outfile` or stdout.""" + if outfile is None: + outfile = sys.stdout + encoding = ( + getattr(outfile, "encoding", None) or + getattr(sys.__stdout__, "encoding", None) or + locale.getpreferredencoding() + ) + return encoding + + +class Hasher: + """Hashes Python data for fingerprinting.""" + def __init__(self) -> None: + self.hash = hashlib.new("sha3_256") + + def update(self, v: Any) -> None: + """Add `v` to the hash, recursively if needed.""" + self.hash.update(str(type(v)).encode("utf-8")) + if isinstance(v, str): + self.hash.update(v.encode("utf-8")) + elif isinstance(v, bytes): + self.hash.update(v) + elif v is None: + pass + elif isinstance(v, (int, float)): + self.hash.update(str(v).encode("utf-8")) + elif isinstance(v, (tuple, list)): + for e in v: + self.update(e) + elif isinstance(v, dict): + keys = v.keys() + for k in sorted(keys): + self.update(k) + self.update(v[k]) + else: + for k in dir(v): + if k.startswith("__"): + continue + a = getattr(v, k) + if inspect.isroutine(a): + continue + self.update(k) + self.update(a) + self.hash.update(b".") + + def hexdigest(self) -> str: + """Retrieve the hex digest of the hash.""" + return self.hash.hexdigest()[:32] + + +def _needs_to_implement(that: Any, func_name: str) -> NoReturn: + """Helper to raise NotImplementedError in interface stubs.""" + if hasattr(that, "_coverage_plugin_name"): + thing = "Plugin" + name = that._coverage_plugin_name + else: + thing = "Class" + klass = that.__class__ + name = f"{klass.__module__}.{klass.__name__}" + + raise NotImplementedError( + f"{thing} {name!r} needs to implement {func_name}()" + ) + + +class DefaultValue: + """A sentinel object to use for unusual default-value needs. + + Construct with a string that will be used as the repr, for display in help + and Sphinx output. + + """ + def __init__(self, display_as: str) -> None: + self.display_as = display_as + + def __repr__(self) -> str: + return self.display_as + + +def substitute_variables(text: str, variables: Mapping[str, str]) -> str: + """Substitute ``${VAR}`` variables in `text` with their values. + + Variables in the text can take a number of shell-inspired forms:: + + $VAR + ${VAR} + ${VAR?} strict: an error if VAR isn't defined. + ${VAR-missing} defaulted: "missing" if VAR isn't defined. + $$ just a dollar sign. + + `variables` is a dictionary of variable values. + + Returns the resulting text with values substituted. + + """ + dollar_pattern = r"""(?x) # Use extended regex syntax + \$ # A dollar sign, + (?: # then + (?P\$) | # a dollar sign, or + (?P\w+) | # a plain word, or + { # a {-wrapped + (?P\w+) # word, + (?: + (?P\?) | # with a strict marker + -(?P[^}]*) # or a default value + )? # maybe. + } + ) + """ + + dollar_groups = ("dollar", "word1", "word2") + + def dollar_replace(match: re.Match[str]) -> str: + """Called for each $replacement.""" + # Only one of the dollar_groups will have matched, just get its text. 
+ word = next(g for g in match.group(*dollar_groups) if g) # pragma: always breaks + if word == "$": + return "$" + elif word in variables: + return variables[word] + elif match["strict"]: + msg = f"Variable {word} is undefined: {text!r}" + raise CoverageException(msg) + else: + return match["defval"] + + text = re.sub(dollar_pattern, dollar_replace, text) + return text + + +def format_local_datetime(dt: datetime.datetime) -> str: + """Return a string with local timezone representing the date. + """ + return dt.astimezone().strftime("%Y-%m-%d %H:%M %z") + + +def import_local_file(modname: str, modfile: Optional[str] = None) -> ModuleType: + """Import a local file as a module. + + Opens a file in the current directory named `modname`.py, imports it + as `modname`, and returns the module object. `modfile` is the file to + import if it isn't in the current directory. + + """ + if modfile is None: + modfile = modname + ".py" + spec = importlib.util.spec_from_file_location(modname, modfile) + assert spec is not None + mod = importlib.util.module_from_spec(spec) + sys.modules[modname] = mod + assert spec.loader is not None + spec.loader.exec_module(mod) + + return mod + + +def _human_key(s: str) -> List[Union[str, int]]: + """Turn a string into a list of string and number chunks. + "z23a" -> ["z", 23, "a"] + """ + def tryint(s: str) -> Union[str, int]: + """If `s` is a number, return an int, else `s` unchanged.""" + try: + return int(s) + except ValueError: + return s + + return [tryint(c) for c in re.split(r"(\d+)", s)] + +def human_sorted(strings: Iterable[str]) -> List[str]: + """Sort the given iterable of strings the way that humans expect. + + Numeric components in the strings are sorted as numbers. + + Returns the sorted list. + + """ + return sorted(strings, key=_human_key) + +SortableItem = TypeVar("SortableItem", bound=Sequence[Any]) + +def human_sorted_items( + items: Iterable[SortableItem], + reverse: bool = False, +) -> List[SortableItem]: + """Sort (string, ...) items the way humans expect. + + The elements of `items` can be any tuple/list. They'll be sorted by the + first element (a string), with ties broken by the remaining elements. + + Returns the sorted list of items. + """ + return sorted(items, key=lambda item: (_human_key(item[0]), *item[1:]), reverse=reverse) + + +def plural(n: int, thing: str = "", things: str = "") -> str: + """Pluralize a word. + + If n is 1, return thing. Otherwise return things, or thing+s. + """ + if n == 1: + return thing + else: + return things or (thing + "s") + + +def stdout_link(text: str, url: str) -> str: + """Format text+url as a clickable link for stdout. + + If attached to a terminal, use escape sequences. Otherwise, just return + the text. 
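_human_key and human_sorted above (both defined in this module) produce the ordering people expect for mixed text-and-number names:

    from coverage.misc import human_sorted, human_sorted_items

    print(human_sorted(["file10.py", "file2.py", "file1.py"]))
    # ['file1.py', 'file2.py', 'file10.py']
    print(human_sorted_items([("ver10", 1), ("ver9", 2)]))
    # [('ver9', 2), ('ver10', 1)]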
+ """ + if hasattr(sys.stdout, "isatty") and sys.stdout.isatty(): + return f"\033]8;;{url}\a{text}\033]8;;\a" + else: + return text diff --git a/venv/lib/python3.10/site-packages/coverage/multiproc.py b/venv/lib/python3.10/site-packages/coverage/multiproc.py new file mode 100644 index 0000000..2fd8ad5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/coverage/multiproc.py @@ -0,0 +1,104 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""Monkey-patching to add multiprocessing support for coverage.py""" + +import multiprocessing +import multiprocessing.process +import os +import os.path +import sys +import traceback + +from typing import Any, Dict + + +# An attribute that will be set on the module to indicate that it has been +# monkey-patched. +PATCHED_MARKER = "_coverage$patched" + + +OriginalProcess = multiprocessing.process.BaseProcess +original_bootstrap = OriginalProcess._bootstrap # type: ignore[attr-defined] + +class ProcessWithCoverage(OriginalProcess): # pylint: disable=abstract-method + """A replacement for multiprocess.Process that starts coverage.""" + + def _bootstrap(self, *args, **kwargs): # type: ignore[no-untyped-def] + """Wrapper around _bootstrap to start coverage.""" + try: + from coverage import Coverage # avoid circular import + cov = Coverage(data_suffix=True, auto_data=True) + cov._warn_preimported_source = False + cov.start() + debug = cov._debug + assert debug is not None + if debug.should("multiproc"): + debug.write("Calling multiprocessing bootstrap") + except Exception: + print("Exception during multiprocessing bootstrap init:") + traceback.print_exc(file=sys.stdout) + sys.stdout.flush() + raise + try: + return original_bootstrap(self, *args, **kwargs) + finally: + if debug.should("multiproc"): + debug.write("Finished multiprocessing bootstrap") + cov.stop() + cov.save() + if debug.should("multiproc"): + debug.write("Saved multiprocessing data") + +class Stowaway: + """An object to pickle, so when it is unpickled, it can apply the monkey-patch.""" + def __init__(self, rcfile: str) -> None: + self.rcfile = rcfile + + def __getstate__(self) -> Dict[str, str]: + return {"rcfile": self.rcfile} + + def __setstate__(self, state: Dict[str, str]) -> None: + patch_multiprocessing(state["rcfile"]) + + +def patch_multiprocessing(rcfile: str) -> None: + """Monkey-patch the multiprocessing module. + + This enables coverage measurement of processes started by multiprocessing. + This involves aggressive monkey-patching. + + `rcfile` is the path to the rcfile being used. + + """ + + if hasattr(multiprocessing, PATCHED_MARKER): + return + + OriginalProcess._bootstrap = ProcessWithCoverage._bootstrap # type: ignore[attr-defined] + + # Set the value in ProcessWithCoverage that will be pickled into the child + # process. + os.environ["COVERAGE_RCFILE"] = os.path.abspath(rcfile) + + # When spawning processes rather than forking them, we have no state in the + # new process. We sneak in there with a Stowaway: we stuff one of our own + # objects into the data that gets pickled and sent to the sub-process. When + # the Stowaway is unpickled, it's __setstate__ method is called, which + # re-applies the monkey-patch. + # Windows only spawns, so this is needed to keep Windows working. 
+ try: + from multiprocessing import spawn + original_get_preparation_data = spawn.get_preparation_data + except (ImportError, AttributeError): + pass + else: + def get_preparation_data_with_stowaway(name: str) -> Dict[str, Any]: + """Get the original preparation data, and also insert our stowaway.""" + d = original_get_preparation_data(name) + d["stowaway"] = Stowaway(rcfile) + return d + + spawn.get_preparation_data = get_preparation_data_with_stowaway + + setattr(multiprocessing, PATCHED_MARKER, True) diff --git a/venv/lib/python3.10/site-packages/coverage/numbits.py b/venv/lib/python3.10/site-packages/coverage/numbits.py new file mode 100644 index 0000000..71b974d --- /dev/null +++ b/venv/lib/python3.10/site-packages/coverage/numbits.py @@ -0,0 +1,147 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +""" +Functions to manipulate packed binary representations of number sets. + +To save space, coverage stores sets of line numbers in SQLite using a packed +binary representation called a numbits. A numbits is a set of positive +integers. + +A numbits is stored as a blob in the database. The exact meaning of the bytes +in the blobs should be considered an implementation detail that might change in +the future. Use these functions to work with those binary blobs of data. + +""" + +from __future__ import annotations + +import json +import sqlite3 + +from itertools import zip_longest +from typing import Iterable, List + + +def nums_to_numbits(nums: Iterable[int]) -> bytes: + """Convert `nums` into a numbits. + + Arguments: + nums: a reusable iterable of integers, the line numbers to store. + + Returns: + A binary blob. + """ + try: + nbytes = max(nums) // 8 + 1 + except ValueError: + # nums was empty. + return b"" + b = bytearray(nbytes) + for num in nums: + b[num//8] |= 1 << num % 8 + return bytes(b) + + +def numbits_to_nums(numbits: bytes) -> List[int]: + """Convert a numbits into a list of numbers. + + Arguments: + numbits: a binary blob, the packed number set. + + Returns: + A list of ints. + + When registered as a SQLite function by :func:`register_sqlite_functions`, + this returns a string, a JSON-encoded list of ints. + + """ + nums = [] + for byte_i, byte in enumerate(numbits): + for bit_i in range(8): + if (byte & (1 << bit_i)): + nums.append(byte_i * 8 + bit_i) + return nums + + +def numbits_union(numbits1: bytes, numbits2: bytes) -> bytes: + """Compute the union of two numbits. + + Returns: + A new numbits, the union of `numbits1` and `numbits2`. + """ + byte_pairs = zip_longest(numbits1, numbits2, fillvalue=0) + return bytes(b1 | b2 for b1, b2 in byte_pairs) + + +def numbits_intersection(numbits1: bytes, numbits2: bytes) -> bytes: + """Compute the intersection of two numbits. + + Returns: + A new numbits, the intersection `numbits1` and `numbits2`. + """ + byte_pairs = zip_longest(numbits1, numbits2, fillvalue=0) + intersection_bytes = bytes(b1 & b2 for b1, b2 in byte_pairs) + return intersection_bytes.rstrip(b"\0") + + +def numbits_any_intersection(numbits1: bytes, numbits2: bytes) -> bool: + """Is there any number that appears in both numbits? + + Determine whether two number sets have a non-empty intersection. This is + faster than computing the intersection. + + Returns: + A bool, True if there is any number in both `numbits1` and `numbits2`. 
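The packed format defined above round-trips cleanly; a quick self-check using the functions from this module:

    from coverage.numbits import (
        nums_to_numbits, numbits_to_nums, numbits_union,
        numbits_intersection, num_in_numbits,
    )

    b1 = nums_to_numbits([1, 2, 3, 17])   # three bytes, one bit per line number
    b2 = nums_to_numbits([3, 5])
    assert numbits_to_nums(b1) == [1, 2, 3, 17]
    assert numbits_to_nums(numbits_union(b1, b2)) == [1, 2, 3, 5, 17]
    assert numbits_to_nums(numbits_intersection(b1, b2)) == [3]
    assert num_in_numbits(17, b1) and not num_in_numbits(4, b1)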
+ """ + byte_pairs = zip_longest(numbits1, numbits2, fillvalue=0) + return any(b1 & b2 for b1, b2 in byte_pairs) + + +def num_in_numbits(num: int, numbits: bytes) -> bool: + """Does the integer `num` appear in `numbits`? + + Returns: + A bool, True if `num` is a member of `numbits`. + """ + nbyte, nbit = divmod(num, 8) + if nbyte >= len(numbits): + return False + return bool(numbits[nbyte] & (1 << nbit)) + + +def register_sqlite_functions(connection: sqlite3.Connection) -> None: + """ + Define numbits functions in a SQLite connection. + + This defines these functions for use in SQLite statements: + + * :func:`numbits_union` + * :func:`numbits_intersection` + * :func:`numbits_any_intersection` + * :func:`num_in_numbits` + * :func:`numbits_to_nums` + + `connection` is a :class:`sqlite3.Connection ` + object. After creating the connection, pass it to this function to + register the numbits functions. Then you can use numbits functions in your + queries:: + + import sqlite3 + from coverage.numbits import register_sqlite_functions + + conn = sqlite3.connect("example.db") + register_sqlite_functions(conn) + c = conn.cursor() + # Kind of a nonsense query: + # Find all the files and contexts that executed line 47 in any file: + c.execute( + "select file_id, context_id from line_bits where num_in_numbits(?, numbits)", + (47,) + ) + """ + connection.create_function("numbits_union", 2, numbits_union) + connection.create_function("numbits_intersection", 2, numbits_intersection) + connection.create_function("numbits_any_intersection", 2, numbits_any_intersection) + connection.create_function("num_in_numbits", 2, num_in_numbits) + connection.create_function("numbits_to_nums", 1, lambda b: json.dumps(numbits_to_nums(b))) diff --git a/venv/lib/python3.10/site-packages/coverage/parser.py b/venv/lib/python3.10/site-packages/coverage/parser.py new file mode 100644 index 0000000..bff3ead --- /dev/null +++ b/venv/lib/python3.10/site-packages/coverage/parser.py @@ -0,0 +1,1415 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""Code parsing for coverage.py.""" + +from __future__ import annotations + +import ast +import collections +import os +import re +import sys +import token +import tokenize + +from types import CodeType +from typing import ( + cast, Any, Callable, Dict, Iterable, List, Optional, Protocol, Sequence, + Set, Tuple, +) + +from coverage import env +from coverage.bytecode import code_objects +from coverage.debug import short_stack +from coverage.exceptions import NoSource, NotPython +from coverage.misc import join_regex, nice_pair +from coverage.phystokens import generate_tokens +from coverage.types import TArc, TLineNo + + +class PythonParser: + """Parse code to find executable lines, excluded lines, etc. + + This information is all based on static analysis: no code execution is + involved. + + """ + def __init__( + self, + text: Optional[str] = None, + filename: Optional[str] = None, + exclude: Optional[str] = None, + ) -> None: + """ + Source can be provided as `text`, the text itself, or `filename`, from + which the text will be read. Excluded lines are those that match + `exclude`, a regex string. 
+ + """ + assert text or filename, "PythonParser needs either text or filename" + self.filename = filename or "" + if text is not None: + self.text: str = text + else: + from coverage.python import get_python_source + try: + self.text = get_python_source(self.filename) + except OSError as err: + raise NoSource(f"No source for code: '{self.filename}': {err}") from err + + self.exclude = exclude + + # The text lines of the parsed code. + self.lines: List[str] = self.text.split("\n") + + # The normalized line numbers of the statements in the code. Exclusions + # are taken into account, and statements are adjusted to their first + # lines. + self.statements: Set[TLineNo] = set() + + # The normalized line numbers of the excluded lines in the code, + # adjusted to their first lines. + self.excluded: Set[TLineNo] = set() + + # The raw_* attributes are only used in this class, and in + # lab/parser.py to show how this class is working. + + # The line numbers that start statements, as reported by the line + # number table in the bytecode. + self.raw_statements: Set[TLineNo] = set() + + # The raw line numbers of excluded lines of code, as marked by pragmas. + self.raw_excluded: Set[TLineNo] = set() + + # The line numbers of class definitions. + self.raw_classdefs: Set[TLineNo] = set() + + # The line numbers of docstring lines. + self.raw_docstrings: Set[TLineNo] = set() + + # Internal detail, used by lab/parser.py. + self.show_tokens = False + + # A dict mapping line numbers to lexical statement starts for + # multi-line statements. + self._multiline: Dict[TLineNo, TLineNo] = {} + + # Lazily-created arc data, and missing arc descriptions. + self._all_arcs: Optional[Set[TArc]] = None + self._missing_arc_fragments: Optional[TArcFragments] = None + + def lines_matching(self, *regexes: str) -> Set[TLineNo]: + """Find the lines matching one of a list of regexes. + + Returns a set of line numbers, the lines that contain a match for one + of the regexes in `regexes`. The entire line needn't match, just a + part of it. + + """ + combined = join_regex(regexes) + regex_c = re.compile(combined) + matches = set() + for i, ltext in enumerate(self.lines, start=1): + if regex_c.search(ltext): + matches.add(i) + return matches + + def _raw_parse(self) -> None: + """Parse the source to find the interesting facts about its lines. + + A handful of attributes are updated. + + """ + # Find lines which match an exclusion pattern. + if self.exclude: + self.raw_excluded = self.lines_matching(self.exclude) + + # Tokenize, to find excluded suites, to find docstrings, and to find + # multi-line statements. + indent = 0 + exclude_indent = 0 + excluding = False + excluding_decorators = False + prev_toktype = token.INDENT + first_line = None + empty = True + first_on_line = True + nesting = 0 + + assert self.text is not None + tokgen = generate_tokens(self.text) + for toktype, ttext, (slineno, _), (elineno, _), ltext in tokgen: + if self.show_tokens: # pragma: debugging + print("%10s %5s %-20r %r" % ( + tokenize.tok_name.get(toktype, toktype), + nice_pair((slineno, elineno)), ttext, ltext + )) + if toktype == token.INDENT: + indent += 1 + elif toktype == token.DEDENT: + indent -= 1 + elif toktype == token.NAME: + if ttext == "class": + # Class definitions look like branches in the bytecode, so + # we need to exclude them. The simplest way is to note the + # lines with the "class" keyword. 
+ self.raw_classdefs.add(slineno) + elif toktype == token.OP: + if ttext == ":" and nesting == 0: + should_exclude = (elineno in self.raw_excluded) or excluding_decorators + if not excluding and should_exclude: + # Start excluding a suite. We trigger off of the colon + # token so that the #pragma comment will be recognized on + # the same line as the colon. + self.raw_excluded.add(elineno) + exclude_indent = indent + excluding = True + excluding_decorators = False + elif ttext == "@" and first_on_line: + # A decorator. + if elineno in self.raw_excluded: + excluding_decorators = True + if excluding_decorators: + self.raw_excluded.add(elineno) + elif ttext in "([{": + nesting += 1 + elif ttext in ")]}": + nesting -= 1 + elif toktype == token.STRING and prev_toktype == token.INDENT: + # Strings that are first on an indented line are docstrings. + # (a trick from trace.py in the stdlib.) This works for + # 99.9999% of cases. For the rest (!) see: + # http://stackoverflow.com/questions/1769332/x/1769794#1769794 + self.raw_docstrings.update(range(slineno, elineno+1)) + elif toktype == token.NEWLINE: + if first_line is not None and elineno != first_line: # type: ignore[unreachable] + # We're at the end of a line, and we've ended on a + # different line than the first line of the statement, + # so record a multi-line range. + for l in range(first_line, elineno+1): # type: ignore[unreachable] + self._multiline[l] = first_line + first_line = None + first_on_line = True + + if ttext.strip() and toktype != tokenize.COMMENT: + # A non-white-space token. + empty = False + if first_line is None: + # The token is not white space, and is the first in a statement. + first_line = slineno + # Check whether to end an excluded suite. + if excluding and indent <= exclude_indent: + excluding = False + if excluding: + self.raw_excluded.add(elineno) + first_on_line = False + + prev_toktype = toktype + + # Find the starts of the executable statements. + if not empty: + byte_parser = ByteParser(self.text, filename=self.filename) + self.raw_statements.update(byte_parser._find_statements()) + + # The first line of modules can lie and say 1 always, even if the first + # line of code is later. If so, map 1 to the actual first line of the + # module. + if env.PYBEHAVIOR.module_firstline_1 and self._multiline: + self._multiline[1] = min(self.raw_statements) + + def first_line(self, lineno: TLineNo) -> TLineNo: + """Return the first line number of the statement including `lineno`.""" + if lineno < 0: + lineno = -self._multiline.get(-lineno, -lineno) + else: + lineno = self._multiline.get(lineno, lineno) + return lineno + + def first_lines(self, linenos: Iterable[TLineNo]) -> Set[TLineNo]: + """Map the line numbers in `linenos` to the correct first line of the + statement. + + Returns a set of the first lines. + + """ + return {self.first_line(l) for l in linenos} + + def translate_lines(self, lines: Iterable[TLineNo]) -> Set[TLineNo]: + """Implement `FileReporter.translate_lines`.""" + return self.first_lines(lines) + + def translate_arcs(self, arcs: Iterable[TArc]) -> Set[TArc]: + """Implement `FileReporter.translate_arcs`.""" + return {(self.first_line(a), self.first_line(b)) for (a, b) in arcs} + + def parse_source(self) -> None: + """Parse source text to find executable lines, excluded lines, etc. + + Sets the .excluded and .statements attributes, normalized to the first + line of multi-line statements. 
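In use, PythonParser boils down to parse_source() plus accessors like arcs() (defined below). A toy run; the exact sets vary slightly across Python versions:

    from coverage.parser import PythonParser

    src = "a = 1\nif a:\n    b = 2\nelse:\n    b = 3\n"
    p = PythonParser(text=src, exclude=r"#\s*pragma: no cover")
    p.parse_source()
    print(sorted(p.statements))   # statement start lines, e.g. [1, 2, 3, 5]
    print(sorted(p.arcs()))       # (from, to) pairs; negative numbers are exits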
+ + """ + try: + self._raw_parse() + except (tokenize.TokenError, IndentationError, SyntaxError) as err: + if hasattr(err, "lineno"): + lineno = err.lineno # IndentationError + else: + lineno = err.args[1][0] # TokenError + raise NotPython( + f"Couldn't parse '{self.filename}' as Python source: " + + f"{err.args[0]!r} at line {lineno}" + ) from err + + self.excluded = self.first_lines(self.raw_excluded) + + ignore = self.excluded | self.raw_docstrings + starts = self.raw_statements - ignore + self.statements = self.first_lines(starts) - ignore + + def arcs(self) -> Set[TArc]: + """Get information about the arcs available in the code. + + Returns a set of line number pairs. Line numbers have been normalized + to the first line of multi-line statements. + + """ + if self._all_arcs is None: + self._analyze_ast() + assert self._all_arcs is not None + return self._all_arcs + + def _analyze_ast(self) -> None: + """Run the AstArcAnalyzer and save its results. + + `_all_arcs` is the set of arcs in the code. + + """ + aaa = AstArcAnalyzer(self.text, self.raw_statements, self._multiline) + aaa.analyze() + + self._all_arcs = set() + for l1, l2 in aaa.arcs: + fl1 = self.first_line(l1) + fl2 = self.first_line(l2) + if fl1 != fl2: + self._all_arcs.add((fl1, fl2)) + + self._missing_arc_fragments = aaa.missing_arc_fragments + + def exit_counts(self) -> Dict[TLineNo, int]: + """Get a count of exits from that each line. + + Excluded lines are excluded. + + """ + exit_counts: Dict[TLineNo, int] = collections.defaultdict(int) + for l1, l2 in self.arcs(): + if l1 < 0: + # Don't ever report -1 as a line number + continue + if l1 in self.excluded: + # Don't report excluded lines as line numbers. + continue + if l2 in self.excluded: + # Arcs to excluded lines shouldn't count. + continue + exit_counts[l1] += 1 + + # Class definitions have one extra exit, so remove one for each: + for l in self.raw_classdefs: + # Ensure key is there: class definitions can include excluded lines. + if l in exit_counts: + exit_counts[l] -= 1 + + return exit_counts + + def missing_arc_description( + self, + start: TLineNo, + end: TLineNo, + executed_arcs: Optional[Iterable[TArc]] = None, + ) -> str: + """Provide an English sentence describing a missing arc.""" + if self._missing_arc_fragments is None: + self._analyze_ast() + assert self._missing_arc_fragments is not None + + actual_start = start + + if ( + executed_arcs and + end < 0 and end == -start and + (end, start) not in executed_arcs and + (end, start) in self._missing_arc_fragments + ): + # It's a one-line callable, and we never even started it, + # and we have a message about not starting it. + start, end = end, start + + fragment_pairs = self._missing_arc_fragments.get((start, end), [(None, None)]) + + msgs = [] + for smsg, emsg in fragment_pairs: + if emsg is None: + if end < 0: + # Hmm, maybe we have a one-line callable, let's check. 
+ if (-end, end) in self._missing_arc_fragments: + return self.missing_arc_description(-end, end) + emsg = "didn't jump to the function exit" + else: + emsg = "didn't jump to line {lineno}" + emsg = emsg.format(lineno=end) + + msg = f"line {actual_start} {emsg}" + if smsg is not None: + msg += f", because {smsg.format(lineno=actual_start)}" + + msgs.append(msg) + + return " or ".join(msgs) + + +class ByteParser: + """Parse bytecode to understand the structure of code.""" + + def __init__( + self, + text: str, + code: Optional[CodeType] = None, + filename: Optional[str] = None, + ) -> None: + self.text = text + if code is not None: + self.code = code + else: + assert filename is not None + try: + self.code = compile(text, filename, "exec", dont_inherit=True) + except SyntaxError as synerr: + raise NotPython( + "Couldn't parse '%s' as Python source: '%s' at line %d" % ( + filename, synerr.msg, synerr.lineno or 0 + ) + ) from synerr + + def child_parsers(self) -> Iterable[ByteParser]: + """Iterate over all the code objects nested within this one. + + The iteration includes `self` as its first value. + + """ + return (ByteParser(self.text, code=c) for c in code_objects(self.code)) + + def _line_numbers(self) -> Iterable[TLineNo]: + """Yield the line numbers possible in this code object. + + Uses co_lnotab described in Python/compile.c to find the + line numbers. Produces a sequence: l0, l1, ... + """ + if hasattr(self.code, "co_lines"): + for _, _, line in self.code.co_lines(): + if line: + yield line + else: + # Adapted from dis.py in the standard library. + byte_increments = self.code.co_lnotab[0::2] + line_increments = self.code.co_lnotab[1::2] + + last_line_num = None + line_num = self.code.co_firstlineno + byte_num = 0 + for byte_incr, line_incr in zip(byte_increments, line_increments): + if byte_incr: + if line_num != last_line_num: + yield line_num + last_line_num = line_num + byte_num += byte_incr + if line_incr >= 0x80: + line_incr -= 0x100 + line_num += line_incr + if line_num != last_line_num: + yield line_num + + def _find_statements(self) -> Iterable[TLineNo]: + """Find the statements in `self.code`. + + Produce a sequence of line numbers that start statements. Recurses + into all code objects reachable from `self.code`. + + """ + for bp in self.child_parsers(): + # Get all of the lineno information from this code. + yield from bp._line_numbers() + + +# +# AST analysis +# + +class ArcStart(collections.namedtuple("Arc", "lineno, cause")): + """The information needed to start an arc. + + `lineno` is the line number the arc starts from. + + `cause` is an English text fragment used as the `startmsg` for + AstArcAnalyzer.missing_arc_fragments. It will be used to describe why an + arc wasn't executed, so should fit well into a sentence of the form, + "Line 17 didn't run because {cause}." The fragment can include "{lineno}" + to have `lineno` interpolated into it. + + """ + def __new__(cls, lineno: TLineNo, cause: Optional[str] = None) -> ArcStart: + return super().__new__(cls, lineno, cause) + + +class TAddArcFn(Protocol): + """The type for AstArcAnalyzer.add_arc().""" + def __call__( + self, + start: TLineNo, + end: TLineNo, + smsg: Optional[str] = None, + emsg: Optional[str] = None, + ) -> None: + ... + +TArcFragments = Dict[TArc, List[Tuple[Optional[str], Optional[str]]]] + +class Block: + """ + Blocks need to handle various exiting statements in their own ways. + + All of these methods take a list of exits, and a callable `add_arc` + function that they can use to add arcs if needed. 
They return True if the + exits are handled, or False if the search should continue up the block + stack. + """ + # pylint: disable=unused-argument + def process_break_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool: + """Process break exits.""" + # Because break can only appear in loops, and most subclasses + # implement process_break_exits, this function is never reached. + raise AssertionError + + def process_continue_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool: + """Process continue exits.""" + # Because continue can only appear in loops, and most subclasses + # implement process_continue_exits, this function is never reached. + raise AssertionError + + def process_raise_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool: + """Process raise exits.""" + return False + + def process_return_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool: + """Process return exits.""" + return False + + +class LoopBlock(Block): + """A block on the block stack representing a `for` or `while` loop.""" + def __init__(self, start: TLineNo) -> None: + # The line number where the loop starts. + self.start = start + # A set of ArcStarts, the arcs from break statements exiting this loop. + self.break_exits: Set[ArcStart] = set() + + def process_break_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool: + self.break_exits.update(exits) + return True + + def process_continue_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool: + for xit in exits: + add_arc(xit.lineno, self.start, xit.cause) + return True + + +class FunctionBlock(Block): + """A block on the block stack representing a function definition.""" + def __init__(self, start: TLineNo, name: str) -> None: + # The line number where the function starts. + self.start = start + # The name of the function. + self.name = name + + def process_raise_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool: + for xit in exits: + add_arc( + xit.lineno, -self.start, xit.cause, + f"didn't except from function {self.name!r}", + ) + return True + + def process_return_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool: + for xit in exits: + add_arc( + xit.lineno, -self.start, xit.cause, + f"didn't return from function {self.name!r}", + ) + return True + + +class TryBlock(Block): + """A block on the block stack representing a `try` block.""" + def __init__(self, handler_start: Optional[TLineNo], final_start: Optional[TLineNo]) -> None: + # The line number of the first "except" handler, if any. + self.handler_start = handler_start + # The line number of the "finally:" clause, if any. + self.final_start = final_start + + # The ArcStarts for breaks/continues/returns/raises inside the "try:" + # that need to route through the "finally:" clause. 
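This routing protocol is easy to exercise directly: with the loop block above, a break exit is absorbed by the innermost loop (ArcStart and LoopBlock as defined in this file):

    from coverage.parser import ArcStart, LoopBlock

    loop = LoopBlock(start=10)
    exits = {ArcStart(12, "the break on line 12")}
    handled = loop.process_break_exits(exits, add_arc=print)  # add_arc unused here
    assert handled and loop.break_exits == exits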
+ self.break_from: Set[ArcStart] = set() + self.continue_from: Set[ArcStart] = set() + self.raise_from: Set[ArcStart] = set() + self.return_from: Set[ArcStart] = set() + + def process_break_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool: + if self.final_start is not None: + self.break_from.update(exits) + return True + return False + + def process_continue_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool: + if self.final_start is not None: + self.continue_from.update(exits) + return True + return False + + def process_raise_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool: + if self.handler_start is not None: + for xit in exits: + add_arc(xit.lineno, self.handler_start, xit.cause) + else: + assert self.final_start is not None + self.raise_from.update(exits) + return True + + def process_return_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool: + if self.final_start is not None: + self.return_from.update(exits) + return True + return False + + +class WithBlock(Block): + """A block on the block stack representing a `with` block.""" + def __init__(self, start: TLineNo) -> None: + # We only ever use this block if it is needed, so that we don't have to + # check this setting in all the methods. + assert env.PYBEHAVIOR.exit_through_with + + # The line number of the with statement. + self.start = start + + # The ArcStarts for breaks/continues/returns/raises inside the "with:" + # that need to go through the with-statement while exiting. + self.break_from: Set[ArcStart] = set() + self.continue_from: Set[ArcStart] = set() + self.return_from: Set[ArcStart] = set() + + def _process_exits( + self, + exits: Set[ArcStart], + add_arc: TAddArcFn, + from_set: Optional[Set[ArcStart]] = None, + ) -> bool: + """Helper to process the four kinds of exits.""" + for xit in exits: + add_arc(xit.lineno, self.start, xit.cause) + if from_set is not None: + from_set.update(exits) + return True + + def process_break_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool: + return self._process_exits(exits, add_arc, self.break_from) + + def process_continue_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool: + return self._process_exits(exits, add_arc, self.continue_from) + + def process_raise_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool: + return self._process_exits(exits, add_arc) + + def process_return_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool: + return self._process_exits(exits, add_arc, self.return_from) + + +class NodeList(ast.AST): + """A synthetic fictitious node, containing a sequence of nodes. + + This is used when collapsing optimized if-statements, to represent the + unconditional execution of one of the clauses. + + """ + def __init__(self, body: Sequence[ast.AST]) -> None: + self.body = body + self.lineno = body[0].lineno + +# TODO: some add_arcs methods here don't add arcs, they return them. Rename them. +# TODO: the cause messages have too many commas. +# TODO: Shouldn't the cause messages join with "and" instead of "or"? 
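+
+# A sketch of the source shapes the block classes above model; the function
+# below is illustrative, not part of coverage.py:
+#
+#     def f(items, path):
+#         for i in items:            # LoopBlock collects break/continue exits
+#             try:
+#                 if i:
+#                     break          # stored in TryBlock.break_from, then
+#             finally:               # routed through the finally clause
+#                 print("cleanup")
+#         with open(path) as fp:     # WithBlock, when exit_through_with
+#             return fp.read()       # FunctionBlock adds the arc to -start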
+ +def _make_expression_code_method(noun: str) -> Callable[[AstArcAnalyzer, ast.AST], None]: + """A function to make methods for expression-based callable _code_object__ methods.""" + def _code_object__expression_callable(self: AstArcAnalyzer, node: ast.AST) -> None: + start = self.line_for_node(node) + self.add_arc(-start, start, None, f"didn't run the {noun} on line {start}") + self.add_arc(start, -start, None, f"didn't finish the {noun} on line {start}") + return _code_object__expression_callable + + +class AstArcAnalyzer: + """Analyze source text with an AST to find executable code paths.""" + + def __init__( + self, + text: str, + statements: Set[TLineNo], + multiline: Dict[TLineNo, TLineNo], + ) -> None: + self.root_node = ast.parse(text) + # TODO: I think this is happening in too many places. + self.statements = {multiline.get(l, l) for l in statements} + self.multiline = multiline + + # Turn on AST dumps with an environment variable. + # $set_env.py: COVERAGE_AST_DUMP - Dump the AST nodes when parsing code. + dump_ast = bool(int(os.environ.get("COVERAGE_AST_DUMP", 0))) + + if dump_ast: # pragma: debugging + # Dump the AST so that failing tests have helpful output. + print(f"Statements: {self.statements}") + print(f"Multiline map: {self.multiline}") + ast_dump(self.root_node) + + self.arcs: Set[TArc] = set() + + # A map from arc pairs to a list of pairs of sentence fragments: + # { (start, end): [(startmsg, endmsg), ...], } + # + # For an arc from line 17, they should be usable like: + # "Line 17 {endmsg}, because {startmsg}" + self.missing_arc_fragments: TArcFragments = collections.defaultdict(list) + self.block_stack: List[Block] = [] + + # $set_env.py: COVERAGE_TRACK_ARCS - Trace possible arcs added while parsing code. + self.debug = bool(int(os.environ.get("COVERAGE_TRACK_ARCS", 0))) + + def analyze(self) -> None: + """Examine the AST tree from `root_node` to determine possible arcs. + + This sets the `arcs` attribute to be a set of (from, to) line number + pairs. + + """ + for node in ast.walk(self.root_node): + node_name = node.__class__.__name__ + code_object_handler = getattr(self, "_code_object__" + node_name, None) + if code_object_handler is not None: + code_object_handler(node) + + def add_arc( + self, + start: TLineNo, + end: TLineNo, + smsg: Optional[str] = None, + emsg: Optional[str] = None, + ) -> None: + """Add an arc, including message fragments to use if it is missing.""" + if self.debug: # pragma: debugging + print(f"\nAdding possible arc: ({start}, {end}): {smsg!r}, {emsg!r}") + print(short_stack(limit=10)) + self.arcs.add((start, end)) + + if smsg is not None or emsg is not None: + self.missing_arc_fragments[(start, end)].append((smsg, emsg)) + + def nearest_blocks(self) -> Iterable[Block]: + """Yield the blocks in nearest-to-farthest order.""" + return reversed(self.block_stack) + + def line_for_node(self, node: ast.AST) -> TLineNo: + """What is the right line number to use for this node? + + This dispatches to _line__Node functions where needed. 
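+
+        For example, a multi-line assignment is attributed to the line of its
+        right-hand value, via `_line__Assign` below.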
+ + """ + node_name = node.__class__.__name__ + handler = cast( + Optional[Callable[[ast.AST], TLineNo]], + getattr(self, "_line__" + node_name, None) + ) + if handler is not None: + return handler(node) + else: + return node.lineno + + def _line_decorated(self, node: ast.FunctionDef) -> TLineNo: + """Compute first line number for things that can be decorated (classes and functions).""" + if node.decorator_list: + lineno = node.decorator_list[0].lineno + else: + lineno = node.lineno + return lineno + + def _line__Assign(self, node: ast.Assign) -> TLineNo: + return self.line_for_node(node.value) + + _line__ClassDef = _line_decorated + + def _line__Dict(self, node: ast.Dict) -> TLineNo: + if node.keys: + if node.keys[0] is not None: + return node.keys[0].lineno + else: + # Unpacked dict literals `{**{"a":1}}` have None as the key, + # use the value in that case. + return node.values[0].lineno + else: + return node.lineno + + _line__FunctionDef = _line_decorated + _line__AsyncFunctionDef = _line_decorated + + def _line__List(self, node: ast.List) -> TLineNo: + if node.elts: + return self.line_for_node(node.elts[0]) + else: + return node.lineno + + def _line__Module(self, node: ast.Module) -> TLineNo: + if env.PYBEHAVIOR.module_firstline_1: + return 1 + elif node.body: + return self.line_for_node(node.body[0]) + else: + # Empty modules have no line number, they always start at 1. + return 1 + + # The node types that just flow to the next node with no complications. + OK_TO_DEFAULT = { + "AnnAssign", "Assign", "Assert", "AugAssign", "Delete", "Expr", "Global", + "Import", "ImportFrom", "Nonlocal", "Pass", + } + + def add_arcs(self, node: ast.AST) -> Set[ArcStart]: + """Add the arcs for `node`. + + Return a set of ArcStarts, exits from this node to the next. Because a + node represents an entire sub-tree (including its children), the exits + from a node can be arbitrarily complex:: + + if something(1): + if other(2): + doit(3) + else: + doit(5) + + There are two exits from line 1: they start at line 3 and line 5. + + """ + node_name = node.__class__.__name__ + handler = cast( + Optional[Callable[[ast.AST], Set[ArcStart]]], + getattr(self, "_handle__" + node_name, None) + ) + if handler is not None: + return handler(node) + else: + # No handler: either it's something that's ok to default (a simple + # statement), or it's something we overlooked. + if env.TESTING: + if node_name not in self.OK_TO_DEFAULT: + raise RuntimeError(f"*** Unhandled: {node}") # pragma: only failure + + # Default for simple statements: one exit from this node. + return {ArcStart(self.line_for_node(node))} + + def add_body_arcs( + self, + body: Sequence[ast.AST], + from_start: Optional[ArcStart] = None, + prev_starts: Optional[Set[ArcStart]] = None + ) -> Set[ArcStart]: + """Add arcs for the body of a compound statement. + + `body` is the body node. `from_start` is a single `ArcStart` that can + be the previous line in flow before this body. `prev_starts` is a set + of ArcStarts that can be the previous line. Only one of them should be + given. + + Returns a set of ArcStarts, the exits from this body. 
+ + """ + if prev_starts is None: + assert from_start is not None + prev_starts = {from_start} + for body_node in body: + lineno = self.line_for_node(body_node) + first_line = self.multiline.get(lineno, lineno) + if first_line not in self.statements: + maybe_body_node = self.find_non_missing_node(body_node) + if maybe_body_node is None: + continue + body_node = maybe_body_node + lineno = self.line_for_node(body_node) + for prev_start in prev_starts: + self.add_arc(prev_start.lineno, lineno, prev_start.cause) + prev_starts = self.add_arcs(body_node) + return prev_starts + + def find_non_missing_node(self, node: ast.AST) -> Optional[ast.AST]: + """Search `node` looking for a child that has not been optimized away. + + This might return the node you started with, or it will work recursively + to find a child node in self.statements. + + Returns a node, or None if none of the node remains. + + """ + # This repeats work just done in add_body_arcs, but this duplication + # means we can avoid a function call in the 99.9999% case of not + # optimizing away statements. + lineno = self.line_for_node(node) + first_line = self.multiline.get(lineno, lineno) + if first_line in self.statements: + return node + + missing_fn = cast( + Optional[Callable[[ast.AST], Optional[ast.AST]]], + getattr(self, "_missing__" + node.__class__.__name__, None) + ) + if missing_fn is not None: + ret_node = missing_fn(node) + else: + ret_node = None + return ret_node + + # Missing nodes: _missing__* + # + # Entire statements can be optimized away by Python. They will appear in + # the AST, but not the bytecode. These functions are called (by + # find_non_missing_node) to find a node to use instead of the missing + # node. They can return None if the node should truly be gone. + + def _missing__If(self, node: ast.If) -> Optional[ast.AST]: + # If the if-node is missing, then one of its children might still be + # here, but not both. So return the first of the two that isn't missing. + # Use a NodeList to hold the clauses as a single node. + non_missing = self.find_non_missing_node(NodeList(node.body)) + if non_missing: + return non_missing + if node.orelse: + return self.find_non_missing_node(NodeList(node.orelse)) + return None + + def _missing__NodeList(self, node: NodeList) -> Optional[ast.AST]: + # A NodeList might be a mixture of missing and present nodes. Find the + # ones that are present. + non_missing_children = [] + for child in node.body: + maybe_child = self.find_non_missing_node(child) + if maybe_child is not None: + non_missing_children.append(maybe_child) + + # Return the simplest representation of the present children. + if not non_missing_children: + return None + if len(non_missing_children) == 1: + return non_missing_children[0] + return NodeList(non_missing_children) + + def _missing__While(self, node: ast.While) -> Optional[ast.AST]: + body_nodes = self.find_non_missing_node(NodeList(node.body)) + if not body_nodes: + return None + # Make a synthetic While-true node. 
+ new_while = ast.While() + new_while.lineno = body_nodes.lineno + new_while.test = ast.Name() + new_while.test.lineno = body_nodes.lineno + new_while.test.id = "True" + assert hasattr(body_nodes, "body") + new_while.body = body_nodes.body + new_while.orelse = [] + return new_while + + def is_constant_expr(self, node: ast.AST) -> Optional[str]: + """Is this a compile-time constant?""" + node_name = node.__class__.__name__ + if node_name in ["Constant", "NameConstant", "Num"]: + return "Num" + elif isinstance(node, ast.Name): + if node.id in ["True", "False", "None", "__debug__"]: + return "Name" + return None + + # In the fullness of time, these might be good tests to write: + # while EXPR: + # while False: + # listcomps hidden deep in other expressions + # listcomps hidden in lists: x = [[i for i in range(10)]] + # nested function definitions + + # Exit processing: process_*_exits + # + # These functions process the four kinds of jump exits: break, continue, + # raise, and return. To figure out where an exit goes, we have to look at + # the block stack context. For example, a break will jump to the nearest + # enclosing loop block, or the nearest enclosing finally block, whichever + # is nearer. + + def process_break_exits(self, exits: Set[ArcStart]) -> None: + """Add arcs due to jumps from `exits` being breaks.""" + for block in self.nearest_blocks(): # pragma: always breaks + if block.process_break_exits(exits, self.add_arc): + break + + def process_continue_exits(self, exits: Set[ArcStart]) -> None: + """Add arcs due to jumps from `exits` being continues.""" + for block in self.nearest_blocks(): # pragma: always breaks + if block.process_continue_exits(exits, self.add_arc): + break + + def process_raise_exits(self, exits: Set[ArcStart]) -> None: + """Add arcs due to jumps from `exits` being raises.""" + for block in self.nearest_blocks(): + if block.process_raise_exits(exits, self.add_arc): + break + + def process_return_exits(self, exits: Set[ArcStart]) -> None: + """Add arcs due to jumps from `exits` being returns.""" + for block in self.nearest_blocks(): # pragma: always breaks + if block.process_return_exits(exits, self.add_arc): + break + + # Handlers: _handle__* + # + # Each handler deals with a specific AST node type, dispatched from + # add_arcs. Handlers return the set of exits from that node, and can + # also call self.add_arc to record arcs they find. These functions mirror + # the Python semantics of each syntactic construct. See the docstring + # for add_arcs to understand the concept of exits from a node. + # + # Every node type that represents a statement should have a handler, or it + # should be listed in OK_TO_DEFAULT. 
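+
+    # For illustration (not real coverage.py code), the search over a stack
+    # built for a loop whose body contains a try/finally would run like:
+    #
+    #     stack = [FunctionBlock(1, "f"), LoopBlock(2), TryBlock(None, 4)]
+    #     for block in reversed(stack):               # nearest block first
+    #         if block.process_break_exits({ArcStart(5)}, add_arc):
+    #             break   # TryBlock claims it: the break must pass "finally"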
+ + def _handle__Break(self, node: ast.Break) -> Set[ArcStart]: + here = self.line_for_node(node) + break_start = ArcStart(here, cause="the break on line {lineno} wasn't executed") + self.process_break_exits({break_start}) + return set() + + def _handle_decorated(self, node: ast.FunctionDef) -> Set[ArcStart]: + """Add arcs for things that can be decorated (classes and functions).""" + main_line: TLineNo = node.lineno + last: Optional[TLineNo] = node.lineno + decs = node.decorator_list + if decs: + last = None + for dec_node in decs: + dec_start = self.line_for_node(dec_node) + if last is not None and dec_start != last: # type: ignore[unreachable] + self.add_arc(last, dec_start) # type: ignore[unreachable] + last = dec_start + assert last is not None + self.add_arc(last, main_line) + last = main_line + if env.PYBEHAVIOR.trace_decorator_line_again: + for top, bot in zip(decs, decs[1:]): + self.add_arc(self.line_for_node(bot), self.line_for_node(top)) + self.add_arc(self.line_for_node(decs[0]), main_line) + self.add_arc(main_line, self.line_for_node(decs[-1])) + # The definition line may have been missed, but we should have it + # in `self.statements`. For some constructs, `line_for_node` is + # not what we'd think of as the first line in the statement, so map + # it to the first one. + if node.body: + body_start = self.line_for_node(node.body[0]) + body_start = self.multiline.get(body_start, body_start) + # The body is handled in collect_arcs. + assert last is not None + return {ArcStart(last)} + + _handle__ClassDef = _handle_decorated + + def _handle__Continue(self, node: ast.Continue) -> Set[ArcStart]: + here = self.line_for_node(node) + continue_start = ArcStart(here, cause="the continue on line {lineno} wasn't executed") + self.process_continue_exits({continue_start}) + return set() + + def _handle__For(self, node: ast.For) -> Set[ArcStart]: + start = self.line_for_node(node.iter) + self.block_stack.append(LoopBlock(start=start)) + from_start = ArcStart(start, cause="the loop on line {lineno} never started") + exits = self.add_body_arcs(node.body, from_start=from_start) + # Any exit from the body will go back to the top of the loop. + for xit in exits: + self.add_arc(xit.lineno, start, xit.cause) + my_block = self.block_stack.pop() + assert isinstance(my_block, LoopBlock) + exits = my_block.break_exits + from_start = ArcStart(start, cause="the loop on line {lineno} didn't complete") + if node.orelse: + else_exits = self.add_body_arcs(node.orelse, from_start=from_start) + exits |= else_exits + else: + # No else clause: exit from the for line. 
+ exits.add(from_start) + return exits + + _handle__AsyncFor = _handle__For + + _handle__FunctionDef = _handle_decorated + _handle__AsyncFunctionDef = _handle_decorated + + def _handle__If(self, node: ast.If) -> Set[ArcStart]: + start = self.line_for_node(node.test) + from_start = ArcStart(start, cause="the condition on line {lineno} was never true") + exits = self.add_body_arcs(node.body, from_start=from_start) + from_start = ArcStart(start, cause="the condition on line {lineno} was never false") + exits |= self.add_body_arcs(node.orelse, from_start=from_start) + return exits + + if sys.version_info >= (3, 10): + def _handle__Match(self, node: ast.Match) -> Set[ArcStart]: + start = self.line_for_node(node) + last_start = start + exits = set() + had_wildcard = False + for case in node.cases: + case_start = self.line_for_node(case.pattern) + pattern = case.pattern + while isinstance(pattern, ast.MatchOr): + pattern = pattern.patterns[-1] + if isinstance(pattern, ast.MatchAs): + had_wildcard = True + self.add_arc(last_start, case_start, "the pattern on line {lineno} always matched") + from_start = ArcStart( + case_start, + cause="the pattern on line {lineno} never matched", + ) + exits |= self.add_body_arcs(case.body, from_start=from_start) + last_start = case_start + if not had_wildcard: + exits.add(from_start) + return exits + + def _handle__NodeList(self, node: NodeList) -> Set[ArcStart]: + start = self.line_for_node(node) + exits = self.add_body_arcs(node.body, from_start=ArcStart(start)) + return exits + + def _handle__Raise(self, node: ast.Raise) -> Set[ArcStart]: + here = self.line_for_node(node) + raise_start = ArcStart(here, cause="the raise on line {lineno} wasn't executed") + self.process_raise_exits({raise_start}) + # `raise` statement jumps away, no exits from here. + return set() + + def _handle__Return(self, node: ast.Return) -> Set[ArcStart]: + here = self.line_for_node(node) + return_start = ArcStart(here, cause="the return on line {lineno} wasn't executed") + self.process_return_exits({return_start}) + # `return` statement jumps away, no exits from here. + return set() + + def _handle__Try(self, node: ast.Try) -> Set[ArcStart]: + if node.handlers: + handler_start = self.line_for_node(node.handlers[0]) + else: + handler_start = None + + if node.finalbody: + final_start = self.line_for_node(node.finalbody[0]) + else: + final_start = None + + # This is true by virtue of Python syntax: have to have either except + # or finally, or both. + assert handler_start is not None or final_start is not None + try_block = TryBlock(handler_start, final_start) + self.block_stack.append(try_block) + + start = self.line_for_node(node) + exits = self.add_body_arcs(node.body, from_start=ArcStart(start)) + + # We're done with the `try` body, so this block no longer handles + # exceptions. We keep the block so the `finally` clause can pick up + # flows from the handlers and `else` clause. + if node.finalbody: + try_block.handler_start = None + if node.handlers: + # If there are `except` clauses, then raises in the try body + # will already jump to them. Start this set over for raises in + # `except` and `else`. 
+ try_block.raise_from = set() + else: + self.block_stack.pop() + + handler_exits: Set[ArcStart] = set() + + if node.handlers: + last_handler_start: Optional[TLineNo] = None + for handler_node in node.handlers: + handler_start = self.line_for_node(handler_node) + if last_handler_start is not None: + self.add_arc(last_handler_start, handler_start) + last_handler_start = handler_start + from_cause = "the exception caught by line {lineno} didn't happen" + from_start = ArcStart(handler_start, cause=from_cause) + handler_exits |= self.add_body_arcs(handler_node.body, from_start=from_start) + + if node.orelse: + exits = self.add_body_arcs(node.orelse, prev_starts=exits) + + exits |= handler_exits + + if node.finalbody: + self.block_stack.pop() + final_from = ( # You can get to the `finally` clause from: + exits | # the exits of the body or `else` clause, + try_block.break_from | # or a `break`, + try_block.continue_from | # or a `continue`, + try_block.raise_from | # or a `raise`, + try_block.return_from # or a `return`. + ) + + final_exits = self.add_body_arcs(node.finalbody, prev_starts=final_from) + + if try_block.break_from: + if env.PYBEHAVIOR.finally_jumps_back: + for break_line in try_block.break_from: + lineno = break_line.lineno + cause = break_line.cause.format(lineno=lineno) + for final_exit in final_exits: + self.add_arc(final_exit.lineno, lineno, cause) + breaks = try_block.break_from + else: + breaks = self._combine_finally_starts(try_block.break_from, final_exits) + self.process_break_exits(breaks) + + if try_block.continue_from: + if env.PYBEHAVIOR.finally_jumps_back: + for continue_line in try_block.continue_from: + lineno = continue_line.lineno + cause = continue_line.cause.format(lineno=lineno) + for final_exit in final_exits: + self.add_arc(final_exit.lineno, lineno, cause) + continues = try_block.continue_from + else: + continues = self._combine_finally_starts(try_block.continue_from, final_exits) + self.process_continue_exits(continues) + + if try_block.raise_from: + self.process_raise_exits( + self._combine_finally_starts(try_block.raise_from, final_exits) + ) + + if try_block.return_from: + if env.PYBEHAVIOR.finally_jumps_back: + for return_line in try_block.return_from: + lineno = return_line.lineno + cause = return_line.cause.format(lineno=lineno) + for final_exit in final_exits: + self.add_arc(final_exit.lineno, lineno, cause) + returns = try_block.return_from + else: + returns = self._combine_finally_starts(try_block.return_from, final_exits) + self.process_return_exits(returns) + + if exits: + # The finally clause's exits are only exits for the try block + # as a whole if the try block had some exits to begin with. + exits = final_exits + + return exits + + def _combine_finally_starts(self, starts: Set[ArcStart], exits: Set[ArcStart]) -> Set[ArcStart]: + """Helper for building the cause of `finally` branches. + + "finally" clauses might not execute their exits, and the causes could + be due to a failure to execute any of the exits in the try block. So + we use the causes from `starts` as the causes for `exits`. 
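+
+        For example, if breaks on lines 3 and 5 both route through the same
+        `finally:` clause, each exit from that clause carries both causes,
+        joined with " or ".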
+ """ + causes = [] + for start in sorted(starts): + if start.cause is not None: + causes.append(start.cause.format(lineno=start.lineno)) + cause = " or ".join(causes) + exits = {ArcStart(xit.lineno, cause) for xit in exits} + return exits + + def _handle__While(self, node: ast.While) -> Set[ArcStart]: + start = to_top = self.line_for_node(node.test) + constant_test = self.is_constant_expr(node.test) + top_is_body0 = False + if constant_test: + top_is_body0 = True + if env.PYBEHAVIOR.keep_constant_test: + top_is_body0 = False + if top_is_body0: + to_top = self.line_for_node(node.body[0]) + self.block_stack.append(LoopBlock(start=to_top)) + from_start = ArcStart(start, cause="the condition on line {lineno} was never true") + exits = self.add_body_arcs(node.body, from_start=from_start) + for xit in exits: + self.add_arc(xit.lineno, to_top, xit.cause) + exits = set() + my_block = self.block_stack.pop() + assert isinstance(my_block, LoopBlock) + exits.update(my_block.break_exits) + from_start = ArcStart(start, cause="the condition on line {lineno} was never false") + if node.orelse: + else_exits = self.add_body_arcs(node.orelse, from_start=from_start) + exits |= else_exits + else: + # No `else` clause: you can exit from the start. + if not constant_test: + exits.add(from_start) + return exits + + def _handle__With(self, node: ast.With) -> Set[ArcStart]: + start = self.line_for_node(node) + if env.PYBEHAVIOR.exit_through_with: + self.block_stack.append(WithBlock(start=start)) + exits = self.add_body_arcs(node.body, from_start=ArcStart(start)) + if env.PYBEHAVIOR.exit_through_with: + with_block = self.block_stack.pop() + assert isinstance(with_block, WithBlock) + with_exit = {ArcStart(start)} + if exits: + for xit in exits: + self.add_arc(xit.lineno, start) + exits = with_exit + if with_block.break_from: + self.process_break_exits( + self._combine_finally_starts(with_block.break_from, with_exit) + ) + if with_block.continue_from: + self.process_continue_exits( + self._combine_finally_starts(with_block.continue_from, with_exit) + ) + if with_block.return_from: + self.process_return_exits( + self._combine_finally_starts(with_block.return_from, with_exit) + ) + return exits + + _handle__AsyncWith = _handle__With + + # Code object dispatchers: _code_object__* + # + # These methods are used by analyze() as the start of the analysis. + # There is one for each construct with a code object. + + def _code_object__Module(self, node: ast.Module) -> None: + start = self.line_for_node(node) + if node.body: + exits = self.add_body_arcs(node.body, from_start=ArcStart(-start)) + for xit in exits: + self.add_arc(xit.lineno, -start, xit.cause, "didn't exit the module") + else: + # Empty module. 
+ self.add_arc(-start, start) + self.add_arc(start, -start) + + def _code_object__FunctionDef(self, node: ast.FunctionDef) -> None: + start = self.line_for_node(node) + self.block_stack.append(FunctionBlock(start=start, name=node.name)) + exits = self.add_body_arcs(node.body, from_start=ArcStart(-start)) + self.process_return_exits(exits) + self.block_stack.pop() + + _code_object__AsyncFunctionDef = _code_object__FunctionDef + + def _code_object__ClassDef(self, node: ast.ClassDef) -> None: + start = self.line_for_node(node) + self.add_arc(-start, start) + exits = self.add_body_arcs(node.body, from_start=ArcStart(start)) + for xit in exits: + self.add_arc( + xit.lineno, -start, xit.cause, + f"didn't exit the body of class {node.name!r}", + ) + + _code_object__Lambda = _make_expression_code_method("lambda") + _code_object__GeneratorExp = _make_expression_code_method("generator expression") + if env.PYBEHAVIOR.comprehensions_are_functions: + _code_object__DictComp = _make_expression_code_method("dictionary comprehension") + _code_object__SetComp = _make_expression_code_method("set comprehension") + _code_object__ListComp = _make_expression_code_method("list comprehension") + + +# Code only used when dumping the AST for debugging. + +SKIP_DUMP_FIELDS = ["ctx"] + +def _is_simple_value(value: Any) -> bool: + """Is `value` simple enough to be displayed on a single line?""" + return ( + value in [None, [], (), {}, set(), frozenset(), Ellipsis] or + isinstance(value, (bytes, int, float, str)) + ) + +def ast_dump( + node: ast.AST, + depth: int = 0, + print: Callable[[str], None] = print, # pylint: disable=redefined-builtin +) -> None: + """Dump the AST for `node`. + + This recursively walks the AST, printing a readable version. + + """ + indent = " " * depth + lineno = getattr(node, "lineno", None) + if lineno is not None: + linemark = f" @ {node.lineno},{node.col_offset}" + if hasattr(node, "end_lineno"): + assert hasattr(node, "end_col_offset") + linemark += ":" + if node.end_lineno != node.lineno: + linemark += f"{node.end_lineno}," + linemark += f"{node.end_col_offset}" + else: + linemark = "" + head = f"{indent}<{node.__class__.__name__}{linemark}" + + named_fields = [ + (name, value) + for name, value in ast.iter_fields(node) + if name not in SKIP_DUMP_FIELDS + ] + if not named_fields: + print(f"{head}>") + elif len(named_fields) == 1 and _is_simple_value(named_fields[0][1]): + field_name, value = named_fields[0] + print(f"{head} {field_name}: {value!r}>") + else: + print(head) + if 0: + print("{}# mro: {}".format( # type: ignore[unreachable] + indent, ", ".join(c.__name__ for c in node.__class__.__mro__[1:]), + )) + next_indent = indent + " " + for field_name, value in named_fields: + prefix = f"{next_indent}{field_name}:" + if _is_simple_value(value): + print(f"{prefix} {value!r}") + elif isinstance(value, list): + print(f"{prefix} [") + for n in value: + if _is_simple_value(n): + print(f"{next_indent} {n!r}") + else: + ast_dump(n, depth + 8, print=print) + print(f"{next_indent}]") + else: + print(prefix) + ast_dump(value, depth + 8, print=print) + + print(f"{indent}>") diff --git a/venv/lib/python3.10/site-packages/coverage/phystokens.py b/venv/lib/python3.10/site-packages/coverage/phystokens.py new file mode 100644 index 0000000..d565926 --- /dev/null +++ b/venv/lib/python3.10/site-packages/coverage/phystokens.py @@ -0,0 +1,207 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + 
+"""Better tokenizing for coverage.py.""" + +from __future__ import annotations + +import ast +import io +import keyword +import re +import sys +import token +import tokenize + +from typing import Iterable, List, Optional, Set, Tuple + +from coverage import env +from coverage.types import TLineNo, TSourceTokenLines + + +TokenInfos = Iterable[tokenize.TokenInfo] + + +def _phys_tokens(toks: TokenInfos) -> TokenInfos: + """Return all physical tokens, even line continuations. + + tokenize.generate_tokens() doesn't return a token for the backslash that + continues lines. This wrapper provides those tokens so that we can + re-create a faithful representation of the original source. + + Returns the same values as generate_tokens() + + """ + last_line: Optional[str] = None + last_lineno = -1 + last_ttext: str = "" + for ttype, ttext, (slineno, scol), (elineno, ecol), ltext in toks: + if last_lineno != elineno: + if last_line and last_line.endswith("\\\n"): + # We are at the beginning of a new line, and the last line + # ended with a backslash. We probably have to inject a + # backslash token into the stream. Unfortunately, there's more + # to figure out. This code:: + # + # usage = """\ + # HEY THERE + # """ + # + # triggers this condition, but the token text is:: + # + # '"""\\\nHEY THERE\n"""' + # + # so we need to figure out if the backslash is already in the + # string token or not. + inject_backslash = True + if last_ttext.endswith("\\"): + inject_backslash = False + elif ttype == token.STRING: + if "\n" in ttext and ttext.split("\n", 1)[0][-1] == "\\": + # It's a multi-line string and the first line ends with + # a backslash, so we don't need to inject another. + inject_backslash = False + if inject_backslash: + # Figure out what column the backslash is in. + ccol = len(last_line.split("\n")[-2]) - 1 + # Yield the token, with a fake token type. + yield tokenize.TokenInfo( + 99999, "\\\n", + (slineno, ccol), (slineno, ccol+2), + last_line + ) + last_line = ltext + if ttype not in (tokenize.NEWLINE, tokenize.NL): + last_ttext = ttext + yield tokenize.TokenInfo(ttype, ttext, (slineno, scol), (elineno, ecol), ltext) + last_lineno = elineno + + +class MatchCaseFinder(ast.NodeVisitor): + """Helper for finding match/case lines.""" + def __init__(self, source: str) -> None: + # This will be the set of line numbers that start match or case statements. + self.match_case_lines: Set[TLineNo] = set() + self.visit(ast.parse(source)) + + if sys.version_info >= (3, 10): + def visit_Match(self, node: ast.Match) -> None: + """Invoked by ast.NodeVisitor.visit""" + self.match_case_lines.add(node.lineno) + for case in node.cases: + self.match_case_lines.add(case.pattern.lineno) + self.generic_visit(node) + + +def source_token_lines(source: str) -> TSourceTokenLines: + """Generate a series of lines, one for each line in `source`. + + Each line is a list of pairs, each pair is a token:: + + [('key', 'def'), ('ws', ' '), ('nam', 'hello'), ('op', '('), ... ] + + Each pair has a token class, and the token text. + + If you concatenate all the token texts, and then join them with newlines, + you should have your original `source` back, with two differences: + trailing white space is not preserved, and a final line with no newline + is indistinguishable from a final line with a newline. 
+ + """ + + ws_tokens = {token.INDENT, token.DEDENT, token.NEWLINE, tokenize.NL} + line: List[Tuple[str, str]] = [] + col = 0 + + source = source.expandtabs(8).replace("\r\n", "\n") + tokgen = generate_tokens(source) + + if env.PYBEHAVIOR.soft_keywords: + match_case_lines = MatchCaseFinder(source).match_case_lines + + for ttype, ttext, (sline, scol), (_, ecol), _ in _phys_tokens(tokgen): + mark_start = True + for part in re.split("(\n)", ttext): + if part == "\n": + yield line + line = [] + col = 0 + mark_end = False + elif part == "": + mark_end = False + elif ttype in ws_tokens: + mark_end = False + else: + if mark_start and scol > col: + line.append(("ws", " " * (scol - col))) + mark_start = False + tok_class = tokenize.tok_name.get(ttype, "xx").lower()[:3] + if ttype == token.NAME: + if keyword.iskeyword(ttext): + # Hard keywords are always keywords. + tok_class = "key" + elif sys.version_info >= (3, 10): # PYVERSIONS + # Need the version_info check to keep mypy from borking + # on issoftkeyword here. + if env.PYBEHAVIOR.soft_keywords and keyword.issoftkeyword(ttext): + # Soft keywords appear at the start of the line, + # on lines that start match or case statements. + if len(line) == 0: + is_start_of_line = True + elif (len(line) == 1) and line[0][0] == "ws": + is_start_of_line = True + else: + is_start_of_line = False + if is_start_of_line and sline in match_case_lines: + tok_class = "key" + line.append((tok_class, part)) + mark_end = True + scol = 0 + if mark_end: + col = ecol + + if line: + yield line + + +class CachedTokenizer: + """A one-element cache around tokenize.generate_tokens. + + When reporting, coverage.py tokenizes files twice, once to find the + structure of the file, and once to syntax-color it. Tokenizing is + expensive, and easily cached. + + This is a one-element cache so that our twice-in-a-row tokenizing doesn't + actually tokenize twice. + + """ + def __init__(self) -> None: + self.last_text: Optional[str] = None + self.last_tokens: List[tokenize.TokenInfo] = [] + + def generate_tokens(self, text: str) -> TokenInfos: + """A stand-in for `tokenize.generate_tokens`.""" + if text != self.last_text: + self.last_text = text + readline = io.StringIO(text).readline + try: + self.last_tokens = list(tokenize.generate_tokens(readline)) + except: + self.last_text = None + raise + return self.last_tokens + +# Create our generate_tokens cache as a callable replacement function. +generate_tokens = CachedTokenizer().generate_tokens + + +def source_encoding(source: bytes) -> str: + """Determine the encoding for `source`, according to PEP 263. + + `source` is a byte string: the text of the program. + + Returns a string, the name of the encoding. + + """ + readline = iter(source.splitlines(True)).__next__ + return tokenize.detect_encoding(readline)[0] diff --git a/venv/lib/python3.10/site-packages/coverage/plugin.py b/venv/lib/python3.10/site-packages/coverage/plugin.py new file mode 100644 index 0000000..67dcfbf --- /dev/null +++ b/venv/lib/python3.10/site-packages/coverage/plugin.py @@ -0,0 +1,553 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +""" +.. versionadded:: 4.0 + +Plug-in interfaces for coverage.py. + +Coverage.py supports a few different kinds of plug-ins that change its +behavior: + +* File tracers implement tracing of non-Python file types. + +* Configurers add custom configuration, using Python code to change the + configuration. 
+ +* Dynamic context switchers decide when the dynamic context has changed, for + example, to record what test function produced the coverage. + +To write a coverage.py plug-in, create a module with a subclass of +:class:`~coverage.CoveragePlugin`. You will override methods in your class to +participate in various aspects of coverage.py's processing. +Different types of plug-ins have to override different methods. + +Any plug-in can optionally implement :meth:`~coverage.CoveragePlugin.sys_info` +to provide debugging information about their operation. + +Your module must also contain a ``coverage_init`` function that registers an +instance of your plug-in class:: + + import coverage + + class MyPlugin(coverage.CoveragePlugin): + ... + + def coverage_init(reg, options): + reg.add_file_tracer(MyPlugin()) + +You use the `reg` parameter passed to your ``coverage_init`` function to +register your plug-in object. The registration method you call depends on +what kind of plug-in it is. + +If your plug-in takes options, the `options` parameter is a dictionary of your +plug-in's options from the coverage.py configuration file. Use them however +you want to configure your object before registering it. + +Coverage.py will store its own information on your plug-in object, using +attributes whose names start with ``_coverage_``. Don't be startled. + +.. warning:: + Plug-ins are imported by coverage.py before it begins measuring code. + If you write a plugin in your own project, it might import your product + code before coverage.py can start measuring. This can result in your + own code being reported as missing. + + One solution is to put your plugins in your project tree, but not in + your importable Python package. + + +.. _file_tracer_plugins: + +File Tracers +============ + +File tracers implement measurement support for non-Python files. File tracers +implement the :meth:`~coverage.CoveragePlugin.file_tracer` method to claim +files and the :meth:`~coverage.CoveragePlugin.file_reporter` method to report +on those files. + +In your ``coverage_init`` function, use the ``add_file_tracer`` method to +register your file tracer. + + +.. _configurer_plugins: + +Configurers +=========== + +.. versionadded:: 4.5 + +Configurers modify the configuration of coverage.py during start-up. +Configurers implement the :meth:`~coverage.CoveragePlugin.configure` method to +change the configuration. + +In your ``coverage_init`` function, use the ``add_configurer`` method to +register your configurer. + + +.. _dynamic_context_plugins: + +Dynamic Context Switchers +========================= + +.. versionadded:: 5.0 + +Dynamic context switcher plugins implement the +:meth:`~coverage.CoveragePlugin.dynamic_context` method to dynamically compute +the context label for each measured frame. + +Computed context labels are useful when you want to group measured data without +modifying the source code. + +For example, you could write a plugin that checks `frame.f_code` to inspect +the currently executed method, and set the context label to a fully qualified +method name if it's an instance method of `unittest.TestCase` and the method +name starts with 'test'. Such a plugin would provide basic coverage grouping +by test and could be used with test runners that have no built-in coveragepy +support. + +In your ``coverage_init`` function, use the ``add_dynamic_context`` method to +register your dynamic context switcher. 
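+
+A sketch of such a switcher (the class name and the test-detection rule here
+are illustrative)::
+
+    import coverage
+
+    class TestContextSwitcher(coverage.CoveragePlugin):
+        def dynamic_context(self, frame):
+            if frame.f_code.co_name.startswith("test"):
+                return frame.f_code.co_name
+            return None
+
+    def coverage_init(reg, options):
+        reg.add_dynamic_context(TestContextSwitcher())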
+ +""" + +from __future__ import annotations + +import functools + +from types import FrameType +from typing import Any, Dict, Iterable, Optional, Set, Tuple, Union + +from coverage import files +from coverage.misc import _needs_to_implement +from coverage.types import TArc, TConfigurable, TLineNo, TSourceTokenLines + + +class CoveragePlugin: + """Base class for coverage.py plug-ins.""" + + _coverage_plugin_name: str + _coverage_enabled: bool + + def file_tracer(self, filename: str) -> Optional[FileTracer]: # pylint: disable=unused-argument + """Get a :class:`FileTracer` object for a file. + + Plug-in type: file tracer. + + Every Python source file is offered to your plug-in to give it a chance + to take responsibility for tracing the file. If your plug-in can + handle the file, it should return a :class:`FileTracer` object. + Otherwise return None. + + There is no way to register your plug-in for particular files. + Instead, this method is invoked for all files as they are executed, + and the plug-in decides whether it can trace the file or not. + Be prepared for `filename` to refer to all kinds of files that have + nothing to do with your plug-in. + + The file name will be a Python file being executed. There are two + broad categories of behavior for a plug-in, depending on the kind of + files your plug-in supports: + + * Static file names: each of your original source files has been + converted into a distinct Python file. Your plug-in is invoked with + the Python file name, and it maps it back to its original source + file. + + * Dynamic file names: all of your source files are executed by the same + Python file. In this case, your plug-in implements + :meth:`FileTracer.dynamic_source_filename` to provide the actual + source file for each execution frame. + + `filename` is a string, the path to the file being considered. This is + the absolute real path to the file. If you are comparing to other + paths, be sure to take this into account. + + Returns a :class:`FileTracer` object to use to trace `filename`, or + None if this plug-in cannot trace this file. + + """ + return None + + def file_reporter( + self, + filename: str, # pylint: disable=unused-argument + ) -> Union[FileReporter, str]: # str should be Literal["python"] + """Get the :class:`FileReporter` class to use for a file. + + Plug-in type: file tracer. + + This will only be invoked if `filename` returns non-None from + :meth:`file_tracer`. It's an error to return None from this method. + + Returns a :class:`FileReporter` object to use to report on `filename`, + or the string `"python"` to have coverage.py treat the file as Python. + + """ + _needs_to_implement(self, "file_reporter") + + def dynamic_context( + self, + frame: FrameType, # pylint: disable=unused-argument + ) -> Optional[str]: + """Get the dynamically computed context label for `frame`. + + Plug-in type: dynamic context. + + This method is invoked for each frame when outside of a dynamic + context, to see if a new dynamic context should be started. If it + returns a string, a new context label is set for this and deeper + frames. The dynamic context ends when this frame returns. + + Returns a string to start a new dynamic context, or None if no new + context should be started. + + """ + return None + + def find_executable_files( + self, + src_dir: str, # pylint: disable=unused-argument + ) -> Iterable[str]: + """Yield all of the executable files in `src_dir`, recursively. + + Plug-in type: file tracer. 
+ + Executability is a plug-in-specific property, but generally means files + which would have been considered for coverage analysis, had they been + included automatically. + + Returns or yields a sequence of strings, the paths to files that could + have been executed, including files that had been executed. + + """ + return [] + + def configure(self, config: TConfigurable) -> None: + """Modify the configuration of coverage.py. + + Plug-in type: configurer. + + This method is called during coverage.py start-up, to give your plug-in + a chance to change the configuration. The `config` parameter is an + object with :meth:`~coverage.Coverage.get_option` and + :meth:`~coverage.Coverage.set_option` methods. Do not call any other + methods on the `config` object. + + """ + pass + + def sys_info(self) -> Iterable[Tuple[str, Any]]: + """Get a list of information useful for debugging. + + Plug-in type: any. + + This method will be invoked for ``--debug=sys``. Your + plug-in can return any information it wants to be displayed. + + Returns a list of pairs: `[(name, value), ...]`. + + """ + return [] + + +class CoveragePluginBase: + """Plugins produce specialized objects, which point back to the original plugin.""" + _coverage_plugin: CoveragePlugin + + +class FileTracer(CoveragePluginBase): + """Support needed for files during the execution phase. + + File tracer plug-ins implement subclasses of FileTracer to return from + their :meth:`~CoveragePlugin.file_tracer` method. + + You may construct this object from :meth:`CoveragePlugin.file_tracer` any + way you like. A natural choice would be to pass the file name given to + `file_tracer`. + + `FileTracer` objects should only be created in the + :meth:`CoveragePlugin.file_tracer` method. + + See :ref:`howitworks` for details of the different coverage.py phases. + + """ + + def source_filename(self) -> str: + """The source file name for this file. + + This may be any file name you like. A key responsibility of a plug-in + is to own the mapping from Python execution back to whatever source + file name was originally the source of the code. + + See :meth:`CoveragePlugin.file_tracer` for details about static and + dynamic file names. + + Returns the file name to credit with this execution. + + """ + _needs_to_implement(self, "source_filename") + + def has_dynamic_source_filename(self) -> bool: + """Does this FileTracer have dynamic source file names? + + FileTracers can provide dynamically determined file names by + implementing :meth:`dynamic_source_filename`. Invoking that function + is expensive. To determine whether to invoke it, coverage.py uses the + result of this function to know if it needs to bother invoking + :meth:`dynamic_source_filename`. + + See :meth:`CoveragePlugin.file_tracer` for details about static and + dynamic file names. + + Returns True if :meth:`dynamic_source_filename` should be called to get + dynamic source file names. + + """ + return False + + def dynamic_source_filename( + self, + filename: str, # pylint: disable=unused-argument + frame: FrameType, # pylint: disable=unused-argument + ) -> Optional[str]: + """Get a dynamically computed source file name. + + Some plug-ins need to compute the source file name dynamically for each + frame. + + This function will not be invoked if + :meth:`has_dynamic_source_filename` returns False. + + Returns the source file name for this frame, or None if this frame + shouldn't be measured. 
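+
+        A sketch for a templating plug-in (the ``template`` local and its
+        ``filename`` attribute are hypothetical)::
+
+            def dynamic_source_filename(self, filename, frame):
+                template = frame.f_locals.get("template")
+                if template is None:
+                    return None   # don't measure this frame
+                return template.filename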
+ + """ + return None + + def line_number_range(self, frame: FrameType) -> Tuple[TLineNo, TLineNo]: + """Get the range of source line numbers for a given a call frame. + + The call frame is examined, and the source line number in the original + file is returned. The return value is a pair of numbers, the starting + line number and the ending line number, both inclusive. For example, + returning (5, 7) means that lines 5, 6, and 7 should be considered + executed. + + This function might decide that the frame doesn't indicate any lines + from the source file were executed. Return (-1, -1) in this case to + tell coverage.py that no lines should be recorded for this frame. + + """ + lineno = frame.f_lineno + return lineno, lineno + + +@functools.total_ordering +class FileReporter(CoveragePluginBase): + """Support needed for files during the analysis and reporting phases. + + File tracer plug-ins implement a subclass of `FileReporter`, and return + instances from their :meth:`CoveragePlugin.file_reporter` method. + + There are many methods here, but only :meth:`lines` is required, to provide + the set of executable lines in the file. + + See :ref:`howitworks` for details of the different coverage.py phases. + + """ + + def __init__(self, filename: str) -> None: + """Simple initialization of a `FileReporter`. + + The `filename` argument is the path to the file being reported. This + will be available as the `.filename` attribute on the object. Other + method implementations on this base class rely on this attribute. + + """ + self.filename = filename + + def __repr__(self) -> str: + return "<{0.__class__.__name__} filename={0.filename!r}>".format(self) + + def relative_filename(self) -> str: + """Get the relative file name for this file. + + This file path will be displayed in reports. The default + implementation will supply the actual project-relative file path. You + only need to supply this method if you have an unusual syntax for file + paths. + + """ + return files.relative_filename(self.filename) + + def source(self) -> str: + """Get the source for the file. + + Returns a Unicode string. + + The base implementation simply reads the `self.filename` file and + decodes it as UTF-8. Override this method if your file isn't readable + as a text file, or if you need other encoding support. + + """ + with open(self.filename, encoding="utf-8") as f: + return f.read() + + def lines(self) -> Set[TLineNo]: + """Get the executable lines in this file. + + Your plug-in must determine which lines in the file were possibly + executable. This method returns a set of those line numbers. + + Returns a set of line numbers. + + """ + _needs_to_implement(self, "lines") + + def excluded_lines(self) -> Set[TLineNo]: + """Get the excluded executable lines in this file. + + Your plug-in can use any method it likes to allow the user to exclude + executable lines from consideration. + + Returns a set of line numbers. + + The base implementation returns the empty set. + + """ + return set() + + def translate_lines(self, lines: Iterable[TLineNo]) -> Set[TLineNo]: + """Translate recorded lines into reported lines. + + Some file formats will want to report lines slightly differently than + they are recorded. For example, Python records the last line of a + multi-line statement, but reports are nicer if they mention the first + line. + + Your plug-in can optionally define this method to perform these kinds + of adjustment. + + `lines` is a sequence of integers, the recorded line numbers. 
+ + Returns a set of integers, the adjusted line numbers. + + The base implementation returns the numbers unchanged. + + """ + return set(lines) + + def arcs(self) -> Set[TArc]: + """Get the executable arcs in this file. + + To support branch coverage, your plug-in needs to be able to indicate + possible execution paths, as a set of line number pairs. Each pair is + a `(prev, next)` pair indicating that execution can transition from the + `prev` line number to the `next` line number. + + Returns a set of pairs of line numbers. The default implementation + returns an empty set. + + """ + return set() + + def no_branch_lines(self) -> Set[TLineNo]: + """Get the lines excused from branch coverage in this file. + + Your plug-in can use any method it likes to allow the user to exclude + lines from consideration of branch coverage. + + Returns a set of line numbers. + + The base implementation returns the empty set. + + """ + return set() + + def translate_arcs(self, arcs: Iterable[TArc]) -> Set[TArc]: + """Translate recorded arcs into reported arcs. + + Similar to :meth:`translate_lines`, but for arcs. `arcs` is a set of + line number pairs. + + Returns a set of line number pairs. + + The default implementation returns `arcs` unchanged. + + """ + return set(arcs) + + def exit_counts(self) -> Dict[TLineNo, int]: + """Get a count of exits from that each line. + + To determine which lines are branches, coverage.py looks for lines that + have more than one exit. This function creates a dict mapping each + executable line number to a count of how many exits it has. + + To be honest, this feels wrong, and should be refactored. Let me know + if you attempt to implement this method in your plug-in... + + """ + return {} + + def missing_arc_description( + self, + start: TLineNo, + end: TLineNo, + executed_arcs: Optional[Iterable[TArc]] = None, # pylint: disable=unused-argument + ) -> str: + """Provide an English sentence describing a missing arc. + + The `start` and `end` arguments are the line numbers of the missing + arc. Negative numbers indicate entering or exiting code objects. + + The `executed_arcs` argument is a set of line number pairs, the arcs + that were executed in this file. + + By default, this simply returns the string "Line {start} didn't jump + to {end}". + + """ + return f"Line {start} didn't jump to line {end}" + + def source_token_lines(self) -> TSourceTokenLines: + """Generate a series of tokenized lines, one for each line in `source`. + + These tokens are used for syntax-colored reports. + + Each line is a list of pairs, each pair is a token:: + + [("key", "def"), ("ws", " "), ("nam", "hello"), ("op", "("), ... ] + + Each pair has a token class, and the token text. The token classes + are: + + * ``"com"``: a comment + * ``"key"``: a keyword + * ``"nam"``: a name, or identifier + * ``"num"``: a number + * ``"op"``: an operator + * ``"str"``: a string literal + * ``"ws"``: some white space + * ``"txt"``: some other kind of text + + If you concatenate all the token texts, and then join them with + newlines, you should have your original source back. + + The default implementation simply returns each line tagged as + ``"txt"``. + + """ + for line in self.source().splitlines(): + yield [("txt", line)] + + def __eq__(self, other: Any) -> bool: + return isinstance(other, FileReporter) and self.filename == other.filename + + def __lt__(self, other: Any) -> bool: + return isinstance(other, FileReporter) and self.filename < other.filename + + # This object doesn't need to be hashed. 
+ __hash__ = None # type: ignore[assignment] diff --git a/venv/lib/python3.10/site-packages/coverage/plugin_support.py b/venv/lib/python3.10/site-packages/coverage/plugin_support.py new file mode 100644 index 0000000..c99fb5e --- /dev/null +++ b/venv/lib/python3.10/site-packages/coverage/plugin_support.py @@ -0,0 +1,297 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""Support for plugins.""" + +from __future__ import annotations + +import os +import os.path +import sys + +from types import FrameType +from typing import Any, Dict, Iterable, Iterator, List, Optional, Set, Tuple, Union + +from coverage.exceptions import PluginError +from coverage.misc import isolate_module +from coverage.plugin import CoveragePlugin, FileTracer, FileReporter +from coverage.types import ( + TArc, TConfigurable, TDebugCtl, TLineNo, TPluginConfig, TSourceTokenLines, +) + +os = isolate_module(os) + + +class Plugins: + """The currently loaded collection of coverage.py plugins.""" + + def __init__(self) -> None: + self.order: List[CoveragePlugin] = [] + self.names: Dict[str, CoveragePlugin] = {} + self.file_tracers: List[CoveragePlugin] = [] + self.configurers: List[CoveragePlugin] = [] + self.context_switchers: List[CoveragePlugin] = [] + + self.current_module: Optional[str] = None + self.debug: Optional[TDebugCtl] + + @classmethod + def load_plugins( + cls, + modules: Iterable[str], + config: TPluginConfig, + debug: Optional[TDebugCtl] = None, + ) -> Plugins: + """Load plugins from `modules`. + + Returns a Plugins object with the loaded and configured plugins. + + """ + plugins = cls() + plugins.debug = debug + + for module in modules: + plugins.current_module = module + __import__(module) + mod = sys.modules[module] + + coverage_init = getattr(mod, "coverage_init", None) + if not coverage_init: + raise PluginError( + f"Plugin module {module!r} didn't define a coverage_init function" + ) + + options = config.get_plugin_options(module) + coverage_init(plugins, options) + + plugins.current_module = None + return plugins + + def add_file_tracer(self, plugin: CoveragePlugin) -> None: + """Add a file tracer plugin. + + `plugin` is an instance of a third-party plugin class. It must + implement the :meth:`CoveragePlugin.file_tracer` method. + + """ + self._add_plugin(plugin, self.file_tracers) + + def add_configurer(self, plugin: CoveragePlugin) -> None: + """Add a configuring plugin. + + `plugin` is an instance of a third-party plugin class. It must + implement the :meth:`CoveragePlugin.configure` method. + + """ + self._add_plugin(plugin, self.configurers) + + def add_dynamic_context(self, plugin: CoveragePlugin) -> None: + """Add a dynamic context plugin. + + `plugin` is an instance of a third-party plugin class. It must + implement the :meth:`CoveragePlugin.dynamic_context` method. + + """ + self._add_plugin(plugin, self.context_switchers) + + def add_noop(self, plugin: CoveragePlugin) -> None: + """Add a plugin that does nothing. + + This is only useful for testing the plugin support. + + """ + self._add_plugin(plugin, None) + + def _add_plugin( + self, + plugin: CoveragePlugin, + specialized: Optional[List[CoveragePlugin]], + ) -> None: + """Add a plugin object. + + `plugin` is a :class:`CoveragePlugin` instance to add. `specialized` + is a list to append the plugin to. 
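+
+        If `specialized` is None (as it is for `add_noop`), the plugin is
+        still recorded in `order` and `names`, but joins no specialized list.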
+ + """ + plugin_name = f"{self.current_module}.{plugin.__class__.__name__}" + if self.debug and self.debug.should("plugin"): + self.debug.write(f"Loaded plugin {self.current_module!r}: {plugin!r}") + labelled = LabelledDebug(f"plugin {self.current_module!r}", self.debug) + plugin = DebugPluginWrapper(plugin, labelled) + + plugin._coverage_plugin_name = plugin_name + plugin._coverage_enabled = True + self.order.append(plugin) + self.names[plugin_name] = plugin + if specialized is not None: + specialized.append(plugin) + + def __bool__(self) -> bool: + return bool(self.order) + + def __iter__(self) -> Iterator[CoveragePlugin]: + return iter(self.order) + + def get(self, plugin_name: str) -> CoveragePlugin: + """Return a plugin by name.""" + return self.names[plugin_name] + + +class LabelledDebug: + """A Debug writer, but with labels for prepending to the messages.""" + + def __init__(self, label: str, debug: TDebugCtl, prev_labels: Iterable[str] = ()): + self.labels = list(prev_labels) + [label] + self.debug = debug + + def add_label(self, label: str) -> LabelledDebug: + """Add a label to the writer, and return a new `LabelledDebug`.""" + return LabelledDebug(label, self.debug, self.labels) + + def message_prefix(self) -> str: + """The prefix to use on messages, combining the labels.""" + prefixes = self.labels + [""] + return ":\n".join(" "*i+label for i, label in enumerate(prefixes)) + + def write(self, message: str) -> None: + """Write `message`, but with the labels prepended.""" + self.debug.write(f"{self.message_prefix()}{message}") + + +class DebugPluginWrapper(CoveragePlugin): + """Wrap a plugin, and use debug to report on what it's doing.""" + + def __init__(self, plugin: CoveragePlugin, debug: LabelledDebug) -> None: + super().__init__() + self.plugin = plugin + self.debug = debug + + def file_tracer(self, filename: str) -> Optional[FileTracer]: + tracer = self.plugin.file_tracer(filename) + self.debug.write(f"file_tracer({filename!r}) --> {tracer!r}") + if tracer: + debug = self.debug.add_label(f"file {filename!r}") + tracer = DebugFileTracerWrapper(tracer, debug) + return tracer + + def file_reporter(self, filename: str) -> Union[FileReporter, str]: + reporter = self.plugin.file_reporter(filename) + assert isinstance(reporter, FileReporter) + self.debug.write(f"file_reporter({filename!r}) --> {reporter!r}") + if reporter: + debug = self.debug.add_label(f"file {filename!r}") + reporter = DebugFileReporterWrapper(filename, reporter, debug) + return reporter + + def dynamic_context(self, frame: FrameType) -> Optional[str]: + context = self.plugin.dynamic_context(frame) + self.debug.write(f"dynamic_context({frame!r}) --> {context!r}") + return context + + def find_executable_files(self, src_dir: str) -> Iterable[str]: + executable_files = self.plugin.find_executable_files(src_dir) + self.debug.write(f"find_executable_files({src_dir!r}) --> {executable_files!r}") + return executable_files + + def configure(self, config: TConfigurable) -> None: + self.debug.write(f"configure({config!r})") + self.plugin.configure(config) + + def sys_info(self) -> Iterable[Tuple[str, Any]]: + return self.plugin.sys_info() + + +class DebugFileTracerWrapper(FileTracer): + """A debugging `FileTracer`.""" + + def __init__(self, tracer: FileTracer, debug: LabelledDebug) -> None: + self.tracer = tracer + self.debug = debug + + def _show_frame(self, frame: FrameType) -> str: + """A short string identifying a frame, for debug messages.""" + return "%s@%d" % ( + os.path.basename(frame.f_code.co_filename), + 
frame.f_lineno, + ) + + def source_filename(self) -> str: + sfilename = self.tracer.source_filename() + self.debug.write(f"source_filename() --> {sfilename!r}") + return sfilename + + def has_dynamic_source_filename(self) -> bool: + has = self.tracer.has_dynamic_source_filename() + self.debug.write(f"has_dynamic_source_filename() --> {has!r}") + return has + + def dynamic_source_filename(self, filename: str, frame: FrameType) -> Optional[str]: + dyn = self.tracer.dynamic_source_filename(filename, frame) + self.debug.write("dynamic_source_filename({!r}, {}) --> {!r}".format( + filename, self._show_frame(frame), dyn, + )) + return dyn + + def line_number_range(self, frame: FrameType) -> Tuple[TLineNo, TLineNo]: + pair = self.tracer.line_number_range(frame) + self.debug.write(f"line_number_range({self._show_frame(frame)}) --> {pair!r}") + return pair + + +class DebugFileReporterWrapper(FileReporter): + """A debugging `FileReporter`.""" + + def __init__(self, filename: str, reporter: FileReporter, debug: LabelledDebug) -> None: + super().__init__(filename) + self.reporter = reporter + self.debug = debug + + def relative_filename(self) -> str: + ret = self.reporter.relative_filename() + self.debug.write(f"relative_filename() --> {ret!r}") + return ret + + def lines(self) -> Set[TLineNo]: + ret = self.reporter.lines() + self.debug.write(f"lines() --> {ret!r}") + return ret + + def excluded_lines(self) -> Set[TLineNo]: + ret = self.reporter.excluded_lines() + self.debug.write(f"excluded_lines() --> {ret!r}") + return ret + + def translate_lines(self, lines: Iterable[TLineNo]) -> Set[TLineNo]: + ret = self.reporter.translate_lines(lines) + self.debug.write(f"translate_lines({lines!r}) --> {ret!r}") + return ret + + def translate_arcs(self, arcs: Iterable[TArc]) -> Set[TArc]: + ret = self.reporter.translate_arcs(arcs) + self.debug.write(f"translate_arcs({arcs!r}) --> {ret!r}") + return ret + + def no_branch_lines(self) -> Set[TLineNo]: + ret = self.reporter.no_branch_lines() + self.debug.write(f"no_branch_lines() --> {ret!r}") + return ret + + def exit_counts(self) -> Dict[TLineNo, int]: + ret = self.reporter.exit_counts() + self.debug.write(f"exit_counts() --> {ret!r}") + return ret + + def arcs(self) -> Set[TArc]: + ret = self.reporter.arcs() + self.debug.write(f"arcs() --> {ret!r}") + return ret + + def source(self) -> str: + ret = self.reporter.source() + self.debug.write("source() --> %d chars" % (len(ret),)) + return ret + + def source_token_lines(self) -> TSourceTokenLines: + ret = list(self.reporter.source_token_lines()) + self.debug.write("source_token_lines() --> %d tokens" % (len(ret),)) + return ret diff --git a/venv/lib/python3.10/site-packages/coverage/py.typed b/venv/lib/python3.10/site-packages/coverage/py.typed new file mode 100644 index 0000000..bacd23a --- /dev/null +++ b/venv/lib/python3.10/site-packages/coverage/py.typed @@ -0,0 +1 @@ +# Marker file for PEP 561 to indicate that this package has type hints. 
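To make the registration flow above concrete: a plug-in is nothing more than a module with a module-level coverage_init function. Coverage.py imports the module (see Plugins.load_plugins), calls coverage_init with the Plugins collection and the plugin's own option section, and that function registers a CoveragePlugin instance via add_file_tracer, add_configurer, or add_dynamic_context. Here is a minimal sketch of a configuring plug-in, assuming a hypothetical module named extra_excluder listed under plugins in the user's [run] section; the module name and regex are illustrative, not part of this diff:

    # extra_excluder.py -- hypothetical plug-in module
    from coverage.plugin import CoveragePlugin


    class ExtraExcluder(CoveragePlugin):
        """Append one extra exclusion regex to the run's configuration."""

        def configure(self, config):
            # get_option/set_option are the TConfigurable protocol methods.
            exclude = config.get_option("report:exclude_lines")
            exclude.append(r"#\s*never-measured")
            config.set_option("report:exclude_lines", exclude)


    def coverage_init(reg, options):
        # `reg` is the Plugins collection; `options` holds the
        # [extra_excluder] section of the user's configuration.
        reg.add_configurer(ExtraExcluder())

When the user also sets debug = plugin, the registered instance is wrapped in DebugPluginWrapper above, which is how every configure() and file_tracer() call ends up in the labelled debug output.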
diff --git a/venv/lib/python3.10/site-packages/coverage/python.py b/venv/lib/python3.10/site-packages/coverage/python.py new file mode 100644 index 0000000..3deb681 --- /dev/null +++ b/venv/lib/python3.10/site-packages/coverage/python.py @@ -0,0 +1,256 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""Python source expertise for coverage.py""" + +from __future__ import annotations + +import os.path +import types +import zipimport + +from typing import Dict, Iterable, Optional, Set, TYPE_CHECKING + +from coverage import env +from coverage.exceptions import CoverageException, NoSource +from coverage.files import canonical_filename, relative_filename, zip_location +from coverage.misc import expensive, isolate_module, join_regex +from coverage.parser import PythonParser +from coverage.phystokens import source_token_lines, source_encoding +from coverage.plugin import FileReporter +from coverage.types import TArc, TLineNo, TMorf, TSourceTokenLines + +if TYPE_CHECKING: + from coverage import Coverage + +os = isolate_module(os) + + +def read_python_source(filename: str) -> bytes: + """Read the Python source text from `filename`. + + Returns bytes. + + """ + with open(filename, "rb") as f: + source = f.read() + + return source.replace(b"\r\n", b"\n").replace(b"\r", b"\n") + + +def get_python_source(filename: str) -> str: + """Return the source code, as unicode.""" + base, ext = os.path.splitext(filename) + if ext == ".py" and env.WINDOWS: + exts = [".py", ".pyw"] + else: + exts = [ext] + + source_bytes: Optional[bytes] + for ext in exts: + try_filename = base + ext + if os.path.exists(try_filename): + # A regular text file: open it. + source_bytes = read_python_source(try_filename) + break + + # Maybe it's in a zip file? + source_bytes = get_zip_bytes(try_filename) + if source_bytes is not None: + break + else: + # Couldn't find source. + raise NoSource(f"No source for code: '{filename}'.") + + # Replace \f because of http://bugs.python.org/issue19035 + source_bytes = source_bytes.replace(b"\f", b" ") + source = source_bytes.decode(source_encoding(source_bytes), "replace") + + # Python code should always end with a line with a newline. + if source and source[-1] != "\n": + source += "\n" + + return source + + +def get_zip_bytes(filename: str) -> Optional[bytes]: + """Get data from `filename` if it is a zip file path. + + Returns the bytestring data read from the zip file, or None if no zip file + could be found or `filename` isn't in it. The data returned will be + an empty string if the file is empty. + + """ + zipfile_inner = zip_location(filename) + if zipfile_inner is not None: + zipfile, inner = zipfile_inner + try: + zi = zipimport.zipimporter(zipfile) + except zipimport.ZipImportError: + return None + try: + data = zi.get_data(inner) + except OSError: + return None + return data + return None + + +def source_for_file(filename: str) -> str: + """Return the source filename for `filename`. + + Given a file name being traced, return the best guess as to the source + file to attribute it to. + + """ + if filename.endswith(".py"): + # .py files are themselves source files. + return filename + + elif filename.endswith((".pyc", ".pyo")): + # Bytecode files probably have source files near them. + py_filename = filename[:-1] + if os.path.exists(py_filename): + # Found a .py file, use that. + return py_filename + if env.WINDOWS: + # On Windows, it could be a .pyw file. 
+            pyw_filename = py_filename + "w"
+            if os.path.exists(pyw_filename):
+                return pyw_filename
+        # Didn't find source, but it's probably the .py file we want.
+        return py_filename
+
+    # No idea, just use the file name as-is.
+    return filename
+
+
+def source_for_morf(morf: TMorf) -> str:
+    """Get the source filename for the module-or-file `morf`."""
+    if hasattr(morf, "__file__") and morf.__file__:
+        filename = morf.__file__
+    elif isinstance(morf, types.ModuleType):
+        # A module should have had .__file__, otherwise we can't use it.
+        # This could be a PEP-420 namespace package.
+        raise CoverageException(f"Module {morf} has no file")
+    else:
+        filename = morf
+
+    filename = source_for_file(filename)
+    return filename
+
+
+class PythonFileReporter(FileReporter):
+    """Report support for a Python file."""
+
+    def __init__(self, morf: TMorf, coverage: Optional[Coverage] = None) -> None:
+        self.coverage = coverage
+
+        filename = source_for_morf(morf)
+
+        fname = filename
+        canonicalize = True
+        if self.coverage is not None:
+            if self.coverage.config.relative_files:
+                canonicalize = False
+        if canonicalize:
+            fname = canonical_filename(filename)
+        super().__init__(fname)
+
+        if hasattr(morf, "__name__"):
+            name = morf.__name__.replace(".", os.sep)
+            if os.path.basename(filename).startswith("__init__."):
+                name += os.sep + "__init__"
+            name += ".py"
+        else:
+            name = relative_filename(filename)
+        self.relname = name
+
+        self._source: Optional[str] = None
+        self._parser: Optional[PythonParser] = None
+        self._excluded = None
+
+    def __repr__(self) -> str:
+        return f"<PythonFileReporter {self.filename!r}>"
+
+    def relative_filename(self) -> str:
+        return self.relname
+
+    @property
+    def parser(self) -> PythonParser:
+        """Lazily create a :class:`PythonParser`."""
+        assert self.coverage is not None
+        if self._parser is None:
+            self._parser = PythonParser(
+                filename=self.filename,
+                exclude=self.coverage._exclude_regex("exclude"),
+            )
+            self._parser.parse_source()
+        return self._parser
+
+    def lines(self) -> Set[TLineNo]:
+        """Return the line numbers of statements in the file."""
+        return self.parser.statements
+
+    def excluded_lines(self) -> Set[TLineNo]:
+        """Return the line numbers of excluded statements in the file."""
+        return self.parser.excluded
+
+    def translate_lines(self, lines: Iterable[TLineNo]) -> Set[TLineNo]:
+        return self.parser.translate_lines(lines)
+
+    def translate_arcs(self, arcs: Iterable[TArc]) -> Set[TArc]:
+        return self.parser.translate_arcs(arcs)
+
+    @expensive
+    def no_branch_lines(self) -> Set[TLineNo]:
+        assert self.coverage is not None
+        no_branch = self.parser.lines_matching(
+            join_regex(self.coverage.config.partial_list),
+            join_regex(self.coverage.config.partial_always_list),
+        )
+        return no_branch
+
+    @expensive
+    def arcs(self) -> Set[TArc]:
+        return self.parser.arcs()
+
+    @expensive
+    def exit_counts(self) -> Dict[TLineNo, int]:
+        return self.parser.exit_counts()
+
+    def missing_arc_description(
+        self,
+        start: TLineNo,
+        end: TLineNo,
+        executed_arcs: Optional[Iterable[TArc]] = None,
+    ) -> str:
+        return self.parser.missing_arc_description(start, end, executed_arcs)
+
+    def source(self) -> str:
+        if self._source is None:
+            self._source = get_python_source(self.filename)
+        return self._source
+
+    def should_be_python(self) -> bool:
+        """Does it seem like this file should contain Python?
+
+        This is used to decide if a file reported as part of the execution of
+        a program was really likely to have contained Python in the first
+        place.
+
+        """
+        # Get the file extension.
+ _, ext = os.path.splitext(self.filename) + + # Anything named *.py* should be Python. + if ext.startswith(".py"): + return True + # A file with no extension should be Python. + if not ext: + return True + # Everything else is probably not Python. + return False + + def source_token_lines(self) -> TSourceTokenLines: + return source_token_lines(self.source()) diff --git a/venv/lib/python3.10/site-packages/coverage/pytracer.py b/venv/lib/python3.10/site-packages/coverage/pytracer.py new file mode 100644 index 0000000..fe24507 --- /dev/null +++ b/venv/lib/python3.10/site-packages/coverage/pytracer.py @@ -0,0 +1,336 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""Raw data collector for coverage.py.""" + +from __future__ import annotations + +import atexit +import dis +import sys +import threading + +from types import FrameType, ModuleType +from typing import Any, Callable, Dict, List, Optional, Set, Tuple, cast + +from coverage import env +from coverage.types import ( + TArc, TFileDisposition, TLineNo, TTraceData, TTraceFileData, TTraceFn, + TTracer, TWarnFn, +) + +# We need the YIELD_VALUE opcode below, in a comparison-friendly form. +RESUME = dis.opmap.get("RESUME") +RETURN_VALUE = dis.opmap["RETURN_VALUE"] +if RESUME is None: + YIELD_VALUE = dis.opmap["YIELD_VALUE"] + YIELD_FROM = dis.opmap["YIELD_FROM"] + YIELD_FROM_OFFSET = 0 if env.PYPY else 2 + +# When running meta-coverage, this file can try to trace itself, which confuses +# everything. Don't trace ourselves. + +THIS_FILE = __file__.rstrip("co") + +class PyTracer(TTracer): + """Python implementation of the raw data tracer.""" + + # Because of poor implementations of trace-function-manipulating tools, + # the Python trace function must be kept very simple. In particular, there + # must be only one function ever set as the trace function, both through + # sys.settrace, and as the return value from the trace function. Put + # another way, the trace function must always return itself. It cannot + # swap in other functions, or return None to avoid tracing a particular + # frame. + # + # The trace manipulator that introduced this restriction is DecoratorTools, + # which sets a trace function, and then later restores the pre-existing one + # by calling sys.settrace with a function it found in the current frame. + # + # Systems that use DecoratorTools (or similar trace manipulations) must use + # PyTracer to get accurate results. The command-line --timid argument is + # used to force the use of this tracer. + + def __init__(self) -> None: + # Attributes set from the collector: + self.data: TTraceData + self.trace_arcs = False + self.should_trace: Callable[[str, FrameType], TFileDisposition] + self.should_trace_cache: Dict[str, Optional[TFileDisposition]] + self.should_start_context: Optional[Callable[[FrameType], Optional[str]]] = None + self.switch_context: Optional[Callable[[Optional[str]], None]] = None + self.warn: TWarnFn + + # The threading module to use, if any. 
+        self.threading: Optional[ModuleType] = None
+
+        self.cur_file_data: Optional[TTraceFileData] = None
+        self.last_line: TLineNo = 0
+        self.cur_file_name: Optional[str] = None
+        self.context: Optional[str] = None
+        self.started_context = False
+
+        self.data_stack: List[Tuple[Optional[TTraceFileData], Optional[str], TLineNo, bool]] = []
+        self.thread: Optional[threading.Thread] = None
+        self.stopped = False
+        self._activity = False
+
+        self.in_atexit = False
+        # On exit, self.in_atexit = True
+        atexit.register(setattr, self, "in_atexit", True)
+
+        # Cache a bound method on the instance, so that we don't have to
+        # re-create a bound method object all the time.
+        self._cached_bound_method_trace: TTraceFn = self._trace
+
+    def __repr__(self) -> str:
+        me = id(self)
+        points = sum(len(v) for v in self.data.values())
+        files = len(self.data)
+        return f"<PyTracer at 0x{me:x}: {points} data points in {files} files>"
+
+    def log(self, marker: str, *args: Any) -> None:
+        """For hard-core logging of what this tracer is doing."""
+        with open("/tmp/debug_trace.txt", "a") as f:
+            f.write("{} {}[{}]".format(
+                marker,
+                id(self),
+                len(self.data_stack),
+            ))
+            if 0:  # if you want thread ids..
+                f.write(".{:x}.{:x}".format(  # type: ignore[unreachable]
+                    self.thread.ident,
+                    self.threading.current_thread().ident,
+                ))
+            f.write(" {}".format(" ".join(map(str, args))))
+            if 0:  # if you want callers..
+                f.write(" | ")  # type: ignore[unreachable]
+                stack = " / ".join(
+                    (fname or "???").rpartition("/")[-1]
+                    for _, fname, _, _ in self.data_stack
+                )
+                f.write(stack)
+            f.write("\n")
+
+    def _trace(
+        self,
+        frame: FrameType,
+        event: str,
+        arg: Any,  # pylint: disable=unused-argument
+        lineno: Optional[TLineNo] = None,  # pylint: disable=unused-argument
+    ) -> Optional[TTraceFn]:
+        """The trace function passed to sys.settrace."""
+
+        if THIS_FILE in frame.f_code.co_filename:
+            return None
+
+        #self.log(":", frame.f_code.co_filename, frame.f_lineno, frame.f_code.co_name + "()", event)
+
+        if (self.stopped and sys.gettrace() == self._cached_bound_method_trace):  # pylint: disable=comparison-with-callable
+            # The PyTracer.stop() method has been called, possibly by another
+            # thread, let's deactivate ourselves now.
+            if 0:
+                f = frame  # type: ignore[unreachable]
+                self.log("---\nX", f.f_code.co_filename, f.f_lineno)
+                while f:
+                    self.log(">", f.f_code.co_filename, f.f_lineno, f.f_code.co_name, f.f_trace)
+                    f = f.f_back
+            sys.settrace(None)
+            try:
+                self.cur_file_data, self.cur_file_name, self.last_line, self.started_context = (
+                    self.data_stack.pop()
+                )
+            except IndexError:
+                self.log(
+                    "Empty stack!",
+                    frame.f_code.co_filename,
+                    frame.f_lineno,
+                    frame.f_code.co_name
+                )
+            return None
+
+        # if event != "call" and frame.f_code.co_filename != self.cur_file_name:
+        #     self.log("---\n*", frame.f_code.co_filename, self.cur_file_name, frame.f_lineno)
+
+        if event == "call":
+            # Should we start a new context?
+            if self.should_start_context and self.context is None:
+                context_maybe = self.should_start_context(frame)
+                if context_maybe is not None:
+                    self.context = context_maybe
+                    started_context = True
+                    assert self.switch_context is not None
+                    self.switch_context(self.context)
+                else:
+                    started_context = False
+            else:
+                started_context = False
+            self.started_context = started_context
+
+            # Entering a new frame. Decide if we should trace in this file.
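+            # The four-tuple pushed onto data_stack just below mirrors exactly
+            # what the "return" branch and the stop path above pop back off.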
+ self._activity = True + self.data_stack.append( + ( + self.cur_file_data, + self.cur_file_name, + self.last_line, + started_context, + ) + ) + + # Improve tracing performance: when calling a function, both caller + # and callee are often within the same file. if that's the case, we + # don't have to re-check whether to trace the corresponding + # function (which is a little bit expensive since it involves + # dictionary lookups). This optimization is only correct if we + # didn't start a context. + filename = frame.f_code.co_filename + if filename != self.cur_file_name or started_context: + self.cur_file_name = filename + disp = self.should_trace_cache.get(filename) + if disp is None: + disp = self.should_trace(filename, frame) + self.should_trace_cache[filename] = disp + + self.cur_file_data = None + if disp.trace: + tracename = disp.source_filename + assert tracename is not None + if tracename not in self.data: + self.data[tracename] = set() # type: ignore[assignment] + self.cur_file_data = self.data[tracename] + else: + frame.f_trace_lines = False + elif not self.cur_file_data: + frame.f_trace_lines = False + + # The call event is really a "start frame" event, and happens for + # function calls and re-entering generators. The f_lasti field is + # -1 for calls, and a real offset for generators. Use <0 as the + # line number for calls, and the real line number for generators. + if RESUME is not None: + # The current opcode is guaranteed to be RESUME. The argument + # determines what kind of resume it is. + oparg = frame.f_code.co_code[frame.f_lasti + 1] + real_call = (oparg == 0) + else: + real_call = (getattr(frame, "f_lasti", -1) < 0) + if real_call: + self.last_line = -frame.f_code.co_firstlineno + else: + self.last_line = frame.f_lineno + + elif event == "line": + # Record an executed line. + if self.cur_file_data is not None: + flineno: TLineNo = frame.f_lineno + + if self.trace_arcs: + cast(Set[TArc], self.cur_file_data).add((self.last_line, flineno)) + else: + cast(Set[TLineNo], self.cur_file_data).add(flineno) + self.last_line = flineno + + elif event == "return": + if self.trace_arcs and self.cur_file_data: + # Record an arc leaving the function, but beware that a + # "return" event might just mean yielding from a generator. + code = frame.f_code.co_code + lasti = frame.f_lasti + if RESUME is not None: + if len(code) == lasti + 2: + # A return from the end of a code object is a real return. + real_return = True + else: + # it's a real return. + real_return = (code[lasti + 2] != RESUME) + else: + if code[lasti] == RETURN_VALUE: + real_return = True + elif code[lasti] == YIELD_VALUE: + real_return = False + elif len(code) <= lasti + YIELD_FROM_OFFSET: + real_return = True + elif code[lasti + YIELD_FROM_OFFSET] == YIELD_FROM: + real_return = False + else: + real_return = True + if real_return: + first = frame.f_code.co_firstlineno + cast(Set[TArc], self.cur_file_data).add((self.last_line, -first)) + + # Leaving this function, pop the filename stack. + self.cur_file_data, self.cur_file_name, self.last_line, self.started_context = ( + self.data_stack.pop() + ) + # Leaving a context? + if self.started_context: + assert self.switch_context is not None + self.context = None + self.switch_context(None) + return self._cached_bound_method_trace + + def start(self) -> TTraceFn: + """Start this Tracer. + + Return a Python function suitable for use with sys.settrace(). 
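+        The same cached bound method is returned on every call, so the
+        identity comparisons in _trace() and stop() can detect when another
+        tool has swapped out the trace function.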
+ + """ + self.stopped = False + if self.threading: + if self.thread is None: + self.thread = self.threading.current_thread() + else: + if self.thread.ident != self.threading.current_thread().ident: + # Re-starting from a different thread!? Don't set the trace + # function, but we are marked as running again, so maybe it + # will be ok? + #self.log("~", "starting on different threads") + return self._cached_bound_method_trace + + sys.settrace(self._cached_bound_method_trace) + return self._cached_bound_method_trace + + def stop(self) -> None: + """Stop this Tracer.""" + # Get the active tracer callback before setting the stop flag to be + # able to detect if the tracer was changed prior to stopping it. + tf = sys.gettrace() + + # Set the stop flag. The actual call to sys.settrace(None) will happen + # in the self._trace callback itself to make sure to call it from the + # right thread. + self.stopped = True + + if self.threading: + assert self.thread is not None + if self.thread.ident != self.threading.current_thread().ident: + # Called on a different thread than started us: we can't unhook + # ourselves, but we've set the flag that we should stop, so we + # won't do any more tracing. + #self.log("~", "stopping on different threads") + return + + if self.warn: + # PyPy clears the trace function before running atexit functions, + # so don't warn if we are in atexit on PyPy and the trace function + # has changed to None. + dont_warn = (env.PYPY and self.in_atexit and tf is None) + if (not dont_warn) and tf != self._cached_bound_method_trace: # pylint: disable=comparison-with-callable + self.warn( + "Trace function changed, data is likely wrong: " + + f"{tf!r} != {self._cached_bound_method_trace!r}", + slug="trace-changed", + ) + + def activity(self) -> bool: + """Has there been any activity?""" + return self._activity + + def reset_activity(self) -> None: + """Reset the activity() flag.""" + self._activity = False + + def get_stats(self) -> Optional[Dict[str, int]]: + """Return a dictionary of statistics, or None.""" + return None diff --git a/venv/lib/python3.10/site-packages/coverage/report.py b/venv/lib/python3.10/site-packages/coverage/report.py new file mode 100644 index 0000000..e1c7a07 --- /dev/null +++ b/venv/lib/python3.10/site-packages/coverage/report.py @@ -0,0 +1,281 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""Summary reporting""" + +from __future__ import annotations + +import sys + +from typing import Any, IO, Iterable, List, Optional, Tuple, TYPE_CHECKING + +from coverage.exceptions import ConfigError, NoDataError +from coverage.misc import human_sorted_items +from coverage.plugin import FileReporter +from coverage.report_core import get_analysis_to_report +from coverage.results import Analysis, Numbers +from coverage.types import TMorf + +if TYPE_CHECKING: + from coverage import Coverage + + +class SummaryReporter: + """A reporter for writing the summary report.""" + + def __init__(self, coverage: Coverage) -> None: + self.coverage = coverage + self.config = self.coverage.config + self.branches = coverage.get_data().has_arcs() + self.outfile: Optional[IO[str]] = None + self.output_format = self.config.format or "text" + if self.output_format not in {"text", "markdown", "total"}: + raise ConfigError(f"Unknown report format choice: {self.output_format!r}") + self.fr_analysis: List[Tuple[FileReporter, Analysis]] = [] + self.skipped_count = 0 + 
self.empty_count = 0 + self.total = Numbers(precision=self.config.precision) + + def write(self, line: str) -> None: + """Write a line to the output, adding a newline.""" + assert self.outfile is not None + self.outfile.write(line.rstrip()) + self.outfile.write("\n") + + def write_items(self, items: Iterable[str]) -> None: + """Write a list of strings, joined together.""" + self.write("".join(items)) + + def _report_text( + self, + header: List[str], + lines_values: List[List[Any]], + total_line: List[Any], + end_lines: List[str], + ) -> None: + """Internal method that prints report data in text format. + + `header` is a list with captions. + `lines_values` is list of lists of sortable values. + `total_line` is a list with values of the total line. + `end_lines` is a list of ending lines with information about skipped files. + + """ + # Prepare the formatting strings, header, and column sorting. + max_name = max([len(line[0]) for line in lines_values] + [5]) + 1 + max_n = max(len(total_line[header.index("Cover")]) + 2, len(" Cover")) + 1 + max_n = max([max_n] + [len(line[header.index("Cover")]) + 2 for line in lines_values]) + formats = dict( + Name="{:{name_len}}", + Stmts="{:>7}", + Miss="{:>7}", + Branch="{:>7}", + BrPart="{:>7}", + Cover="{:>{n}}", + Missing="{:>10}", + ) + header_items = [ + formats[item].format(item, name_len=max_name, n=max_n) + for item in header + ] + header_str = "".join(header_items) + rule = "-" * len(header_str) + + # Write the header + self.write(header_str) + self.write(rule) + + formats.update(dict(Cover="{:>{n}}%"), Missing=" {:9}") + for values in lines_values: + # build string with line values + line_items = [ + formats[item].format(str(value), + name_len=max_name, n=max_n-1) for item, value in zip(header, values) + ] + self.write_items(line_items) + + # Write a TOTAL line + if lines_values: + self.write(rule) + + line_items = [ + formats[item].format(str(value), + name_len=max_name, n=max_n-1) for item, value in zip(header, total_line) + ] + self.write_items(line_items) + + for end_line in end_lines: + self.write(end_line) + + def _report_markdown( + self, + header: List[str], + lines_values: List[List[Any]], + total_line: List[Any], + end_lines: List[str], + ) -> None: + """Internal method that prints report data in markdown format. + + `header` is a list with captions. + `lines_values` is a sorted list of lists containing coverage information. + `total_line` is a list with values of the total line. + `end_lines` is a list of ending lines with information about skipped files. + + """ + # Prepare the formatting strings, header, and column sorting. 
+ max_name = max((len(line[0].replace("_", "\\_")) for line in lines_values), default=0) + max_name = max(max_name, len("**TOTAL**")) + 1 + formats = dict( + Name="| {:{name_len}}|", + Stmts="{:>9} |", + Miss="{:>9} |", + Branch="{:>9} |", + BrPart="{:>9} |", + Cover="{:>{n}} |", + Missing="{:>10} |", + ) + max_n = max(len(total_line[header.index("Cover")]) + 6, len(" Cover ")) + header_items = [formats[item].format(item, name_len=max_name, n=max_n) for item in header] + header_str = "".join(header_items) + rule_str = "|" + " ".join(["- |".rjust(len(header_items[0])-1, "-")] + + ["-: |".rjust(len(item)-1, "-") for item in header_items[1:]] + ) + + # Write the header + self.write(header_str) + self.write(rule_str) + + for values in lines_values: + # build string with line values + formats.update(dict(Cover="{:>{n}}% |")) + line_items = [ + formats[item].format(str(value).replace("_", "\\_"), name_len=max_name, n=max_n-1) + for item, value in zip(header, values) + ] + self.write_items(line_items) + + # Write the TOTAL line + formats.update(dict(Name="|{:>{name_len}} |", Cover="{:>{n}} |")) + total_line_items: List[str] = [] + for item, value in zip(header, total_line): + if value == "": + insert = value + elif item == "Cover": + insert = f" **{value}%**" + else: + insert = f" **{value}**" + total_line_items += formats[item].format(insert, name_len=max_name, n=max_n) + self.write_items(total_line_items) + for end_line in end_lines: + self.write(end_line) + + def report(self, morfs: Optional[Iterable[TMorf]], outfile: Optional[IO[str]] = None) -> float: + """Writes a report summarizing coverage statistics per module. + + `outfile` is a text-mode file object to write the summary to. + + """ + self.outfile = outfile or sys.stdout + + self.coverage.get_data().set_query_contexts(self.config.report_contexts) + for fr, analysis in get_analysis_to_report(self.coverage, morfs): + self.report_one_file(fr, analysis) + + if not self.total.n_files and not self.skipped_count: + raise NoDataError("No data to report.") + + if self.output_format == "total": + self.write(self.total.pc_covered_str) + else: + self.tabular_report() + + return self.total.pc_covered + + def tabular_report(self) -> None: + """Writes tabular report formats.""" + # Prepare the header line and column sorting. + header = ["Name", "Stmts", "Miss"] + if self.branches: + header += ["Branch", "BrPart"] + header += ["Cover"] + if self.config.show_missing: + header += ["Missing"] + + column_order = dict(name=0, stmts=1, miss=2, cover=-1) + if self.branches: + column_order.update(dict(branch=3, brpart=4)) + + # `lines_values` is list of lists of sortable values. + lines_values = [] + + for (fr, analysis) in self.fr_analysis: + nums = analysis.numbers + + args = [fr.relative_filename(), nums.n_statements, nums.n_missing] + if self.branches: + args += [nums.n_branches, nums.n_partial_branches] + args += [nums.pc_covered_str] + if self.config.show_missing: + args += [analysis.missing_formatted(branches=True)] + args += [nums.pc_covered] + lines_values.append(args) + + # Line sorting. 
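+        # A leading "-" (or "+") on the configured column name reverses (or
+        # keeps) the order, so e.g. "sort = -cover" lists the best-covered
+        # files first.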
+ sort_option = (self.config.sort or "name").lower() + reverse = False + if sort_option[0] == "-": + reverse = True + sort_option = sort_option[1:] + elif sort_option[0] == "+": + sort_option = sort_option[1:] + sort_idx = column_order.get(sort_option) + if sort_idx is None: + raise ConfigError(f"Invalid sorting option: {self.config.sort!r}") + if sort_option == "name": + lines_values = human_sorted_items(lines_values, reverse=reverse) + else: + lines_values.sort( + key=lambda line: (line[sort_idx], line[0]), # type: ignore[index] + reverse=reverse, + ) + + # Calculate total if we had at least one file. + total_line = ["TOTAL", self.total.n_statements, self.total.n_missing] + if self.branches: + total_line += [self.total.n_branches, self.total.n_partial_branches] + total_line += [self.total.pc_covered_str] + if self.config.show_missing: + total_line += [""] + + # Create other final lines. + end_lines = [] + if self.config.skip_covered and self.skipped_count: + file_suffix = "s" if self.skipped_count>1 else "" + end_lines.append( + f"\n{self.skipped_count} file{file_suffix} skipped due to complete coverage." + ) + if self.config.skip_empty and self.empty_count: + file_suffix = "s" if self.empty_count > 1 else "" + end_lines.append(f"\n{self.empty_count} empty file{file_suffix} skipped.") + + if self.output_format == "markdown": + formatter = self._report_markdown + else: + formatter = self._report_text + formatter(header, lines_values, total_line, end_lines) + + def report_one_file(self, fr: FileReporter, analysis: Analysis) -> None: + """Report on just one file, the callback from report().""" + nums = analysis.numbers + self.total += nums + + no_missing_lines = (nums.n_missing == 0) + no_missing_branches = (nums.n_partial_branches == 0) + if self.config.skip_covered and no_missing_lines and no_missing_branches: + # Don't report on 100% files. + self.skipped_count += 1 + elif self.config.skip_empty and nums.n_statements == 0: + # Don't report on empty files. + self.empty_count += 1 + else: + self.fr_analysis.append((fr, analysis)) diff --git a/venv/lib/python3.10/site-packages/coverage/report_core.py b/venv/lib/python3.10/site-packages/coverage/report_core.py new file mode 100644 index 0000000..1535bf8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/coverage/report_core.py @@ -0,0 +1,119 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""Reporter foundation for coverage.py.""" + +from __future__ import annotations + +import sys + +from typing import ( + Callable, Iterable, Iterator, IO, Optional, Protocol, Tuple, TYPE_CHECKING, +) + +from coverage.exceptions import NoDataError, NotPython +from coverage.files import prep_patterns, GlobMatcher +from coverage.misc import ensure_dir_for_file, file_be_gone +from coverage.plugin import FileReporter +from coverage.results import Analysis +from coverage.types import TMorf + +if TYPE_CHECKING: + from coverage import Coverage + + +class Reporter(Protocol): + """What we expect of reporters.""" + + report_type: str + + def report(self, morfs: Optional[Iterable[TMorf]], outfile: IO[str]) -> float: + """Generate a report of `morfs`, written to `outfile`.""" + + +def render_report( + output_path: str, + reporter: Reporter, + morfs: Optional[Iterable[TMorf]], + msgfn: Callable[[str], None], +) -> float: + """Run a one-file report generator, managing the output file. + + This function ensures the output file is ready to be written to. 
Then writes + the report to it. Then closes the file and cleans up. + + """ + file_to_close = None + delete_file = False + + if output_path == "-": + outfile = sys.stdout + else: + # Ensure that the output directory is created; done here because this + # report pre-opens the output file. HtmlReporter does this on its own + # because its task is more complex, being multiple files. + ensure_dir_for_file(output_path) + outfile = open(output_path, "w", encoding="utf-8") + file_to_close = outfile + delete_file = True + + try: + ret = reporter.report(morfs, outfile=outfile) + if file_to_close is not None: + msgfn(f"Wrote {reporter.report_type} to {output_path}") + delete_file = False + return ret + finally: + if file_to_close is not None: + file_to_close.close() + if delete_file: + file_be_gone(output_path) # pragma: part covered (doesn't return) + + +def get_analysis_to_report( + coverage: Coverage, + morfs: Optional[Iterable[TMorf]], +) -> Iterator[Tuple[FileReporter, Analysis]]: + """Get the files to report on. + + For each morf in `morfs`, if it should be reported on (based on the omit + and include configuration options), yield a pair, the `FileReporter` and + `Analysis` for the morf. + + """ + file_reporters = coverage._get_file_reporters(morfs) + config = coverage.config + + if config.report_include: + matcher = GlobMatcher(prep_patterns(config.report_include), "report_include") + file_reporters = [fr for fr in file_reporters if matcher.match(fr.filename)] + + if config.report_omit: + matcher = GlobMatcher(prep_patterns(config.report_omit), "report_omit") + file_reporters = [fr for fr in file_reporters if not matcher.match(fr.filename)] + + if not file_reporters: + raise NoDataError("No data to report.") + + for fr in sorted(file_reporters): + try: + analysis = coverage._analyze(fr) + except NotPython: + # Only report errors for .py files, and only if we didn't + # explicitly suppress those errors. + # NotPython is only raised by PythonFileReporter, which has a + # should_be_python() method. 
+ if fr.should_be_python(): # type: ignore[attr-defined] + if config.ignore_errors: + msg = f"Couldn't parse Python file '{fr.filename}'" + coverage._warn(msg, slug="couldnt-parse") + else: + raise + except Exception as exc: + if config.ignore_errors: + msg = f"Couldn't parse '{fr.filename}': {exc}".rstrip() + coverage._warn(msg, slug="couldnt-parse") + else: + raise + else: + yield (fr, analysis) diff --git a/venv/lib/python3.10/site-packages/coverage/results.py b/venv/lib/python3.10/site-packages/coverage/results.py new file mode 100644 index 0000000..f5f9a37 --- /dev/null +++ b/venv/lib/python3.10/site-packages/coverage/results.py @@ -0,0 +1,385 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""Results of coverage measurement.""" + +from __future__ import annotations + +import collections + +from typing import Callable, Dict, Iterable, List, Optional, Tuple, TYPE_CHECKING + +from coverage.debug import auto_repr +from coverage.exceptions import ConfigError +from coverage.misc import nice_pair +from coverage.types import TArc, TLineNo + +if TYPE_CHECKING: + from coverage.data import CoverageData + from coverage.plugin import FileReporter + + +class Analysis: + """The results of analyzing a FileReporter.""" + + def __init__( + self, + data: CoverageData, + precision: int, + file_reporter: FileReporter, + file_mapper: Callable[[str], str], + ) -> None: + self.data = data + self.file_reporter = file_reporter + self.filename = file_mapper(self.file_reporter.filename) + self.statements = self.file_reporter.lines() + self.excluded = self.file_reporter.excluded_lines() + + # Identify missing statements. + executed: Iterable[TLineNo] + executed = self.data.lines(self.filename) or [] + executed = self.file_reporter.translate_lines(executed) + self.executed = executed + self.missing = self.statements - self.executed + + if self.data.has_arcs(): + self._arc_possibilities = sorted(self.file_reporter.arcs()) + self.exit_counts = self.file_reporter.exit_counts() + self.no_branch = self.file_reporter.no_branch_lines() + n_branches = self._total_branches() + mba = self.missing_branch_arcs() + n_partial_branches = sum(len(v) for k,v in mba.items() if k not in self.missing) + n_missing_branches = sum(len(v) for k,v in mba.items()) + else: + self._arc_possibilities = [] + self.exit_counts = {} + self.no_branch = set() + n_branches = n_partial_branches = n_missing_branches = 0 + + self.numbers = Numbers( + precision=precision, + n_files=1, + n_statements=len(self.statements), + n_excluded=len(self.excluded), + n_missing=len(self.missing), + n_branches=n_branches, + n_partial_branches=n_partial_branches, + n_missing_branches=n_missing_branches, + ) + + def missing_formatted(self, branches: bool = False) -> str: + """The missing line numbers, formatted nicely. + + Returns a string like "1-2, 5-11, 13-14". + + If `branches` is true, includes the missing branch arcs also. 
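+        With branch arcs included, the result can look like
+        "1-2, 5-11, 13->exit", where "->" marks a branch destination that was
+        never taken.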
+ + """ + if branches and self.has_arcs(): + arcs = self.missing_branch_arcs().items() + else: + arcs = None + + return format_lines(self.statements, self.missing, arcs=arcs) + + def has_arcs(self) -> bool: + """Were arcs measured in this result?""" + return self.data.has_arcs() + + def arc_possibilities(self) -> List[TArc]: + """Returns a sorted list of the arcs in the code.""" + return self._arc_possibilities + + def arcs_executed(self) -> List[TArc]: + """Returns a sorted list of the arcs actually executed in the code.""" + executed: Iterable[TArc] + executed = self.data.arcs(self.filename) or [] + executed = self.file_reporter.translate_arcs(executed) + return sorted(executed) + + def arcs_missing(self) -> List[TArc]: + """Returns a sorted list of the un-executed arcs in the code.""" + possible = self.arc_possibilities() + executed = self.arcs_executed() + missing = ( + p for p in possible + if p not in executed + and p[0] not in self.no_branch + and p[1] not in self.excluded + ) + return sorted(missing) + + def arcs_unpredicted(self) -> List[TArc]: + """Returns a sorted list of the executed arcs missing from the code.""" + possible = self.arc_possibilities() + executed = self.arcs_executed() + # Exclude arcs here which connect a line to itself. They can occur + # in executed data in some cases. This is where they can cause + # trouble, and here is where it's the least burden to remove them. + # Also, generators can somehow cause arcs from "enter" to "exit", so + # make sure we have at least one positive value. + unpredicted = ( + e for e in executed + if e not in possible + and e[0] != e[1] + and (e[0] > 0 or e[1] > 0) + ) + return sorted(unpredicted) + + def _branch_lines(self) -> List[TLineNo]: + """Returns a list of line numbers that have more than one exit.""" + return [l1 for l1,count in self.exit_counts.items() if count > 1] + + def _total_branches(self) -> int: + """How many total branches are there?""" + return sum(count for count in self.exit_counts.values() if count > 1) + + def missing_branch_arcs(self) -> Dict[TLineNo, List[TLineNo]]: + """Return arcs that weren't executed from branch lines. + + Returns {l1:[l2a,l2b,...], ...} + + """ + missing = self.arcs_missing() + branch_lines = set(self._branch_lines()) + mba = collections.defaultdict(list) + for l1, l2 in missing: + if l1 in branch_lines: + mba[l1].append(l2) + return mba + + def executed_branch_arcs(self) -> Dict[TLineNo, List[TLineNo]]: + """Return arcs that were executed from branch lines. + + Returns {l1:[l2a,l2b,...], ...} + + """ + executed = self.arcs_executed() + branch_lines = set(self._branch_lines()) + eba = collections.defaultdict(list) + for l1, l2 in executed: + if l1 in branch_lines: + eba[l1].append(l2) + return eba + + def branch_stats(self) -> Dict[TLineNo, Tuple[int, int]]: + """Get stats about branches. + + Returns a dict mapping line numbers to a tuple: + (total_exits, taken_exits). + """ + + missing_arcs = self.missing_branch_arcs() + stats = {} + for lnum in self._branch_lines(): + exits = self.exit_counts[lnum] + missing = len(missing_arcs[lnum]) + stats[lnum] = (exits, exits - missing) + return stats + + +class Numbers: + """The numerical results of measuring coverage. + + This holds the basic statistics from `Analysis`, and is used to roll + up statistics across files. 
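+    Instances can be added with "+" (and summed from zero), so per-file
+    numbers roll up into the report's TOTAL line.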
+ + """ + + def __init__( + self, + precision: int = 0, + n_files: int = 0, + n_statements: int = 0, + n_excluded: int = 0, + n_missing: int = 0, + n_branches: int = 0, + n_partial_branches: int = 0, + n_missing_branches: int = 0, + ) -> None: + assert 0 <= precision < 10 + self._precision = precision + self._near0 = 1.0 / 10**precision + self._near100 = 100.0 - self._near0 + self.n_files = n_files + self.n_statements = n_statements + self.n_excluded = n_excluded + self.n_missing = n_missing + self.n_branches = n_branches + self.n_partial_branches = n_partial_branches + self.n_missing_branches = n_missing_branches + + __repr__ = auto_repr + + def init_args(self) -> List[int]: + """Return a list for __init__(*args) to recreate this object.""" + return [ + self._precision, + self.n_files, self.n_statements, self.n_excluded, self.n_missing, + self.n_branches, self.n_partial_branches, self.n_missing_branches, + ] + + @property + def n_executed(self) -> int: + """Returns the number of executed statements.""" + return self.n_statements - self.n_missing + + @property + def n_executed_branches(self) -> int: + """Returns the number of executed branches.""" + return self.n_branches - self.n_missing_branches + + @property + def pc_covered(self) -> float: + """Returns a single percentage value for coverage.""" + if self.n_statements > 0: + numerator, denominator = self.ratio_covered + pc_cov = (100.0 * numerator) / denominator + else: + pc_cov = 100.0 + return pc_cov + + @property + def pc_covered_str(self) -> str: + """Returns the percent covered, as a string, without a percent sign. + + Note that "0" is only returned when the value is truly zero, and "100" + is only returned when the value is truly 100. Rounding can never + result in either "0" or "100". + + """ + return self.display_covered(self.pc_covered) + + def display_covered(self, pc: float) -> str: + """Return a displayable total percentage, as a string. + + Note that "0" is only returned when the value is truly zero, and "100" + is only returned when the value is truly 100. Rounding can never + result in either "0" or "100". + + """ + if 0 < pc < self._near0: + pc = self._near0 + elif self._near100 < pc < 100: + pc = self._near100 + else: + pc = round(pc, self._precision) + return "%.*f" % (self._precision, pc) + + def pc_str_width(self) -> int: + """How many characters wide can pc_covered_str be?""" + width = 3 # "100" + if self._precision > 0: + width += 1 + self._precision + return width + + @property + def ratio_covered(self) -> Tuple[int, int]: + """Return a numerator and denominator for the coverage ratio.""" + numerator = self.n_executed + self.n_executed_branches + denominator = self.n_statements + self.n_branches + return numerator, denominator + + def __add__(self, other: Numbers) -> Numbers: + nums = Numbers(precision=self._precision) + nums.n_files = self.n_files + other.n_files + nums.n_statements = self.n_statements + other.n_statements + nums.n_excluded = self.n_excluded + other.n_excluded + nums.n_missing = self.n_missing + other.n_missing + nums.n_branches = self.n_branches + other.n_branches + nums.n_partial_branches = ( + self.n_partial_branches + other.n_partial_branches + ) + nums.n_missing_branches = ( + self.n_missing_branches + other.n_missing_branches + ) + return nums + + def __radd__(self, other: int) -> Numbers: + # Implementing 0+Numbers allows us to sum() a list of Numbers. + assert other == 0 # we only ever call it this way. 
+ return self + + +def _line_ranges( + statements: Iterable[TLineNo], + lines: Iterable[TLineNo], +) -> List[Tuple[TLineNo, TLineNo]]: + """Produce a list of ranges for `format_lines`.""" + statements = sorted(statements) + lines = sorted(lines) + + pairs = [] + start = None + lidx = 0 + for stmt in statements: + if lidx >= len(lines): + break + if stmt == lines[lidx]: + lidx += 1 + if not start: + start = stmt + end = stmt + elif start: + pairs.append((start, end)) + start = None + if start: + pairs.append((start, end)) + return pairs + + +def format_lines( + statements: Iterable[TLineNo], + lines: Iterable[TLineNo], + arcs: Optional[Iterable[Tuple[TLineNo, List[TLineNo]]]] = None, +) -> str: + """Nicely format a list of line numbers. + + Format a list of line numbers for printing by coalescing groups of lines as + long as the lines represent consecutive statements. This will coalesce + even if there are gaps between statements. + + For example, if `statements` is [1,2,3,4,5,10,11,12,13,14] and + `lines` is [1,2,5,10,11,13,14] then the result will be "1-2, 5-11, 13-14". + + Both `lines` and `statements` can be any iterable. All of the elements of + `lines` must be in `statements`, and all of the values must be positive + integers. + + If `arcs` is provided, they are (start,[end,end,end]) pairs that will be + included in the output as long as start isn't in `lines`. + + """ + line_items = [(pair[0], nice_pair(pair)) for pair in _line_ranges(statements, lines)] + if arcs is not None: + line_exits = sorted(arcs) + for line, exits in line_exits: + for ex in sorted(exits): + if line not in lines and ex not in lines: + dest = (ex if ex > 0 else "exit") + line_items.append((line, f"{line}->{dest}")) + + ret = ", ".join(t[-1] for t in sorted(line_items)) + return ret + + +def should_fail_under(total: float, fail_under: float, precision: int) -> bool: + """Determine if a total should fail due to fail-under. + + `total` is a float, the coverage measurement total. `fail_under` is the + fail_under setting to compare with. `precision` is the number of digits + to consider after the decimal point. + + Returns True if the total should fail. + + """ + # We can never achieve higher than 100% coverage, or less than zero. + if not (0 <= fail_under <= 100.0): + msg = f"fail_under={fail_under} is invalid. Must be between 0 and 100." + raise ConfigError(msg) + + # Special case for fail_under=100, it must really be 100. 
+ if fail_under == 100.0 and total != 100.0: + return True + + return round(total, precision) < fail_under diff --git a/venv/lib/python3.10/site-packages/coverage/sqldata.py b/venv/lib/python3.10/site-packages/coverage/sqldata.py new file mode 100644 index 0000000..9dc8ef9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/coverage/sqldata.py @@ -0,0 +1,1095 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""SQLite coverage data.""" + +from __future__ import annotations + +import collections +import datetime +import functools +import glob +import itertools +import os +import random +import socket +import sqlite3 +import sys +import textwrap +import threading +import zlib + +from typing import ( + cast, Any, Callable, Collection, Dict, List, Mapping, + Optional, Sequence, Set, Tuple, TypeVar, Union, +) + +from coverage.debug import NoDebugging, auto_repr +from coverage.exceptions import CoverageException, DataError +from coverage.files import PathAliases +from coverage.misc import file_be_gone, isolate_module +from coverage.numbits import numbits_to_nums, numbits_union, nums_to_numbits +from coverage.sqlitedb import SqliteDb +from coverage.types import FilePath, TArc, TDebugCtl, TLineNo, TWarnFn +from coverage.version import __version__ + +os = isolate_module(os) + +# If you change the schema: increment the SCHEMA_VERSION and update the +# docs in docs/dbschema.rst by running "make cogdoc". + +SCHEMA_VERSION = 7 + +# Schema versions: +# 1: Released in 5.0a2 +# 2: Added contexts in 5.0a3. +# 3: Replaced line table with line_map table. +# 4: Changed line_map.bitmap to line_map.numbits. +# 5: Added foreign key declarations. +# 6: Key-value in meta. +# 7: line_map -> line_bits + +SCHEMA = """\ +CREATE TABLE coverage_schema ( + -- One row, to record the version of the schema in this db. + version integer +); + +CREATE TABLE meta ( + -- Key-value pairs, to record metadata about the data + key text, + value text, + unique (key) + -- Possible keys: + -- 'has_arcs' boolean -- Is this data recording branches? + -- 'sys_argv' text -- The coverage command line that recorded the data. + -- 'version' text -- The version of coverage.py that made the file. + -- 'when' text -- Datetime when the file was created. +); + +CREATE TABLE file ( + -- A row per file measured. + id integer primary key, + path text, + unique (path) +); + +CREATE TABLE context ( + -- A row per context measured. + id integer primary key, + context text, + unique (context) +); + +CREATE TABLE line_bits ( + -- If recording lines, a row per context per file executed. + -- All of the line numbers for that file/context are in one numbits. + file_id integer, -- foreign key to `file`. + context_id integer, -- foreign key to `context`. + numbits blob, -- see the numbits functions in coverage.numbits + foreign key (file_id) references file (id), + foreign key (context_id) references context (id), + unique (file_id, context_id) +); + +CREATE TABLE arc ( + -- If recording branches, a row per context per from/to line transition executed. + file_id integer, -- foreign key to `file`. + context_id integer, -- foreign key to `context`. + fromno integer, -- line number jumped from. + tono integer, -- line number jumped to. 
+    foreign key (file_id) references file (id),
+    foreign key (context_id) references context (id),
+    unique (file_id, context_id, fromno, tono)
+);
+
+CREATE TABLE tracer (
+    -- A row per file indicating the tracer used for that file.
+    file_id integer primary key,
+    tracer text,
+    foreign key (file_id) references file (id)
+);
+"""
+
+TMethod = TypeVar("TMethod", bound=Callable[..., Any])
+
+def _locked(method: TMethod) -> TMethod:
+    """A decorator for methods that should hold self._lock."""
+    @functools.wraps(method)
+    def _wrapped(self: CoverageData, *args: Any, **kwargs: Any) -> Any:
+        if self._debug.should("lock"):
+            self._debug.write(f"Locking {self._lock!r} for {method.__name__}")
+        with self._lock:
+            if self._debug.should("lock"):
+                self._debug.write(f"Locked {self._lock!r} for {method.__name__}")
+            return method(self, *args, **kwargs)
+    return _wrapped  # type: ignore[return-value]
+
+
+class CoverageData:
+    """Manages collected coverage data, including file storage.
+
+    This class is the public supported API to the data that coverage.py
+    collects during program execution. It includes information about what code
+    was executed. It does not include information from the analysis phase, to
+    determine what lines could have been executed, or what lines were not
+    executed.
+
+    .. note::
+
+        The data file is currently a SQLite database file, with a
+        :ref:`documented schema <dbschema>`. The schema is subject to change
+        though, so be careful about querying it directly. Use this API if you
+        can to isolate yourself from changes.
+
+    There are a number of kinds of data that can be collected:
+
+    * **lines**: the line numbers of source lines that were executed.
+      These are always available.
+
+    * **arcs**: pairs of source and destination line numbers for transitions
+      between source lines. These are only available if branch coverage was
+      used.
+
+    * **file tracer names**: the module names of the file tracer plugins that
+      handled each file in the data.
+
+    Lines, arcs, and file tracer names are stored for each source file. File
+    names in this API are case-sensitive, even on platforms with
+    case-insensitive file systems.
+
+    A data file either stores lines, or arcs, but not both.
+
+    A data file is associated with the data when the :class:`CoverageData`
+    is created, using the parameters `basename`, `suffix`, and `no_disk`. The
+    base name can be queried with :meth:`base_filename`, and the actual file
+    name being used is available from :meth:`data_filename`.
+
+    To read an existing coverage.py data file, use :meth:`read`. You can then
+    access the line, arc, or file tracer data with :meth:`lines`, :meth:`arcs`,
+    or :meth:`file_tracer`.
+
+    The :meth:`has_arcs` method indicates whether arc data is available. You
+    can get a set of the files in the data with :meth:`measured_files`. As
+    with most Python containers, you can determine if there is any data at all
+    by using this object as a boolean value.
+
+    The contexts for each line in a file can be read with
+    :meth:`contexts_by_lineno`.
+
+    To limit querying to certain contexts, use :meth:`set_query_context` or
+    :meth:`set_query_contexts`. These will narrow the focus of subsequent
+    :meth:`lines`, :meth:`arcs`, and :meth:`contexts_by_lineno` calls. The set
+    of all measured context names can be retrieved with
+    :meth:`measured_contexts`.
+
+    Most data files will be created by coverage.py itself, but you can use
+    methods here to create data files if you like.
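+    For example, a sketch that builds a small line-coverage file by hand (the
+    path and line numbers here are made up)::
+
+        from coverage import CoverageData
+
+        covdata = CoverageData(basename="handmade.coverage")
+        covdata.add_lines({"src/mod.py": [1, 2, 5]})
+        covdata.write()
+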
The :meth:`add_lines`, + :meth:`add_arcs`, and :meth:`add_file_tracers` methods add data, in ways + that are convenient for coverage.py. + + To record data for contexts, use :meth:`set_context` to set a context to + be used for subsequent :meth:`add_lines` and :meth:`add_arcs` calls. + + To add a source file without any measured data, use :meth:`touch_file`, + or :meth:`touch_files` for a list of such files. + + Write the data to its file with :meth:`write`. + + You can clear the data in memory with :meth:`erase`. Data for specific + files can be removed from the database with :meth:`purge_files`. + + Two data collections can be combined by using :meth:`update` on one + :class:`CoverageData`, passing it the other. + + Data in a :class:`CoverageData` can be serialized and deserialized with + :meth:`dumps` and :meth:`loads`. + + The methods used during the coverage.py collection phase + (:meth:`add_lines`, :meth:`add_arcs`, :meth:`set_context`, and + :meth:`add_file_tracers`) are thread-safe. Other methods may not be. + + """ + + def __init__( + self, + basename: Optional[FilePath] = None, + suffix: Optional[Union[str, bool]] = None, + no_disk: bool = False, + warn: Optional[TWarnFn] = None, + debug: Optional[TDebugCtl] = None, + ) -> None: + """Create a :class:`CoverageData` object to hold coverage-measured data. + + Arguments: + basename (str): the base name of the data file, defaulting to + ".coverage". This can be a path to a file in another directory. + suffix (str or bool): has the same meaning as the `data_suffix` + argument to :class:`coverage.Coverage`. + no_disk (bool): if True, keep all data in memory, and don't + write any disk file. + warn: a warning callback function, accepting a warning message + argument. + debug: a `DebugControl` object (optional) + + """ + self._no_disk = no_disk + self._basename = os.path.abspath(basename or ".coverage") + self._suffix = suffix + self._warn = warn + self._debug = debug or NoDebugging() + + self._choose_filename() + # Maps filenames to row ids. + self._file_map: Dict[str, int] = {} + # Maps thread ids to SqliteDb objects. + self._dbs: Dict[int, SqliteDb] = {} + self._pid = os.getpid() + # Synchronize the operations used during collection. + self._lock = threading.RLock() + + # Are we in sync with the data file? + self._have_used = False + + self._has_lines = False + self._has_arcs = False + + self._current_context: Optional[str] = None + self._current_context_id: Optional[int] = None + self._query_context_ids: Optional[List[int]] = None + + __repr__ = auto_repr + + def _choose_filename(self) -> None: + """Set self._filename based on inited attributes.""" + if self._no_disk: + self._filename = ":memory:" + else: + self._filename = self._basename + suffix = filename_suffix(self._suffix) + if suffix: + self._filename += "." 
+ suffix + + def _reset(self) -> None: + """Reset our attributes.""" + if not self._no_disk: + for db in self._dbs.values(): + db.close() + self._dbs = {} + self._file_map = {} + self._have_used = False + self._current_context_id = None + + def _open_db(self) -> None: + """Open an existing db file, and read its metadata.""" + if self._debug.should("dataio"): + self._debug.write(f"Opening data file {self._filename!r}") + self._dbs[threading.get_ident()] = SqliteDb(self._filename, self._debug) + self._read_db() + + def _read_db(self) -> None: + """Read the metadata from a database so that we are ready to use it.""" + with self._dbs[threading.get_ident()] as db: + try: + row = db.execute_one("select version from coverage_schema") + assert row is not None + except Exception as exc: + if "no such table: coverage_schema" in str(exc): + self._init_db(db) + else: + raise DataError( + "Data file {!r} doesn't seem to be a coverage data file: {}".format( + self._filename, exc + ) + ) from exc + else: + schema_version = row[0] + if schema_version != SCHEMA_VERSION: + raise DataError( + "Couldn't use data file {!r}: wrong schema: {} instead of {}".format( + self._filename, schema_version, SCHEMA_VERSION + ) + ) + + row = db.execute_one("select value from meta where key = 'has_arcs'") + if row is not None: + self._has_arcs = bool(int(row[0])) + self._has_lines = not self._has_arcs + + with db.execute("select id, path from file") as cur: + for file_id, path in cur: + self._file_map[path] = file_id + + def _init_db(self, db: SqliteDb) -> None: + """Write the initial contents of the database.""" + if self._debug.should("dataio"): + self._debug.write(f"Initing data file {self._filename!r}") + db.executescript(SCHEMA) + db.execute_void("insert into coverage_schema (version) values (?)", (SCHEMA_VERSION,)) + + # When writing metadata, avoid information that will needlessly change + # the hash of the data file, unless we're debugging processes. + meta_data = [ + ("version", __version__), + ] + if self._debug.should("process"): + meta_data.extend([ + ("sys_argv", str(getattr(sys, "argv", None))), + ("when", datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")), + ]) + db.executemany_void("insert or ignore into meta (key, value) values (?, ?)", meta_data) + + def _connect(self) -> SqliteDb: + """Get the SqliteDb object to use.""" + if threading.get_ident() not in self._dbs: + self._open_db() + return self._dbs[threading.get_ident()] + + def __bool__(self) -> bool: + if (threading.get_ident() not in self._dbs and not os.path.exists(self._filename)): + return False + try: + with self._connect() as con: + with con.execute("select * from file limit 1") as cur: + return bool(list(cur)) + except CoverageException: + return False + + def dumps(self) -> bytes: + """Serialize the current data to a byte string. + + The format of the serialized data is not documented. It is only + suitable for use with :meth:`loads` in the same version of + coverage.py. + + Note that this serialization is not what gets stored in coverage data + files. This method is meant to produce bytes that can be transmitted + elsewhere and then deserialized with :meth:`loads`. + + Returns: + A byte string of serialized data. + + .. versionadded:: 5.0 + + """ + if self._debug.should("dataio"): + self._debug.write(f"Dumping data from data file {self._filename!r}") + with self._connect() as con: + script = con.dump() + return b"z" + zlib.compress(script.encode("utf-8")) + + def loads(self, data: bytes) -> None: + """Deserialize data from :meth:`dumps`. 
+ + Use with a newly-created empty :class:`CoverageData` object. It's + undefined what happens if the object already has data in it. + + Note that this is not for reading data from a coverage data file. It + is only for use on data you produced with :meth:`dumps`. + + Arguments: + data: A byte string of serialized data produced by :meth:`dumps`. + + .. versionadded:: 5.0 + + """ + if self._debug.should("dataio"): + self._debug.write(f"Loading data into data file {self._filename!r}") + if data[:1] != b"z": + raise DataError( + f"Unrecognized serialization: {data[:40]!r} (head of {len(data)} bytes)" + ) + script = zlib.decompress(data[1:]).decode("utf-8") + self._dbs[threading.get_ident()] = db = SqliteDb(self._filename, self._debug) + with db: + db.executescript(script) + self._read_db() + self._have_used = True + + def _file_id(self, filename: str, add: bool = False) -> Optional[int]: + """Get the file id for `filename`. + + If filename is not in the database yet, add it if `add` is True. + If `add` is not True, return None. + """ + if filename not in self._file_map: + if add: + with self._connect() as con: + self._file_map[filename] = con.execute_for_rowid( + "insert or replace into file (path) values (?)", + (filename,) + ) + return self._file_map.get(filename) + + def _context_id(self, context: str) -> Optional[int]: + """Get the id for a context.""" + assert context is not None + self._start_using() + with self._connect() as con: + row = con.execute_one("select id from context where context = ?", (context,)) + if row is not None: + return cast(int, row[0]) + else: + return None + + @_locked + def set_context(self, context: Optional[str]) -> None: + """Set the current context for future :meth:`add_lines` etc. + + `context` is a str, the name of the context to use for the next data + additions. The context persists until the next :meth:`set_context`. + + .. versionadded:: 5.0 + + """ + if self._debug.should("dataop"): + self._debug.write(f"Setting context: {context!r}") + self._current_context = context + self._current_context_id = None + + def _set_context_id(self) -> None: + """Use the _current_context to set _current_context_id.""" + context = self._current_context or "" + context_id = self._context_id(context) + if context_id is not None: + self._current_context_id = context_id + else: + with self._connect() as con: + self._current_context_id = con.execute_for_rowid( + "insert into context (context) values (?)", + (context,) + ) + + def base_filename(self) -> str: + """The base filename for storing data. + + .. versionadded:: 5.0 + + """ + return self._basename + + def data_filename(self) -> str: + """Where is the data stored? + + .. versionadded:: 5.0 + + """ + return self._filename + + @_locked + def add_lines(self, line_data: Mapping[str, Collection[TLineNo]]) -> None: + """Add measured line data. + + `line_data` is a dictionary mapping file names to iterables of ints:: + + { filename: { line1, line2, ... }, ...} + + """ + if self._debug.should("dataop"): + self._debug.write("Adding lines: %d files, %d lines total" % ( + len(line_data), sum(bool(len(lines)) for lines in line_data.values()) + )) + self._start_using() + self._choose_lines_or_arcs(lines=True) + if not line_data: + return + with self._connect() as con: + self._set_context_id() + for filename, linenos in line_data.items(): + linemap = nums_to_numbits(linenos) + file_id = self._file_id(filename, add=True) + query = "select numbits from line_bits where file_id = ? and context_id = ?" 
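# --- Editor's sketch (not part of the patch): how the numbits blobs stored
# --- in line_bits behave, using the coverage.numbits helpers imported at the
# --- top of this module.
from coverage.numbits import nums_to_numbits, numbits_to_nums, numbits_union

nb = nums_to_numbits([1, 2, 5])             # pack line numbers into a blob
assert numbits_to_nums(nb) == [1, 2, 5]     # unpack them again
merged = numbits_union(nb, nums_to_numbits([5, 9]))
assert numbits_to_nums(merged) == [1, 2, 5, 9]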
+ with con.execute(query, (file_id, self._current_context_id)) as cur: + existing = list(cur) + if existing: + linemap = numbits_union(linemap, existing[0][0]) + + con.execute_void( + "insert or replace into line_bits " + + " (file_id, context_id, numbits) values (?, ?, ?)", + (file_id, self._current_context_id, linemap), + ) + + @_locked + def add_arcs(self, arc_data: Mapping[str, Collection[TArc]]) -> None: + """Add measured arc data. + + `arc_data` is a dictionary mapping file names to iterables of pairs of + ints:: + + { filename: { (l1,l2), (l1,l2), ... }, ...} + + """ + if self._debug.should("dataop"): + self._debug.write("Adding arcs: %d files, %d arcs total" % ( + len(arc_data), sum(len(arcs) for arcs in arc_data.values()) + )) + self._start_using() + self._choose_lines_or_arcs(arcs=True) + if not arc_data: + return + with self._connect() as con: + self._set_context_id() + for filename, arcs in arc_data.items(): + if not arcs: + continue + file_id = self._file_id(filename, add=True) + data = [(file_id, self._current_context_id, fromno, tono) for fromno, tono in arcs] + con.executemany_void( + "insert or ignore into arc " + + "(file_id, context_id, fromno, tono) values (?, ?, ?, ?)", + data, + ) + + def _choose_lines_or_arcs(self, lines: bool = False, arcs: bool = False) -> None: + """Force the data file to choose between lines and arcs.""" + assert lines or arcs + assert not (lines and arcs) + if lines and self._has_arcs: + if self._debug.should("dataop"): + self._debug.write("Error: Can't add line measurements to existing branch data") + raise DataError("Can't add line measurements to existing branch data") + if arcs and self._has_lines: + if self._debug.should("dataop"): + self._debug.write("Error: Can't add branch measurements to existing line data") + raise DataError("Can't add branch measurements to existing line data") + if not self._has_arcs and not self._has_lines: + self._has_lines = lines + self._has_arcs = arcs + with self._connect() as con: + con.execute_void( + "insert or ignore into meta (key, value) values (?, ?)", + ("has_arcs", str(int(arcs))) + ) + + @_locked + def add_file_tracers(self, file_tracers: Mapping[str, str]) -> None: + """Add per-file plugin information. + + `file_tracers` is { filename: plugin_name, ... } + + """ + if self._debug.should("dataop"): + self._debug.write("Adding file tracers: %d files" % (len(file_tracers),)) + if not file_tracers: + return + self._start_using() + with self._connect() as con: + for filename, plugin_name in file_tracers.items(): + file_id = self._file_id(filename, add=True) + existing_plugin = self.file_tracer(filename) + if existing_plugin: + if existing_plugin != plugin_name: + raise DataError( + "Conflicting file tracer name for '{}': {!r} vs {!r}".format( + filename, existing_plugin, plugin_name, + ) + ) + elif plugin_name: + con.execute_void( + "insert into tracer (file_id, tracer) values (?, ?)", + (file_id, plugin_name) + ) + + def touch_file(self, filename: str, plugin_name: str = "") -> None: + """Ensure that `filename` appears in the data, empty if needed. + + `plugin_name` is the name of the plugin responsible for this file. + It is used to associate the right filereporter, etc. + """ + self.touch_files([filename], plugin_name) + + def touch_files(self, filenames: Collection[str], plugin_name: Optional[str] = None) -> None: + """Ensure that `filenames` appear in the data, empty if needed. + + `plugin_name` is the name of the plugin responsible for these files. 
+ It is used to associate the right filereporter, etc. + """ + if self._debug.should("dataop"): + self._debug.write(f"Touching {filenames!r}") + self._start_using() + with self._connect(): # Use this to get one transaction. + if not self._has_arcs and not self._has_lines: + raise DataError("Can't touch files in an empty CoverageData") + + for filename in filenames: + self._file_id(filename, add=True) + if plugin_name: + # Set the tracer for this file + self.add_file_tracers({filename: plugin_name}) + + def purge_files(self, filenames: Collection[str]) -> None: + """Purge any existing coverage data for the given `filenames`. + + .. versionadded:: 7.2 + + """ + if self._debug.should("dataop"): + self._debug.write(f"Purging data for {filenames!r}") + self._start_using() + with self._connect() as con: + + if self._has_lines: + sql = "delete from line_bits where file_id=?" + elif self._has_arcs: + sql = "delete from arc where file_id=?" + else: + raise DataError("Can't purge files in an empty CoverageData") + + for filename in filenames: + file_id = self._file_id(filename, add=False) + if file_id is None: + continue + con.execute_void(sql, (file_id,)) + + def update(self, other_data: CoverageData, aliases: Optional[PathAliases] = None) -> None: + """Update this data with data from several other :class:`CoverageData` instances. + + If `aliases` is provided, it's a `PathAliases` object that is used to + re-map paths to match the local machine's. Note: `aliases` is None + only when called directly from the test suite. + + """ + if self._debug.should("dataop"): + self._debug.write("Updating with data from {!r}".format( + getattr(other_data, "_filename", "???"), + )) + if self._has_lines and other_data._has_arcs: + raise DataError("Can't combine arc data with line data") + if self._has_arcs and other_data._has_lines: + raise DataError("Can't combine line data with arc data") + + aliases = aliases or PathAliases() + + # Force the database we're writing to to exist before we start nesting contexts. + self._start_using() + + # Collector for all arcs, lines and tracers + other_data.read() + with other_data._connect() as con: + # Get files data. + with con.execute("select path from file") as cur: + files = {path: aliases.map(path) for (path,) in cur} + + # Get contexts data. + with con.execute("select context from context") as cur: + contexts = [context for (context,) in cur] + + # Get arc data. + with con.execute( + "select file.path, context.context, arc.fromno, arc.tono " + + "from arc " + + "inner join file on file.id = arc.file_id " + + "inner join context on context.id = arc.context_id" + ) as cur: + arcs = [ + (files[path], context, fromno, tono) + for (path, context, fromno, tono) in cur + ] + + # Get line data. + with con.execute( + "select file.path, context.context, line_bits.numbits " + + "from line_bits " + + "inner join file on file.id = line_bits.file_id " + + "inner join context on context.id = line_bits.context_id" + ) as cur: + lines: Dict[Tuple[str, str], bytes] = {} + for path, context, numbits in cur: + key = (files[path], context) + if key in lines: + numbits = numbits_union(lines[key], numbits) + lines[key] = numbits + + # Get tracer data. + with con.execute( + "select file.path, tracer " + + "from tracer " + + "inner join file on file.id = tracer.file_id" + ) as cur: + tracers = {files[path]: tracer for (path, tracer) in cur} + + with self._connect() as con: + assert con.con is not None + con.con.isolation_level = "IMMEDIATE" + + # Get all tracers in the DB. 
Files not in the tracers are assumed + # to have an empty string tracer. Since Sqlite does not support + # full outer joins, we have to make two queries to fill the + # dictionary. + with con.execute("select path from file") as cur: + this_tracers = {path: "" for path, in cur} + with con.execute( + "select file.path, tracer from tracer " + + "inner join file on file.id = tracer.file_id" + ) as cur: + this_tracers.update({ + aliases.map(path): tracer + for path, tracer in cur + }) + + # Create all file and context rows in the DB. + con.executemany_void( + "insert or ignore into file (path) values (?)", + ((file,) for file in files.values()) + ) + with con.execute("select id, path from file") as cur: + file_ids = {path: id for id, path in cur} + self._file_map.update(file_ids) + con.executemany_void( + "insert or ignore into context (context) values (?)", + ((context,) for context in contexts) + ) + with con.execute("select id, context from context") as cur: + context_ids = {context: id for id, context in cur} + + # Prepare tracers and fail, if a conflict is found. + # tracer_paths is used to ensure consistency over the tracer data + # and tracer_map tracks the tracers to be inserted. + tracer_map = {} + for path in files.values(): + this_tracer = this_tracers.get(path) + other_tracer = tracers.get(path, "") + # If there is no tracer, there is always the None tracer. + if this_tracer is not None and this_tracer != other_tracer: + raise DataError( + "Conflicting file tracer name for '{}': {!r} vs {!r}".format( + path, this_tracer, other_tracer + ) + ) + tracer_map[path] = other_tracer + + # Prepare arc and line rows to be inserted by converting the file + # and context strings with integer ids. Then use the efficient + # `executemany()` to insert all rows at once. + arc_rows = ( + (file_ids[file], context_ids[context], fromno, tono) + for file, context, fromno, tono in arcs + ) + + # Get line data. + with con.execute( + "select file.path, context.context, line_bits.numbits " + + "from line_bits " + + "inner join file on file.id = line_bits.file_id " + + "inner join context on context.id = line_bits.context_id" + ) as cur: + for path, context, numbits in cur: + key = (aliases.map(path), context) + if key in lines: + numbits = numbits_union(lines[key], numbits) + lines[key] = numbits + + if arcs: + self._choose_lines_or_arcs(arcs=True) + + # Write the combined data. + con.executemany_void( + "insert or ignore into arc " + + "(file_id, context_id, fromno, tono) values (?, ?, ?, ?)", + arc_rows + ) + + if lines: + self._choose_lines_or_arcs(lines=True) + con.execute_void("delete from line_bits") + con.executemany_void( + "insert into line_bits " + + "(file_id, context_id, numbits) values (?, ?, ?)", + [ + (file_ids[file], context_ids[context], numbits) + for (file, context), numbits in lines.items() + ] + ) + con.executemany_void( + "insert or ignore into tracer (file_id, tracer) values (?, ?)", + ((file_ids[filename], tracer) for filename, tracer in tracer_map.items()) + ) + + if not self._no_disk: + # Update all internal cache data. + self._reset() + self.read() + + def erase(self, parallel: bool = False) -> None: + """Erase the data in this object. + + If `parallel` is true, then also deletes data files created from the + basename by parallel-mode. 
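# --- Editor's sketch (not part of the patch): combining two data files with
# --- update(), per the docstring above; the ".coverage.a"/".coverage.b" file
# --- names are hypothetical.
from coverage.sqldata import CoverageData

combined = CoverageData(".coverage.a")
combined.read()
other = CoverageData(".coverage.b")
combined.update(other)        # update() reads `other` itself, then merges
combined.write()              # the SQLite file was updated in place; write() is a no-op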
+ + """ + self._reset() + if self._no_disk: + return + if self._debug.should("dataio"): + self._debug.write(f"Erasing data file {self._filename!r}") + file_be_gone(self._filename) + if parallel: + data_dir, local = os.path.split(self._filename) + local_abs_path = os.path.join(os.path.abspath(data_dir), local) + pattern = glob.escape(local_abs_path) + ".*" + for filename in glob.glob(pattern): + if self._debug.should("dataio"): + self._debug.write(f"Erasing parallel data file {filename!r}") + file_be_gone(filename) + + def read(self) -> None: + """Start using an existing data file.""" + if os.path.exists(self._filename): + with self._connect(): + self._have_used = True + + def write(self) -> None: + """Ensure the data is written to the data file.""" + pass + + def _start_using(self) -> None: + """Call this before using the database at all.""" + if self._pid != os.getpid(): + # Looks like we forked! Have to start a new data file. + self._reset() + self._choose_filename() + self._pid = os.getpid() + if not self._have_used: + self.erase() + self._have_used = True + + def has_arcs(self) -> bool: + """Does the database have arcs (True) or lines (False).""" + return bool(self._has_arcs) + + def measured_files(self) -> Set[str]: + """A set of all files that have been measured. + + Note that a file may be mentioned as measured even though no lines or + arcs for that file are present in the data. + + """ + return set(self._file_map) + + def measured_contexts(self) -> Set[str]: + """A set of all contexts that have been measured. + + .. versionadded:: 5.0 + + """ + self._start_using() + with self._connect() as con: + with con.execute("select distinct(context) from context") as cur: + contexts = {row[0] for row in cur} + return contexts + + def file_tracer(self, filename: str) -> Optional[str]: + """Get the plugin name of the file tracer for a file. + + Returns the name of the plugin that handles this file. If the file was + measured, but didn't use a plugin, then "" is returned. If the file + was not measured, then None is returned. + + """ + self._start_using() + with self._connect() as con: + file_id = self._file_id(filename) + if file_id is None: + return None + row = con.execute_one("select tracer from tracer where file_id = ?", (file_id,)) + if row is not None: + return row[0] or "" + return "" # File was measured, but no tracer associated. + + def set_query_context(self, context: str) -> None: + """Set a context for subsequent querying. + + The next :meth:`lines`, :meth:`arcs`, or :meth:`contexts_by_lineno` + calls will be limited to only one context. `context` is a string which + must match a context exactly. If it does not, no exception is raised, + but queries will return no data. + + .. versionadded:: 5.0 + + """ + self._start_using() + with self._connect() as con: + with con.execute("select id from context where context = ?", (context,)) as cur: + self._query_context_ids = [row[0] for row in cur.fetchall()] + + def set_query_contexts(self, contexts: Optional[Sequence[str]]) -> None: + """Set a number of contexts for subsequent querying. + + The next :meth:`lines`, :meth:`arcs`, or :meth:`contexts_by_lineno` + calls will be limited to the specified contexts. `contexts` is a list + of Python regular expressions. Contexts will be matched using + :func:`re.search `. Data will be included in query + results if they are part of any of the contexts matched. + + .. 
versionadded:: 5.0 + + """ + self._start_using() + if contexts: + with self._connect() as con: + context_clause = " or ".join(["context regexp ?"] * len(contexts)) + with con.execute("select id from context where " + context_clause, contexts) as cur: + self._query_context_ids = [row[0] for row in cur.fetchall()] + else: + self._query_context_ids = None + + def lines(self, filename: str) -> Optional[List[TLineNo]]: + """Get the list of lines executed for a source file. + + If the file was not measured, returns None. A file might be measured, + and have no lines executed, in which case an empty list is returned. + + If the file was executed, returns a list of integers, the line numbers + executed in the file. The list is in no particular order. + + """ + self._start_using() + if self.has_arcs(): + arcs = self.arcs(filename) + if arcs is not None: + all_lines = itertools.chain.from_iterable(arcs) + return list({l for l in all_lines if l > 0}) + + with self._connect() as con: + file_id = self._file_id(filename) + if file_id is None: + return None + else: + query = "select numbits from line_bits where file_id = ?" + data = [file_id] + if self._query_context_ids is not None: + ids_array = ", ".join("?" * len(self._query_context_ids)) + query += " and context_id in (" + ids_array + ")" + data += self._query_context_ids + with con.execute(query, data) as cur: + bitmaps = list(cur) + nums = set() + for row in bitmaps: + nums.update(numbits_to_nums(row[0])) + return list(nums) + + def arcs(self, filename: str) -> Optional[List[TArc]]: + """Get the list of arcs executed for a file. + + If the file was not measured, returns None. A file might be measured, + and have no arcs executed, in which case an empty list is returned. + + If the file was executed, returns a list of 2-tuples of integers. Each + pair is a starting line number and an ending line number for a + transition from one line to another. The list is in no particular + order. + + Negative numbers have special meaning. If the starting line number is + -N, it represents an entry to the code object that starts at line N. + If the ending ling number is -N, it's an exit from the code object that + starts at line N. + + """ + self._start_using() + with self._connect() as con: + file_id = self._file_id(filename) + if file_id is None: + return None + else: + query = "select distinct fromno, tono from arc where file_id = ?" + data = [file_id] + if self._query_context_ids is not None: + ids_array = ", ".join("?" * len(self._query_context_ids)) + query += " and context_id in (" + ids_array + ")" + data += self._query_context_ids + with con.execute(query, data) as cur: + return list(cur) + + def contexts_by_lineno(self, filename: str) -> Dict[TLineNo, List[str]]: + """Get the contexts for each line in a file. + + Returns: + A dict mapping line numbers to a list of context names. + + .. versionadded:: 5.0 + + """ + self._start_using() + with self._connect() as con: + file_id = self._file_id(filename) + if file_id is None: + return {} + + lineno_contexts_map = collections.defaultdict(set) + if self.has_arcs(): + query = ( + "select arc.fromno, arc.tono, context.context " + + "from arc, context " + + "where arc.file_id = ? and arc.context_id = context.id" + ) + data = [file_id] + if self._query_context_ids is not None: + ids_array = ", ".join("?" 
* len(self._query_context_ids)) + query += " and arc.context_id in (" + ids_array + ")" + data += self._query_context_ids + with con.execute(query, data) as cur: + for fromno, tono, context in cur: + if fromno > 0: + lineno_contexts_map[fromno].add(context) + if tono > 0: + lineno_contexts_map[tono].add(context) + else: + query = ( + "select l.numbits, c.context from line_bits l, context c " + + "where l.context_id = c.id " + + "and file_id = ?" + ) + data = [file_id] + if self._query_context_ids is not None: + ids_array = ", ".join("?" * len(self._query_context_ids)) + query += " and l.context_id in (" + ids_array + ")" + data += self._query_context_ids + with con.execute(query, data) as cur: + for numbits, context in cur: + for lineno in numbits_to_nums(numbits): + lineno_contexts_map[lineno].add(context) + + return {lineno: list(contexts) for lineno, contexts in lineno_contexts_map.items()} + + @classmethod + def sys_info(cls) -> List[Tuple[str, Any]]: + """Our information for `Coverage.sys_info`. + + Returns a list of (key, value) pairs. + + """ + with SqliteDb(":memory:", debug=NoDebugging()) as db: + with db.execute("pragma temp_store") as cur: + temp_store = [row[0] for row in cur] + with db.execute("pragma compile_options") as cur: + copts = [row[0] for row in cur] + copts = textwrap.wrap(", ".join(copts), width=75) + + return [ + ("sqlite3_sqlite_version", sqlite3.sqlite_version), + ("sqlite3_temp_store", temp_store), + ("sqlite3_compile_options", copts), + ] + + +def filename_suffix(suffix: Union[str, bool, None]) -> Union[str, None]: + """Compute a filename suffix for a data file. + + If `suffix` is a string or None, simply return it. If `suffix` is True, + then build a suffix incorporating the hostname, process id, and a random + number. + + Returns a string or None. + + """ + if suffix is True: + # If data_suffix was a simple true value, then make a suffix with + # plenty of distinguishing information. We do this here in + # `save()` at the last minute so that the pid will be correct even + # if the process forks. + dice = random.Random(os.urandom(8)).randint(0, 999999) + suffix = "%s.%s.%06d" % (socket.gethostname(), os.getpid(), dice) + elif suffix is False: + suffix = None + return suffix diff --git a/venv/lib/python3.10/site-packages/coverage/sqlitedb.py b/venv/lib/python3.10/site-packages/coverage/sqlitedb.py new file mode 100644 index 0000000..4d15b5b --- /dev/null +++ b/venv/lib/python3.10/site-packages/coverage/sqlitedb.py @@ -0,0 +1,215 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""SQLite abstraction for coverage.py""" + +from __future__ import annotations + +import contextlib +import re +import sqlite3 + +from typing import cast, Any, Iterable, Iterator, List, Optional, Tuple + +from coverage.debug import auto_repr, clipped_repr, exc_one_line +from coverage.exceptions import DataError +from coverage.types import TDebugCtl + + +class SqliteDb: + """A simple abstraction over a SQLite database. 
+ + Use as a context manager, then you can use it like a + :class:`python:sqlite3.Connection` object:: + + with SqliteDb(filename, debug_control) as db: + with db.execute("select a, b from some_table") as cur: + for a, b in cur: + etc(a, b) + + """ + def __init__(self, filename: str, debug: TDebugCtl) -> None: + self.debug = debug + self.filename = filename + self.nest = 0 + self.con: Optional[sqlite3.Connection] = None + + __repr__ = auto_repr + + def _connect(self) -> None: + """Connect to the db and do universal initialization.""" + if self.con is not None: + return + + # It can happen that Python switches threads while the tracer writes + # data. The second thread will also try to write to the data, + # effectively causing a nested context. However, given the idempotent + # nature of the tracer operations, sharing a connection among threads + # is not a problem. + if self.debug.should("sql"): + self.debug.write(f"Connecting to {self.filename!r}") + try: + self.con = sqlite3.connect(self.filename, check_same_thread=False) + except sqlite3.Error as exc: + raise DataError(f"Couldn't use data file {self.filename!r}: {exc}") from exc + + self.con.create_function("REGEXP", 2, lambda txt, pat: re.search(txt, pat) is not None) + + # This pragma makes writing faster. It disables rollbacks, but we never need them. + self.execute_void("pragma journal_mode=off") + # This pragma makes writing faster. It can fail in unusual situations + # (https://github.com/nedbat/coveragepy/issues/1646), so use fail_ok=True + # to keep things going. + self.execute_void("pragma synchronous=off", fail_ok=True) + + def close(self) -> None: + """If needed, close the connection.""" + if self.con is not None and self.filename != ":memory:": + self.con.close() + self.con = None + + def __enter__(self) -> SqliteDb: + if self.nest == 0: + self._connect() + assert self.con is not None + self.con.__enter__() + self.nest += 1 + return self + + def __exit__(self, exc_type, exc_value, traceback) -> None: # type: ignore[no-untyped-def] + self.nest -= 1 + if self.nest == 0: + try: + assert self.con is not None + self.con.__exit__(exc_type, exc_value, traceback) + self.close() + except Exception as exc: + if self.debug.should("sql"): + self.debug.write(f"EXCEPTION from __exit__: {exc_one_line(exc)}") + raise DataError(f"Couldn't end data file {self.filename!r}: {exc}") from exc + + def _execute(self, sql: str, parameters: Iterable[Any]) -> sqlite3.Cursor: + """Same as :meth:`python:sqlite3.Connection.execute`.""" + if self.debug.should("sql"): + tail = f" with {parameters!r}" if parameters else "" + self.debug.write(f"Executing {sql!r}{tail}") + try: + assert self.con is not None + try: + return self.con.execute(sql, parameters) # type: ignore[arg-type] + except Exception: + # In some cases, an error might happen that isn't really an + # error. Try again immediately. + # https://github.com/nedbat/coveragepy/issues/1010 + return self.con.execute(sql, parameters) # type: ignore[arg-type] + except sqlite3.Error as exc: + msg = str(exc) + if self.filename != ":memory:": + try: + # `execute` is the first thing we do with the database, so try + # hard to provide useful hints if something goes wrong now. + with open(self.filename, "rb") as bad_file: + cov4_sig = b"!coverage.py: This is a private format" + if bad_file.read(len(cov4_sig)) == cov4_sig: + msg = ( + "Looks like a coverage 4.x data file. " + + "Are you mixing versions of coverage?" 
+ ) + except Exception: + pass + if self.debug.should("sql"): + self.debug.write(f"EXCEPTION from execute: {exc_one_line(exc)}") + raise DataError(f"Couldn't use data file {self.filename!r}: {msg}") from exc + + @contextlib.contextmanager + def execute( + self, + sql: str, + parameters: Iterable[Any] = (), + ) -> Iterator[sqlite3.Cursor]: + """Context managed :meth:`python:sqlite3.Connection.execute`. + + Use with a ``with`` statement to auto-close the returned cursor. + """ + cur = self._execute(sql, parameters) + try: + yield cur + finally: + cur.close() + + def execute_void(self, sql: str, parameters: Iterable[Any] = (), fail_ok: bool = False) -> None: + """Same as :meth:`python:sqlite3.Connection.execute` when you don't need the cursor. + + If `fail_ok` is True, then SQLite errors are ignored. + """ + try: + # PyPy needs the .close() calls here, or sqlite gets twisted up: + # https://bitbucket.org/pypy/pypy/issues/2872/default-isolation-mode-is-different-on + self._execute(sql, parameters).close() + except DataError: + if not fail_ok: + raise + + def execute_for_rowid(self, sql: str, parameters: Iterable[Any] = ()) -> int: + """Like execute, but returns the lastrowid.""" + with self.execute(sql, parameters) as cur: + assert cur.lastrowid is not None + rowid: int = cur.lastrowid + if self.debug.should("sqldata"): + self.debug.write(f"Row id result: {rowid!r}") + return rowid + + def execute_one(self, sql: str, parameters: Iterable[Any] = ()) -> Optional[Tuple[Any, ...]]: + """Execute a statement and return the one row that results. + + This is like execute(sql, parameters).fetchone(), except it is + correct in reading the entire result set. This will raise an + exception if more than one row results. + + Returns a row, or None if there were no rows. + """ + with self.execute(sql, parameters) as cur: + rows = list(cur) + if len(rows) == 0: + return None + elif len(rows) == 1: + return cast(Tuple[Any, ...], rows[0]) + else: + raise AssertionError(f"SQL {sql!r} shouldn't return {len(rows)} rows") + + def _executemany(self, sql: str, data: List[Any]) -> sqlite3.Cursor: + """Same as :meth:`python:sqlite3.Connection.executemany`.""" + if self.debug.should("sql"): + final = ":" if self.debug.should("sqldata") else "" + self.debug.write(f"Executing many {sql!r} with {len(data)} rows{final}") + if self.debug.should("sqldata"): + for i, row in enumerate(data): + self.debug.write(f"{i:4d}: {row!r}") + assert self.con is not None + try: + return self.con.executemany(sql, data) + except Exception: + # In some cases, an error might happen that isn't really an + # error. Try again immediately. 
+ # https://github.com/nedbat/coveragepy/issues/1010 + return self.con.executemany(sql, data) + + def executemany_void(self, sql: str, data: Iterable[Any]) -> None: + """Same as :meth:`python:sqlite3.Connection.executemany` when you don't need the cursor.""" + data = list(data) + if data: + self._executemany(sql, data).close() + + def executescript(self, script: str) -> None: + """Same as :meth:`python:sqlite3.Connection.executescript`.""" + if self.debug.should("sql"): + self.debug.write("Executing script with {} chars: {}".format( + len(script), clipped_repr(script, 100), + )) + assert self.con is not None + self.con.executescript(script).close() + + def dump(self) -> str: + """Return a multi-line string, the SQL dump of the database.""" + assert self.con is not None + return "\n".join(self.con.iterdump()) diff --git a/venv/lib/python3.10/site-packages/coverage/templite.py b/venv/lib/python3.10/site-packages/coverage/templite.py new file mode 100644 index 0000000..11ea847 --- /dev/null +++ b/venv/lib/python3.10/site-packages/coverage/templite.py @@ -0,0 +1,309 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""A simple Python template renderer, for a nano-subset of Django syntax. + +For a detailed discussion of this code, see this chapter from 500 Lines: +http://aosabook.org/en/500L/a-template-engine.html + +""" + +# Coincidentally named the same as http://code.activestate.com/recipes/496702/ + +from __future__ import annotations + +import re + +from typing import ( + Any, Callable, Dict, List, NoReturn, Optional, Set, Union, cast, +) + + +class TempliteSyntaxError(ValueError): + """Raised when a template has a syntax error.""" + pass + + +class TempliteValueError(ValueError): + """Raised when an expression won't evaluate in a template.""" + pass + + +class CodeBuilder: + """Build source code conveniently.""" + + def __init__(self, indent: int = 0) -> None: + self.code: List[Union[str, CodeBuilder]] = [] + self.indent_level = indent + + def __str__(self) -> str: + return "".join(str(c) for c in self.code) + + def add_line(self, line: str) -> None: + """Add a line of source to the code. + + Indentation and newline will be added for you, don't provide them. + + """ + self.code.extend([" " * self.indent_level, line, "\n"]) + + def add_section(self) -> CodeBuilder: + """Add a section, a sub-CodeBuilder.""" + section = CodeBuilder(self.indent_level) + self.code.append(section) + return section + + INDENT_STEP = 4 # PEP8 says so! + + def indent(self) -> None: + """Increase the current indent for following lines.""" + self.indent_level += self.INDENT_STEP + + def dedent(self) -> None: + """Decrease the current indent for following lines.""" + self.indent_level -= self.INDENT_STEP + + def get_globals(self) -> Dict[str, Any]: + """Execute the code, and return a dict of globals it defines.""" + # A check that the caller really finished all the blocks they started. + assert self.indent_level == 0 + # Get the Python source as a single string. + python_source = str(self) + # Execute the source, defining globals, and return them. + global_namespace: Dict[str, Any] = {} + exec(python_source, global_namespace) + return global_namespace + + +class Templite: + """A simple template renderer, for a nano-subset of Django syntax. 
+ + Supported constructs are extended variable access:: + + {{var.modifier.modifier|filter|filter}} + + loops:: + + {% for var in list %}...{% endfor %} + + and ifs:: + + {% if var %}...{% endif %} + + Comments are within curly-hash markers:: + + {# This will be ignored #} + + Lines between `{% joined %}` and `{% endjoined %}` will have lines stripped + and joined. Be careful, this could join words together! + + Any of these constructs can have a hyphen at the end (`-}}`, `-%}`, `-#}`), + which will collapse the white space following the tag. + + Construct a Templite with the template text, then use `render` against a + dictionary context to create a finished string:: + + templite = Templite(''' +
+            <h1>Hello {{name|upper}}!</h1>
+            {% for topic in topics %}
+                <p>You are interested in {{topic}}.</p>
+            {% endfor %}
+            ''',
+            {"upper": str.upper},
+        )
+        text = templite.render({
+            "name": "Ned",
+            "topics": ["Python", "Geometry", "Juggling"],
+        })
+
+    """
+    def __init__(self, text: str, *contexts: Dict[str, Any]) -> None:
+        """Construct a Templite with the given `text`.
+
+        `contexts` are dictionaries of values to use for future renderings.
+        These are good for filters and global values.
+
+        """
+        self.context = {}
+        for context in contexts:
+            self.context.update(context)
+
+        self.all_vars: Set[str] = set()
+        self.loop_vars: Set[str] = set()
+
+        # We construct a function in source form, then compile it and hold onto
+        # it, and execute it to render the template.
+        code = CodeBuilder()
+
+        code.add_line("def render_function(context, do_dots):")
+        code.indent()
+        vars_code = code.add_section()
+        code.add_line("result = []")
+        code.add_line("append_result = result.append")
+        code.add_line("extend_result = result.extend")
+        code.add_line("to_str = str")
+
+        buffered: List[str] = []
+
+        def flush_output() -> None:
+            """Force `buffered` to the code builder."""
+            if len(buffered) == 1:
+                code.add_line("append_result(%s)" % buffered[0])
+            elif len(buffered) > 1:
+                code.add_line("extend_result([%s])" % ", ".join(buffered))
+            del buffered[:]
+
+        ops_stack = []
+
+        # Split the text to form a list of tokens.
+        tokens = re.split(r"(?s)({{.*?}}|{%.*?%}|{#.*?#})", text)
+
+        squash = in_joined = False
+
+        for token in tokens:
+            if token.startswith("{"):
+                start, end = 2, -2
+                squash = (token[-3] == "-")
+                if squash:
+                    end = -3
+
+                if token.startswith("{#"):
+                    # Comment: ignore it and move on.
+                    continue
+                elif token.startswith("{{"):
+                    # An expression to evaluate.
+                    expr = self._expr_code(token[start:end].strip())
+                    buffered.append("to_str(%s)" % expr)
+                else:
+                    # token.startswith("{%")
+                    # Action tag: split into words and parse further.
+                    flush_output()
+
+                    words = token[start:end].strip().split()
+                    if words[0] == "if":
+                        # An if statement: evaluate the expression to determine if.
+                        if len(words) != 2:
+                            self._syntax_error("Don't understand if", token)
+                        ops_stack.append("if")
+                        code.add_line("if %s:" % self._expr_code(words[1]))
+                        code.indent()
+                    elif words[0] == "for":
+                        # A loop: iterate over expression result.
+                        if len(words) != 4 or words[2] != "in":
+                            self._syntax_error("Don't understand for", token)
+                        ops_stack.append("for")
+                        self._variable(words[1], self.loop_vars)
+                        code.add_line(
+                            "for c_{} in {}:".format(
+                                words[1],
+                                self._expr_code(words[3])
+                            )
+                        )
+                        code.indent()
+                    elif words[0] == "joined":
+                        ops_stack.append("joined")
+                        in_joined = True
+                    elif words[0].startswith("end"):
+                        # Endsomething. Pop the ops stack.
+                        if len(words) != 1:
+                            self._syntax_error("Don't understand end", token)
+                        end_what = words[0][3:]
+                        if not ops_stack:
+                            self._syntax_error("Too many ends", token)
+                        start_what = ops_stack.pop()
+                        if start_what != end_what:
+                            self._syntax_error("Mismatched end tag", end_what)
+                        if end_what == "joined":
+                            in_joined = False
+                        else:
+                            code.dedent()
+                    else:
+                        self._syntax_error("Don't understand tag", words[0])
+            else:
+                # Literal content. If it isn't empty, output it.
+ if in_joined: + token = re.sub(r"\s*\n\s*", "", token.strip()) + elif squash: + token = token.lstrip() + if token: + buffered.append(repr(token)) + + if ops_stack: + self._syntax_error("Unmatched action tag", ops_stack[-1]) + + flush_output() + + for var_name in self.all_vars - self.loop_vars: + vars_code.add_line(f"c_{var_name} = context[{var_name!r}]") + + code.add_line("return ''.join(result)") + code.dedent() + self._render_function = cast( + Callable[ + [Dict[str, Any], Callable[..., Any]], + str + ], + code.get_globals()["render_function"], + ) + + def _expr_code(self, expr: str) -> str: + """Generate a Python expression for `expr`.""" + if "|" in expr: + pipes = expr.split("|") + code = self._expr_code(pipes[0]) + for func in pipes[1:]: + self._variable(func, self.all_vars) + code = f"c_{func}({code})" + elif "." in expr: + dots = expr.split(".") + code = self._expr_code(dots[0]) + args = ", ".join(repr(d) for d in dots[1:]) + code = f"do_dots({code}, {args})" + else: + self._variable(expr, self.all_vars) + code = "c_%s" % expr + return code + + def _syntax_error(self, msg: str, thing: Any) -> NoReturn: + """Raise a syntax error using `msg`, and showing `thing`.""" + raise TempliteSyntaxError(f"{msg}: {thing!r}") + + def _variable(self, name: str, vars_set: Set[str]) -> None: + """Track that `name` is used as a variable. + + Adds the name to `vars_set`, a set of variable names. + + Raises an syntax error if `name` is not a valid name. + + """ + if not re.match(r"[_a-zA-Z][_a-zA-Z0-9]*$", name): + self._syntax_error("Not a valid name", name) + vars_set.add(name) + + def render(self, context: Optional[Dict[str, Any]] = None) -> str: + """Render this template by applying it to `context`. + + `context` is a dictionary of values to use in this rendering. + + """ + # Make the complete context we'll use. + render_context = dict(self.context) + if context: + render_context.update(context) + return self._render_function(render_context, self._do_dots) + + def _do_dots(self, value: Any, *dots: str) -> Any: + """Evaluate dotted expressions at run-time.""" + for dot in dots: + try: + value = getattr(value, dot) + except AttributeError: + try: + value = value[dot] + except (TypeError, KeyError) as exc: + raise TempliteValueError( + f"Couldn't evaluate {value!r}.{dot}" + ) from exc + if callable(value): + value = value() + return value diff --git a/venv/lib/python3.10/site-packages/coverage/tomlconfig.py b/venv/lib/python3.10/site-packages/coverage/tomlconfig.py new file mode 100644 index 0000000..139cb2c --- /dev/null +++ b/venv/lib/python3.10/site-packages/coverage/tomlconfig.py @@ -0,0 +1,208 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""TOML configuration support for coverage.py""" + +from __future__ import annotations + +import os +import re + +from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Type, TypeVar + +from coverage import env +from coverage.exceptions import ConfigError +from coverage.misc import import_third_party, substitute_variables +from coverage.types import TConfigSectionOut, TConfigValueOut + + +if env.PYVERSION >= (3, 11, 0, "alpha", 7): + import tomllib # pylint: disable=import-error + has_tomllib = True +else: + # TOML support on Python 3.10 and below is an install-time extra option. 
+ tomllib, has_tomllib = import_third_party("tomli") + + +class TomlDecodeError(Exception): + """An exception class that exists even when toml isn't installed.""" + pass + + +TWant = TypeVar("TWant") + +class TomlConfigParser: + """TOML file reading with the interface of HandyConfigParser.""" + + # This class has the same interface as config.HandyConfigParser, no + # need for docstrings. + # pylint: disable=missing-function-docstring + + def __init__(self, our_file: bool) -> None: + self.our_file = our_file + self.data: Dict[str, Any] = {} + + def read(self, filenames: Iterable[str]) -> List[str]: + # RawConfigParser takes a filename or list of filenames, but we only + # ever call this with a single filename. + assert isinstance(filenames, (bytes, str, os.PathLike)) + filename = os.fspath(filenames) + + try: + with open(filename, encoding='utf-8') as fp: + toml_text = fp.read() + except OSError: + return [] + if has_tomllib: + try: + self.data = tomllib.loads(toml_text) + except tomllib.TOMLDecodeError as err: + raise TomlDecodeError(str(err)) from err + return [filename] + else: + has_toml = re.search(r"^\[tool\.coverage(\.|])", toml_text, flags=re.MULTILINE) + if self.our_file or has_toml: + # Looks like they meant to read TOML, but we can't read it. + msg = "Can't read {!r} without TOML support. Install with [toml] extra" + raise ConfigError(msg.format(filename)) + return [] + + def _get_section(self, section: str) -> Tuple[Optional[str], Optional[TConfigSectionOut]]: + """Get a section from the data. + + Arguments: + section (str): A section name, which can be dotted. + + Returns: + name (str): the actual name of the section that was found, if any, + or None. + data (str): the dict of data in the section, or None if not found. + + """ + prefixes = ["tool.coverage."] + for prefix in prefixes: + real_section = prefix + section + parts = real_section.split(".") + try: + data = self.data[parts[0]] + for part in parts[1:]: + data = data[part] + except KeyError: + continue + break + else: + return None, None + return real_section, data + + def _get(self, section: str, option: str) -> Tuple[str, TConfigValueOut]: + """Like .get, but returns the real section name and the value.""" + name, data = self._get_section(section) + if data is None: + raise ConfigError(f"No section: {section!r}") + assert name is not None + try: + value = data[option] + except KeyError: + raise ConfigError(f"No option {option!r} in section: {name!r}") from None + return name, value + + def _get_single(self, section: str, option: str) -> Any: + """Get a single-valued option. + + Performs environment substitution if the value is a string. Other types + will be converted later as needed. 
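# --- Editor's sketch (not part of the patch): how section names map through
# --- this parser, assuming a pyproject.toml containing:
# ---
# ---     [tool.coverage.run]
# ---     branch = true
# ---     source = ["src"]
from coverage.tomlconfig import TomlConfigParser

parser = TomlConfigParser(our_file=False)
parser.read("pyproject.toml")
print(parser.getboolean("run", "branch"))   # "run" is looked up as "tool.coverage.run"
print(parser.getlist("run", "source"))      # environment variables are substituted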
+ """ + name, value = self._get(section, option) + if isinstance(value, str): + value = substitute_variables(value, os.environ) + return name, value + + def has_option(self, section: str, option: str) -> bool: + _, data = self._get_section(section) + if data is None: + return False + return option in data + + def real_section(self, section: str) -> Optional[str]: + name, _ = self._get_section(section) + return name + + def has_section(self, section: str) -> bool: + name, _ = self._get_section(section) + return bool(name) + + def options(self, section: str) -> List[str]: + _, data = self._get_section(section) + if data is None: + raise ConfigError(f"No section: {section!r}") + return list(data.keys()) + + def get_section(self, section: str) -> TConfigSectionOut: + _, data = self._get_section(section) + return data or {} + + def get(self, section: str, option: str) -> Any: + _, value = self._get_single(section, option) + return value + + def _check_type( + self, + section: str, + option: str, + value: Any, + type_: Type[TWant], + converter: Optional[Callable[[Any], TWant]], + type_desc: str, + ) -> TWant: + """Check that `value` has the type we want, converting if needed. + + Returns the resulting value of the desired type. + """ + if isinstance(value, type_): + return value + if isinstance(value, str) and converter is not None: + try: + return converter(value) + except Exception as e: + raise ValueError( + f"Option [{section}]{option} couldn't convert to {type_desc}: {value!r}" + ) from e + raise ValueError( + f"Option [{section}]{option} is not {type_desc}: {value!r}" + ) + + def getboolean(self, section: str, option: str) -> bool: + name, value = self._get_single(section, option) + bool_strings = {"true": True, "false": False} + return self._check_type(name, option, value, bool, bool_strings.__getitem__, "a boolean") + + def _get_list(self, section: str, option: str) -> Tuple[str, List[str]]: + """Get a list of strings, substituting environment variables in the elements.""" + name, values = self._get(section, option) + values = self._check_type(name, option, values, list, None, "a list") + values = [substitute_variables(value, os.environ) for value in values] + return name, values + + def getlist(self, section: str, option: str) -> List[str]: + _, values = self._get_list(section, option) + return values + + def getregexlist(self, section: str, option: str) -> List[str]: + name, values = self._get_list(section, option) + for value in values: + value = value.strip() + try: + re.compile(value) + except re.error as e: + raise ConfigError(f"Invalid [{name}].{option} value {value!r}: {e}") from e + return values + + def getint(self, section: str, option: str) -> int: + name, value = self._get_single(section, option) + return self._check_type(name, option, value, int, int, "an integer") + + def getfloat(self, section: str, option: str) -> float: + name, value = self._get_single(section, option) + if isinstance(value, int): + value = float(value) + return self._check_type(name, option, value, float, float, "a float") diff --git a/venv/lib/python3.10/site-packages/coverage/tracer.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/coverage/tracer.cpython-310-x86_64-linux-gnu.so new file mode 100755 index 0000000..f2f0073 Binary files /dev/null and b/venv/lib/python3.10/site-packages/coverage/tracer.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/coverage/types.py b/venv/lib/python3.10/site-packages/coverage/types.py new file mode 100644 index 
0000000..82f1ad3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/coverage/types.py @@ -0,0 +1,190 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +""" +Types for use throughout coverage.py. +""" + +from __future__ import annotations + +import os +import pathlib + +from types import FrameType, ModuleType +from typing import ( + Any, Callable, Dict, Iterable, List, Mapping, Optional, Protocol, + Set, Tuple, Type, Union, TYPE_CHECKING, +) + +if TYPE_CHECKING: + from coverage.plugin import FileTracer + +## File paths + +# For arguments that are file paths: +if TYPE_CHECKING: + FilePath = Union[str, os.PathLike[str]] +else: + # PathLike < python3.9 doesn't support subscription + FilePath = Union[str, os.PathLike] +# For testing FilePath arguments +FilePathClasses = [str, pathlib.Path] +FilePathType = Union[Type[str], Type[pathlib.Path]] + +## Python tracing + +class TTraceFn(Protocol): + """A Python trace function.""" + def __call__( + self, + frame: FrameType, + event: str, + arg: Any, + lineno: Optional[TLineNo] = None # Our own twist, see collector.py + ) -> Optional[TTraceFn]: + ... + +## Coverage.py tracing + +# Line numbers are pervasive enough that they deserve their own type. +TLineNo = int + +TArc = Tuple[TLineNo, TLineNo] + +class TFileDisposition(Protocol): + """A simple value type for recording what to do with a file.""" + + original_filename: str + canonical_filename: str + source_filename: Optional[str] + trace: bool + reason: str + file_tracer: Optional[FileTracer] + has_dynamic_filename: bool + + +# When collecting data, we use a dictionary with a few possible shapes. The +# keys are always file names. +# - If measuring line coverage, the values are sets of line numbers. +# - If measuring arcs in the Python tracer, the values are sets of arcs (pairs +# of line numbers). +# - If measuring arcs in the C tracer, the values are sets of packed arcs (two +# line numbers combined into one integer). + +TTraceFileData = Union[Set[TLineNo], Set[TArc], Set[int]] + +TTraceData = Dict[str, TTraceFileData] + +class TTracer(Protocol): + """Either CTracer or PyTracer.""" + + data: TTraceData + trace_arcs: bool + should_trace: Callable[[str, FrameType], TFileDisposition] + should_trace_cache: Mapping[str, Optional[TFileDisposition]] + should_start_context: Optional[Callable[[FrameType], Optional[str]]] + switch_context: Optional[Callable[[Optional[str]], None]] + warn: TWarnFn + + def __init__(self) -> None: + ... + + def start(self) -> TTraceFn: + """Start this tracer, returning a trace function.""" + + def stop(self) -> None: + """Stop this tracer.""" + + def activity(self) -> bool: + """Has there been any activity?""" + + def reset_activity(self) -> None: + """Reset the activity() flag.""" + + def get_stats(self) -> Optional[Dict[str, int]]: + """Return a dictionary of statistics, or None.""" + +## Coverage + +# Many places use kwargs as Coverage kwargs. +TCovKwargs = Any + + +## Configuration + +# One value read from a config file. +TConfigValueIn = Optional[Union[bool, int, float, str, Iterable[str]]] +TConfigValueOut = Optional[Union[bool, int, float, str, List[str]]] +# An entire config section, mapping option names to values. 
+TConfigSectionIn = Mapping[str, TConfigValueIn] +TConfigSectionOut = Mapping[str, TConfigValueOut] + +class TConfigurable(Protocol): + """Something that can proxy to the coverage configuration settings.""" + + def get_option(self, option_name: str) -> Optional[TConfigValueOut]: + """Get an option from the configuration. + + `option_name` is a colon-separated string indicating the section and + option name. For example, the ``branch`` option in the ``[run]`` + section of the config file would be indicated with `"run:branch"`. + + Returns the value of the option. + + """ + + def set_option(self, option_name: str, value: Union[TConfigValueIn, TConfigSectionIn]) -> None: + """Set an option in the configuration. + + `option_name` is a colon-separated string indicating the section and + option name. For example, the ``branch`` option in the ``[run]`` + section of the config file would be indicated with `"run:branch"`. + + `value` is the new value for the option. + + """ + +class TPluginConfig(Protocol): + """Something that can provide options to a plugin.""" + + def get_plugin_options(self, plugin: str) -> TConfigSectionOut: + """Get the options for a plugin.""" + + +## Parsing + +TMorf = Union[ModuleType, str] + +TSourceTokenLines = Iterable[List[Tuple[str, str]]] + +## Plugins + +class TPlugin(Protocol): + """What all plugins have in common.""" + _coverage_plugin_name: str + _coverage_enabled: bool + + +## Debugging + +class TWarnFn(Protocol): + """A callable warn() function.""" + def __call__(self, msg: str, slug: Optional[str] = None, once: bool = False) -> None: + ... + + +class TDebugCtl(Protocol): + """A DebugControl object, or something like it.""" + + def should(self, option: str) -> bool: + """Decide whether to output debug information in category `option`.""" + + def write(self, msg: str) -> None: + """Write a line of debug output.""" + + +class TWritable(Protocol): + """Anything that can be written to.""" + + def write(self, msg: str) -> None: + """Write a message.""" diff --git a/venv/lib/python3.10/site-packages/coverage/version.py b/venv/lib/python3.10/site-packages/coverage/version.py new file mode 100644 index 0000000..83d396a --- /dev/null +++ b/venv/lib/python3.10/site-packages/coverage/version.py @@ -0,0 +1,50 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""The version and URL for coverage.py""" +# This file is exec'ed in setup.py, don't import anything! + +from __future__ import annotations + +# version_info: same semantics as sys.version_info. +# _dev: the .devN suffix if any. 
+version_info = (7, 3, 0, "final", 0) +_dev = 0 + + +def _make_version( + major: int, + minor: int, + micro: int, + releaselevel: str = "final", + serial: int = 0, + dev: int = 0, +) -> str: + """Create a readable version string from version_info tuple components.""" + assert releaselevel in ["alpha", "beta", "candidate", "final"] + version = "%d.%d.%d" % (major, minor, micro) + if releaselevel != "final": + short = {"alpha": "a", "beta": "b", "candidate": "rc"}[releaselevel] + version += f"{short}{serial}" + if dev != 0: + version += f".dev{dev}" + return version + + +def _make_url( + major: int, + minor: int, + micro: int, + releaselevel: str, + serial: int = 0, + dev: int = 0, +) -> str: + """Make the URL people should start at for this version of coverage.py.""" + return ( + "https://coverage.readthedocs.io/en/" + + _make_version(major, minor, micro, releaselevel, serial, dev) + ) + + +__version__ = _make_version(*version_info, _dev) +__url__ = _make_url(*version_info, _dev) diff --git a/venv/lib/python3.10/site-packages/coverage/xmlreport.py b/venv/lib/python3.10/site-packages/coverage/xmlreport.py new file mode 100644 index 0000000..819b4c6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/coverage/xmlreport.py @@ -0,0 +1,260 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""XML reporting for coverage.py""" + +from __future__ import annotations + +import os +import os.path +import sys +import time +import xml.dom.minidom + +from dataclasses import dataclass +from typing import Any, Dict, IO, Iterable, Optional, TYPE_CHECKING + +from coverage import __version__, files +from coverage.misc import isolate_module, human_sorted, human_sorted_items +from coverage.plugin import FileReporter +from coverage.report_core import get_analysis_to_report +from coverage.results import Analysis +from coverage.types import TMorf +from coverage.version import __url__ + +if TYPE_CHECKING: + from coverage import Coverage + +os = isolate_module(os) + + +DTD_URL = "https://raw.githubusercontent.com/cobertura/web/master/htdocs/xml/coverage-04.dtd" + + +def rate(hit: int, num: int) -> str: + """Return the fraction of `hit`/`num`, as a string.""" + if num == 0: + return "1" + else: + return "%.4g" % (hit / num) + + +@dataclass +class PackageData: + """Data we keep about each "package" (in Java terms).""" + elements: Dict[str, xml.dom.minidom.Element] + hits: int + lines: int + br_hits: int + branches: int + + +def appendChild(parent: Any, child: Any) -> None: + """Append a child to a parent, in a way mypy will shut up about.""" + parent.appendChild(child) + + +class XmlReporter: + """A reporter for writing Cobertura-style XML coverage results.""" + + report_type = "XML report" + + def __init__(self, coverage: Coverage) -> None: + self.coverage = coverage + self.config = self.coverage.config + + self.source_paths = set() + if self.config.source: + for src in self.config.source: + if os.path.exists(src): + if self.config.relative_files: + src = src.rstrip(r"\/") + else: + src = files.canonical_filename(src) + self.source_paths.add(src) + self.packages: Dict[str, PackageData] = {} + self.xml_out: xml.dom.minidom.Document + + def report(self, morfs: Optional[Iterable[TMorf]], outfile: Optional[IO[str]] = None) -> float: + """Generate a Cobertura-compatible XML report for `morfs`. + + `morfs` is a list of modules or file names. + + `outfile` is a file object to write the XML to. 
+ + """ + # Initial setup. + outfile = outfile or sys.stdout + has_arcs = self.coverage.get_data().has_arcs() + + # Create the DOM that will store the data. + impl = xml.dom.minidom.getDOMImplementation() + assert impl is not None + self.xml_out = impl.createDocument(None, "coverage", None) + + # Write header stuff. + xcoverage = self.xml_out.documentElement + xcoverage.setAttribute("version", __version__) + xcoverage.setAttribute("timestamp", str(int(time.time()*1000))) + xcoverage.appendChild(self.xml_out.createComment( + f" Generated by coverage.py: {__url__} " + )) + xcoverage.appendChild(self.xml_out.createComment(f" Based on {DTD_URL} ")) + + # Call xml_file for each file in the data. + for fr, analysis in get_analysis_to_report(self.coverage, morfs): + self.xml_file(fr, analysis, has_arcs) + + xsources = self.xml_out.createElement("sources") + xcoverage.appendChild(xsources) + + # Populate the XML DOM with the source info. + for path in human_sorted(self.source_paths): + xsource = self.xml_out.createElement("source") + appendChild(xsources, xsource) + txt = self.xml_out.createTextNode(path) + appendChild(xsource, txt) + + lnum_tot, lhits_tot = 0, 0 + bnum_tot, bhits_tot = 0, 0 + + xpackages = self.xml_out.createElement("packages") + xcoverage.appendChild(xpackages) + + # Populate the XML DOM with the package info. + for pkg_name, pkg_data in human_sorted_items(self.packages.items()): + xpackage = self.xml_out.createElement("package") + appendChild(xpackages, xpackage) + xclasses = self.xml_out.createElement("classes") + appendChild(xpackage, xclasses) + for _, class_elt in human_sorted_items(pkg_data.elements.items()): + appendChild(xclasses, class_elt) + xpackage.setAttribute("name", pkg_name.replace(os.sep, ".")) + xpackage.setAttribute("line-rate", rate(pkg_data.hits, pkg_data.lines)) + if has_arcs: + branch_rate = rate(pkg_data.br_hits, pkg_data.branches) + else: + branch_rate = "0" + xpackage.setAttribute("branch-rate", branch_rate) + xpackage.setAttribute("complexity", "0") + + lhits_tot += pkg_data.hits + lnum_tot += pkg_data.lines + bhits_tot += pkg_data.br_hits + bnum_tot += pkg_data.branches + + xcoverage.setAttribute("lines-valid", str(lnum_tot)) + xcoverage.setAttribute("lines-covered", str(lhits_tot)) + xcoverage.setAttribute("line-rate", rate(lhits_tot, lnum_tot)) + if has_arcs: + xcoverage.setAttribute("branches-valid", str(bnum_tot)) + xcoverage.setAttribute("branches-covered", str(bhits_tot)) + xcoverage.setAttribute("branch-rate", rate(bhits_tot, bnum_tot)) + else: + xcoverage.setAttribute("branches-covered", "0") + xcoverage.setAttribute("branches-valid", "0") + xcoverage.setAttribute("branch-rate", "0") + xcoverage.setAttribute("complexity", "0") + + # Write the output file. + outfile.write(serialize_xml(self.xml_out)) + + # Return the total percentage. + denom = lnum_tot + bnum_tot + if denom == 0: + pct = 0.0 + else: + pct = 100.0 * (lhits_tot + bhits_tot) / denom + return pct + + def xml_file(self, fr: FileReporter, analysis: Analysis, has_arcs: bool) -> None: + """Add to the XML report for a single file.""" + + if self.config.skip_empty: + if analysis.numbers.n_statements == 0: + return + + # Create the "lines" and "package" XML elements, which + # are populated later. Note that a package == a directory. 
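# Aside, not part of coverage.py: the rate() helper above drives every
# line-rate and branch-rate attribute written by this reporter. Its edge
# cases at a glance, restated standalone for a quick check:
def _rate(hit: int, num: int) -> str:
    # Same logic as rate() above: a coverage fraction as a short string.
    return "1" if num == 0 else "%.4g" % (hit / num)

assert _rate(5, 10) == "0.5"
assert _rate(1, 3) == "0.3333"   # four significant digits
assert _rate(0, 0) == "1"        # zero statements counts as fully covered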
+ filename = fr.filename.replace("\\", "/") + for source_path in self.source_paths: + if not self.config.relative_files: + source_path = files.canonical_filename(source_path) + if filename.startswith(source_path.replace("\\", "/") + "/"): + rel_name = filename[len(source_path)+1:] + break + else: + rel_name = fr.relative_filename().replace("\\", "/") + self.source_paths.add(fr.filename[:-len(rel_name)].rstrip(r"\/")) + + dirname = os.path.dirname(rel_name) or "." + dirname = "/".join(dirname.split("/")[:self.config.xml_package_depth]) + package_name = dirname.replace("/", ".") + + package = self.packages.setdefault(package_name, PackageData({}, 0, 0, 0, 0)) + + xclass: xml.dom.minidom.Element = self.xml_out.createElement("class") + + appendChild(xclass, self.xml_out.createElement("methods")) + + xlines = self.xml_out.createElement("lines") + appendChild(xclass, xlines) + + xclass.setAttribute("name", os.path.relpath(rel_name, dirname)) + xclass.setAttribute("filename", rel_name.replace("\\", "/")) + xclass.setAttribute("complexity", "0") + + branch_stats = analysis.branch_stats() + missing_branch_arcs = analysis.missing_branch_arcs() + + # For each statement, create an XML "line" element. + for line in sorted(analysis.statements): + xline = self.xml_out.createElement("line") + xline.setAttribute("number", str(line)) + + # Q: can we get info about the number of times a statement is + # executed? If so, that should be recorded here. + xline.setAttribute("hits", str(int(line not in analysis.missing))) + + if has_arcs: + if line in branch_stats: + total, taken = branch_stats[line] + xline.setAttribute("branch", "true") + xline.setAttribute( + "condition-coverage", + "%d%% (%d/%d)" % (100*taken//total, taken, total) + ) + if line in missing_branch_arcs: + annlines = ["exit" if b < 0 else str(b) for b in missing_branch_arcs[line]] + xline.setAttribute("missing-branches", ",".join(annlines)) + appendChild(xlines, xline) + + class_lines = len(analysis.statements) + class_hits = class_lines - len(analysis.missing) + + if has_arcs: + class_branches = sum(t for t, k in branch_stats.values()) + missing_branches = sum(t - k for t, k in branch_stats.values()) + class_br_hits = class_branches - missing_branches + else: + class_branches = 0 + class_br_hits = 0 + + # Finalize the statistics that are collected in the XML DOM. + xclass.setAttribute("line-rate", rate(class_hits, class_lines)) + if has_arcs: + branch_rate = rate(class_br_hits, class_branches) + else: + branch_rate = "0" + xclass.setAttribute("branch-rate", branch_rate) + + package.elements[rel_name] = xclass + package.hits += class_hits + package.lines += class_lines + package.br_hits += class_br_hits + package.branches += class_branches + + +def serialize_xml(dom: xml.dom.minidom.Document) -> str: + """Serialize a minidom node to XML.""" + return dom.toprettyxml() diff --git a/venv/lib/python3.10/site-packages/wheel-0.38.4.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/distlib-0.3.7.dist-info/INSTALLER similarity index 100% rename from venv/lib/python3.10/site-packages/wheel-0.38.4.dist-info/INSTALLER rename to venv/lib/python3.10/site-packages/distlib-0.3.7.dist-info/INSTALLER diff --git a/venv/lib/python3.10/site-packages/distlib-0.3.7.dist-info/LICENSE.txt b/venv/lib/python3.10/site-packages/distlib-0.3.7.dist-info/LICENSE.txt new file mode 100644 index 0000000..c31ac56 --- /dev/null +++ b/venv/lib/python3.10/site-packages/distlib-0.3.7.dist-info/LICENSE.txt @@ -0,0 +1,284 @@ +A. 
HISTORY OF THE SOFTWARE +========================== + +Python was created in the early 1990s by Guido van Rossum at Stichting +Mathematisch Centrum (CWI, see http://www.cwi.nl) in the Netherlands +as a successor of a language called ABC. Guido remains Python's +principal author, although it includes many contributions from others. + +In 1995, Guido continued his work on Python at the Corporation for +National Research Initiatives (CNRI, see http://www.cnri.reston.va.us) +in Reston, Virginia where he released several versions of the +software. + +In May 2000, Guido and the Python core development team moved to +BeOpen.com to form the BeOpen PythonLabs team. In October of the same +year, the PythonLabs team moved to Digital Creations (now Zope +Corporation, see http://www.zope.com). In 2001, the Python Software +Foundation (PSF, see http://www.python.org/psf/) was formed, a +non-profit organization created specifically to own Python-related +Intellectual Property. Zope Corporation is a sponsoring member of +the PSF. + +All Python releases are Open Source (see http://www.opensource.org for +the Open Source Definition). Historically, most, but not all, Python +releases have also been GPL-compatible; the table below summarizes +the various releases. + + Release Derived Year Owner GPL- + from compatible? (1) + + 0.9.0 thru 1.2 1991-1995 CWI yes + 1.3 thru 1.5.2 1.2 1995-1999 CNRI yes + 1.6 1.5.2 2000 CNRI no + 2.0 1.6 2000 BeOpen.com no + 1.6.1 1.6 2001 CNRI yes (2) + 2.1 2.0+1.6.1 2001 PSF no + 2.0.1 2.0+1.6.1 2001 PSF yes + 2.1.1 2.1+2.0.1 2001 PSF yes + 2.2 2.1.1 2001 PSF yes + 2.1.2 2.1.1 2002 PSF yes + 2.1.3 2.1.2 2002 PSF yes + 2.2.1 2.2 2002 PSF yes + 2.2.2 2.2.1 2002 PSF yes + 2.2.3 2.2.2 2003 PSF yes + 2.3 2.2.2 2002-2003 PSF yes + 2.3.1 2.3 2002-2003 PSF yes + 2.3.2 2.3.1 2002-2003 PSF yes + 2.3.3 2.3.2 2002-2003 PSF yes + 2.3.4 2.3.3 2004 PSF yes + 2.3.5 2.3.4 2005 PSF yes + 2.4 2.3 2004 PSF yes + 2.4.1 2.4 2005 PSF yes + 2.4.2 2.4.1 2005 PSF yes + 2.4.3 2.4.2 2006 PSF yes + 2.4.4 2.4.3 2006 PSF yes + 2.5 2.4 2006 PSF yes + 2.5.1 2.5 2007 PSF yes + 2.5.2 2.5.1 2008 PSF yes + 2.5.3 2.5.2 2008 PSF yes + 2.6 2.5 2008 PSF yes + 2.6.1 2.6 2008 PSF yes + 2.6.2 2.6.1 2009 PSF yes + 2.6.3 2.6.2 2009 PSF yes + 2.6.4 2.6.3 2009 PSF yes + 2.6.5 2.6.4 2010 PSF yes + 3.0 2.6 2008 PSF yes + 3.0.1 3.0 2009 PSF yes + 3.1 3.0.1 2009 PSF yes + 3.1.1 3.1 2009 PSF yes + 3.1.2 3.1 2010 PSF yes + 3.2 3.1 2010 PSF yes + +Footnotes: + +(1) GPL-compatible doesn't mean that we're distributing Python under + the GPL. All Python licenses, unlike the GPL, let you distribute + a modified version without making your changes open source. The + GPL-compatible licenses make it possible to combine Python with + other software that is released under the GPL; the others don't. + +(2) According to Richard Stallman, 1.6.1 is not GPL-compatible, + because its license has a choice of law clause. According to + CNRI, however, Stallman's lawyer has told CNRI's lawyer that 1.6.1 + is "not incompatible" with the GPL. + +Thanks to the many outside volunteers who have worked under Guido's +direction to make these releases possible. + + +B. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON +=============================================================== + +PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 +-------------------------------------------- + +1. 
This LICENSE AGREEMENT is between the Python Software Foundation +("PSF"), and the Individual or Organization ("Licensee") accessing and +otherwise using this software ("Python") in source or binary form and +its associated documentation. + +2. Subject to the terms and conditions of this License Agreement, PSF hereby +grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, +analyze, test, perform and/or display publicly, prepare derivative works, +distribute, and otherwise use Python alone or in any derivative version, +provided, however, that PSF's License Agreement and PSF's notice of copyright, +i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 +Python Software Foundation; All Rights Reserved" are retained in Python alone or +in any derivative version prepared by Licensee. + +3. In the event Licensee prepares a derivative work that is based on +or incorporates Python or any part thereof, and wants to make +the derivative work available to others as provided herein, then +Licensee hereby agrees to include in any such work a brief summary of +the changes made to Python. + +4. PSF is making Python available to Licensee on an "AS IS" +basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT +INFRINGE ANY THIRD PARTY RIGHTS. + +5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON +FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS +A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, +OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +6. This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +7. Nothing in this License Agreement shall be deemed to create any +relationship of agency, partnership, or joint venture between PSF and +Licensee. This License Agreement does not grant permission to use PSF +trademarks or trade name in a trademark sense to endorse or promote +products or services of Licensee, or any third party. + +8. By copying, installing or otherwise using Python, Licensee +agrees to be bound by the terms and conditions of this License +Agreement. + + +BEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0 +------------------------------------------- + +BEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1 + +1. This LICENSE AGREEMENT is between BeOpen.com ("BeOpen"), having an +office at 160 Saratoga Avenue, Santa Clara, CA 95051, and the +Individual or Organization ("Licensee") accessing and otherwise using +this software in source or binary form and its associated +documentation ("the Software"). + +2. Subject to the terms and conditions of this BeOpen Python License +Agreement, BeOpen hereby grants Licensee a non-exclusive, +royalty-free, world-wide license to reproduce, analyze, test, perform +and/or display publicly, prepare derivative works, distribute, and +otherwise use the Software alone or in any derivative version, +provided, however, that the BeOpen Python License is retained in the +Software, alone or in any derivative version prepared by Licensee. + +3. BeOpen is making the Software available to Licensee on an "AS IS" +basis. BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. 
BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT +INFRINGE ANY THIRD PARTY RIGHTS. + +4. BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE +SOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS +AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY +DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +5. This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +6. This License Agreement shall be governed by and interpreted in all +respects by the law of the State of California, excluding conflict of +law provisions. Nothing in this License Agreement shall be deemed to +create any relationship of agency, partnership, or joint venture +between BeOpen and Licensee. This License Agreement does not grant +permission to use BeOpen trademarks or trade names in a trademark +sense to endorse or promote products or services of Licensee, or any +third party. As an exception, the "BeOpen Python" logos available at +http://www.pythonlabs.com/logos.html may be used according to the +permissions granted on that web page. + +7. By copying, installing or otherwise using the software, Licensee +agrees to be bound by the terms and conditions of this License +Agreement. + + +CNRI LICENSE AGREEMENT FOR PYTHON 1.6.1 +--------------------------------------- + +1. This LICENSE AGREEMENT is between the Corporation for National +Research Initiatives, having an office at 1895 Preston White Drive, +Reston, VA 20191 ("CNRI"), and the Individual or Organization +("Licensee") accessing and otherwise using Python 1.6.1 software in +source or binary form and its associated documentation. + +2. Subject to the terms and conditions of this License Agreement, CNRI +hereby grants Licensee a nonexclusive, royalty-free, world-wide +license to reproduce, analyze, test, perform and/or display publicly, +prepare derivative works, distribute, and otherwise use Python 1.6.1 +alone or in any derivative version, provided, however, that CNRI's +License Agreement and CNRI's notice of copyright, i.e., "Copyright (c) +1995-2001 Corporation for National Research Initiatives; All Rights +Reserved" are retained in Python 1.6.1 alone or in any derivative +version prepared by Licensee. Alternately, in lieu of CNRI's License +Agreement, Licensee may substitute the following text (omitting the +quotes): "Python 1.6.1 is made available subject to the terms and +conditions in CNRI's License Agreement. This Agreement together with +Python 1.6.1 may be located on the Internet using the following +unique, persistent identifier (known as a handle): 1895.22/1013. This +Agreement may also be obtained from a proxy server on the Internet +using the following URL: http://hdl.handle.net/1895.22/1013". + +3. In the event Licensee prepares a derivative work that is based on +or incorporates Python 1.6.1 or any part thereof, and wants to make +the derivative work available to others as provided herein, then +Licensee hereby agrees to include in any such work a brief summary of +the changes made to Python 1.6.1. + +4. CNRI is making Python 1.6.1 available to Licensee on an "AS IS" +basis. CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. 
BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6.1 WILL NOT +INFRINGE ANY THIRD PARTY RIGHTS. + +5. CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON +1.6.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS +A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1, +OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +6. This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +7. This License Agreement shall be governed by the federal +intellectual property law of the United States, including without +limitation the federal copyright law, and, to the extent such +U.S. federal law does not apply, by the law of the Commonwealth of +Virginia, excluding Virginia's conflict of law provisions. +Notwithstanding the foregoing, with regard to derivative works based +on Python 1.6.1 that incorporate non-separable material that was +previously distributed under the GNU General Public License (GPL), the +law of the Commonwealth of Virginia shall govern this License +Agreement only as to issues arising under or with respect to +Paragraphs 4, 5, and 7 of this License Agreement. Nothing in this +License Agreement shall be deemed to create any relationship of +agency, partnership, or joint venture between CNRI and Licensee. This +License Agreement does not grant permission to use CNRI trademarks or +trade name in a trademark sense to endorse or promote products or +services of Licensee, or any third party. + +8. By clicking on the "ACCEPT" button where indicated, or by copying, +installing or otherwise using Python 1.6.1, Licensee agrees to be +bound by the terms and conditions of this License Agreement. + + ACCEPT + + +CWI LICENSE AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2 +-------------------------------------------------- + +Copyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam, +The Netherlands. All rights reserved. + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of Stichting Mathematisch +Centrum or CWI not be used in advertising or publicity pertaining to +distribution of the software without specific, written prior +permission. + +STICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO +THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE +FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT +OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
diff --git a/venv/lib/python3.10/site-packages/distlib-0.3.7.dist-info/METADATA b/venv/lib/python3.10/site-packages/distlib-0.3.7.dist-info/METADATA new file mode 100644 index 0000000..97167ab --- /dev/null +++ b/venv/lib/python3.10/site-packages/distlib-0.3.7.dist-info/METADATA @@ -0,0 +1,116 @@ +Metadata-Version: 2.1 +Name: distlib +Version: 0.3.7 +Summary: Distribution utilities +Home-page: https://github.com/pypa/distlib +Author: Vinay Sajip +Author-email: vinay_sajip@red-dove.com +License: PSF-2.0 +Project-URL: Documentation, https://distlib.readthedocs.io/ +Project-URL: Source, https://github.com/pypa/distlib +Project-URL: Tracker, https://github.com/pypa/distlib/issues +Platform: any +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Console +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: Python Software Foundation License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Topic :: Software Development +License-File: LICENSE.txt + +|badge1| |badge2| + +.. |badge1| image:: https://img.shields.io/github/workflow/status/pypa/distlib/Tests + :alt: GitHub test status + +.. |badge2| image:: https://img.shields.io/codecov/c/github/pypa/distlib + :target: https://app.codecov.io/gh/pypa/distlib + :alt: GitHub coverage status + +What is it? +----------- + +Distlib is a library which implements low-level functions that relate to +packaging and distribution of Python software. It is intended to be used as the +basis for third-party packaging tools. The documentation is available at + +https://distlib.readthedocs.io/ + +Main features +------------- + +Distlib currently offers the following features: + +* The package ``distlib.database``, which implements a database of installed + distributions, as defined by :pep:`376`, and distribution dependency graph + logic. Support is also provided for non-installed distributions (i.e. + distributions registered with metadata on an index like PyPI), including + the ability to scan for dependencies and building dependency graphs. +* The package ``distlib.index``, which implements an interface to perform + operations on an index, such as registering a project, uploading a + distribution or uploading documentation. Support is included for verifying + SSL connections (with domain matching) and signing/verifying packages using + GnuPG. +* The package ``distlib.metadata``, which implements distribution metadata as + defined by :pep:`643`, :pep:`566`, :pep:`345`, :pep:`314` and :pep:`241`. +* The package ``distlib.markers``, which implements environment markers as + defined by :pep:`508`. +* The package ``distlib.manifest``, which implements lists of files used + in packaging source distributions. +* The package ``distlib.locators``, which allows finding distributions, whether + on PyPI (XML-RPC or via the "simple" interface), local directories or some + other source. 
+* The package ``distlib.resources``, which allows access to data files stored + in Python packages, both in the file system and in .zip files. +* The package ``distlib.scripts``, which allows installing of scripts with + adjustment of shebang lines and support for native Windows executable + launchers. +* The package ``distlib.version``, which implements version specifiers as + defined by :pep:`440`, but also support for working with "legacy" versions and + semantic versions. +* The package ``distlib.wheel``, which provides support for building and + installing from the Wheel format for binary distributions (see :pep:`427`). +* The package ``distlib.util``, which contains miscellaneous functions and + classes which are useful in packaging, but which do not fit neatly into + one of the other packages in ``distlib``. +* The package implements enhanced + globbing functionality such as the ability to use ``**`` in patterns to + specify recursing into subdirectories. + + +Python version and platform compatibility +----------------------------------------- + +Distlib is intended to be used on and is tested on Python versions 2.7 and 3.6 - 3.10, +pypy-2.7 and pypy3 on Linux, Windows, and macOS. + +Project status +-------------- + +The project has reached a mature status in its development: there is a comprehensive +test suite and it has been exercised on Windows, Ubuntu and macOS. The project is used +by well-known projects such as `pip `_ and `caniusepython3 +`_. + +This project was migrated from Mercurial to Git and from BitBucket to GitHub, and +although all information of importance has been retained across the migration, some +commit references in issues and issue comments may have become invalid. + +Code of Conduct +--------------- + +Everyone interacting in the distlib project's codebases, issue trackers, chat +rooms, and mailing lists is expected to follow the `PyPA Code of Conduct`_. + +..
_PyPA Code of Conduct: https://www.pypa.io/en/latest/code-of-conduct/ + + diff --git a/venv/lib/python3.10/site-packages/distlib-0.3.7.dist-info/RECORD b/venv/lib/python3.10/site-packages/distlib-0.3.7.dist-info/RECORD new file mode 100644 index 0000000..9c05ec2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/distlib-0.3.7.dist-info/RECORD @@ -0,0 +1,38 @@ +distlib-0.3.7.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +distlib-0.3.7.dist-info/LICENSE.txt,sha256=gI4QyKarjesUn_mz-xn0R6gICUYG1xKpylf-rTVSWZ0,14531 +distlib-0.3.7.dist-info/METADATA,sha256=Hlid19X_JkBisxUJDKBhv1ttoXPaj0gVakllz2s1Lq0,5105 +distlib-0.3.7.dist-info/RECORD,, +distlib-0.3.7.dist-info/WHEEL,sha256=z9j0xAa_JmUKMpmz72K0ZGALSM_n-wQVmGbleXx2VHg,110 +distlib-0.3.7.dist-info/top_level.txt,sha256=9BERqitu_vzyeyILOcGzX9YyA2AB_xlC4-81V6xoizk,8 +distlib/__init__.py,sha256=01WTG2eLvhSUSSu8sB4nmtpa0Qk9E8_1kmwxRJicjyo,581 +distlib/__pycache__/__init__.cpython-310.pyc,, +distlib/__pycache__/compat.cpython-310.pyc,, +distlib/__pycache__/database.cpython-310.pyc,, +distlib/__pycache__/index.cpython-310.pyc,, +distlib/__pycache__/locators.cpython-310.pyc,, +distlib/__pycache__/manifest.cpython-310.pyc,, +distlib/__pycache__/markers.cpython-310.pyc,, +distlib/__pycache__/metadata.cpython-310.pyc,, +distlib/__pycache__/resources.cpython-310.pyc,, +distlib/__pycache__/scripts.cpython-310.pyc,, +distlib/__pycache__/util.cpython-310.pyc,, +distlib/__pycache__/version.cpython-310.pyc,, +distlib/__pycache__/wheel.cpython-310.pyc,, +distlib/compat.py,sha256=tfoMrj6tujk7G4UC2owL6ArgDuCKabgBxuJRGZSmpko,41259 +distlib/database.py,sha256=C8mvo2PxBfZt1A-tGgkiKSrykE2lQ25db9LrqZihjj8,51910 +distlib/index.py,sha256=HFiDG7LMoaBs829WuotrfIwcErOOExUOR_AeBtw_TCU,20834 +distlib/locators.py,sha256=wNzG-zERzS_XGls-nBPVVyLRHa2skUlkn0-5n0trMWA,51991 +distlib/manifest.py,sha256=oeqD8nM6UnxBrKUs0OR5PWG7vjm4CQTgiZ9GRmS28kg,14813 +distlib/markers.py,sha256=Mq_Zrvol9TfHAH1IWx2fbX4Pki-OjyyWyYorTtCiXn4,5159 +distlib/metadata.py,sha256=pB9WZ9mBfmQxc9OVIldLS5CjOoQRvKAvUwwQyKwKQtQ,39693 +distlib/resources.py,sha256=LwbPksc0A1JMbi6XnuPdMBUn83X7BPuFNWqPGEKI698,10820 +distlib/scripts.py,sha256=7xPz4wuxzJDujE9-nLdm0qdA_JVhU3jcRqf0oy3ZkDI,18121 +distlib/t32.exe,sha256=a0GV5kCoWsMutvliiCKmIgV98eRZ33wXoS-XrqvJQVs,97792 +distlib/t64-arm.exe,sha256=68TAa32V504xVBnufojh0PcenpR3U4wAqTqf-MZqbPw,182784 +distlib/t64.exe,sha256=gaYY8hy4fbkHYTTnA4i26ct8IQZzkBG2pRdy0iyuBrc,108032 +distlib/util.py,sha256=_Gh2y0z_yzMKKV9DuczHxr1uLXD8pv0lgWBNB0vwQ6g,66979 +distlib/version.py,sha256=lYfX2RpCy7aXqb-yJUGRLwPfjzbCNTq9lRtT6T62vNc,23777 +distlib/w32.exe,sha256=R4csx3-OGM9kL4aPIzQKRo5TfmRSHZo6QWyLhDhNBks,91648 +distlib/w64-arm.exe,sha256=xdyYhKj0WDcVUOCb05blQYvzdYIKMbmJn2SZvzkcey4,168448 +distlib/w64.exe,sha256=ejGf-rojoBfXseGLpya6bFTFPWRG21X5KvU8J5iU-K0,101888 +distlib/wheel.py,sha256=Rgqs658VsJ3R2845qwnZD8XQryV2CzWw2mghwLvxxsI,43898 diff --git a/venv/lib/python3.10/site-packages/distlib-0.3.7.dist-info/WHEEL b/venv/lib/python3.10/site-packages/distlib-0.3.7.dist-info/WHEEL new file mode 100644 index 0000000..0b18a28 --- /dev/null +++ b/venv/lib/python3.10/site-packages/distlib-0.3.7.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.37.1) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/venv/lib/python3.10/site-packages/distlib-0.3.7.dist-info/top_level.txt b/venv/lib/python3.10/site-packages/distlib-0.3.7.dist-info/top_level.txt new file mode 100644 index 0000000..f68bb07 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/distlib-0.3.7.dist-info/top_level.txt @@ -0,0 +1 @@ +distlib diff --git a/venv/lib/python3.10/site-packages/distlib/__init__.py b/venv/lib/python3.10/site-packages/distlib/__init__.py new file mode 100644 index 0000000..ad8a082 --- /dev/null +++ b/venv/lib/python3.10/site-packages/distlib/__init__.py @@ -0,0 +1,23 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2012-2022 Vinay Sajip. +# Licensed to the Python Software Foundation under a contributor agreement. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +import logging + +__version__ = '0.3.7' + +class DistlibException(Exception): + pass + +try: + from logging import NullHandler +except ImportError: # pragma: no cover + class NullHandler(logging.Handler): + def handle(self, record): pass + def emit(self, record): pass + def createLock(self): self.lock = None + +logger = logging.getLogger(__name__) +logger.addHandler(NullHandler()) diff --git a/venv/lib/python3.10/site-packages/distlib/compat.py b/venv/lib/python3.10/site-packages/distlib/compat.py new file mode 100644 index 0000000..1fe3d22 --- /dev/null +++ b/venv/lib/python3.10/site-packages/distlib/compat.py @@ -0,0 +1,1116 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2013-2017 Vinay Sajip. +# Licensed to the Python Software Foundation under a contributor agreement. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +from __future__ import absolute_import + +import os +import re +import sys + +try: + import ssl +except ImportError: # pragma: no cover + ssl = None + +if sys.version_info[0] < 3: # pragma: no cover + from StringIO import StringIO + string_types = basestring, + text_type = unicode + from types import FileType as file_type + import __builtin__ as builtins + import ConfigParser as configparser + from urlparse import urlparse, urlunparse, urljoin, urlsplit, urlunsplit + from urllib import (urlretrieve, quote as _quote, unquote, url2pathname, + pathname2url, ContentTooShortError, splittype) + + def quote(s): + if isinstance(s, unicode): + s = s.encode('utf-8') + return _quote(s) + + import urllib2 + from urllib2 import (Request, urlopen, URLError, HTTPError, + HTTPBasicAuthHandler, HTTPPasswordMgr, + HTTPHandler, HTTPRedirectHandler, + build_opener) + if ssl: + from urllib2 import HTTPSHandler + import httplib + import xmlrpclib + import Queue as queue + from HTMLParser import HTMLParser + import htmlentitydefs + raw_input = raw_input + from itertools import ifilter as filter + from itertools import ifilterfalse as filterfalse + + # Leaving this around for now, in case it needs resurrecting in some way + # _userprog = None + # def splituser(host): + # """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'.""" + # global _userprog + # if _userprog is None: + # import re + # _userprog = re.compile('^(.*)@(.*)$') + + # match = _userprog.match(host) + # if match: return match.group(1, 2) + # return None, host + +else: # pragma: no cover + from io import StringIO + string_types = str, + text_type = str + from io import TextIOWrapper as file_type + import builtins + import configparser + import shutil + from urllib.parse import (urlparse, urlunparse, urljoin, quote, + unquote, urlsplit, urlunsplit, splittype) + from urllib.request import (urlopen, urlretrieve, Request, url2pathname, + pathname2url, + HTTPBasicAuthHandler, HTTPPasswordMgr, + HTTPHandler, HTTPRedirectHandler, + build_opener) + if ssl: + from urllib.request import HTTPSHandler + from urllib.error import HTTPError, URLError, ContentTooShortError + import http.client as 
httplib + import urllib.request as urllib2 + import xmlrpc.client as xmlrpclib + import queue + from html.parser import HTMLParser + import html.entities as htmlentitydefs + raw_input = input + from itertools import filterfalse + filter = filter + + +try: + from ssl import match_hostname, CertificateError +except ImportError: # pragma: no cover + class CertificateError(ValueError): + pass + + + def _dnsname_match(dn, hostname, max_wildcards=1): + """Matching according to RFC 6125, section 6.4.3 + + http://tools.ietf.org/html/rfc6125#section-6.4.3 + """ + pats = [] + if not dn: + return False + + parts = dn.split('.') + leftmost, remainder = parts[0], parts[1:] + + wildcards = leftmost.count('*') + if wildcards > max_wildcards: + # Issue #17980: avoid denials of service by refusing more + # than one wildcard per fragment. A survey of established + # policy among SSL implementations showed it to be a + # reasonable choice. + raise CertificateError( + "too many wildcards in certificate DNS name: " + repr(dn)) + + # speed up common case w/o wildcards + if not wildcards: + return dn.lower() == hostname.lower() + + # RFC 6125, section 6.4.3, subitem 1. + # The client SHOULD NOT attempt to match a presented identifier in which + # the wildcard character comprises a label other than the left-most label. + if leftmost == '*': + # When '*' is a fragment by itself, it matches a non-empty dotless + # fragment. + pats.append('[^.]+') + elif leftmost.startswith('xn--') or hostname.startswith('xn--'): + # RFC 6125, section 6.4.3, subitem 3. + # The client SHOULD NOT attempt to match a presented identifier + # where the wildcard character is embedded within an A-label or + # U-label of an internationalized domain name. + pats.append(re.escape(leftmost)) + else: + # Otherwise, '*' matches any dotless string, e.g. www* + pats.append(re.escape(leftmost).replace(r'\*', '[^.]*')) + + # add the remaining fragments, ignore any wildcards + for frag in remainder: + pats.append(re.escape(frag)) + + pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE) + return pat.match(hostname) + + + def match_hostname(cert, hostname): + """Verify that *cert* (in decoded format as returned by + SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125 + rules are followed, but IP addresses are not accepted for *hostname*. + + CertificateError is raised on failure. On success, the function + returns nothing. + """ + if not cert: + raise ValueError("empty or no certificate, match_hostname needs a " + "SSL socket or SSL context with either " + "CERT_OPTIONAL or CERT_REQUIRED") + dnsnames = [] + san = cert.get('subjectAltName', ()) + for key, value in san: + if key == 'DNS': + if _dnsname_match(value, hostname): + return + dnsnames.append(value) + if not dnsnames: + # The subject is only checked when there is no dNSName entry + # in subjectAltName + for sub in cert.get('subject', ()): + for key, value in sub: + # XXX according to RFC 2818, the most specific Common Name + # must be used. 
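# Aside, not part of distlib: the RFC 6125 wildcard rule implemented by
# _dnsname_match above, restated standalone with a couple of sanity checks.
import re as _re

def _wildcard_match(dn: str, hostname: str) -> bool:
    leftmost, *remainder = dn.split(".")
    # '*' alone matches exactly one non-empty label; an embedded '*' matches
    # within a single label only.
    pats = ["[^.]+" if leftmost == "*"
            else _re.escape(leftmost).replace(r"\*", "[^.]*")]
    pats.extend(_re.escape(frag) for frag in remainder)
    return bool(_re.match(r"\A" + r"\.".join(pats) + r"\Z", hostname, _re.IGNORECASE))

assert _wildcard_match("*.example.com", "www.example.com")
assert not _wildcard_match("*.example.com", "sub.www.example.com")  # one label only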
+ if key == 'commonName': + if _dnsname_match(value, hostname): + return + dnsnames.append(value) + if len(dnsnames) > 1: + raise CertificateError("hostname %r " + "doesn't match either of %s" + % (hostname, ', '.join(map(repr, dnsnames)))) + elif len(dnsnames) == 1: + raise CertificateError("hostname %r " + "doesn't match %r" + % (hostname, dnsnames[0])) + else: + raise CertificateError("no appropriate commonName or " + "subjectAltName fields were found") + + +try: + from types import SimpleNamespace as Container +except ImportError: # pragma: no cover + class Container(object): + """ + A generic container for when multiple values need to be returned + """ + def __init__(self, **kwargs): + self.__dict__.update(kwargs) + + +try: + from shutil import which +except ImportError: # pragma: no cover + # Implementation from Python 3.3 + def which(cmd, mode=os.F_OK | os.X_OK, path=None): + """Given a command, mode, and a PATH string, return the path which + conforms to the given mode on the PATH, or None if there is no such + file. + + `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result + of os.environ.get("PATH"), or can be overridden with a custom search + path. + + """ + # Check that a given file can be accessed with the correct mode. + # Additionally check that `file` is not a directory, as on Windows + # directories pass the os.access check. + def _access_check(fn, mode): + return (os.path.exists(fn) and os.access(fn, mode) + and not os.path.isdir(fn)) + + # If we're given a path with a directory part, look it up directly rather + # than referring to PATH directories. This includes checking relative to the + # current directory, e.g. ./script + if os.path.dirname(cmd): + if _access_check(cmd, mode): + return cmd + return None + + if path is None: + path = os.environ.get("PATH", os.defpath) + if not path: + return None + path = path.split(os.pathsep) + + if sys.platform == "win32": + # The current directory takes precedence on Windows. + if not os.curdir in path: + path.insert(0, os.curdir) + + # PATHEXT is necessary to check on Windows. + pathext = os.environ.get("PATHEXT", "").split(os.pathsep) + # See if the given file matches any of the expected path extensions. + # This will allow us to short circuit when given "python.exe". + # If it does match, only test that one, otherwise we have to try + # others. + if any(cmd.lower().endswith(ext.lower()) for ext in pathext): + files = [cmd] + else: + files = [cmd + ext for ext in pathext] + else: + # On other platforms you don't have things like PATHEXT to tell you + # what file suffixes are executable, so just pass on cmd as-is. 
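# Aside, not part of distlib: the fallback above mirrors shutil.which from
# Python 3.3+, so usage is identical. Output depends on the machine running it.
from shutil import which as _which

print(_which("python3"))                       # e.g. "/usr/bin/python3", or None
print(_which("python3", path="/nonexistent"))  # None: no match on that search path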
+ files = [cmd] + + seen = set() + for dir in path: + normdir = os.path.normcase(dir) + if not normdir in seen: + seen.add(normdir) + for thefile in files: + name = os.path.join(dir, thefile) + if _access_check(name, mode): + return name + return None + + +# ZipFile is a context manager in 2.7, but not in 2.6 + +from zipfile import ZipFile as BaseZipFile + +if hasattr(BaseZipFile, '__enter__'): # pragma: no cover + ZipFile = BaseZipFile +else: # pragma: no cover + from zipfile import ZipExtFile as BaseZipExtFile + + class ZipExtFile(BaseZipExtFile): + def __init__(self, base): + self.__dict__.update(base.__dict__) + + def __enter__(self): + return self + + def __exit__(self, *exc_info): + self.close() + # return None, so if an exception occurred, it will propagate + + class ZipFile(BaseZipFile): + def __enter__(self): + return self + + def __exit__(self, *exc_info): + self.close() + # return None, so if an exception occurred, it will propagate + + def open(self, *args, **kwargs): + base = BaseZipFile.open(self, *args, **kwargs) + return ZipExtFile(base) + +try: + from platform import python_implementation +except ImportError: # pragma: no cover + def python_implementation(): + """Return a string identifying the Python implementation.""" + if 'PyPy' in sys.version: + return 'PyPy' + if os.name == 'java': + return 'Jython' + if sys.version.startswith('IronPython'): + return 'IronPython' + return 'CPython' + +import shutil +import sysconfig + +try: + callable = callable +except NameError: # pragma: no cover + from collections.abc import Callable + + def callable(obj): + return isinstance(obj, Callable) + + +try: + fsencode = os.fsencode + fsdecode = os.fsdecode +except AttributeError: # pragma: no cover + # Issue #99: on some systems (e.g. containerised), + # sys.getfilesystemencoding() returns None, and we need a real value, + # so fall back to utf-8. From the CPython 2.7 docs relating to Unix and + # sys.getfilesystemencoding(): the return value is "the user’s preference + # according to the result of nl_langinfo(CODESET), or None if the + # nl_langinfo(CODESET) failed." + _fsencoding = sys.getfilesystemencoding() or 'utf-8' + if _fsencoding == 'mbcs': + _fserrors = 'strict' + else: + _fserrors = 'surrogateescape' + + def fsencode(filename): + if isinstance(filename, bytes): + return filename + elif isinstance(filename, text_type): + return filename.encode(_fsencoding, _fserrors) + else: + raise TypeError("expect bytes or str, not %s" % + type(filename).__name__) + + def fsdecode(filename): + if isinstance(filename, text_type): + return filename + elif isinstance(filename, bytes): + return filename.decode(_fsencoding, _fserrors) + else: + raise TypeError("expect bytes or str, not %s" % + type(filename).__name__) + +try: + from tokenize import detect_encoding +except ImportError: # pragma: no cover + from codecs import BOM_UTF8, lookup + import re + + cookie_re = re.compile(r"coding[:=]\s*([-\w.]+)") + + def _get_normal_name(orig_enc): + """Imitates get_normal_name in tokenizer.c.""" + # Only care about the first 12 characters. + enc = orig_enc[:12].lower().replace("_", "-") + if enc == "utf-8" or enc.startswith("utf-8-"): + return "utf-8" + if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \ + enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")): + return "iso-8859-1" + return orig_enc + + def detect_encoding(readline): + """ + The detect_encoding() function is used to detect the encoding that should + be used to decode a Python source file. 
It requires one argument, readline, + in the same way as the tokenize() generator. + + It will call readline a maximum of twice, and return the encoding used + (as a string) and a list of any lines (left as bytes) it has read in. + + It detects the encoding from the presence of a utf-8 bom or an encoding + cookie as specified in pep-0263. If both a bom and a cookie are present, + but disagree, a SyntaxError will be raised. If the encoding cookie is an + invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found, + 'utf-8-sig' is returned. + + If no encoding is specified, then the default of 'utf-8' will be returned. + """ + try: + filename = readline.__self__.name + except AttributeError: + filename = None + bom_found = False + encoding = None + default = 'utf-8' + def read_or_stop(): + try: + return readline() + except StopIteration: + return b'' + + def find_cookie(line): + try: + # Decode as UTF-8. Either the line is an encoding declaration, + # in which case it should be pure ASCII, or it must be UTF-8 + # per default encoding. + line_string = line.decode('utf-8') + except UnicodeDecodeError: + msg = "invalid or missing encoding declaration" + if filename is not None: + msg = '{} for {!r}'.format(msg, filename) + raise SyntaxError(msg) + + matches = cookie_re.findall(line_string) + if not matches: + return None + encoding = _get_normal_name(matches[0]) + try: + codec = lookup(encoding) + except LookupError: + # This behaviour mimics the Python interpreter + if filename is None: + msg = "unknown encoding: " + encoding + else: + msg = "unknown encoding for {!r}: {}".format(filename, + encoding) + raise SyntaxError(msg) + + if bom_found: + if codec.name != 'utf-8': + # This behaviour mimics the Python interpreter + if filename is None: + msg = 'encoding problem: utf-8' + else: + msg = 'encoding problem for {!r}: utf-8'.format(filename) + raise SyntaxError(msg) + encoding += '-sig' + return encoding + + first = read_or_stop() + if first.startswith(BOM_UTF8): + bom_found = True + first = first[3:] + default = 'utf-8-sig' + if not first: + return default, [] + + encoding = find_cookie(first) + if encoding: + return encoding, [first] + + second = read_or_stop() + if not second: + return default, [first] + + encoding = find_cookie(second) + if encoding: + return encoding, [first, second] + + return default, [first, second] + +# For converting & <-> &amp; etc.
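# Aside, not part of distlib: the detect_encoding() contract described in the
# docstring above, exercised against the stdlib tokenize.detect_encoding that
# this backport mirrors. BytesIO stands in for a real source file; the coding
# cookie is a hypothetical example.
import io as _io
from tokenize import detect_encoding as _detect_encoding

_src = _io.BytesIO(b"# -*- coding: latin-1 -*-\nx = 1\n")
_enc, _lines = _detect_encoding(_src.readline)
assert _enc == "iso-8859-1"                        # normalized name for latin-1
assert _lines == [b"# -*- coding: latin-1 -*-\n"]  # the bytes consumed so far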
+try: + from html import escape +except ImportError: + from cgi import escape +if sys.version_info[:2] < (3, 4): + unescape = HTMLParser().unescape +else: + from html import unescape + +try: + from collections import ChainMap +except ImportError: # pragma: no cover + from collections import MutableMapping + + try: + from reprlib import recursive_repr as _recursive_repr + except ImportError: + def _recursive_repr(fillvalue='...'): + ''' + Decorator to make a repr function return fillvalue for a recursive + call + ''' + + def decorating_function(user_function): + repr_running = set() + + def wrapper(self): + key = id(self), get_ident() + if key in repr_running: + return fillvalue + repr_running.add(key) + try: + result = user_function(self) + finally: + repr_running.discard(key) + return result + + # Can't use functools.wraps() here because of bootstrap issues + wrapper.__module__ = getattr(user_function, '__module__') + wrapper.__doc__ = getattr(user_function, '__doc__') + wrapper.__name__ = getattr(user_function, '__name__') + wrapper.__annotations__ = getattr(user_function, '__annotations__', {}) + return wrapper + + return decorating_function + + class ChainMap(MutableMapping): + ''' A ChainMap groups multiple dicts (or other mappings) together + to create a single, updateable view. + + The underlying mappings are stored in a list. That list is public and can + accessed or updated using the *maps* attribute. There is no other state. + + Lookups search the underlying mappings successively until a key is found. + In contrast, writes, updates, and deletions only operate on the first + mapping. + + ''' + + def __init__(self, *maps): + '''Initialize a ChainMap by setting *maps* to the given mappings. + If no mappings are provided, a single empty dictionary is used. + + ''' + self.maps = list(maps) or [{}] # always at least one map + + def __missing__(self, key): + raise KeyError(key) + + def __getitem__(self, key): + for mapping in self.maps: + try: + return mapping[key] # can't use 'key in mapping' with defaultdict + except KeyError: + pass + return self.__missing__(key) # support subclasses that define __missing__ + + def get(self, key, default=None): + return self[key] if key in self else default + + def __len__(self): + return len(set().union(*self.maps)) # reuses stored hash values if possible + + def __iter__(self): + return iter(set().union(*self.maps)) + + def __contains__(self, key): + return any(key in m for m in self.maps) + + def __bool__(self): + return any(self.maps) + + @_recursive_repr() + def __repr__(self): + return '{0.__class__.__name__}({1})'.format( + self, ', '.join(map(repr, self.maps))) + + @classmethod + def fromkeys(cls, iterable, *args): + 'Create a ChainMap with a single dict created from the iterable.' + return cls(dict.fromkeys(iterable, *args)) + + def copy(self): + 'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]' + return self.__class__(self.maps[0].copy(), *self.maps[1:]) + + __copy__ = copy + + def new_child(self): # like Django's Context.push() + 'New ChainMap with a new dict followed by all previous maps.' + return self.__class__({}, *self.maps) + + @property + def parents(self): # like Django's Context.pop() + 'New ChainMap from maps[1:].' 
+ return self.__class__(*self.maps[1:]) + + def __setitem__(self, key, value): + self.maps[0][key] = value + + def __delitem__(self, key): + try: + del self.maps[0][key] + except KeyError: + raise KeyError('Key not found in the first mapping: {!r}'.format(key)) + + def popitem(self): + 'Remove and return an item pair from maps[0]. Raise KeyError is maps[0] is empty.' + try: + return self.maps[0].popitem() + except KeyError: + raise KeyError('No keys found in the first mapping.') + + def pop(self, key, *args): + 'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].' + try: + return self.maps[0].pop(key, *args) + except KeyError: + raise KeyError('Key not found in the first mapping: {!r}'.format(key)) + + def clear(self): + 'Clear maps[0], leaving maps[1:] intact.' + self.maps[0].clear() + +try: + from importlib.util import cache_from_source # Python >= 3.4 +except ImportError: # pragma: no cover + def cache_from_source(path, debug_override=None): + assert path.endswith('.py') + if debug_override is None: + debug_override = __debug__ + if debug_override: + suffix = 'c' + else: + suffix = 'o' + return path + suffix + +try: + from collections import OrderedDict +except ImportError: # pragma: no cover +## {{{ http://code.activestate.com/recipes/576693/ (r9) +# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy. +# Passes Python2.7's test suite and incorporates all the latest updates. + try: + from thread import get_ident as _get_ident + except ImportError: + from dummy_thread import get_ident as _get_ident + + try: + from _abcoll import KeysView, ValuesView, ItemsView + except ImportError: + pass + + + class OrderedDict(dict): + 'Dictionary that remembers insertion order' + # An inherited dict maps keys to values. + # The inherited dict provides __getitem__, __len__, __contains__, and get. + # The remaining methods are order-aware. + # Big-O running times for all methods are the same as for regular dictionaries. + + # The internal self.__map dictionary maps keys to links in a doubly linked list. + # The circular doubly linked list starts and ends with a sentinel element. + # The sentinel element never gets deleted (this simplifies the algorithm). + # Each link is stored as a list of length three: [PREV, NEXT, KEY]. + + def __init__(self, *args, **kwds): + '''Initialize an ordered dictionary. Signature is the same as for + regular dictionaries, but keyword arguments are not recommended + because their insertion order is arbitrary. + + ''' + if len(args) > 1: + raise TypeError('expected at most 1 arguments, got %d' % len(args)) + try: + self.__root + except AttributeError: + self.__root = root = [] # sentinel node + root[:] = [root, root, None] + self.__map = {} + self.__update(*args, **kwds) + + def __setitem__(self, key, value, dict_setitem=dict.__setitem__): + 'od.__setitem__(i, y) <==> od[i]=y' + # Setting a new item creates a new link which goes at the end of the linked + # list, and the inherited dictionary is updated with the new key/value pair. + if key not in self: + root = self.__root + last = root[0] + last[1] = root[0] = self.__map[key] = [last, root, key] + dict_setitem(self, key, value) + + def __delitem__(self, key, dict_delitem=dict.__delitem__): + 'od.__delitem__(y) <==> del od[y]' + # Deleting an existing item uses self.__map to find the link which is + # then removed by updating the links in the predecessor and successor nodes. 
+ dict_delitem(self, key) + link_prev, link_next, key = self.__map.pop(key) + link_prev[1] = link_next + link_next[0] = link_prev + + def __iter__(self): + 'od.__iter__() <==> iter(od)' + root = self.__root + curr = root[1] + while curr is not root: + yield curr[2] + curr = curr[1] + + def __reversed__(self): + 'od.__reversed__() <==> reversed(od)' + root = self.__root + curr = root[0] + while curr is not root: + yield curr[2] + curr = curr[0] + + def clear(self): + 'od.clear() -> None. Remove all items from od.' + try: + for node in self.__map.itervalues(): + del node[:] + root = self.__root + root[:] = [root, root, None] + self.__map.clear() + except AttributeError: + pass + dict.clear(self) + + def popitem(self, last=True): + '''od.popitem() -> (k, v), return and remove a (key, value) pair. + Pairs are returned in LIFO order if last is true or FIFO order if false. + + ''' + if not self: + raise KeyError('dictionary is empty') + root = self.__root + if last: + link = root[0] + link_prev = link[0] + link_prev[1] = root + root[0] = link_prev + else: + link = root[1] + link_next = link[1] + root[1] = link_next + link_next[0] = root + key = link[2] + del self.__map[key] + value = dict.pop(self, key) + return key, value + + # -- the following methods do not depend on the internal structure -- + + def keys(self): + 'od.keys() -> list of keys in od' + return list(self) + + def values(self): + 'od.values() -> list of values in od' + return [self[key] for key in self] + + def items(self): + 'od.items() -> list of (key, value) pairs in od' + return [(key, self[key]) for key in self] + + def iterkeys(self): + 'od.iterkeys() -> an iterator over the keys in od' + return iter(self) + + def itervalues(self): + 'od.itervalues -> an iterator over the values in od' + for k in self: + yield self[k] + + def iteritems(self): + 'od.iteritems -> an iterator over the (key, value) items in od' + for k in self: + yield (k, self[k]) + + def update(*args, **kwds): + '''od.update(E, **F) -> None. Update od from dict/iterable E and F. + + If E is a dict instance, does: for k in E: od[k] = E[k] + If E has a .keys() method, does: for k in E.keys(): od[k] = E[k] + Or if E is an iterable of items, does: for k, v in E: od[k] = v + In either case, this is followed by: for k, v in F.items(): od[k] = v + + ''' + if len(args) > 2: + raise TypeError('update() takes at most 2 positional ' + 'arguments (%d given)' % (len(args),)) + elif not args: + raise TypeError('update() takes at least 1 argument (0 given)') + self = args[0] + # Make progressively weaker assumptions about "other" + other = () + if len(args) == 2: + other = args[1] + if isinstance(other, dict): + for key in other: + self[key] = other[key] + elif hasattr(other, 'keys'): + for key in other.keys(): + self[key] = other[key] + else: + for key, value in other: + self[key] = value + for key, value in kwds.items(): + self[key] = value + + __update = update # let subclasses override update without breaking __init__ + + __marker = object() + + def pop(self, key, default=__marker): + '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value. + If key is not found, d is returned if given, otherwise KeyError is raised. 
+ + ''' + if key in self: + result = self[key] + del self[key] + return result + if default is self.__marker: + raise KeyError(key) + return default + + def setdefault(self, key, default=None): + 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od' + if key in self: + return self[key] + self[key] = default + return default + + def __repr__(self, _repr_running=None): + 'od.__repr__() <==> repr(od)' + if not _repr_running: _repr_running = {} + call_key = id(self), _get_ident() + if call_key in _repr_running: + return '...' + _repr_running[call_key] = 1 + try: + if not self: + return '%s()' % (self.__class__.__name__,) + return '%s(%r)' % (self.__class__.__name__, self.items()) + finally: + del _repr_running[call_key] + + def __reduce__(self): + 'Return state information for pickling' + items = [[k, self[k]] for k in self] + inst_dict = vars(self).copy() + for k in vars(OrderedDict()): + inst_dict.pop(k, None) + if inst_dict: + return (self.__class__, (items,), inst_dict) + return self.__class__, (items,) + + def copy(self): + 'od.copy() -> a shallow copy of od' + return self.__class__(self) + + @classmethod + def fromkeys(cls, iterable, value=None): + '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S + and values equal to v (which defaults to None). + + ''' + d = cls() + for key in iterable: + d[key] = value + return d + + def __eq__(self, other): + '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive + while comparison to a regular mapping is order-insensitive. + + ''' + if isinstance(other, OrderedDict): + return len(self)==len(other) and self.items() == other.items() + return dict.__eq__(self, other) + + def __ne__(self, other): + return not self == other + + # -- the following methods are only used in Python 2.7 -- + + def viewkeys(self): + "od.viewkeys() -> a set-like object providing a view on od's keys" + return KeysView(self) + + def viewvalues(self): + "od.viewvalues() -> an object providing a view on od's values" + return ValuesView(self) + + def viewitems(self): + "od.viewitems() -> a set-like object providing a view on od's items" + return ItemsView(self) + +try: + from logging.config import BaseConfigurator, valid_ident +except ImportError: # pragma: no cover + IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I) + + + def valid_ident(s): + m = IDENTIFIER.match(s) + if not m: + raise ValueError('Not a valid Python identifier: %r' % s) + return True + + + # The ConvertingXXX classes are wrappers around standard Python containers, + # and they serve to convert any suitable values in the container. The + # conversion converts base dicts, lists and tuples to their wrapped + # equivalents, whereas strings which match a conversion format are converted + # appropriately. + # + # Each wrapper should have a configurator attribute holding the actual + # configurator to use for conversion. 
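A minimal behavioural sketch of the OrderedDict backport defined above (illustration only, not part of the vendored module):

    # Insertion order is preserved, and popitem() is LIFO by default,
    # FIFO with last=False -- exactly like the stdlib class.
    od = OrderedDict()
    od['b'] = 1
    od['a'] = 2
    od['c'] = 3
    assert list(od.keys()) == ['b', 'a', 'c']
    assert od.popitem() == ('c', 3)
    assert od.popitem(last=False) == ('b', 1)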
+
+    class ConvertingDict(dict):
+        """A converting dictionary wrapper."""
+
+        def __getitem__(self, key):
+            value = dict.__getitem__(self, key)
+            result = self.configurator.convert(value)
+            # If the converted value is different, save for next time
+            if value is not result:
+                self[key] = result
+                if type(result) in (ConvertingDict, ConvertingList,
+                                    ConvertingTuple):
+                    result.parent = self
+                    result.key = key
+            return result
+
+        def get(self, key, default=None):
+            value = dict.get(self, key, default)
+            result = self.configurator.convert(value)
+            # If the converted value is different, save for next time
+            if value is not result:
+                self[key] = result
+                if type(result) in (ConvertingDict, ConvertingList,
+                                    ConvertingTuple):
+                    result.parent = self
+                    result.key = key
+            return result
+
+        def pop(self, key, default=None):
+            value = dict.pop(self, key, default)
+            result = self.configurator.convert(value)
+            if value is not result:
+                if type(result) in (ConvertingDict, ConvertingList,
+                                    ConvertingTuple):
+                    result.parent = self
+                    result.key = key
+            return result
+
+    class ConvertingList(list):
+        """A converting list wrapper."""
+        def __getitem__(self, key):
+            value = list.__getitem__(self, key)
+            result = self.configurator.convert(value)
+            # If the converted value is different, save for next time
+            if value is not result:
+                self[key] = result
+                if type(result) in (ConvertingDict, ConvertingList,
+                                    ConvertingTuple):
+                    result.parent = self
+                    result.key = key
+            return result
+
+        def pop(self, idx=-1):
+            value = list.pop(self, idx)
+            result = self.configurator.convert(value)
+            if value is not result:
+                if type(result) in (ConvertingDict, ConvertingList,
+                                    ConvertingTuple):
+                    result.parent = self
+            return result
+
+    class ConvertingTuple(tuple):
+        """A converting tuple wrapper."""
+        def __getitem__(self, key):
+            value = tuple.__getitem__(self, key)
+            result = self.configurator.convert(value)
+            if value is not result:
+                if type(result) in (ConvertingDict, ConvertingList,
+                                    ConvertingTuple):
+                    result.parent = self
+                    result.key = key
+            return result
+
+    class BaseConfigurator(object):
+        """
+        The configurator base class which defines some useful defaults.
+        """
+
+        CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$')
+
+        WORD_PATTERN = re.compile(r'^\s*(\w+)\s*')
+        DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*')
+        INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*')
+        DIGIT_PATTERN = re.compile(r'^\d+$')
+
+        value_converters = {
+            'ext' : 'ext_convert',
+            'cfg' : 'cfg_convert',
+        }
+
+        # We might want to use a different one, e.g. importlib
+        importer = staticmethod(__import__)
+
+        def __init__(self, config):
+            self.config = ConvertingDict(config)
+            self.config.configurator = self
+
+        def resolve(self, s):
+            """
+            Resolve strings to objects using standard import and attribute
+            syntax.
+            """
+            name = s.split('.')
+            used = name.pop(0)
+            try:
+                found = self.importer(used)
+                for frag in name:
+                    used += '.'
+ frag + try: + found = getattr(found, frag) + except AttributeError: + self.importer(used) + found = getattr(found, frag) + return found + except ImportError: + e, tb = sys.exc_info()[1:] + v = ValueError('Cannot resolve %r: %s' % (s, e)) + v.__cause__, v.__traceback__ = e, tb + raise v + + def ext_convert(self, value): + """Default converter for the ext:// protocol.""" + return self.resolve(value) + + def cfg_convert(self, value): + """Default converter for the cfg:// protocol.""" + rest = value + m = self.WORD_PATTERN.match(rest) + if m is None: + raise ValueError("Unable to convert %r" % value) + else: + rest = rest[m.end():] + d = self.config[m.groups()[0]] + #print d, rest + while rest: + m = self.DOT_PATTERN.match(rest) + if m: + d = d[m.groups()[0]] + else: + m = self.INDEX_PATTERN.match(rest) + if m: + idx = m.groups()[0] + if not self.DIGIT_PATTERN.match(idx): + d = d[idx] + else: + try: + n = int(idx) # try as number first (most likely) + d = d[n] + except TypeError: + d = d[idx] + if m: + rest = rest[m.end():] + else: + raise ValueError('Unable to convert ' + '%r at %r' % (value, rest)) + #rest should be empty + return d + + def convert(self, value): + """ + Convert values to an appropriate type. dicts, lists and tuples are + replaced by their converting alternatives. Strings are checked to + see if they have a conversion format and are converted if they do. + """ + if not isinstance(value, ConvertingDict) and isinstance(value, dict): + value = ConvertingDict(value) + value.configurator = self + elif not isinstance(value, ConvertingList) and isinstance(value, list): + value = ConvertingList(value) + value.configurator = self + elif not isinstance(value, ConvertingTuple) and\ + isinstance(value, tuple): + value = ConvertingTuple(value) + value.configurator = self + elif isinstance(value, string_types): + m = self.CONVERT_PATTERN.match(value) + if m: + d = m.groupdict() + prefix = d['prefix'] + converter = self.value_converters.get(prefix, None) + if converter: + suffix = d['suffix'] + converter = getattr(self, converter) + value = converter(suffix) + return value + + def configure_custom(self, config): + """Configure an object with a user-supplied factory.""" + c = config.pop('()') + if not callable(c): + c = self.resolve(c) + props = config.pop('.', None) + # Check for valid identifiers + kwargs = dict([(k, config[k]) for k in config if valid_ident(k)]) + result = c(**kwargs) + if props: + for name, value in props.items(): + setattr(result, name, value) + return result + + def as_tuple(self, value): + """Utility function which converts lists to tuples.""" + if isinstance(value, list): + value = tuple(value) + return value diff --git a/venv/lib/python3.10/site-packages/distlib/database.py b/venv/lib/python3.10/site-packages/distlib/database.py new file mode 100644 index 0000000..bc16e88 --- /dev/null +++ b/venv/lib/python3.10/site-packages/distlib/database.py @@ -0,0 +1,1353 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2012-2017 The Python Software Foundation. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +"""PEP 376 implementation.""" + +from __future__ import unicode_literals + +import base64 +import codecs +import contextlib +import hashlib +import logging +import os +import posixpath +import sys +import zipimport + +from . 
import DistlibException, resources +from .compat import StringIO +from .version import get_scheme, UnsupportedVersionError +from .metadata import (Metadata, METADATA_FILENAME, WHEEL_METADATA_FILENAME, + LEGACY_METADATA_FILENAME) +from .util import (parse_requirement, cached_property, parse_name_and_version, + read_exports, write_exports, CSVReader, CSVWriter) + + +__all__ = ['Distribution', 'BaseInstalledDistribution', + 'InstalledDistribution', 'EggInfoDistribution', + 'DistributionPath'] + + +logger = logging.getLogger(__name__) + +EXPORTS_FILENAME = 'pydist-exports.json' +COMMANDS_FILENAME = 'pydist-commands.json' + +DIST_FILES = ('INSTALLER', METADATA_FILENAME, 'RECORD', 'REQUESTED', + 'RESOURCES', EXPORTS_FILENAME, 'SHARED') + +DISTINFO_EXT = '.dist-info' + + +class _Cache(object): + """ + A simple cache mapping names and .dist-info paths to distributions + """ + def __init__(self): + """ + Initialise an instance. There is normally one for each DistributionPath. + """ + self.name = {} + self.path = {} + self.generated = False + + def clear(self): + """ + Clear the cache, setting it to its initial state. + """ + self.name.clear() + self.path.clear() + self.generated = False + + def add(self, dist): + """ + Add a distribution to the cache. + :param dist: The distribution to add. + """ + if dist.path not in self.path: + self.path[dist.path] = dist + self.name.setdefault(dist.key, []).append(dist) + + +class DistributionPath(object): + """ + Represents a set of distributions installed on a path (typically sys.path). + """ + def __init__(self, path=None, include_egg=False): + """ + Create an instance from a path, optionally including legacy (distutils/ + setuptools/distribute) distributions. + :param path: The path to use, as a list of directories. If not specified, + sys.path is used. + :param include_egg: If True, this instance will look for and return legacy + distributions as well as those based on PEP 376. + """ + if path is None: + path = sys.path + self.path = path + self._include_dist = True + self._include_egg = include_egg + + self._cache = _Cache() + self._cache_egg = _Cache() + self._cache_enabled = True + self._scheme = get_scheme('default') + + def _get_cache_enabled(self): + return self._cache_enabled + + def _set_cache_enabled(self, value): + self._cache_enabled = value + + cache_enabled = property(_get_cache_enabled, _set_cache_enabled) + + def clear_cache(self): + """ + Clears the internal cache. + """ + self._cache.clear() + self._cache_egg.clear() + + + def _yield_distributions(self): + """ + Yield .dist-info and/or .egg(-info) distributions. + """ + # We need to check if we've seen some resources already, because on + # some Linux systems (e.g. some Debian/Ubuntu variants) there are + # symlinks which alias other files in the environment. 
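As a usage-level sketch of how this class is typically driven (the queried package name is hypothetical), before the scan loop that follows:

    # Enumerate installed distributions, including legacy .egg-info ones.
    from distlib.database import DistributionPath

    dist_path = DistributionPath(include_egg=True)
    for dist in dist_path.get_distributions():
        print(dist.name, dist.version)

    # Single, case-insensitive lookup; returns None when nothing matches.
    d = dist_path.get_distribution('pip')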
+ seen = set() + for path in self.path: + finder = resources.finder_for_path(path) + if finder is None: + continue + r = finder.find('') + if not r or not r.is_container: + continue + rset = sorted(r.resources) + for entry in rset: + r = finder.find(entry) + if not r or r.path in seen: + continue + try: + if self._include_dist and entry.endswith(DISTINFO_EXT): + possible_filenames = [METADATA_FILENAME, + WHEEL_METADATA_FILENAME, + LEGACY_METADATA_FILENAME] + for metadata_filename in possible_filenames: + metadata_path = posixpath.join(entry, metadata_filename) + pydist = finder.find(metadata_path) + if pydist: + break + else: + continue + + with contextlib.closing(pydist.as_stream()) as stream: + metadata = Metadata(fileobj=stream, scheme='legacy') + logger.debug('Found %s', r.path) + seen.add(r.path) + yield new_dist_class(r.path, metadata=metadata, + env=self) + elif self._include_egg and entry.endswith(('.egg-info', + '.egg')): + logger.debug('Found %s', r.path) + seen.add(r.path) + yield old_dist_class(r.path, self) + except Exception as e: + msg = 'Unable to read distribution at %s, perhaps due to bad metadata: %s' + logger.warning(msg, r.path, e) + import warnings + warnings.warn(msg % (r.path, e), stacklevel=2) + + def _generate_cache(self): + """ + Scan the path for distributions and populate the cache with + those that are found. + """ + gen_dist = not self._cache.generated + gen_egg = self._include_egg and not self._cache_egg.generated + if gen_dist or gen_egg: + for dist in self._yield_distributions(): + if isinstance(dist, InstalledDistribution): + self._cache.add(dist) + else: + self._cache_egg.add(dist) + + if gen_dist: + self._cache.generated = True + if gen_egg: + self._cache_egg.generated = True + + @classmethod + def distinfo_dirname(cls, name, version): + """ + The *name* and *version* parameters are converted into their + filename-escaped form, i.e. any ``'-'`` characters are replaced + with ``'_'`` other than the one in ``'dist-info'`` and the one + separating the name from the version number. + + :parameter name: is converted to a standard distribution name by replacing + any runs of non- alphanumeric characters with a single + ``'-'``. + :type name: string + :parameter version: is converted to a standard version string. Spaces + become dots, and all other non-alphanumeric characters + (except dots) become dashes, with runs of multiple + dashes condensed to a single dash. + :type version: string + :returns: directory name + :rtype: string""" + name = name.replace('-', '_') + return '-'.join([name, version]) + DISTINFO_EXT + + def get_distributions(self): + """ + Provides an iterator that looks for distributions and returns + :class:`InstalledDistribution` or + :class:`EggInfoDistribution` instances for each one of them. + + :rtype: iterator of :class:`InstalledDistribution` and + :class:`EggInfoDistribution` instances + """ + if not self._cache_enabled: + for dist in self._yield_distributions(): + yield dist + else: + self._generate_cache() + + for dist in self._cache.path.values(): + yield dist + + if self._include_egg: + for dist in self._cache_egg.path.values(): + yield dist + + def get_distribution(self, name): + """ + Looks for a named distribution on the path. + + This function only returns the first result found, as no more than one + value is expected. If nothing is found, ``None`` is returned. 
+ + :rtype: :class:`InstalledDistribution`, :class:`EggInfoDistribution` + or ``None`` + """ + result = None + name = name.lower() + if not self._cache_enabled: + for dist in self._yield_distributions(): + if dist.key == name: + result = dist + break + else: + self._generate_cache() + + if name in self._cache.name: + result = self._cache.name[name][0] + elif self._include_egg and name in self._cache_egg.name: + result = self._cache_egg.name[name][0] + return result + + def provides_distribution(self, name, version=None): + """ + Iterates over all distributions to find which distributions provide *name*. + If a *version* is provided, it will be used to filter the results. + + This function only returns the first result found, since no more than + one values are expected. If the directory is not found, returns ``None``. + + :parameter version: a version specifier that indicates the version + required, conforming to the format in ``PEP-345`` + + :type name: string + :type version: string + """ + matcher = None + if version is not None: + try: + matcher = self._scheme.matcher('%s (%s)' % (name, version)) + except ValueError: + raise DistlibException('invalid name or version: %r, %r' % + (name, version)) + + for dist in self.get_distributions(): + # We hit a problem on Travis where enum34 was installed and doesn't + # have a provides attribute ... + if not hasattr(dist, 'provides'): + logger.debug('No "provides": %s', dist) + else: + provided = dist.provides + + for p in provided: + p_name, p_ver = parse_name_and_version(p) + if matcher is None: + if p_name == name: + yield dist + break + else: + if p_name == name and matcher.match(p_ver): + yield dist + break + + def get_file_path(self, name, relative_path): + """ + Return the path to a resource file. + """ + dist = self.get_distribution(name) + if dist is None: + raise LookupError('no distribution named %r found' % name) + return dist.get_resource_path(relative_path) + + def get_exported_entries(self, category, name=None): + """ + Return all of the exported entries in a particular category. + + :param category: The category to search for entries. + :param name: If specified, only entries with that name are returned. + """ + for dist in self.get_distributions(): + r = dist.exports + if category in r: + d = r[category] + if name is not None: + if name in d: + yield d[name] + else: + for v in d.values(): + yield v + + +class Distribution(object): + """ + A base class for distributions, whether installed or from indexes. + Either way, it must have some metadata, so that's all that's needed + for construction. + """ + + build_time_dependency = False + """ + Set to True if it's known to be only a build-time dependency (i.e. + not needed after installation). + """ + + requested = False + """A boolean that indicates whether the ``REQUESTED`` metadata file is + present (in other words, whether the package was installed by user + request or it was installed as a dependency).""" + + def __init__(self, metadata): + """ + Initialise an instance. + :param metadata: The instance of :class:`Metadata` describing this + distribution. 
+ """ + self.metadata = metadata + self.name = metadata.name + self.key = self.name.lower() # for case-insensitive comparisons + self.version = metadata.version + self.locator = None + self.digest = None + self.extras = None # additional features requested + self.context = None # environment marker overrides + self.download_urls = set() + self.digests = {} + + @property + def source_url(self): + """ + The source archive download URL for this distribution. + """ + return self.metadata.source_url + + download_url = source_url # Backward compatibility + + @property + def name_and_version(self): + """ + A utility property which displays the name and version in parentheses. + """ + return '%s (%s)' % (self.name, self.version) + + @property + def provides(self): + """ + A set of distribution names and versions provided by this distribution. + :return: A set of "name (version)" strings. + """ + plist = self.metadata.provides + s = '%s (%s)' % (self.name, self.version) + if s not in plist: + plist.append(s) + return plist + + def _get_requirements(self, req_attr): + md = self.metadata + reqts = getattr(md, req_attr) + logger.debug('%s: got requirements %r from metadata: %r', self.name, req_attr, + reqts) + return set(md.get_requirements(reqts, extras=self.extras, + env=self.context)) + + @property + def run_requires(self): + return self._get_requirements('run_requires') + + @property + def meta_requires(self): + return self._get_requirements('meta_requires') + + @property + def build_requires(self): + return self._get_requirements('build_requires') + + @property + def test_requires(self): + return self._get_requirements('test_requires') + + @property + def dev_requires(self): + return self._get_requirements('dev_requires') + + def matches_requirement(self, req): + """ + Say if this instance matches (fulfills) a requirement. + :param req: The requirement to match. + :rtype req: str + :return: True if it matches, else False. + """ + # Requirement may contain extras - parse to lose those + # from what's passed to the matcher + r = parse_requirement(req) + scheme = get_scheme(self.metadata.scheme) + try: + matcher = scheme.matcher(r.requirement) + except UnsupportedVersionError: + # XXX compat-mode if cannot read the version + logger.warning('could not read version %r - using name only', + req) + name = req.split()[0] + matcher = scheme.matcher(name) + + name = matcher.key # case-insensitive + + result = False + for p in self.provides: + p_name, p_ver = parse_name_and_version(p) + if p_name != name: + continue + try: + result = matcher.match(p_ver) + break + except UnsupportedVersionError: + pass + return result + + def __repr__(self): + """ + Return a textual representation of this instance, + """ + if self.source_url: + suffix = ' [%s]' % self.source_url + else: + suffix = '' + return '' % (self.name, self.version, suffix) + + def __eq__(self, other): + """ + See if this distribution is the same as another. + :param other: The distribution to compare with. To be equal to one + another. distributions must have the same type, name, + version and source_url. + :return: True if it is the same, else False. + """ + if type(other) is not type(self): + result = False + else: + result = (self.name == other.name and + self.version == other.version and + self.source_url == other.source_url) + return result + + def __hash__(self): + """ + Compute hash in a way which matches the equality test. 
+ """ + return hash(self.name) + hash(self.version) + hash(self.source_url) + + +class BaseInstalledDistribution(Distribution): + """ + This is the base class for installed distributions (whether PEP 376 or + legacy). + """ + + hasher = None + + def __init__(self, metadata, path, env=None): + """ + Initialise an instance. + :param metadata: An instance of :class:`Metadata` which describes the + distribution. This will normally have been initialised + from a metadata file in the ``path``. + :param path: The path of the ``.dist-info`` or ``.egg-info`` + directory for the distribution. + :param env: This is normally the :class:`DistributionPath` + instance where this distribution was found. + """ + super(BaseInstalledDistribution, self).__init__(metadata) + self.path = path + self.dist_path = env + + def get_hash(self, data, hasher=None): + """ + Get the hash of some data, using a particular hash algorithm, if + specified. + + :param data: The data to be hashed. + :type data: bytes + :param hasher: The name of a hash implementation, supported by hashlib, + or ``None``. Examples of valid values are ``'sha1'``, + ``'sha224'``, ``'sha384'``, '``sha256'``, ``'md5'`` and + ``'sha512'``. If no hasher is specified, the ``hasher`` + attribute of the :class:`InstalledDistribution` instance + is used. If the hasher is determined to be ``None``, MD5 + is used as the hashing algorithm. + :returns: The hash of the data. If a hasher was explicitly specified, + the returned hash will be prefixed with the specified hasher + followed by '='. + :rtype: str + """ + if hasher is None: + hasher = self.hasher + if hasher is None: + hasher = hashlib.md5 + prefix = '' + else: + hasher = getattr(hashlib, hasher) + prefix = '%s=' % self.hasher + digest = hasher(data).digest() + digest = base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii') + return '%s%s' % (prefix, digest) + + +class InstalledDistribution(BaseInstalledDistribution): + """ + Created with the *path* of the ``.dist-info`` directory provided to the + constructor. It reads the metadata contained in ``pydist.json`` when it is + instantiated., or uses a passed in Metadata instance (useful for when + dry-run mode is being used). 
+ """ + + hasher = 'sha256' + + def __init__(self, path, metadata=None, env=None): + self.modules = [] + self.finder = finder = resources.finder_for_path(path) + if finder is None: + raise ValueError('finder unavailable for %s' % path) + if env and env._cache_enabled and path in env._cache.path: + metadata = env._cache.path[path].metadata + elif metadata is None: + r = finder.find(METADATA_FILENAME) + # Temporary - for Wheel 0.23 support + if r is None: + r = finder.find(WHEEL_METADATA_FILENAME) + # Temporary - for legacy support + if r is None: + r = finder.find(LEGACY_METADATA_FILENAME) + if r is None: + raise ValueError('no %s found in %s' % (METADATA_FILENAME, + path)) + with contextlib.closing(r.as_stream()) as stream: + metadata = Metadata(fileobj=stream, scheme='legacy') + + super(InstalledDistribution, self).__init__(metadata, path, env) + + if env and env._cache_enabled: + env._cache.add(self) + + r = finder.find('REQUESTED') + self.requested = r is not None + p = os.path.join(path, 'top_level.txt') + if os.path.exists(p): + with open(p, 'rb') as f: + data = f.read().decode('utf-8') + self.modules = data.splitlines() + + def __repr__(self): + return '' % ( + self.name, self.version, self.path) + + def __str__(self): + return "%s %s" % (self.name, self.version) + + def _get_records(self): + """ + Get the list of installed files for the distribution + :return: A list of tuples of path, hash and size. Note that hash and + size might be ``None`` for some entries. The path is exactly + as stored in the file (which is as in PEP 376). + """ + results = [] + r = self.get_distinfo_resource('RECORD') + with contextlib.closing(r.as_stream()) as stream: + with CSVReader(stream=stream) as record_reader: + # Base location is parent dir of .dist-info dir + #base_location = os.path.dirname(self.path) + #base_location = os.path.abspath(base_location) + for row in record_reader: + missing = [None for i in range(len(row), 3)] + path, checksum, size = row + missing + #if not os.path.isabs(path): + # path = path.replace('/', os.sep) + # path = os.path.join(base_location, path) + results.append((path, checksum, size)) + return results + + @cached_property + def exports(self): + """ + Return the information exported by this distribution. + :return: A dictionary of exports, mapping an export category to a dict + of :class:`ExportEntry` instances describing the individual + export entries, and keyed by name. + """ + result = {} + r = self.get_distinfo_resource(EXPORTS_FILENAME) + if r: + result = self.read_exports() + return result + + def read_exports(self): + """ + Read exports data from a file in .ini format. + + :return: A dictionary of exports, mapping an export category to a list + of :class:`ExportEntry` instances describing the individual + export entries. + """ + result = {} + r = self.get_distinfo_resource(EXPORTS_FILENAME) + if r: + with contextlib.closing(r.as_stream()) as stream: + result = read_exports(stream) + return result + + def write_exports(self, exports): + """ + Write a dictionary of exports to a file in .ini format. + :param exports: A dictionary of exports, mapping an export category to + a list of :class:`ExportEntry` instances describing the + individual export entries. + """ + rf = self.get_distinfo_file(EXPORTS_FILENAME) + with open(rf, 'w') as f: + write_exports(exports, f) + + def get_resource_path(self, relative_path): + """ + NOTE: This API may change in the future. + + Return the absolute path to a resource file with the given relative + path. 
+ + :param relative_path: The path, relative to .dist-info, of the resource + of interest. + :return: The absolute path where the resource is to be found. + """ + r = self.get_distinfo_resource('RESOURCES') + with contextlib.closing(r.as_stream()) as stream: + with CSVReader(stream=stream) as resources_reader: + for relative, destination in resources_reader: + if relative == relative_path: + return destination + raise KeyError('no resource file with relative path %r ' + 'is installed' % relative_path) + + def list_installed_files(self): + """ + Iterates over the ``RECORD`` entries and returns a tuple + ``(path, hash, size)`` for each line. + + :returns: iterator of (path, hash, size) + """ + for result in self._get_records(): + yield result + + def write_installed_files(self, paths, prefix, dry_run=False): + """ + Writes the ``RECORD`` file, using the ``paths`` iterable passed in. Any + existing ``RECORD`` file is silently overwritten. + + prefix is used to determine when to write absolute paths. + """ + prefix = os.path.join(prefix, '') + base = os.path.dirname(self.path) + base_under_prefix = base.startswith(prefix) + base = os.path.join(base, '') + record_path = self.get_distinfo_file('RECORD') + logger.info('creating %s', record_path) + if dry_run: + return None + with CSVWriter(record_path) as writer: + for path in paths: + if os.path.isdir(path) or path.endswith(('.pyc', '.pyo')): + # do not put size and hash, as in PEP-376 + hash_value = size = '' + else: + size = '%d' % os.path.getsize(path) + with open(path, 'rb') as fp: + hash_value = self.get_hash(fp.read()) + if path.startswith(base) or (base_under_prefix and + path.startswith(prefix)): + path = os.path.relpath(path, base) + writer.writerow((path, hash_value, size)) + + # add the RECORD file itself + if record_path.startswith(base): + record_path = os.path.relpath(record_path, base) + writer.writerow((record_path, '', '')) + return record_path + + def check_installed_files(self): + """ + Checks that the hashes and sizes of the files in ``RECORD`` are + matched by the files themselves. Returns a (possibly empty) list of + mismatches. Each entry in the mismatch list will be a tuple consisting + of the path, 'exists', 'size' or 'hash' according to what didn't match + (existence is checked first, then size, then hash), the expected + value and the actual value. + """ + mismatches = [] + base = os.path.dirname(self.path) + record_path = self.get_distinfo_file('RECORD') + for path, hash_value, size in self.list_installed_files(): + if not os.path.isabs(path): + path = os.path.join(base, path) + if path == record_path: + continue + if not os.path.exists(path): + mismatches.append((path, 'exists', True, False)) + elif os.path.isfile(path): + actual_size = str(os.path.getsize(path)) + if size and actual_size != size: + mismatches.append((path, 'size', size, actual_size)) + elif hash_value: + if '=' in hash_value: + hasher = hash_value.split('=', 1)[0] + else: + hasher = None + + with open(path, 'rb') as f: + actual_hash = self.get_hash(f.read(), hasher) + if actual_hash != hash_value: + mismatches.append((path, 'hash', hash_value, actual_hash)) + return mismatches + + @cached_property + def shared_locations(self): + """ + A dictionary of shared locations whose keys are in the set 'prefix', + 'purelib', 'platlib', 'scripts', 'headers', 'data' and 'namespace'. + The corresponding value is the absolute path of that category for + this distribution, and takes into account any paths selected by the + user at installation time (e.g. 
via command-line arguments). In the + case of the 'namespace' key, this would be a list of absolute paths + for the roots of namespace packages in this distribution. + + The first time this property is accessed, the relevant information is + read from the SHARED file in the .dist-info directory. + """ + result = {} + shared_path = os.path.join(self.path, 'SHARED') + if os.path.isfile(shared_path): + with codecs.open(shared_path, 'r', encoding='utf-8') as f: + lines = f.read().splitlines() + for line in lines: + key, value = line.split('=', 1) + if key == 'namespace': + result.setdefault(key, []).append(value) + else: + result[key] = value + return result + + def write_shared_locations(self, paths, dry_run=False): + """ + Write shared location information to the SHARED file in .dist-info. + :param paths: A dictionary as described in the documentation for + :meth:`shared_locations`. + :param dry_run: If True, the action is logged but no file is actually + written. + :return: The path of the file written to. + """ + shared_path = os.path.join(self.path, 'SHARED') + logger.info('creating %s', shared_path) + if dry_run: + return None + lines = [] + for key in ('prefix', 'lib', 'headers', 'scripts', 'data'): + path = paths[key] + if os.path.isdir(paths[key]): + lines.append('%s=%s' % (key, path)) + for ns in paths.get('namespace', ()): + lines.append('namespace=%s' % ns) + + with codecs.open(shared_path, 'w', encoding='utf-8') as f: + f.write('\n'.join(lines)) + return shared_path + + def get_distinfo_resource(self, path): + if path not in DIST_FILES: + raise DistlibException('invalid path for a dist-info file: ' + '%r at %r' % (path, self.path)) + finder = resources.finder_for_path(self.path) + if finder is None: + raise DistlibException('Unable to get a finder for %s' % self.path) + return finder.find(path) + + def get_distinfo_file(self, path): + """ + Returns a path located under the ``.dist-info`` directory. Returns a + string representing the path. + + :parameter path: a ``'/'``-separated path relative to the + ``.dist-info`` directory or an absolute path; + If *path* is an absolute path and doesn't start + with the ``.dist-info`` directory path, + a :class:`DistlibException` is raised + :type path: str + :rtype: str + """ + # Check if it is an absolute path # XXX use relpath, add tests + if path.find(os.sep) >= 0: + # it's an absolute path? + distinfo_dirname, path = path.split(os.sep)[-2:] + if distinfo_dirname != self.path.split(os.sep)[-1]: + raise DistlibException( + 'dist-info file %r does not belong to the %r %s ' + 'distribution' % (path, self.name, self.version)) + + # The file must be relative + if path not in DIST_FILES: + raise DistlibException('invalid path for a dist-info file: ' + '%r at %r' % (path, self.path)) + + return os.path.join(self.path, path) + + def list_distinfo_files(self): + """ + Iterates over the ``RECORD`` entries and returns paths for each line if + the path is pointing to a file located in the ``.dist-info`` directory + or one of its subdirectories. 
+ + :returns: iterator of paths + """ + base = os.path.dirname(self.path) + for path, checksum, size in self._get_records(): + # XXX add separator or use real relpath algo + if not os.path.isabs(path): + path = os.path.join(base, path) + if path.startswith(self.path): + yield path + + def __eq__(self, other): + return (isinstance(other, InstalledDistribution) and + self.path == other.path) + + # See http://docs.python.org/reference/datamodel#object.__hash__ + __hash__ = object.__hash__ + + +class EggInfoDistribution(BaseInstalledDistribution): + """Created with the *path* of the ``.egg-info`` directory or file provided + to the constructor. It reads the metadata contained in the file itself, or + if the given path happens to be a directory, the metadata is read from the + file ``PKG-INFO`` under that directory.""" + + requested = True # as we have no way of knowing, assume it was + shared_locations = {} + + def __init__(self, path, env=None): + def set_name_and_version(s, n, v): + s.name = n + s.key = n.lower() # for case-insensitive comparisons + s.version = v + + self.path = path + self.dist_path = env + if env and env._cache_enabled and path in env._cache_egg.path: + metadata = env._cache_egg.path[path].metadata + set_name_and_version(self, metadata.name, metadata.version) + else: + metadata = self._get_metadata(path) + + # Need to be set before caching + set_name_and_version(self, metadata.name, metadata.version) + + if env and env._cache_enabled: + env._cache_egg.add(self) + super(EggInfoDistribution, self).__init__(metadata, path, env) + + def _get_metadata(self, path): + requires = None + + def parse_requires_data(data): + """Create a list of dependencies from a requires.txt file. + + *data*: the contents of a setuptools-produced requires.txt file. + """ + reqs = [] + lines = data.splitlines() + for line in lines: + line = line.strip() + # sectioned files have bare newlines (separating sections) + if not line: # pragma: no cover + continue + if line.startswith('['): # pragma: no cover + logger.warning('Unexpected line: quitting requirement scan: %r', + line) + break + r = parse_requirement(line) + if not r: # pragma: no cover + logger.warning('Not recognised as a requirement: %r', line) + continue + if r.extras: # pragma: no cover + logger.warning('extra requirements in requires.txt are ' + 'not supported') + if not r.constraints: + reqs.append(r.name) + else: + cons = ', '.join('%s%s' % c for c in r.constraints) + reqs.append('%s (%s)' % (r.name, cons)) + return reqs + + def parse_requires_path(req_path): + """Create a list of dependencies from a requires.txt file. + + *req_path*: the path to a setuptools-produced requires.txt file. 
+ """ + + reqs = [] + try: + with codecs.open(req_path, 'r', 'utf-8') as fp: + reqs = parse_requires_data(fp.read()) + except IOError: + pass + return reqs + + tl_path = tl_data = None + if path.endswith('.egg'): + if os.path.isdir(path): + p = os.path.join(path, 'EGG-INFO') + meta_path = os.path.join(p, 'PKG-INFO') + metadata = Metadata(path=meta_path, scheme='legacy') + req_path = os.path.join(p, 'requires.txt') + tl_path = os.path.join(p, 'top_level.txt') + requires = parse_requires_path(req_path) + else: + # FIXME handle the case where zipfile is not available + zipf = zipimport.zipimporter(path) + fileobj = StringIO( + zipf.get_data('EGG-INFO/PKG-INFO').decode('utf8')) + metadata = Metadata(fileobj=fileobj, scheme='legacy') + try: + data = zipf.get_data('EGG-INFO/requires.txt') + tl_data = zipf.get_data('EGG-INFO/top_level.txt').decode('utf-8') + requires = parse_requires_data(data.decode('utf-8')) + except IOError: + requires = None + elif path.endswith('.egg-info'): + if os.path.isdir(path): + req_path = os.path.join(path, 'requires.txt') + requires = parse_requires_path(req_path) + path = os.path.join(path, 'PKG-INFO') + tl_path = os.path.join(path, 'top_level.txt') + metadata = Metadata(path=path, scheme='legacy') + else: + raise DistlibException('path must end with .egg-info or .egg, ' + 'got %r' % path) + + if requires: + metadata.add_requirements(requires) + # look for top-level modules in top_level.txt, if present + if tl_data is None: + if tl_path is not None and os.path.exists(tl_path): + with open(tl_path, 'rb') as f: + tl_data = f.read().decode('utf-8') + if not tl_data: + tl_data = [] + else: + tl_data = tl_data.splitlines() + self.modules = tl_data + return metadata + + def __repr__(self): + return '' % ( + self.name, self.version, self.path) + + def __str__(self): + return "%s %s" % (self.name, self.version) + + def check_installed_files(self): + """ + Checks that the hashes and sizes of the files in ``RECORD`` are + matched by the files themselves. Returns a (possibly empty) list of + mismatches. Each entry in the mismatch list will be a tuple consisting + of the path, 'exists', 'size' or 'hash' according to what didn't match + (existence is checked first, then size, then hash), the expected + value and the actual value. + """ + mismatches = [] + record_path = os.path.join(self.path, 'installed-files.txt') + if os.path.exists(record_path): + for path, _, _ in self.list_installed_files(): + if path == record_path: + continue + if not os.path.exists(path): + mismatches.append((path, 'exists', True, False)) + return mismatches + + def list_installed_files(self): + """ + Iterates over the ``installed-files.txt`` entries and returns a tuple + ``(path, hash, size)`` for each line. 
+ + :returns: a list of (path, hash, size) + """ + + def _md5(path): + f = open(path, 'rb') + try: + content = f.read() + finally: + f.close() + return hashlib.md5(content).hexdigest() + + def _size(path): + return os.stat(path).st_size + + record_path = os.path.join(self.path, 'installed-files.txt') + result = [] + if os.path.exists(record_path): + with codecs.open(record_path, 'r', encoding='utf-8') as f: + for line in f: + line = line.strip() + p = os.path.normpath(os.path.join(self.path, line)) + # "./" is present as a marker between installed files + # and installation metadata files + if not os.path.exists(p): + logger.warning('Non-existent file: %s', p) + if p.endswith(('.pyc', '.pyo')): + continue + #otherwise fall through and fail + if not os.path.isdir(p): + result.append((p, _md5(p), _size(p))) + result.append((record_path, None, None)) + return result + + def list_distinfo_files(self, absolute=False): + """ + Iterates over the ``installed-files.txt`` entries and returns paths for + each line if the path is pointing to a file located in the + ``.egg-info`` directory or one of its subdirectories. + + :parameter absolute: If *absolute* is ``True``, each returned path is + transformed into a local absolute path. Otherwise the + raw value from ``installed-files.txt`` is returned. + :type absolute: boolean + :returns: iterator of paths + """ + record_path = os.path.join(self.path, 'installed-files.txt') + if os.path.exists(record_path): + skip = True + with codecs.open(record_path, 'r', encoding='utf-8') as f: + for line in f: + line = line.strip() + if line == './': + skip = False + continue + if not skip: + p = os.path.normpath(os.path.join(self.path, line)) + if p.startswith(self.path): + if absolute: + yield p + else: + yield line + + def __eq__(self, other): + return (isinstance(other, EggInfoDistribution) and + self.path == other.path) + + # See http://docs.python.org/reference/datamodel#object.__hash__ + __hash__ = object.__hash__ + +new_dist_class = InstalledDistribution +old_dist_class = EggInfoDistribution + + +class DependencyGraph(object): + """ + Represents a dependency graph between distributions. + + The dependency relationships are stored in an ``adjacency_list`` that maps + distributions to a list of ``(other, label)`` tuples where ``other`` + is a distribution and the edge is labeled with ``label`` (i.e. the version + specifier, if such was provided). Also, for more efficient traversal, for + every distribution ``x``, a list of predecessors is kept in + ``reverse_list[x]``. An edge from distribution ``a`` to + distribution ``b`` means that ``a`` depends on ``b``. If any missing + dependencies are found, they are stored in ``missing``, which is a + dictionary that maps distributions to a list of requirements that were not + provided by any other distributions. + """ + + def __init__(self): + self.adjacency_list = {} + self.reverse_list = {} + self.missing = {} + + def add_distribution(self, distribution): + """Add the *distribution* to the graph. + + :type distribution: :class:`distutils2.database.InstalledDistribution` + or :class:`distutils2.database.EggInfoDistribution` + """ + self.adjacency_list[distribution] = [] + self.reverse_list[distribution] = [] + #self.missing[distribution] = [] + + def add_edge(self, x, y, label=None): + """Add an edge from distribution *x* to distribution *y* with the given + *label*. 
+ + :type x: :class:`distutils2.database.InstalledDistribution` or + :class:`distutils2.database.EggInfoDistribution` + :type y: :class:`distutils2.database.InstalledDistribution` or + :class:`distutils2.database.EggInfoDistribution` + :type label: ``str`` or ``None`` + """ + self.adjacency_list[x].append((y, label)) + # multiple edges are allowed, so be careful + if x not in self.reverse_list[y]: + self.reverse_list[y].append(x) + + def add_missing(self, distribution, requirement): + """ + Add a missing *requirement* for the given *distribution*. + + :type distribution: :class:`distutils2.database.InstalledDistribution` + or :class:`distutils2.database.EggInfoDistribution` + :type requirement: ``str`` + """ + logger.debug('%s missing %r', distribution, requirement) + self.missing.setdefault(distribution, []).append(requirement) + + def _repr_dist(self, dist): + return '%s %s' % (dist.name, dist.version) + + def repr_node(self, dist, level=1): + """Prints only a subgraph""" + output = [self._repr_dist(dist)] + for other, label in self.adjacency_list[dist]: + dist = self._repr_dist(other) + if label is not None: + dist = '%s [%s]' % (dist, label) + output.append(' ' * level + str(dist)) + suboutput = self.repr_node(other, level + 1) + subs = suboutput.split('\n') + output.extend(subs[1:]) + return '\n'.join(output) + + def to_dot(self, f, skip_disconnected=True): + """Writes a DOT output for the graph to the provided file *f*. + + If *skip_disconnected* is set to ``True``, then all distributions + that are not dependent on any other distribution are skipped. + + :type f: has to support ``file``-like operations + :type skip_disconnected: ``bool`` + """ + disconnected = [] + + f.write("digraph dependencies {\n") + for dist, adjs in self.adjacency_list.items(): + if len(adjs) == 0 and not skip_disconnected: + disconnected.append(dist) + for other, label in adjs: + if not label is None: + f.write('"%s" -> "%s" [label="%s"]\n' % + (dist.name, other.name, label)) + else: + f.write('"%s" -> "%s"\n' % (dist.name, other.name)) + if not skip_disconnected and len(disconnected) > 0: + f.write('subgraph disconnected {\n') + f.write('label = "Disconnected"\n') + f.write('bgcolor = red\n') + + for dist in disconnected: + f.write('"%s"' % dist.name) + f.write('\n') + f.write('}\n') + f.write('}\n') + + def topological_sort(self): + """ + Perform a topological sort of the graph. + :return: A tuple, the first element of which is a topologically sorted + list of distributions, and the second element of which is a + list of distributions that cannot be sorted because they have + circular dependencies and so form a cycle. + """ + result = [] + # Make a shallow copy of the adjacency list + alist = {} + for k, v in self.adjacency_list.items(): + alist[k] = v[:] + while True: + # See what we can remove in this run + to_remove = [] + for k, v in list(alist.items())[:]: + if not v: + to_remove.append(k) + del alist[k] + if not to_remove: + # What's left in alist (if anything) is a cycle. 
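+                    # Every node still in alist has at least one unresolved
+                    # dependency edge, so no further progress is possible; the
+                    # survivors are returned as the cycle list below.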
+ break + # Remove from the adjacency list of others + for k, v in alist.items(): + alist[k] = [(d, r) for d, r in v if d not in to_remove] + logger.debug('Moving to result: %s', + ['%s (%s)' % (d.name, d.version) for d in to_remove]) + result.extend(to_remove) + return result, list(alist.keys()) + + def __repr__(self): + """Representation of the graph""" + output = [] + for dist, adjs in self.adjacency_list.items(): + output.append(self.repr_node(dist)) + return '\n'.join(output) + + +def make_graph(dists, scheme='default'): + """Makes a dependency graph from the given distributions. + + :parameter dists: a list of distributions + :type dists: list of :class:`distutils2.database.InstalledDistribution` and + :class:`distutils2.database.EggInfoDistribution` instances + :rtype: a :class:`DependencyGraph` instance + """ + scheme = get_scheme(scheme) + graph = DependencyGraph() + provided = {} # maps names to lists of (version, dist) tuples + + # first, build the graph and find out what's provided + for dist in dists: + graph.add_distribution(dist) + + for p in dist.provides: + name, version = parse_name_and_version(p) + logger.debug('Add to provided: %s, %s, %s', name, version, dist) + provided.setdefault(name, []).append((version, dist)) + + # now make the edges + for dist in dists: + requires = (dist.run_requires | dist.meta_requires | + dist.build_requires | dist.dev_requires) + for req in requires: + try: + matcher = scheme.matcher(req) + except UnsupportedVersionError: + # XXX compat-mode if cannot read the version + logger.warning('could not read version %r - using name only', + req) + name = req.split()[0] + matcher = scheme.matcher(name) + + name = matcher.key # case-insensitive + + matched = False + if name in provided: + for version, provider in provided[name]: + try: + match = matcher.match(version) + except UnsupportedVersionError: + match = False + + if match: + graph.add_edge(dist, provider, req) + matched = True + break + if not matched: + graph.add_missing(dist, req) + return graph + + +def get_dependent_dists(dists, dist): + """Recursively generate a list of distributions from *dists* that are + dependent on *dist*. + + :param dists: a list of distributions + :param dist: a distribution, member of *dists* for which we are interested + """ + if dist not in dists: + raise DistlibException('given distribution %r is not a member ' + 'of the list' % dist.name) + graph = make_graph(dists) + + dep = [dist] # dependent distributions + todo = graph.reverse_list[dist] # list of nodes we should inspect + + while todo: + d = todo.pop() + dep.append(d) + for succ in graph.reverse_list[d]: + if succ not in dep: + todo.append(succ) + + dep.pop(0) # remove dist from dep, was there to prevent infinite loops + return dep + + +def get_required_dists(dists, dist): + """Recursively generate a list of distributions from *dists* that are + required by *dist*. + + :param dists: a list of distributions + :param dist: a distribution, member of *dists* for which we are interested + in finding the dependencies. 
+ """ + if dist not in dists: + raise DistlibException('given distribution %r is not a member ' + 'of the list' % dist.name) + graph = make_graph(dists) + + req = set() # required distributions + todo = graph.adjacency_list[dist] # list of nodes we should inspect + seen = set(t[0] for t in todo) # already added to todo + + while todo: + d = todo.pop()[0] + req.add(d) + pred_list = graph.adjacency_list[d] + for pred in pred_list: + d = pred[0] + if d not in req and d not in seen: + seen.add(d) + todo.append(pred) + return req + + +def make_dist(name, version, **kwargs): + """ + A convenience method for making a dist given just a name and version. + """ + summary = kwargs.pop('summary', 'Placeholder for summary') + md = Metadata(**kwargs) + md.name = name + md.version = version + md.summary = summary or 'Placeholder for summary' + return Distribution(md) diff --git a/venv/lib/python3.10/site-packages/distlib/index.py b/venv/lib/python3.10/site-packages/distlib/index.py new file mode 100644 index 0000000..9b6d129 --- /dev/null +++ b/venv/lib/python3.10/site-packages/distlib/index.py @@ -0,0 +1,508 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2013 Vinay Sajip. +# Licensed to the Python Software Foundation under a contributor agreement. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +import hashlib +import logging +import os +import shutil +import subprocess +import tempfile +try: + from threading import Thread +except ImportError: # pragma: no cover + from dummy_threading import Thread + +from . import DistlibException +from .compat import (HTTPBasicAuthHandler, Request, HTTPPasswordMgr, + urlparse, build_opener, string_types) +from .util import zip_dir, ServerProxy + +logger = logging.getLogger(__name__) + +DEFAULT_INDEX = 'https://pypi.org/pypi' +DEFAULT_REALM = 'pypi' + +class PackageIndex(object): + """ + This class represents a package index compatible with PyPI, the Python + Package Index. + """ + + boundary = b'----------ThIs_Is_tHe_distlib_index_bouNdaRY_$' + + def __init__(self, url=None): + """ + Initialise an instance. + + :param url: The URL of the index. If not specified, the URL for PyPI is + used. + """ + self.url = url or DEFAULT_INDEX + self.read_configuration() + scheme, netloc, path, params, query, frag = urlparse(self.url) + if params or query or frag or scheme not in ('http', 'https'): + raise DistlibException('invalid repository: %s' % self.url) + self.password_handler = None + self.ssl_verifier = None + self.gpg = None + self.gpg_home = None + with open(os.devnull, 'w') as sink: + # Use gpg by default rather than gpg2, as gpg2 insists on + # prompting for passwords + for s in ('gpg', 'gpg2'): + try: + rc = subprocess.check_call([s, '--version'], stdout=sink, + stderr=sink) + if rc == 0: + self.gpg = s + break + except OSError: + pass + + def _get_pypirc_command(self): + """ + Get the distutils command for interacting with PyPI configurations. + :return: the command. + """ + from .util import _get_pypirc_command as cmd + return cmd() + + def read_configuration(self): + """ + Read the PyPI access configuration as supported by distutils. This populates + ``username``, ``password``, ``realm`` and ``url`` attributes from the + configuration. + """ + from .util import _load_pypirc + cfg = _load_pypirc(self) + self.username = cfg.get('username') + self.password = cfg.get('password') + self.realm = cfg.get('realm', 'pypi') + self.url = cfg.get('repository', self.url) + + def save_configuration(self): + """ + Save the PyPI access configuration. 
You must have set ``username`` and
+        ``password`` attributes before calling this method.
+        """
+        self.check_credentials()
+        from .util import _store_pypirc
+        _store_pypirc(self)
+
+    def check_credentials(self):
+        """
+        Check that ``username`` and ``password`` have been set, and raise an
+        exception if not.
+        """
+        if self.username is None or self.password is None:
+            raise DistlibException('username and password must be set')
+        pm = HTTPPasswordMgr()
+        _, netloc, _, _, _, _ = urlparse(self.url)
+        pm.add_password(self.realm, netloc, self.username, self.password)
+        self.password_handler = HTTPBasicAuthHandler(pm)
+
+    def register(self, metadata):  # pragma: no cover
+        """
+        Register a distribution on PyPI, using the provided metadata.
+
+        :param metadata: A :class:`Metadata` instance defining at least a name
+                         and version number for the distribution to be
+                         registered.
+        :return: The HTTP response received from PyPI upon submission of the
+                 request.
+        """
+        self.check_credentials()
+        metadata.validate()
+        d = metadata.todict()
+        d[':action'] = 'verify'
+        request = self.encode_request(d.items(), [])
+        response = self.send_request(request)
+        d[':action'] = 'submit'
+        request = self.encode_request(d.items(), [])
+        return self.send_request(request)
+
+    def _reader(self, name, stream, outbuf):
+        """
+        Thread runner for reading lines of output from a subprocess into a
+        buffer.
+
+        :param name: The logical name of the stream (used for logging only).
+        :param stream: The stream to read from. This will typically be a pipe
+                       connected to the output stream of a subprocess.
+        :param outbuf: The list to append the read lines to.
+        """
+        while True:
+            s = stream.readline()
+            if not s:
+                break
+            s = s.decode('utf-8').rstrip()
+            outbuf.append(s)
+            logger.debug('%s: %s' % (name, s))
+        stream.close()
+
+    def get_sign_command(self, filename, signer, sign_password, keystore=None):  # pragma: no cover
+        """
+        Return a suitable command for signing a file.
+
+        :param filename: The pathname to the file to be signed.
+        :param signer: The identifier of the signer of the file.
+        :param sign_password: The passphrase for the signer's
+                              private key used for signing.
+        :param keystore: The path to a directory which contains the keys
+                         used in verification. If not specified, the
+                         instance's ``gpg_home`` attribute is used instead.
+        :return: The signing command as a list suitable to be
+                 passed to :class:`subprocess.Popen`.
+        """
+        cmd = [self.gpg, '--status-fd', '2', '--no-tty']
+        if keystore is None:
+            keystore = self.gpg_home
+        if keystore:
+            cmd.extend(['--homedir', keystore])
+        if sign_password is not None:
+            cmd.extend(['--batch', '--passphrase-fd', '0'])
+        td = tempfile.mkdtemp()
+        sf = os.path.join(td, os.path.basename(filename) + '.asc')
+        cmd.extend(['--detach-sign', '--armor', '--local-user',
+                    signer, '--output', sf, filename])
+        logger.debug('invoking: %s', ' '.join(cmd))
+        return cmd, sf
+
+    def run_command(self, cmd, input_data=None):
+        """
+        Run a command in a child process, passing it any input data specified.
+
+        :param cmd: The command to run.
+        :param input_data: If specified, this must be a byte string containing
+                           data to be sent to the child process.
+        :return: A tuple consisting of the subprocess' exit code, a list of
+                 lines read from the subprocess' ``stdout``, and a list of
+                 lines read from the subprocess' ``stderr``.
+ """ + kwargs = { + 'stdout': subprocess.PIPE, + 'stderr': subprocess.PIPE, + } + if input_data is not None: + kwargs['stdin'] = subprocess.PIPE + stdout = [] + stderr = [] + p = subprocess.Popen(cmd, **kwargs) + # We don't use communicate() here because we may need to + # get clever with interacting with the command + t1 = Thread(target=self._reader, args=('stdout', p.stdout, stdout)) + t1.start() + t2 = Thread(target=self._reader, args=('stderr', p.stderr, stderr)) + t2.start() + if input_data is not None: + p.stdin.write(input_data) + p.stdin.close() + + p.wait() + t1.join() + t2.join() + return p.returncode, stdout, stderr + + def sign_file(self, filename, signer, sign_password, keystore=None): # pragma: no cover + """ + Sign a file. + + :param filename: The pathname to the file to be signed. + :param signer: The identifier of the signer of the file. + :param sign_password: The passphrase for the signer's + private key used for signing. + :param keystore: The path to a directory which contains the keys + used in signing. If not specified, the instance's + ``gpg_home`` attribute is used instead. + :return: The absolute pathname of the file where the signature is + stored. + """ + cmd, sig_file = self.get_sign_command(filename, signer, sign_password, + keystore) + rc, stdout, stderr = self.run_command(cmd, + sign_password.encode('utf-8')) + if rc != 0: + raise DistlibException('sign command failed with error ' + 'code %s' % rc) + return sig_file + + def upload_file(self, metadata, filename, signer=None, sign_password=None, + filetype='sdist', pyversion='source', keystore=None): + """ + Upload a release file to the index. + + :param metadata: A :class:`Metadata` instance defining at least a name + and version number for the file to be uploaded. + :param filename: The pathname of the file to be uploaded. + :param signer: The identifier of the signer of the file. + :param sign_password: The passphrase for the signer's + private key used for signing. + :param filetype: The type of the file being uploaded. This is the + distutils command which produced that file, e.g. + ``sdist`` or ``bdist_wheel``. + :param pyversion: The version of Python which the release relates + to. For code compatible with any Python, this would + be ``source``, otherwise it would be e.g. ``3.2``. + :param keystore: The path to a directory which contains the keys + used in signing. If not specified, the instance's + ``gpg_home`` attribute is used instead. + :return: The HTTP response received from PyPI upon submission of the + request. 
+ """ + self.check_credentials() + if not os.path.exists(filename): + raise DistlibException('not found: %s' % filename) + metadata.validate() + d = metadata.todict() + sig_file = None + if signer: + if not self.gpg: + logger.warning('no signing program available - not signed') + else: + sig_file = self.sign_file(filename, signer, sign_password, + keystore) + with open(filename, 'rb') as f: + file_data = f.read() + md5_digest = hashlib.md5(file_data).hexdigest() + sha256_digest = hashlib.sha256(file_data).hexdigest() + d.update({ + ':action': 'file_upload', + 'protocol_version': '1', + 'filetype': filetype, + 'pyversion': pyversion, + 'md5_digest': md5_digest, + 'sha256_digest': sha256_digest, + }) + files = [('content', os.path.basename(filename), file_data)] + if sig_file: + with open(sig_file, 'rb') as f: + sig_data = f.read() + files.append(('gpg_signature', os.path.basename(sig_file), + sig_data)) + shutil.rmtree(os.path.dirname(sig_file)) + request = self.encode_request(d.items(), files) + return self.send_request(request) + + def upload_documentation(self, metadata, doc_dir): # pragma: no cover + """ + Upload documentation to the index. + + :param metadata: A :class:`Metadata` instance defining at least a name + and version number for the documentation to be + uploaded. + :param doc_dir: The pathname of the directory which contains the + documentation. This should be the directory that + contains the ``index.html`` for the documentation. + :return: The HTTP response received from PyPI upon submission of the + request. + """ + self.check_credentials() + if not os.path.isdir(doc_dir): + raise DistlibException('not a directory: %r' % doc_dir) + fn = os.path.join(doc_dir, 'index.html') + if not os.path.exists(fn): + raise DistlibException('not found: %r' % fn) + metadata.validate() + name, version = metadata.name, metadata.version + zip_data = zip_dir(doc_dir).getvalue() + fields = [(':action', 'doc_upload'), + ('name', name), ('version', version)] + files = [('content', name, zip_data)] + request = self.encode_request(fields, files) + return self.send_request(request) + + def get_verify_command(self, signature_filename, data_filename, + keystore=None): + """ + Return a suitable command for verifying a file. + + :param signature_filename: The pathname to the file containing the + signature. + :param data_filename: The pathname to the file containing the + signed data. + :param keystore: The path to a directory which contains the keys + used in verification. If not specified, the + instance's ``gpg_home`` attribute is used instead. + :return: The verifying command as a list suitable to be + passed to :class:`subprocess.Popen`. + """ + cmd = [self.gpg, '--status-fd', '2', '--no-tty'] + if keystore is None: + keystore = self.gpg_home + if keystore: + cmd.extend(['--homedir', keystore]) + cmd.extend(['--verify', signature_filename, data_filename]) + logger.debug('invoking: %s', ' '.join(cmd)) + return cmd + + def verify_signature(self, signature_filename, data_filename, + keystore=None): + """ + Verify a signature for a file. + + :param signature_filename: The pathname to the file containing the + signature. + :param data_filename: The pathname to the file containing the + signed data. + :param keystore: The path to a directory which contains the keys + used in verification. If not specified, the + instance's ``gpg_home`` attribute is used instead. + :return: True if the signature was verified, else False. 
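+
+        For example (an illustrative sketch; the filenames are hypothetical
+        and ``gpg`` must be available)::
+
+            if not index.verify_signature('foo-1.0.tar.gz.asc',
+                                          'foo-1.0.tar.gz'):
+                raise DistlibException('signature check failed')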
+        """
+        if not self.gpg:
+            raise DistlibException('verification unavailable because gpg '
+                                   'unavailable')
+        cmd = self.get_verify_command(signature_filename, data_filename,
+                                      keystore)
+        rc, stdout, stderr = self.run_command(cmd)
+        if rc not in (0, 1):
+            raise DistlibException('verify command failed with error '
+                                   'code %s' % rc)
+        return rc == 0
+
+    def download_file(self, url, destfile, digest=None, reporthook=None):
+        """
+        This is a convenience method for downloading a file from an URL.
+        Normally, this will be a file from the index, though currently
+        no check is made for this (i.e. a file can be downloaded from
+        anywhere).
+
+        The method is just like the :func:`urlretrieve` function in the
+        standard library, except that it allows digest computation to be
+        done during download and checking that the downloaded data
+        matches any expected value.
+
+        :param url: The URL of the file to be downloaded (assumed to be
+                    available via an HTTP GET request).
+        :param destfile: The pathname where the downloaded file is to be
+                         saved.
+        :param digest: If specified, this must be a (hasher, value)
+                       tuple, where hasher is the algorithm used (e.g.
+                       ``'md5'``) and ``value`` is the expected value.
+        :param reporthook: The same as for :func:`urlretrieve` in the
+                           standard library.
+        """
+        if digest is None:
+            digester = None
+            logger.debug('No digest specified')
+        else:
+            if isinstance(digest, (list, tuple)):
+                hasher, digest = digest
+            else:
+                hasher = 'md5'
+            digester = getattr(hashlib, hasher)()
+            logger.debug('Digest specified: %s' % digest)
+        # The following code is equivalent to urlretrieve.
+        # We need to do it this way so that we can compute the
+        # digest of the file as we go.
+        with open(destfile, 'wb') as dfp:
+            # addinfourl is not a context manager on 2.x
+            # so we have to use try/finally
+            sfp = self.send_request(Request(url))
+            try:
+                headers = sfp.info()
+                blocksize = 8192
+                size = -1
+                read = 0
+                blocknum = 0
+                if "content-length" in headers:
+                    size = int(headers["Content-Length"])
+                if reporthook:
+                    reporthook(blocknum, blocksize, size)
+                while True:
+                    block = sfp.read(blocksize)
+                    if not block:
+                        break
+                    read += len(block)
+                    dfp.write(block)
+                    if digester:
+                        digester.update(block)
+                    blocknum += 1
+                    if reporthook:
+                        reporthook(blocknum, blocksize, size)
+            finally:
+                sfp.close()
+
+        # check that we got the whole file, if we can
+        if size >= 0 and read < size:
+            raise DistlibException(
+                'retrieval incomplete: got only %d out of %d bytes'
+                % (read, size))
+        # if we have a digest, it must match.
+        if digester:
+            actual = digester.hexdigest()
+            if digest != actual:
+                raise DistlibException('%s digest mismatch for %s: expected '
+                                       '%s, got %s' % (hasher, destfile,
+                                                       digest, actual))
+            logger.debug('Digest verified: %s', digest)
+
+    def send_request(self, req):
+        """
+        Send a standard library :class:`Request` to PyPI and return its
+        response.
+
+        :param req: The request to send.
+        :return: The HTTP response from PyPI (a standard library HTTPResponse).
+        """
+        handlers = []
+        if self.password_handler:
+            handlers.append(self.password_handler)
+        if self.ssl_verifier:
+            handlers.append(self.ssl_verifier)
+        opener = build_opener(*handlers)
+        return opener.open(req)
+
+    def encode_request(self, fields, files):
+        """
+        Encode fields and files for posting to an HTTP server.
+
+        :param fields: The fields to send as a list of (fieldname, value)
+                       tuples.
+        :param files: The files to send as a list of (fieldname, filename,
+                      file_bytes) tuples.
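+
+        For example (an illustrative sketch mirroring :meth:`register`)::
+
+            request = self.encode_request([(':action', 'verify')], [])
+            response = self.send_request(request)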
+ """ + # Adapted from packaging, which in turn was adapted from + # http://code.activestate.com/recipes/146306 + + parts = [] + boundary = self.boundary + for k, values in fields: + if not isinstance(values, (list, tuple)): + values = [values] + + for v in values: + parts.extend(( + b'--' + boundary, + ('Content-Disposition: form-data; name="%s"' % + k).encode('utf-8'), + b'', + v.encode('utf-8'))) + for key, filename, value in files: + parts.extend(( + b'--' + boundary, + ('Content-Disposition: form-data; name="%s"; filename="%s"' % + (key, filename)).encode('utf-8'), + b'', + value)) + + parts.extend((b'--' + boundary + b'--', b'')) + + body = b'\r\n'.join(parts) + ct = b'multipart/form-data; boundary=' + boundary + headers = { + 'Content-type': ct, + 'Content-length': str(len(body)) + } + return Request(self.url, body, headers) + + def search(self, terms, operator=None): # pragma: no cover + if isinstance(terms, string_types): + terms = {'name': terms} + rpc_proxy = ServerProxy(self.url, timeout=3.0) + try: + return rpc_proxy.search(terms, operator or 'and') + finally: + rpc_proxy('close')() diff --git a/venv/lib/python3.10/site-packages/distlib/locators.py b/venv/lib/python3.10/site-packages/distlib/locators.py new file mode 100644 index 0000000..966ebc0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/distlib/locators.py @@ -0,0 +1,1300 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2012-2015 Vinay Sajip. +# Licensed to the Python Software Foundation under a contributor agreement. +# See LICENSE.txt and CONTRIBUTORS.txt. +# + +import gzip +from io import BytesIO +import json +import logging +import os +import posixpath +import re +try: + import threading +except ImportError: # pragma: no cover + import dummy_threading as threading +import zlib + +from . import DistlibException +from .compat import (urljoin, urlparse, urlunparse, url2pathname, pathname2url, + queue, quote, unescape, build_opener, + HTTPRedirectHandler as BaseRedirectHandler, text_type, + Request, HTTPError, URLError) +from .database import Distribution, DistributionPath, make_dist +from .metadata import Metadata, MetadataInvalidError +from .util import (cached_property, ensure_slash, split_filename, get_project_data, + parse_requirement, parse_name_and_version, ServerProxy, + normalize_name) +from .version import get_scheme, UnsupportedVersionError +from .wheel import Wheel, is_compatible + +logger = logging.getLogger(__name__) + +HASHER_HASH = re.compile(r'^(\w+)=([a-f0-9]+)') +CHARSET = re.compile(r';\s*charset\s*=\s*(.*)\s*$', re.I) +HTML_CONTENT_TYPE = re.compile('text/html|application/x(ht)?ml') +DEFAULT_INDEX = 'https://pypi.org/pypi' + +def get_all_distribution_names(url=None): + """ + Return all distribution names known by an index. + :param url: The URL of the index. + :return: A list of all known distribution names. + """ + if url is None: + url = DEFAULT_INDEX + client = ServerProxy(url, timeout=3.0) + try: + return client.list_packages() + finally: + client('close')() + +class RedirectHandler(BaseRedirectHandler): + """ + A class to work around a bug in some Python 3.2.x releases. + """ + # There's a bug in the base version for some 3.2.x + # (e.g. 3.2.2 on Ubuntu Oneiric). If a Location header + # returns e.g. /abc, it bails because it says the scheme '' + # is bogus, when actually it should use the request's + # URL for the scheme. See Python issue #13696. 
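+    # For example (illustrative): a server replying with the header
+    # "Location: /simple/foo/" supplies no scheme, so the handler below
+    # resolves the value against the request's URL before redirecting.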
+    def http_error_302(self, req, fp, code, msg, headers):
+        # Some servers (incorrectly) return multiple Location headers
+        # (so probably same goes for URI). Use first header.
+        newurl = None
+        for key in ('location', 'uri'):
+            if key in headers:
+                newurl = headers[key]
+                break
+        if newurl is None:  # pragma: no cover
+            return
+        urlparts = urlparse(newurl)
+        if urlparts.scheme == '':
+            newurl = urljoin(req.get_full_url(), newurl)
+            if hasattr(headers, 'replace_header'):
+                headers.replace_header(key, newurl)
+            else:
+                headers[key] = newurl
+        return BaseRedirectHandler.http_error_302(self, req, fp, code, msg,
+                                                  headers)
+
+    http_error_301 = http_error_303 = http_error_307 = http_error_302
+
+class Locator(object):
+    """
+    A base class for locators - things that locate distributions.
+    """
+    source_extensions = ('.tar.gz', '.tar.bz2', '.tar', '.zip', '.tgz', '.tbz')
+    binary_extensions = ('.egg', '.exe', '.whl')
+    excluded_extensions = ('.pdf',)
+
+    # A list of tags indicating which wheels you want to match. The default
+    # value of None matches against the tags compatible with the running
+    # Python. If you want to match other values, set wheel_tags on a locator
+    # instance to a list of tuples (pyver, abi, arch) which you want to match.
+    wheel_tags = None
+
+    downloadable_extensions = source_extensions + ('.whl',)
+
+    def __init__(self, scheme='default'):
+        """
+        Initialise an instance.
+        :param scheme: Because locators look for most recent versions, they
+                       need to know the version scheme to use. This specifies
+                       the current PEP-recommended scheme - use ``'legacy'``
+                       if you need to support existing distributions on PyPI.
+        """
+        self._cache = {}
+        self.scheme = scheme
+        # Because of bugs in some of the handlers on some of the platforms,
+        # we use our own opener rather than just using urlopen.
+        self.opener = build_opener(RedirectHandler())
+        # If get_project() is called from locate(), the matcher instance
+        # is set from the requirement passed to locate(). See issue #18 for
+        # why this can be useful to know.
+        self.matcher = None
+        self.errors = queue.Queue()
+
+    def get_errors(self):
+        """
+        Return any errors which have occurred.
+        """
+        result = []
+        while not self.errors.empty():  # pragma: no cover
+            try:
+                e = self.errors.get(False)
+                result.append(e)
+            except queue.Empty:  # Queue instances have no Empty attribute
+                continue
+            self.errors.task_done()
+        return result
+
+    def clear_errors(self):
+        """
+        Clear any errors which may have been logged.
+        """
+        # Just get the errors and throw them away
+        self.get_errors()
+
+    def clear_cache(self):
+        self._cache.clear()
+
+    def _get_scheme(self):
+        return self._scheme
+
+    def _set_scheme(self, value):
+        self._scheme = value
+
+    scheme = property(_get_scheme, _set_scheme)
+
+    def _get_project(self, name):
+        """
+        For a given project, get a dictionary mapping available versions to Distribution
+        instances.
+
+        This should be implemented in subclasses.
+
+        If called from a locate() request, self.matcher will be set to a
+        matcher for the requirement to satisfy, otherwise it will be None.
+        """
+        raise NotImplementedError('Please implement in the subclass')
+
+    def get_distribution_names(self):
+        """
+        Return all the distribution names known to this locator.
+        """
+        raise NotImplementedError('Please implement in the subclass')
+
+    def get_project(self, name):
+        """
+        For a given project, get a dictionary mapping available versions to Distribution
+        instances.
+
+        This calls _get_project to do all the work, and just implements a caching layer on top.
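+
+        A sketch of the returned shape (illustrative only, not from the
+        original source)::
+
+            {
+                '1.0': <Distribution for foo 1.0>,
+                'urls': {'1.0': set of download URLs},
+                'digests': {url: (algo, digest) entries},
+            }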
+        """
+        if self._cache is None:  # pragma: no cover
+            result = self._get_project(name)
+        elif name in self._cache:
+            result = self._cache[name]
+        else:
+            self.clear_errors()
+            result = self._get_project(name)
+            self._cache[name] = result
+        return result
+
+    def score_url(self, url):
+        """
+        Give an URL a score which can be used to choose preferred URLs
+        for a given project release.
+        """
+        t = urlparse(url)
+        basename = posixpath.basename(t.path)
+        compatible = True
+        is_wheel = basename.endswith('.whl')
+        is_downloadable = basename.endswith(self.downloadable_extensions)
+        if is_wheel:
+            compatible = is_compatible(Wheel(basename), self.wheel_tags)
+        return (t.scheme == 'https', 'pypi.org' in t.netloc,
+                is_downloadable, is_wheel, compatible, basename)
+
+    def prefer_url(self, url1, url2):
+        """
+        Choose one of two URLs where both are candidates for distribution
+        archives for the same version of a distribution (for example,
+        .tar.gz vs. zip).
+
+        The current implementation favours https:// URLs over http://, archives
+        from PyPI over those from other locations, wheel compatibility (if a
+        wheel) and then the archive name.
+        """
+        result = url2
+        if url1:
+            s1 = self.score_url(url1)
+            s2 = self.score_url(url2)
+            if s1 > s2:
+                result = url1
+            if result != url2:
+                logger.debug('Not replacing %r with %r', url1, url2)
+            else:
+                logger.debug('Replacing %r with %r', url1, url2)
+        return result
+
+    def split_filename(self, filename, project_name):
+        """
+        Attempt to split a filename into project name, version and Python version.
+        """
+        return split_filename(filename, project_name)
+
+    def convert_url_to_download_info(self, url, project_name):
+        """
+        See if a URL is a candidate for a download URL for a project (the URL
+        has typically been scraped from an HTML page).
+
+        If it is, a dictionary is returned with keys "name", "version",
+        "filename" and "url"; otherwise, None is returned.
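+
+        For example (an illustrative sketch; the URL is hypothetical)::
+
+            info = locator.convert_url_to_download_info(
+                'https://example.com/packages/foo-1.0.tar.gz', 'foo')
+            # info['name'] == 'foo', info['version'] == '1.0',
+            # info['filename'] == 'foo-1.0.tar.gz'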
+ """ + def same_project(name1, name2): + return normalize_name(name1) == normalize_name(name2) + + result = None + scheme, netloc, path, params, query, frag = urlparse(url) + if frag.lower().startswith('egg='): # pragma: no cover + logger.debug('%s: version hint in fragment: %r', + project_name, frag) + m = HASHER_HASH.match(frag) + if m: + algo, digest = m.groups() + else: + algo, digest = None, None + origpath = path + if path and path[-1] == '/': # pragma: no cover + path = path[:-1] + if path.endswith('.whl'): + try: + wheel = Wheel(path) + if not is_compatible(wheel, self.wheel_tags): + logger.debug('Wheel not compatible: %s', path) + else: + if project_name is None: + include = True + else: + include = same_project(wheel.name, project_name) + if include: + result = { + 'name': wheel.name, + 'version': wheel.version, + 'filename': wheel.filename, + 'url': urlunparse((scheme, netloc, origpath, + params, query, '')), + 'python-version': ', '.join( + ['.'.join(list(v[2:])) for v in wheel.pyver]), + } + except Exception as e: # pragma: no cover + logger.warning('invalid path for wheel: %s', path) + elif not path.endswith(self.downloadable_extensions): # pragma: no cover + logger.debug('Not downloadable: %s', path) + else: # downloadable extension + path = filename = posixpath.basename(path) + for ext in self.downloadable_extensions: + if path.endswith(ext): + path = path[:-len(ext)] + t = self.split_filename(path, project_name) + if not t: # pragma: no cover + logger.debug('No match for project/version: %s', path) + else: + name, version, pyver = t + if not project_name or same_project(project_name, name): + result = { + 'name': name, + 'version': version, + 'filename': filename, + 'url': urlunparse((scheme, netloc, origpath, + params, query, '')), + #'packagetype': 'sdist', + } + if pyver: # pragma: no cover + result['python-version'] = pyver + break + if result and algo: + result['%s_digest' % algo] = digest + return result + + def _get_digest(self, info): + """ + Get a digest from a dictionary by looking at a "digests" dictionary + or keys of the form 'algo_digest'. + + Returns a 2-tuple (algo, digest) if found, else None. Currently + looks only for SHA256, then MD5. + """ + result = None + if 'digests' in info: + digests = info['digests'] + for algo in ('sha256', 'md5'): + if algo in digests: + result = (algo, digests[algo]) + break + if not result: + for algo in ('sha256', 'md5'): + key = '%s_digest' % algo + if key in info: + result = (algo, info[key]) + break + return result + + def _update_version_data(self, result, info): + """ + Update a result dictionary (the final result from _get_project) with a + dictionary for a specific version, which typically holds information + gleaned from a filename or URL for an archive for the distribution. + """ + name = info.pop('name') + version = info.pop('version') + if version in result: + dist = result[version] + md = dist.metadata + else: + dist = make_dist(name, version, scheme=self.scheme) + md = dist.metadata + dist.digest = digest = self._get_digest(info) + url = info['url'] + result['digests'][url] = digest + if md.source_url != info['url']: + md.source_url = self.prefer_url(md.source_url, url) + result['urls'].setdefault(version, set()).add(url) + dist.locator = self + result[version] = dist + + def locate(self, requirement, prereleases=False): + """ + Find the most recent distribution which matches the given + requirement. 
+ + :param requirement: A requirement of the form 'foo (1.0)' or perhaps + 'foo (>= 1.0, < 2.0, != 1.3)' + :param prereleases: If ``True``, allow pre-release versions + to be located. Otherwise, pre-release versions + are not returned. + :return: A :class:`Distribution` instance, or ``None`` if no such + distribution could be located. + """ + result = None + r = parse_requirement(requirement) + if r is None: # pragma: no cover + raise DistlibException('Not a valid requirement: %r' % requirement) + scheme = get_scheme(self.scheme) + self.matcher = matcher = scheme.matcher(r.requirement) + logger.debug('matcher: %s (%s)', matcher, type(matcher).__name__) + versions = self.get_project(r.name) + if len(versions) > 2: # urls and digests keys are present + # sometimes, versions are invalid + slist = [] + vcls = matcher.version_class + for k in versions: + if k in ('urls', 'digests'): + continue + try: + if not matcher.match(k): + pass # logger.debug('%s did not match %r', matcher, k) + else: + if prereleases or not vcls(k).is_prerelease: + slist.append(k) + # else: + # logger.debug('skipping pre-release ' + # 'version %s of %s', k, matcher.name) + except Exception: # pragma: no cover + logger.warning('error matching %s with %r', matcher, k) + pass # slist.append(k) + if len(slist) > 1: + slist = sorted(slist, key=scheme.key) + if slist: + logger.debug('sorted list: %s', slist) + version = slist[-1] + result = versions[version] + if result: + if r.extras: + result.extras = r.extras + result.download_urls = versions.get('urls', {}).get(version, set()) + d = {} + sd = versions.get('digests', {}) + for url in result.download_urls: + if url in sd: # pragma: no cover + d[url] = sd[url] + result.digests = d + self.matcher = None + return result + + +class PyPIRPCLocator(Locator): + """ + This locator uses XML-RPC to locate distributions. It therefore + cannot be used with simple mirrors (that only mirror file content). + """ + def __init__(self, url, **kwargs): + """ + Initialise an instance. + + :param url: The URL to use for XML-RPC. + :param kwargs: Passed to the superclass constructor. + """ + super(PyPIRPCLocator, self).__init__(**kwargs) + self.base_url = url + self.client = ServerProxy(url, timeout=3.0) + + def get_distribution_names(self): + """ + Return all the distribution names known to this locator. + """ + return set(self.client.list_packages()) + + def _get_project(self, name): + result = {'urls': {}, 'digests': {}} + versions = self.client.package_releases(name, True) + for v in versions: + urls = self.client.release_urls(name, v) + data = self.client.release_data(name, v) + metadata = Metadata(scheme=self.scheme) + metadata.name = data['name'] + metadata.version = data['version'] + metadata.license = data.get('license') + metadata.keywords = data.get('keywords', []) + metadata.summary = data.get('summary') + dist = Distribution(metadata) + if urls: + info = urls[0] + metadata.source_url = info['url'] + dist.digest = self._get_digest(info) + dist.locator = self + result[v] = dist + for info in urls: + url = info['url'] + digest = self._get_digest(info) + result['urls'].setdefault(v, set()).add(url) + result['digests'][url] = digest + return result + +class PyPIJSONLocator(Locator): + """ + This locator uses PyPI's JSON interface. It's very limited in functionality + and probably not worth using. 
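+
+    An illustrative construction (a sketch; the URL shown is the base of
+    PyPI's JSON API)::
+
+        locator = PyPIJSONLocator('https://pypi.org/pypi/')
+        versions = locator.get_project('requests')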
+    """
+    def __init__(self, url, **kwargs):
+        super(PyPIJSONLocator, self).__init__(**kwargs)
+        self.base_url = ensure_slash(url)
+
+    def get_distribution_names(self):
+        """
+        Return all the distribution names known to this locator.
+        """
+        raise NotImplementedError('Not available from this locator')
+
+    def _get_project(self, name):
+        result = {'urls': {}, 'digests': {}}
+        url = urljoin(self.base_url, '%s/json' % quote(name))
+        try:
+            resp = self.opener.open(url)
+            data = resp.read().decode()  # for now
+            d = json.loads(data)
+            md = Metadata(scheme=self.scheme)
+            data = d['info']
+            md.name = data['name']
+            md.version = data['version']
+            md.license = data.get('license')
+            md.keywords = data.get('keywords', [])
+            md.summary = data.get('summary')
+            dist = Distribution(md)
+            dist.locator = self
+            urls = d['urls']
+            result[md.version] = dist
+            for info in d['urls']:
+                url = info['url']
+                dist.download_urls.add(url)
+                dist.digests[url] = self._get_digest(info)
+                result['urls'].setdefault(md.version, set()).add(url)
+                result['digests'][url] = self._get_digest(info)
+            # Now get other releases
+            for version, infos in d['releases'].items():
+                if version == md.version:
+                    continue  # already done
+                omd = Metadata(scheme=self.scheme)
+                omd.name = md.name
+                omd.version = version
+                odist = Distribution(omd)
+                odist.locator = self
+                result[version] = odist
+                for info in infos:
+                    url = info['url']
+                    odist.download_urls.add(url)
+                    odist.digests[url] = self._get_digest(info)
+                    result['urls'].setdefault(version, set()).add(url)
+                    result['digests'][url] = self._get_digest(info)
+#            for info in urls:
+#                md.source_url = info['url']
+#                dist.digest = self._get_digest(info)
+#                dist.locator = self
+#            for info in urls:
+#                url = info['url']
+#                result['urls'].setdefault(md.version, set()).add(url)
+#                result['digests'][url] = self._get_digest(info)
+        except Exception as e:
+            self.errors.put(text_type(e))
+            logger.exception('JSON fetch failed: %s', e)
+        return result
+
+
+class Page(object):
+    """
+    This class represents a scraped HTML page.
+    """
+    # The following slightly hairy-looking regex just looks for the contents of
+    # an anchor link, which has an attribute "href" either immediately preceded
+    # or immediately followed by a "rel" attribute. The attribute values can be
+    # declared with double quotes, single quotes or no quotes - which leads to
+    # the length of the expression.
+    _href = re.compile("""
+(rel\\s*=\\s*(?:"(?P<rel1>[^"]*)"|'(?P<rel2>[^']*)'|(?P<rel3>[^>\\s\n]*))\\s+)?
+href\\s*=\\s*(?:"(?P<url1>[^"]*)"|'(?P<url2>[^']*)'|(?P<url3>[^>\\s\n]*))
+(\\s+rel\\s*=\\s*(?:"(?P<rel4>[^"]*)"|'(?P<rel5>[^']*)'|(?P<rel6>[^>\\s\n]*)))?
+""", re.I | re.S | re.X)
+    _base = re.compile(r"""<base\s+href\s*=\s*['"]?([^'">]+)""", re.I | re.S)
+
+    def __init__(self, data, url):
+        """
+        Initialise an instance with the Unicode page contents and the URL they
+        came from.
+        """
+        self.data = data
+        self.base_url = self.url = url
+        m = self._base.search(self.data)
+        if m:
+            self.base_url = m.group(1)
+
+    _clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)
+
+    @cached_property
+    def links(self):
+        """
+        Return the URLs of all the links on a page together with information
+        about their "rel" attribute, for determining which ones to treat as
+        downloads and which ones to queue for further scraping.
+        """
+        def clean(url):
+            "Tidy up an URL."
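+            # Illustrative example (not from the original source): a space
+            # in the path is percent-quoted,
+            # '/simple/foo bar/' -> '/simple/foo%20bar/'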
+            scheme, netloc, path, params, query, frag = urlparse(url)
+            return urlunparse((scheme, netloc, quote(path),
+                               params, query, frag))
+
+        result = set()
+        for match in self._href.finditer(self.data):
+            d = match.groupdict('')
+            rel = (d['rel1'] or d['rel2'] or d['rel3'] or
+                   d['rel4'] or d['rel5'] or d['rel6'])
+            url = d['url1'] or d['url2'] or d['url3']
+            url = urljoin(self.base_url, url)
+            url = unescape(url)
+            url = self._clean_re.sub(lambda m: '%%%2x' % ord(m.group(0)), url)
+            result.add((url, rel))
+        # We sort the result, hoping to bring the most recent versions
+        # to the front
+        result = sorted(result, key=lambda t: t[0], reverse=True)
+        return result
+
+
+class SimpleScrapingLocator(Locator):
+    """
+    A locator which scrapes HTML pages to locate downloads for a distribution.
+    This runs multiple threads to do the I/O; performance is at least as good
+    as pip's PackageFinder, which works in an analogous fashion.
+    """
+
+    # These are used to deal with various Content-Encoding schemes.
+    decoders = {
+        'deflate': zlib.decompress,
+        'gzip': lambda b: gzip.GzipFile(fileobj=BytesIO(b)).read(),
+        'none': lambda b: b,
+    }
+
+    def __init__(self, url, timeout=None, num_workers=10, **kwargs):
+        """
+        Initialise an instance.
+        :param url: The root URL to use for scraping.
+        :param timeout: The timeout, in seconds, to be applied to requests.
+                        This defaults to ``None`` (no timeout specified).
+        :param num_workers: The number of worker threads you want to do I/O.
+                            This defaults to 10.
+        :param kwargs: Passed to the superclass.
+        """
+        super(SimpleScrapingLocator, self).__init__(**kwargs)
+        self.base_url = ensure_slash(url)
+        self.timeout = timeout
+        self._page_cache = {}
+        self._seen = set()
+        self._to_fetch = queue.Queue()
+        self._bad_hosts = set()
+        self.skip_externals = False
+        self.num_workers = num_workers
+        self._lock = threading.RLock()
+        # See issue #45: we need to be resilient when the locator is used
+        # in a thread, e.g. with concurrent.futures. We can't use self._lock
+        # as it is for coordinating our internal threads - the ones created
+        # in _prepare_threads.
+        self._gplock = threading.RLock()
+        self.platform_check = False  # See issue #112
+
+    def _prepare_threads(self):
+        """
+        Threads are created only when get_project is called, and terminate
+        before it returns. They are there primarily to parallelise I/O (i.e.
+        fetching web pages).
+        """
+        self._threads = []
+        for i in range(self.num_workers):
+            t = threading.Thread(target=self._fetch)
+            t.daemon = True
+            t.start()
+            self._threads.append(t)
+
+    def _wait_threads(self):
+        """
+        Tell all the threads to terminate (by sending a sentinel value) and
+        wait for them to do so.
+        """
+        # Note that you need two loops, since you can't say which
+        # thread will get each sentinel
+        for t in self._threads:
+            self._to_fetch.put(None)    # sentinel
+        for t in self._threads:
+            t.join()
+        self._threads = []
+
+    def _get_project(self, name):
+        result = {'urls': {}, 'digests': {}}
+        with self._gplock:
+            self.result = result
+            self.project_name = name
+            url = urljoin(self.base_url, '%s/' % quote(name))
+            self._seen.clear()
+            self._page_cache.clear()
+            self._prepare_threads()
+            try:
+                logger.debug('Queueing %s', url)
+                self._to_fetch.put(url)
+                self._to_fetch.join()
+            finally:
+                self._wait_threads()
+            del self.result
+        return result
+
+    platform_dependent = re.compile(r'\b(linux_(i\d86|x86_64|arm\w+)|'
+                                    r'win(32|_amd64)|macosx_?\d+)\b', re.I)
+
+    def _is_platform_dependent(self, url):
+        """
+        Does an URL refer to a platform-specific download?
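+
+        For example (illustrative; the URL is hypothetical), a filename
+        containing ``win_amd64`` or ``linux_x86_64`` matches::
+
+            self._is_platform_dependent(
+                'https://example.com/foo-1.0-win_amd64.whl')  # truthy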
+ """ + return self.platform_dependent.search(url) + + def _process_download(self, url): + """ + See if an URL is a suitable download for a project. + + If it is, register information in the result dictionary (for + _get_project) about the specific version it's for. + + Note that the return value isn't actually used other than as a boolean + value. + """ + if self.platform_check and self._is_platform_dependent(url): + info = None + else: + info = self.convert_url_to_download_info(url, self.project_name) + logger.debug('process_download: %s -> %s', url, info) + if info: + with self._lock: # needed because self.result is shared + self._update_version_data(self.result, info) + return info + + def _should_queue(self, link, referrer, rel): + """ + Determine whether a link URL from a referring page and with a + particular "rel" attribute should be queued for scraping. + """ + scheme, netloc, path, _, _, _ = urlparse(link) + if path.endswith(self.source_extensions + self.binary_extensions + + self.excluded_extensions): + result = False + elif self.skip_externals and not link.startswith(self.base_url): + result = False + elif not referrer.startswith(self.base_url): + result = False + elif rel not in ('homepage', 'download'): + result = False + elif scheme not in ('http', 'https', 'ftp'): + result = False + elif self._is_platform_dependent(link): + result = False + else: + host = netloc.split(':', 1)[0] + if host.lower() == 'localhost': + result = False + else: + result = True + logger.debug('should_queue: %s (%s) from %s -> %s', link, rel, + referrer, result) + return result + + def _fetch(self): + """ + Get a URL to fetch from the work queue, get the HTML page, examine its + links for download candidates and candidates for further scraping. + + This is a handy method to run in a thread. + """ + while True: + url = self._to_fetch.get() + try: + if url: + page = self.get_page(url) + if page is None: # e.g. after an error + continue + for link, rel in page.links: + if link not in self._seen: + try: + self._seen.add(link) + if (not self._process_download(link) and + self._should_queue(link, url, rel)): + logger.debug('Queueing %s from %s', link, url) + self._to_fetch.put(link) + except MetadataInvalidError: # e.g. invalid versions + pass + except Exception as e: # pragma: no cover + self.errors.put(text_type(e)) + finally: + # always do this, to avoid hangs :-) + self._to_fetch.task_done() + if not url: + #logger.debug('Sentinel seen, quitting.') + break + + def get_page(self, url): + """ + Get the HTML for an URL, possibly from an in-memory cache. + + XXX TODO Note: this cache is never actually cleared. It's assumed that + the data won't get stale over the lifetime of a locator instance (not + necessarily true for the default_locator). 
+        """
+        # http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api
+        scheme, netloc, path, _, _, _ = urlparse(url)
+        if scheme == 'file' and os.path.isdir(url2pathname(path)):
+            url = urljoin(ensure_slash(url), 'index.html')
+
+        if url in self._page_cache:
+            result = self._page_cache[url]
+            logger.debug('Returning %s from cache: %s', url, result)
+        else:
+            host = netloc.split(':', 1)[0]
+            result = None
+            if host in self._bad_hosts:
+                logger.debug('Skipping %s due to bad host %s', url, host)
+            else:
+                req = Request(url, headers={'Accept-encoding': 'identity'})
+                try:
+                    logger.debug('Fetching %s', url)
+                    resp = self.opener.open(req, timeout=self.timeout)
+                    logger.debug('Fetched %s', url)
+                    headers = resp.info()
+                    content_type = headers.get('Content-Type', '')
+                    if HTML_CONTENT_TYPE.match(content_type):
+                        final_url = resp.geturl()
+                        data = resp.read()
+                        encoding = headers.get('Content-Encoding')
+                        if encoding:
+                            decoder = self.decoders[encoding]   # fail if not found
+                            data = decoder(data)
+                        encoding = 'utf-8'
+                        m = CHARSET.search(content_type)
+                        if m:
+                            encoding = m.group(1)
+                        try:
+                            data = data.decode(encoding)
+                        except UnicodeError:  # pragma: no cover
+                            data = data.decode('latin-1')    # fallback
+                        result = Page(data, final_url)
+                        self._page_cache[final_url] = result
+                except HTTPError as e:
+                    if e.code != 404:
+                        logger.exception('Fetch failed: %s: %s', url, e)
+                except URLError as e:  # pragma: no cover
+                    logger.exception('Fetch failed: %s: %s', url, e)
+                    with self._lock:
+                        self._bad_hosts.add(host)
+                except Exception as e:  # pragma: no cover
+                    logger.exception('Fetch failed: %s: %s', url, e)
+                finally:
+                    self._page_cache[url] = result   # even if None (failure)
+        return result
+
+    _distname_re = re.compile('<a href=[^>]*>([^<]+)<')
+
+    def get_distribution_names(self):
+        """
+        Return all the distribution names known to this locator.
+        """
+        result = set()
+        page = self.get_page(self.base_url)
+        if not page:
+            raise DistlibException('Unable to get %s' % self.base_url)
+        for match in self._distname_re.finditer(page.data):
+            result.add(match.group(1))
+        return result
+
+class DirectoryLocator(Locator):
+    """
+    This class locates distributions in a directory tree.
+    """
+
+    def __init__(self, path, **kwargs):
+        """
+        Initialise an instance.
+        :param path: The root of the directory tree to search.
+        :param kwargs: Passed to the superclass constructor,
+                       except for:
+                       * recursive - if True (the default), subdirectories are
+                         recursed into. If False, only the top-level directory
+                         is searched.
+        """
+        self.recursive = kwargs.pop('recursive', True)
+        super(DirectoryLocator, self).__init__(**kwargs)
+        path = os.path.abspath(path)
+        if not os.path.isdir(path):  # pragma: no cover
+            raise DistlibException('Not a directory: %r' % path)
+        self.base_dir = path
+
+    def should_include(self, filename, parent):
+        """
+        Should a filename be considered as a candidate for a distribution
+        archive? As well as the filename, the directory which contains it
+        is provided, though not used by the current implementation.
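+
+        For example (illustrative)::
+
+            self.should_include('foo-1.0.tar.gz', '/some/dir')   # True
+            self.should_include('notes.txt', '/some/dir')        # False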
+ """ + return filename.endswith(self.downloadable_extensions) + + def _get_project(self, name): + result = {'urls': {}, 'digests': {}} + for root, dirs, files in os.walk(self.base_dir): + for fn in files: + if self.should_include(fn, root): + fn = os.path.join(root, fn) + url = urlunparse(('file', '', + pathname2url(os.path.abspath(fn)), + '', '', '')) + info = self.convert_url_to_download_info(url, name) + if info: + self._update_version_data(result, info) + if not self.recursive: + break + return result + + def get_distribution_names(self): + """ + Return all the distribution names known to this locator. + """ + result = set() + for root, dirs, files in os.walk(self.base_dir): + for fn in files: + if self.should_include(fn, root): + fn = os.path.join(root, fn) + url = urlunparse(('file', '', + pathname2url(os.path.abspath(fn)), + '', '', '')) + info = self.convert_url_to_download_info(url, None) + if info: + result.add(info['name']) + if not self.recursive: + break + return result + +class JSONLocator(Locator): + """ + This locator uses special extended metadata (not available on PyPI) and is + the basis of performant dependency resolution in distlib. Other locators + require archive downloads before dependencies can be determined! As you + might imagine, that can be slow. + """ + def get_distribution_names(self): + """ + Return all the distribution names known to this locator. + """ + raise NotImplementedError('Not available from this locator') + + def _get_project(self, name): + result = {'urls': {}, 'digests': {}} + data = get_project_data(name) + if data: + for info in data.get('files', []): + if info['ptype'] != 'sdist' or info['pyversion'] != 'source': + continue + # We don't store summary in project metadata as it makes + # the data bigger for no benefit during dependency + # resolution + dist = make_dist(data['name'], info['version'], + summary=data.get('summary', + 'Placeholder for summary'), + scheme=self.scheme) + md = dist.metadata + md.source_url = info['url'] + # TODO SHA256 digest + if 'digest' in info and info['digest']: + dist.digest = ('md5', info['digest']) + md.dependencies = info.get('requirements', {}) + dist.exports = info.get('exports', {}) + result[dist.version] = dist + result['urls'].setdefault(dist.version, set()).add(info['url']) + return result + +class DistPathLocator(Locator): + """ + This locator finds installed distributions in a path. It can be useful for + adding to an :class:`AggregatingLocator`. + """ + def __init__(self, distpath, **kwargs): + """ + Initialise an instance. + + :param distpath: A :class:`DistributionPath` instance to search. + """ + super(DistPathLocator, self).__init__(**kwargs) + assert isinstance(distpath, DistributionPath) + self.distpath = distpath + + def _get_project(self, name): + dist = self.distpath.get_distribution(name) + if dist is None: + result = {'urls': {}, 'digests': {}} + else: + result = { + dist.version: dist, + 'urls': {dist.version: set([dist.source_url])}, + 'digests': {dist.version: set([None])} + } + return result + + +class AggregatingLocator(Locator): + """ + This class allows you to chain and/or merge a list of locators. + """ + def __init__(self, *locators, **kwargs): + """ + Initialise an instance. + + :param locators: The list of locators to search. + :param kwargs: Passed to the superclass constructor, + except for: + * merge - if False (the default), the first successful + search from any of the locators is returned. If True, + the results from all locators are merged (this can be + slow). 
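+
+        An illustrative sketch (mirroring the ``default_locator`` defined
+        at the bottom of this module)::
+
+            locator = AggregatingLocator(
+                JSONLocator(),
+                SimpleScrapingLocator('https://pypi.org/simple/',
+                                      timeout=3.0),
+                scheme='legacy')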
+ """ + self.merge = kwargs.pop('merge', False) + self.locators = locators + super(AggregatingLocator, self).__init__(**kwargs) + + def clear_cache(self): + super(AggregatingLocator, self).clear_cache() + for locator in self.locators: + locator.clear_cache() + + def _set_scheme(self, value): + self._scheme = value + for locator in self.locators: + locator.scheme = value + + scheme = property(Locator.scheme.fget, _set_scheme) + + def _get_project(self, name): + result = {} + for locator in self.locators: + d = locator.get_project(name) + if d: + if self.merge: + files = result.get('urls', {}) + digests = result.get('digests', {}) + # next line could overwrite result['urls'], result['digests'] + result.update(d) + df = result.get('urls') + if files and df: + for k, v in files.items(): + if k in df: + df[k] |= v + else: + df[k] = v + dd = result.get('digests') + if digests and dd: + dd.update(digests) + else: + # See issue #18. If any dists are found and we're looking + # for specific constraints, we only return something if + # a match is found. For example, if a DirectoryLocator + # returns just foo (1.0) while we're looking for + # foo (>= 2.0), we'll pretend there was nothing there so + # that subsequent locators can be queried. Otherwise we + # would just return foo (1.0) which would then lead to a + # failure to find foo (>= 2.0), because other locators + # weren't searched. Note that this only matters when + # merge=False. + if self.matcher is None: + found = True + else: + found = False + for k in d: + if self.matcher.match(k): + found = True + break + if found: + result = d + break + return result + + def get_distribution_names(self): + """ + Return all the distribution names known to this locator. + """ + result = set() + for locator in self.locators: + try: + result |= locator.get_distribution_names() + except NotImplementedError: + pass + return result + + +# We use a legacy scheme simply because most of the dists on PyPI use legacy +# versions which don't conform to PEP 440. +default_locator = AggregatingLocator( + # JSONLocator(), # don't use as PEP 426 is withdrawn + SimpleScrapingLocator('https://pypi.org/simple/', + timeout=3.0), + scheme='legacy') + +locate = default_locator.locate + + +class DependencyFinder(object): + """ + Locate dependencies for distributions. + """ + + def __init__(self, locator=None): + """ + Initialise an instance, using the specified locator + to locate distributions. + """ + self.locator = locator or default_locator + self.scheme = get_scheme(self.locator.scheme) + + def add_distribution(self, dist): + """ + Add a distribution to the finder. This will update internal information + about who provides what. + :param dist: The distribution to add. + """ + logger.debug('adding distribution %s', dist) + name = dist.key + self.dists_by_name[name] = dist + self.dists[(name, dist.version)] = dist + for p in dist.provides: + name, version = parse_name_and_version(p) + logger.debug('Add to provided: %s, %s, %s', name, version, dist) + self.provided.setdefault(name, set()).add((version, dist)) + + def remove_distribution(self, dist): + """ + Remove a distribution from the finder. This will update internal + information about who provides what. + :param dist: The distribution to remove. 
+        """
+        logger.debug('removing distribution %s', dist)
+        name = dist.key
+        del self.dists_by_name[name]
+        del self.dists[(name, dist.version)]
+        for p in dist.provides:
+            name, version = parse_name_and_version(p)
+            logger.debug('Remove from provided: %s, %s, %s', name, version, dist)
+            s = self.provided[name]
+            s.remove((version, dist))
+            if not s:
+                del self.provided[name]
+
+    def get_matcher(self, reqt):
+        """
+        Get a version matcher for a requirement.
+        :param reqt: The requirement
+        :type reqt: str
+        :return: A version matcher (an instance of
+                 :class:`distlib.version.Matcher`).
+        """
+        try:
+            matcher = self.scheme.matcher(reqt)
+        except UnsupportedVersionError:  # pragma: no cover
+            # XXX compat-mode if cannot read the version
+            name = reqt.split()[0]
+            matcher = self.scheme.matcher(name)
+        return matcher
+
+    def find_providers(self, reqt):
+        """
+        Find the distributions which can fulfill a requirement.
+
+        :param reqt: The requirement.
+        :type reqt: str
+        :return: A set of distributions which can fulfill the requirement.
+        """
+        matcher = self.get_matcher(reqt)
+        name = matcher.key   # case-insensitive
+        result = set()
+        provided = self.provided
+        if name in provided:
+            for version, provider in provided[name]:
+                try:
+                    match = matcher.match(version)
+                except UnsupportedVersionError:
+                    match = False
+
+                if match:
+                    result.add(provider)
+                    break
+        return result
+
+    def try_to_replace(self, provider, other, problems):
+        """
+        Attempt to replace one provider with another. This is typically used
+        when resolving dependencies from multiple sources, e.g. A requires
+        (B >= 1.0) while C requires (B >= 1.1).
+
+        For successful replacement, ``provider`` must meet all the requirements
+        which ``other`` fulfills.
+
+        :param provider: The provider we are trying to replace with.
+        :param other: The provider we're trying to replace.
+        :param problems: If False is returned, this will contain what
+                         problems prevented replacement. This is currently
+                         a tuple of the literal string 'cantreplace',
+                         ``provider``, ``other`` and the set of requirements
+                         that ``provider`` couldn't fulfill.
+        :return: True if we can replace ``other`` with ``provider``, else
+                 False.
+        """
+        rlist = self.reqts[other]
+        unmatched = set()
+        for s in rlist:
+            matcher = self.get_matcher(s)
+            if not matcher.match(provider.version):
+                unmatched.add(s)
+        if unmatched:
+            # can't replace other with provider
+            problems.add(('cantreplace', provider, other,
+                          frozenset(unmatched)))
+            result = False
+        else:
+            # can replace other with provider
+            self.remove_distribution(other)
+            del self.reqts[other]
+            for s in rlist:
+                self.reqts.setdefault(provider, set()).add(s)
+            self.add_distribution(provider)
+            result = True
+        return result
+
+    def find(self, requirement, meta_extras=None, prereleases=False):
+        """
+        Find a distribution and all distributions it depends on.
+
+        :param requirement: The requirement specifying the distribution to
+                            find, or a Distribution instance.
+        :param meta_extras: A list of meta extras such as :test:, :build: and
+                            so on.
+        :param prereleases: If ``True``, allow pre-release versions to be
+                            returned - otherwise, don't return prereleases
+                            unless they're all that's available.
+
+        Return a set of :class:`Distribution` instances and a set of
+        problems.
+ + The distributions returned should be such that they have the + :attr:`required` attribute set to ``True`` if they were + from the ``requirement`` passed to ``find()``, and they have the + :attr:`build_time_dependency` attribute set to ``True`` unless they + are post-installation dependencies of the ``requirement``. + + The problems should be a tuple consisting of the string + ``'unsatisfied'`` and the requirement which couldn't be satisfied + by any distribution known to the locator. + """ + + self.provided = {} + self.dists = {} + self.dists_by_name = {} + self.reqts = {} + + meta_extras = set(meta_extras or []) + if ':*:' in meta_extras: + meta_extras.remove(':*:') + # :meta: and :run: are implicitly included + meta_extras |= set([':test:', ':build:', ':dev:']) + + if isinstance(requirement, Distribution): + dist = odist = requirement + logger.debug('passed %s as requirement', odist) + else: + dist = odist = self.locator.locate(requirement, + prereleases=prereleases) + if dist is None: + raise DistlibException('Unable to locate %r' % requirement) + logger.debug('located %s', odist) + dist.requested = True + problems = set() + todo = set([dist]) + install_dists = set([odist]) + while todo: + dist = todo.pop() + name = dist.key # case-insensitive + if name not in self.dists_by_name: + self.add_distribution(dist) + else: + #import pdb; pdb.set_trace() + other = self.dists_by_name[name] + if other != dist: + self.try_to_replace(dist, other, problems) + + ireqts = dist.run_requires | dist.meta_requires + sreqts = dist.build_requires + ereqts = set() + if meta_extras and dist in install_dists: + for key in ('test', 'build', 'dev'): + e = ':%s:' % key + if e in meta_extras: + ereqts |= getattr(dist, '%s_requires' % key) + all_reqts = ireqts | sreqts | ereqts + for r in all_reqts: + providers = self.find_providers(r) + if not providers: + logger.debug('No providers found for %r', r) + provider = self.locator.locate(r, prereleases=prereleases) + # If no provider is found and we didn't consider + # prereleases, consider them now. + if provider is None and not prereleases: + provider = self.locator.locate(r, prereleases=True) + if provider is None: + logger.debug('Cannot satisfy %r', r) + problems.add(('unsatisfied', r)) + else: + n, v = provider.key, provider.version + if (n, v) not in self.dists: + todo.add(provider) + providers.add(provider) + if r in ireqts and dist in install_dists: + install_dists.add(provider) + logger.debug('Adding %s to install_dists', + provider.name_and_version) + for p in providers: + name = p.key + if name not in self.dists_by_name: + self.reqts.setdefault(p, set()).add(r) + else: + other = self.dists_by_name[name] + if other != p: + # see if other can be replaced by p + self.try_to_replace(p, other, problems) + + dists = set(self.dists.values()) + for dist in dists: + dist.build_time_dependency = dist not in install_dists + if dist.build_time_dependency: + logger.debug('%s is a build-time dependency only.', + dist.name_and_version) + logger.debug('find done for %s', odist) + return dists, problems diff --git a/venv/lib/python3.10/site-packages/distlib/manifest.py b/venv/lib/python3.10/site-packages/distlib/manifest.py new file mode 100644 index 0000000..18beba3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/distlib/manifest.py @@ -0,0 +1,394 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2012-2013 Python Software Foundation. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +""" +Class representing the list of files in a distribution. 
+ +Equivalent to distutils.filelist, but fixes some problems. +""" +import fnmatch +import logging +import os +import re +import sys + +from . import DistlibException +from .compat import fsdecode +from .util import convert_path + + +__all__ = ['Manifest'] + +logger = logging.getLogger(__name__) + +# a \ followed by some spaces + EOL +_COLLAPSE_PATTERN = re.compile('\\\\w*\n', re.M) +_COMMENTED_LINE = re.compile('#.*?(?=\n)|\n(?=$)', re.M | re.S) + +# +# Due to the different results returned by fnmatch.translate, we need +# to do slightly different processing for Python 2.7 and 3.2 ... this needed +# to be brought in for Python 3.6 onwards. +# +_PYTHON_VERSION = sys.version_info[:2] + +class Manifest(object): + """ + A list of files built by exploring the filesystem and filtered by applying various + patterns to what we find there. + """ + + def __init__(self, base=None): + """ + Initialise an instance. + + :param base: The base directory to explore under. + """ + self.base = os.path.abspath(os.path.normpath(base or os.getcwd())) + self.prefix = self.base + os.sep + self.allfiles = None + self.files = set() + + # + # Public API + # + + def findall(self): + """Find all files under the base and set ``allfiles`` to the absolute + pathnames of files found. + """ + from stat import S_ISREG, S_ISDIR, S_ISLNK + + self.allfiles = allfiles = [] + root = self.base + stack = [root] + pop = stack.pop + push = stack.append + + while stack: + root = pop() + names = os.listdir(root) + + for name in names: + fullname = os.path.join(root, name) + + # Avoid excess stat calls -- just one will do, thank you! + stat = os.stat(fullname) + mode = stat.st_mode + if S_ISREG(mode): + allfiles.append(fsdecode(fullname)) + elif S_ISDIR(mode) and not S_ISLNK(mode): + push(fullname) + + def add(self, item): + """ + Add a file to the manifest. + + :param item: The pathname to add. This can be relative to the base. + """ + if not item.startswith(self.prefix): + item = os.path.join(self.base, item) + self.files.add(os.path.normpath(item)) + + def add_many(self, items): + """ + Add a list of files to the manifest. + + :param items: The pathnames to add. These can be relative to the base. + """ + for item in items: + self.add(item) + + def sorted(self, wantdirs=False): + """ + Return sorted files in directory order + """ + + def add_dir(dirs, d): + dirs.add(d) + logger.debug('add_dir added %s', d) + if d != self.base: + parent, _ = os.path.split(d) + assert parent not in ('', '/') + add_dir(dirs, parent) + + result = set(self.files) # make a copy! + if wantdirs: + dirs = set() + for f in result: + add_dir(dirs, os.path.dirname(f)) + result |= dirs + return [os.path.join(*path_tuple) for path_tuple in + sorted(os.path.split(path) for path in result)] + + def clear(self): + """Clear all collected files.""" + self.files = set() + self.allfiles = [] + + def process_directive(self, directive): + """ + Process a directive which either adds some files from ``allfiles`` to + ``files``, or removes some files from ``files``. + + :param directive: The directive to process. This should be in a format + compatible with distutils ``MANIFEST.in`` files: + + http://docs.python.org/distutils/sourcedist.html#commands + """ + # Parse the line: split it up, make sure the right number of words + # is there, and return the relevant words. 'action' is always + # defined: it's the first word of the line. Which of the other + # three are defined depends on the action; it'll be either + # patterns, (dir and patterns), or (dirpattern). 
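+        # Illustrative directive lines (examples only, not from the
+        # original source):
+        #   include *.txt *.rst
+        #   recursive-include examples *.py
+        #   prune build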
+        action, patterns, thedir, dirpattern = self._parse_directive(directive)
+
+        # OK, now we know that the action is valid and we have the
+        # right number of words on the line for that action -- so we
+        # can proceed with minimal error-checking.
+        if action == 'include':
+            for pattern in patterns:
+                if not self._include_pattern(pattern, anchor=True):
+                    logger.warning('no files found matching %r', pattern)
+
+        elif action == 'exclude':
+            for pattern in patterns:
+                found = self._exclude_pattern(pattern, anchor=True)
+                #if not found:
+                #    logger.warning('no previously-included files '
+                #                   'found matching %r', pattern)
+
+        elif action == 'global-include':
+            for pattern in patterns:
+                if not self._include_pattern(pattern, anchor=False):
+                    logger.warning('no files found matching %r '
+                                   'anywhere in distribution', pattern)
+
+        elif action == 'global-exclude':
+            for pattern in patterns:
+                found = self._exclude_pattern(pattern, anchor=False)
+                #if not found:
+                #    logger.warning('no previously-included files '
+                #                   'matching %r found anywhere in '
+                #                   'distribution', pattern)
+
+        elif action == 'recursive-include':
+            for pattern in patterns:
+                if not self._include_pattern(pattern, prefix=thedir):
+                    logger.warning('no files found matching %r '
+                                   'under directory %r', pattern, thedir)
+
+        elif action == 'recursive-exclude':
+            for pattern in patterns:
+                found = self._exclude_pattern(pattern, prefix=thedir)
+                #if not found:
+                #    logger.warning('no previously-included files '
+                #                   'matching %r found under directory %r',
+                #                   pattern, thedir)
+
+        elif action == 'graft':
+            if not self._include_pattern(None, prefix=dirpattern):
+                logger.warning('no directories found matching %r',
+                               dirpattern)
+
+        elif action == 'prune':
+            if not self._exclude_pattern(None, prefix=dirpattern):
+                logger.warning('no previously-included directories found '
+                               'matching %r', dirpattern)
+        else:   # pragma: no cover
+            # This should never happen, as it should be caught in
+            # _parse_template_line
+            raise DistlibException(
+                'invalid action %r' % action)
+
+    #
+    # Private API
+    #
+
+    def _parse_directive(self, directive):
+        """
+        Validate a directive.
+        :param directive: The directive to validate.
+        :return: A tuple of action, patterns, thedir, dir_patterns
+        """
+        words = directive.split()
+        if len(words) == 1 and words[0] not in ('include', 'exclude',
+                                                'global-include',
+                                                'global-exclude',
+                                                'recursive-include',
+                                                'recursive-exclude',
+                                                'graft', 'prune'):
+            # no action given, let's use the default 'include'
+            words.insert(0, 'include')
+
+        action = words[0]
+        patterns = thedir = dir_pattern = None
+
+        if action in ('include', 'exclude',
+                      'global-include', 'global-exclude'):
+            if len(words) < 2:
+                raise DistlibException(
+                    '%r expects <pattern1> <pattern2> ...' % action)
+
+            patterns = [convert_path(word) for word in words[1:]]
+
+        elif action in ('recursive-include', 'recursive-exclude'):
+            if len(words) < 3:
+                raise DistlibException(
+                    '%r expects <dir> <pattern1> <pattern2> ...' % action)
+
+            thedir = convert_path(words[1])
+            patterns = [convert_path(word) for word in words[2:]]
+
+        elif action in ('graft', 'prune'):
+            if len(words) != 2:
+                raise DistlibException(
+                    '%r expects a single <dir_pattern>' % action)
+
+            dir_pattern = convert_path(words[1])
+
+        else:
+            raise DistlibException('unknown action %r' % action)
+
+        return action, patterns, thedir, dir_pattern
+
+    def _include_pattern(self, pattern, anchor=True, prefix=None,
+                         is_regex=False):
+        """Select strings (presumably filenames) from 'self.files' that
+        match 'pattern', a Unix-style wildcard (glob) pattern.
+ + Patterns are not quite the same as implemented by the 'fnmatch' + module: '*' and '?' match non-special characters, where "special" + is platform-dependent: slash on Unix; colon, slash, and backslash on + DOS/Windows; and colon on Mac OS. + + If 'anchor' is true (the default), then the pattern match is more + stringent: "*.py" will match "foo.py" but not "foo/bar.py". If + 'anchor' is false, both of these will match. + + If 'prefix' is supplied, then only filenames starting with 'prefix' + (itself a pattern) and ending with 'pattern', with anything in between + them, will match. 'anchor' is ignored in this case. + + If 'is_regex' is true, 'anchor' and 'prefix' are ignored, and + 'pattern' is assumed to be either a string containing a regex or a + regex object -- no translation is done, the regex is just compiled + and used as-is. + + Selected strings will be added to self.files. + + Return True if files are found. + """ + # XXX docstring lying about what the special chars are? + found = False + pattern_re = self._translate_pattern(pattern, anchor, prefix, is_regex) + + # delayed loading of allfiles list + if self.allfiles is None: + self.findall() + + for name in self.allfiles: + if pattern_re.search(name): + self.files.add(name) + found = True + return found + + def _exclude_pattern(self, pattern, anchor=True, prefix=None, + is_regex=False): + """Remove strings (presumably filenames) from 'files' that match + 'pattern'. + + Other parameters are the same as for 'include_pattern()', above. + The list 'self.files' is modified in place. Return True if files are + found. + + This API is public to allow e.g. exclusion of SCM subdirs, e.g. when + packaging source distributions + """ + found = False + pattern_re = self._translate_pattern(pattern, anchor, prefix, is_regex) + for f in list(self.files): + if pattern_re.search(f): + self.files.remove(f) + found = True + return found + + def _translate_pattern(self, pattern, anchor=True, prefix=None, + is_regex=False): + """Translate a shell-like wildcard pattern to a compiled regular + expression. + + Return the compiled regex. If 'is_regex' true, + then 'pattern' is directly compiled to a regex (if it's a string) + or just returned as-is (assumes it's a regex object). 
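+
+        For example (illustrative)::
+
+            self._translate_pattern('*.py', anchor=True)
+            # -> a regex matching 'BASE/foo.py' but not 'BASE/pkg/foo.py',
+            #    where BASE is self.base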
+ """ + if is_regex: + if isinstance(pattern, str): + return re.compile(pattern) + else: + return pattern + + if _PYTHON_VERSION > (3, 2): + # ditch start and end characters + start, _, end = self._glob_to_re('_').partition('_') + + if pattern: + pattern_re = self._glob_to_re(pattern) + if _PYTHON_VERSION > (3, 2): + assert pattern_re.startswith(start) and pattern_re.endswith(end) + else: + pattern_re = '' + + base = re.escape(os.path.join(self.base, '')) + if prefix is not None: + # ditch end of pattern character + if _PYTHON_VERSION <= (3, 2): + empty_pattern = self._glob_to_re('') + prefix_re = self._glob_to_re(prefix)[:-len(empty_pattern)] + else: + prefix_re = self._glob_to_re(prefix) + assert prefix_re.startswith(start) and prefix_re.endswith(end) + prefix_re = prefix_re[len(start): len(prefix_re) - len(end)] + sep = os.sep + if os.sep == '\\': + sep = r'\\' + if _PYTHON_VERSION <= (3, 2): + pattern_re = '^' + base + sep.join((prefix_re, + '.*' + pattern_re)) + else: + pattern_re = pattern_re[len(start): len(pattern_re) - len(end)] + pattern_re = r'%s%s%s%s.*%s%s' % (start, base, prefix_re, sep, + pattern_re, end) + else: # no prefix -- respect anchor flag + if anchor: + if _PYTHON_VERSION <= (3, 2): + pattern_re = '^' + base + pattern_re + else: + pattern_re = r'%s%s%s' % (start, base, pattern_re[len(start):]) + + return re.compile(pattern_re) + + def _glob_to_re(self, pattern): + """Translate a shell-like glob pattern to a regular expression. + + Return a string containing the regex. Differs from + 'fnmatch.translate()' in that '*' does not match "special characters" + (which are platform-specific). + """ + pattern_re = fnmatch.translate(pattern) + + # '?' and '*' in the glob pattern become '.' and '.*' in the RE, which + # IMHO is wrong -- '?' and '*' aren't supposed to match slash in Unix, + # and by extension they shouldn't match such "special characters" under + # any OS. So change all non-escaped dots in the RE to match any + # character except the special characters (currently: just os.sep). + sep = os.sep + if os.sep == '\\': + # we're using a regex to manipulate a regex, so we need + # to escape the backslash twice + sep = r'\\\\' + escaped = r'\1[^%s]' % sep + pattern_re = re.sub(r'((? y, + '!=': lambda x, y: x != y, + '<': lambda x, y: x < y, + '<=': lambda x, y: x == y or x < y, + '>': lambda x, y: x > y, + '>=': lambda x, y: x == y or x > y, + 'and': lambda x, y: x and y, + 'or': lambda x, y: x or y, + 'in': lambda x, y: x in y, + 'not in': lambda x, y: x not in y, + } + + def evaluate(self, expr, context): + """ + Evaluate a marker expression returned by the :func:`parse_requirement` + function in the specified context. 
+ """ + if isinstance(expr, string_types): + if expr[0] in '\'"': + result = expr[1:-1] + else: + if expr not in context: + raise SyntaxError('unknown variable: %s' % expr) + result = context[expr] + else: + assert isinstance(expr, dict) + op = expr['op'] + if op not in self.operations: + raise NotImplementedError('op not implemented: %s' % op) + elhs = expr['lhs'] + erhs = expr['rhs'] + if _is_literal(expr['lhs']) and _is_literal(expr['rhs']): + raise SyntaxError('invalid comparison: %s %s %s' % (elhs, op, erhs)) + + lhs = self.evaluate(elhs, context) + rhs = self.evaluate(erhs, context) + if ((_is_version_marker(elhs) or _is_version_marker(erhs)) and + op in ('<', '<=', '>', '>=', '===', '==', '!=', '~=')): + lhs = NV(lhs) + rhs = NV(rhs) + elif _is_version_marker(elhs) and op in ('in', 'not in'): + lhs = NV(lhs) + rhs = _get_versions(rhs) + result = self.operations[op](lhs, rhs) + return result + +_DIGITS = re.compile(r'\d+\.\d+') + +def default_context(): + def format_full_version(info): + version = '%s.%s.%s' % (info.major, info.minor, info.micro) + kind = info.releaselevel + if kind != 'final': + version += kind[0] + str(info.serial) + return version + + if hasattr(sys, 'implementation'): + implementation_version = format_full_version(sys.implementation.version) + implementation_name = sys.implementation.name + else: + implementation_version = '0' + implementation_name = '' + + ppv = platform.python_version() + m = _DIGITS.match(ppv) + pv = m.group(0) + result = { + 'implementation_name': implementation_name, + 'implementation_version': implementation_version, + 'os_name': os.name, + 'platform_machine': platform.machine(), + 'platform_python_implementation': platform.python_implementation(), + 'platform_release': platform.release(), + 'platform_system': platform.system(), + 'platform_version': platform.version(), + 'platform_in_venv': str(in_venv()), + 'python_full_version': ppv, + 'python_version': pv, + 'sys_platform': sys.platform, + } + return result + +DEFAULT_CONTEXT = default_context() +del default_context + +evaluator = Evaluator() + +def interpret(marker, execution_context=None): + """ + Interpret a marker and return a result depending on environment. + + :param marker: The marker to interpret. + :type marker: str + :param execution_context: The context used for name lookup. + :type execution_context: mapping + """ + try: + expr, rest = parse_marker(marker) + except Exception as e: + raise SyntaxError('Unable to interpret marker syntax: %s: %s' % (marker, e)) + if rest and rest[0] != '#': + raise SyntaxError('unexpected trailing data in marker: %s: %s' % (marker, rest)) + context = dict(DEFAULT_CONTEXT) + if execution_context: + context.update(execution_context) + return evaluator.evaluate(expr, context) diff --git a/venv/lib/python3.10/site-packages/distlib/metadata.py b/venv/lib/python3.10/site-packages/distlib/metadata.py new file mode 100644 index 0000000..7189aee --- /dev/null +++ b/venv/lib/python3.10/site-packages/distlib/metadata.py @@ -0,0 +1,1068 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2012 The Python Software Foundation. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +"""Implementation of the Metadata for Python packages PEPs. + +Supports all metadata formats (1.0, 1.1, 1.2, 1.3/2.1 and 2.2). +""" +from __future__ import unicode_literals + +import codecs +from email import message_from_file +import json +import logging +import re + + +from . 
import DistlibException, __version__ +from .compat import StringIO, string_types, text_type +from .markers import interpret +from .util import extract_by_key, get_extras +from .version import get_scheme, PEP440_VERSION_RE + +logger = logging.getLogger(__name__) + + +class MetadataMissingError(DistlibException): + """A required metadata is missing""" + + +class MetadataConflictError(DistlibException): + """Attempt to read or write metadata fields that are conflictual.""" + + +class MetadataUnrecognizedVersionError(DistlibException): + """Unknown metadata version number.""" + + +class MetadataInvalidError(DistlibException): + """A metadata value is invalid""" + +# public API of this module +__all__ = ['Metadata', 'PKG_INFO_ENCODING', 'PKG_INFO_PREFERRED_VERSION'] + +# Encoding used for the PKG-INFO files +PKG_INFO_ENCODING = 'utf-8' + +# preferred version. Hopefully will be changed +# to 1.2 once PEP 345 is supported everywhere +PKG_INFO_PREFERRED_VERSION = '1.1' + +_LINE_PREFIX_1_2 = re.compile('\n \\|') +_LINE_PREFIX_PRE_1_2 = re.compile('\n ') +_241_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform', + 'Summary', 'Description', + 'Keywords', 'Home-page', 'Author', 'Author-email', + 'License') + +_314_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform', + 'Supported-Platform', 'Summary', 'Description', + 'Keywords', 'Home-page', 'Author', 'Author-email', + 'License', 'Classifier', 'Download-URL', 'Obsoletes', + 'Provides', 'Requires') + +_314_MARKERS = ('Obsoletes', 'Provides', 'Requires', 'Classifier', + 'Download-URL') + +_345_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform', + 'Supported-Platform', 'Summary', 'Description', + 'Keywords', 'Home-page', 'Author', 'Author-email', + 'Maintainer', 'Maintainer-email', 'License', + 'Classifier', 'Download-URL', 'Obsoletes-Dist', + 'Project-URL', 'Provides-Dist', 'Requires-Dist', + 'Requires-Python', 'Requires-External') + +_345_MARKERS = ('Provides-Dist', 'Requires-Dist', 'Requires-Python', + 'Obsoletes-Dist', 'Requires-External', 'Maintainer', + 'Maintainer-email', 'Project-URL') + +_426_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform', + 'Supported-Platform', 'Summary', 'Description', + 'Keywords', 'Home-page', 'Author', 'Author-email', + 'Maintainer', 'Maintainer-email', 'License', + 'Classifier', 'Download-URL', 'Obsoletes-Dist', + 'Project-URL', 'Provides-Dist', 'Requires-Dist', + 'Requires-Python', 'Requires-External', 'Private-Version', + 'Obsoleted-By', 'Setup-Requires-Dist', 'Extension', + 'Provides-Extra') + +_426_MARKERS = ('Private-Version', 'Provides-Extra', 'Obsoleted-By', + 'Setup-Requires-Dist', 'Extension') + +# See issue #106: Sometimes 'Requires' and 'Provides' occur wrongly in +# the metadata. Include them in the tuple literal below to allow them +# (for now). +# Ditto for Obsoletes - see issue #140. 
+_566_FIELDS = _426_FIELDS + ('Description-Content-Type', + 'Requires', 'Provides', 'Obsoletes') + +_566_MARKERS = ('Description-Content-Type',) + +_643_MARKERS = ('Dynamic', 'License-File') + +_643_FIELDS = _566_FIELDS + _643_MARKERS + +_ALL_FIELDS = set() +_ALL_FIELDS.update(_241_FIELDS) +_ALL_FIELDS.update(_314_FIELDS) +_ALL_FIELDS.update(_345_FIELDS) +_ALL_FIELDS.update(_426_FIELDS) +_ALL_FIELDS.update(_566_FIELDS) +_ALL_FIELDS.update(_643_FIELDS) + +EXTRA_RE = re.compile(r'''extra\s*==\s*("([^"]+)"|'([^']+)')''') + + +def _version2fieldlist(version): + if version == '1.0': + return _241_FIELDS + elif version == '1.1': + return _314_FIELDS + elif version == '1.2': + return _345_FIELDS + elif version in ('1.3', '2.1'): + # avoid adding field names if already there + return _345_FIELDS + tuple(f for f in _566_FIELDS if f not in _345_FIELDS) + elif version == '2.0': + raise ValueError('Metadata 2.0 is withdrawn and not supported') + # return _426_FIELDS + elif version == '2.2': + return _643_FIELDS + raise MetadataUnrecognizedVersionError(version) + + +def _best_version(fields): + """Detect the best version depending on the fields used.""" + def _has_marker(keys, markers): + return any(marker in keys for marker in markers) + + keys = [key for key, value in fields.items() if value not in ([], 'UNKNOWN', None)] + possible_versions = ['1.0', '1.1', '1.2', '1.3', '2.1', '2.2'] # 2.0 removed + + # first let's try to see if a field is not part of one of the version + for key in keys: + if key not in _241_FIELDS and '1.0' in possible_versions: + possible_versions.remove('1.0') + logger.debug('Removed 1.0 due to %s', key) + if key not in _314_FIELDS and '1.1' in possible_versions: + possible_versions.remove('1.1') + logger.debug('Removed 1.1 due to %s', key) + if key not in _345_FIELDS and '1.2' in possible_versions: + possible_versions.remove('1.2') + logger.debug('Removed 1.2 due to %s', key) + if key not in _566_FIELDS and '1.3' in possible_versions: + possible_versions.remove('1.3') + logger.debug('Removed 1.3 due to %s', key) + if key not in _566_FIELDS and '2.1' in possible_versions: + if key != 'Description': # In 2.1, description allowed after headers + possible_versions.remove('2.1') + logger.debug('Removed 2.1 due to %s', key) + if key not in _643_FIELDS and '2.2' in possible_versions: + possible_versions.remove('2.2') + logger.debug('Removed 2.2 due to %s', key) + # if key not in _426_FIELDS and '2.0' in possible_versions: + # possible_versions.remove('2.0') + # logger.debug('Removed 2.0 due to %s', key) + + # possible_version contains qualified versions + if len(possible_versions) == 1: + return possible_versions[0] # found ! 
+ elif len(possible_versions) == 0: + logger.debug('Out of options - unknown metadata set: %s', fields) + raise MetadataConflictError('Unknown metadata set') + + # let's see if one unique marker is found + is_1_1 = '1.1' in possible_versions and _has_marker(keys, _314_MARKERS) + is_1_2 = '1.2' in possible_versions and _has_marker(keys, _345_MARKERS) + is_2_1 = '2.1' in possible_versions and _has_marker(keys, _566_MARKERS) + # is_2_0 = '2.0' in possible_versions and _has_marker(keys, _426_MARKERS) + is_2_2 = '2.2' in possible_versions and _has_marker(keys, _643_MARKERS) + if int(is_1_1) + int(is_1_2) + int(is_2_1) + int(is_2_2) > 1: + raise MetadataConflictError('You used incompatible 1.1/1.2/2.1/2.2 fields') + + # we have the choice, 1.0, or 1.2, 2.1 or 2.2 + # - 1.0 has a broken Summary field but works with all tools + # - 1.1 is to avoid + # - 1.2 fixes Summary but has little adoption + # - 2.1 adds more features + # - 2.2 is the latest + if not is_1_1 and not is_1_2 and not is_2_1 and not is_2_2: + # we couldn't find any specific marker + if PKG_INFO_PREFERRED_VERSION in possible_versions: + return PKG_INFO_PREFERRED_VERSION + if is_1_1: + return '1.1' + if is_1_2: + return '1.2' + if is_2_1: + return '2.1' + # if is_2_2: + # return '2.2' + + return '2.2' + +# This follows the rules about transforming keys as described in +# https://www.python.org/dev/peps/pep-0566/#id17 +_ATTR2FIELD = { + name.lower().replace("-", "_"): name for name in _ALL_FIELDS +} +_FIELD2ATTR = {field: attr for attr, field in _ATTR2FIELD.items()} + +_PREDICATE_FIELDS = ('Requires-Dist', 'Obsoletes-Dist', 'Provides-Dist') +_VERSIONS_FIELDS = ('Requires-Python',) +_VERSION_FIELDS = ('Version',) +_LISTFIELDS = ('Platform', 'Classifier', 'Obsoletes', + 'Requires', 'Provides', 'Obsoletes-Dist', + 'Provides-Dist', 'Requires-Dist', 'Requires-External', + 'Project-URL', 'Supported-Platform', 'Setup-Requires-Dist', + 'Provides-Extra', 'Extension', 'License-File') +_LISTTUPLEFIELDS = ('Project-URL',) + +_ELEMENTSFIELD = ('Keywords',) + +_UNICODEFIELDS = ('Author', 'Maintainer', 'Summary', 'Description') + +_MISSING = object() + +_FILESAFE = re.compile('[^A-Za-z0-9.]+') + + +def _get_name_and_version(name, version, for_filename=False): + """Return the distribution name with version. + + If for_filename is true, return a filename-escaped form.""" + if for_filename: + # For both name and version any runs of non-alphanumeric or '.' + # characters are replaced with a single '-'. Additionally any + # spaces in the version string become '.' + name = _FILESAFE.sub('-', name) + version = _FILESAFE.sub('-', version.replace(' ', '.')) + return '%s-%s' % (name, version) + + +class LegacyMetadata(object): + """The legacy metadata of a release. + + Supports versions 1.0, 1.1, 1.2, 2.0 and 1.3/2.1 (auto-detected). 
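The escaping rule of _get_name_and_version can be seen with a quick sketch; the regex is copied from the _FILESAFE pattern above, reimplemented here for illustration only.

import re

_FILESAFE = re.compile('[^A-Za-z0-9.]+')

name = _FILESAFE.sub('-', 'my project')
version = _FILESAFE.sub('-', '1.0 beta 2'.replace(' ', '.'))
print('%s-%s' % (name, version))   # my-project-1.0.beta.2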
You can + instantiate the class with one of these arguments (or none): + - *path*, the path to a metadata file + - *fileobj* give a file-like object with metadata as content + - *mapping* is a dict-like object + - *scheme* is a version scheme name + """ + # TODO document the mapping API and UNKNOWN default key + + def __init__(self, path=None, fileobj=None, mapping=None, + scheme='default'): + if [path, fileobj, mapping].count(None) < 2: + raise TypeError('path, fileobj and mapping are exclusive') + self._fields = {} + self.requires_files = [] + self._dependencies = None + self.scheme = scheme + if path is not None: + self.read(path) + elif fileobj is not None: + self.read_file(fileobj) + elif mapping is not None: + self.update(mapping) + self.set_metadata_version() + + def set_metadata_version(self): + self._fields['Metadata-Version'] = _best_version(self._fields) + + def _write_field(self, fileobj, name, value): + fileobj.write('%s: %s\n' % (name, value)) + + def __getitem__(self, name): + return self.get(name) + + def __setitem__(self, name, value): + return self.set(name, value) + + def __delitem__(self, name): + field_name = self._convert_name(name) + try: + del self._fields[field_name] + except KeyError: + raise KeyError(name) + + def __contains__(self, name): + return (name in self._fields or + self._convert_name(name) in self._fields) + + def _convert_name(self, name): + if name in _ALL_FIELDS: + return name + name = name.replace('-', '_').lower() + return _ATTR2FIELD.get(name, name) + + def _default_value(self, name): + if name in _LISTFIELDS or name in _ELEMENTSFIELD: + return [] + return 'UNKNOWN' + + def _remove_line_prefix(self, value): + if self.metadata_version in ('1.0', '1.1'): + return _LINE_PREFIX_PRE_1_2.sub('\n', value) + else: + return _LINE_PREFIX_1_2.sub('\n', value) + + def __getattr__(self, name): + if name in _ATTR2FIELD: + return self[name] + raise AttributeError(name) + + # + # Public API + # + +# dependencies = property(_get_dependencies, _set_dependencies) + + def get_fullname(self, filesafe=False): + """Return the distribution name with version. 
+ + If filesafe is true, return a filename-escaped form.""" + return _get_name_and_version(self['Name'], self['Version'], filesafe) + + def is_field(self, name): + """return True if name is a valid metadata key""" + name = self._convert_name(name) + return name in _ALL_FIELDS + + def is_multi_field(self, name): + name = self._convert_name(name) + return name in _LISTFIELDS + + def read(self, filepath): + """Read the metadata values from a file path.""" + fp = codecs.open(filepath, 'r', encoding='utf-8') + try: + self.read_file(fp) + finally: + fp.close() + + def read_file(self, fileob): + """Read the metadata values from a file object.""" + msg = message_from_file(fileob) + self._fields['Metadata-Version'] = msg['metadata-version'] + + # When reading, get all the fields we can + for field in _ALL_FIELDS: + if field not in msg: + continue + if field in _LISTFIELDS: + # we can have multiple lines + values = msg.get_all(field) + if field in _LISTTUPLEFIELDS and values is not None: + values = [tuple(value.split(',')) for value in values] + self.set(field, values) + else: + # single line + value = msg[field] + if value is not None and value != 'UNKNOWN': + self.set(field, value) + + # PEP 566 specifies that the body be used for the description, if + # available + body = msg.get_payload() + self["Description"] = body if body else self["Description"] + # logger.debug('Attempting to set metadata for %s', self) + # self.set_metadata_version() + + def write(self, filepath, skip_unknown=False): + """Write the metadata fields to filepath.""" + fp = codecs.open(filepath, 'w', encoding='utf-8') + try: + self.write_file(fp, skip_unknown) + finally: + fp.close() + + def write_file(self, fileobject, skip_unknown=False): + """Write the PKG-INFO format data to a file object.""" + self.set_metadata_version() + + for field in _version2fieldlist(self['Metadata-Version']): + values = self.get(field) + if skip_unknown and values in ('UNKNOWN', [], ['UNKNOWN']): + continue + if field in _ELEMENTSFIELD: + self._write_field(fileobject, field, ','.join(values)) + continue + if field not in _LISTFIELDS: + if field == 'Description': + if self.metadata_version in ('1.0', '1.1'): + values = values.replace('\n', '\n ') + else: + values = values.replace('\n', '\n |') + values = [values] + + if field in _LISTTUPLEFIELDS: + values = [','.join(value) for value in values] + + for value in values: + self._write_field(fileobject, field, value) + + def update(self, other=None, **kwargs): + """Set metadata values from the given iterable `other` and kwargs. + + Behavior is like `dict.update`: If `other` has a ``keys`` method, + they are looped over and ``self[key]`` is assigned ``other[key]``. + Else, ``other`` is an iterable of ``(key, value)`` iterables. + + Keys that don't match a metadata field or that have an empty value are + dropped. 
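A hedged round-trip sketch for the read/write API above. Note that the mapping keys use the attribute-style names accepted by update(), and the detected metadata version is an example, not a guarantee.

from distlib.metadata import LegacyMetadata

md = LegacyMetadata(mapping={'name': 'demo', 'version': '0.1',
                             'summary': 'A demo package'})
print(md['Metadata-Version'])   # chosen by _best_version, e.g. '1.1'
md.write('PKG-INFO')            # the key: value format produced by write_file()
md2 = LegacyMetadata(path='PKG-INFO')
print(md2['Name'], md2['Version'])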
+ """ + def _set(key, value): + if key in _ATTR2FIELD and value: + self.set(self._convert_name(key), value) + + if not other: + # other is None or empty container + pass + elif hasattr(other, 'keys'): + for k in other.keys(): + _set(k, other[k]) + else: + for k, v in other: + _set(k, v) + + if kwargs: + for k, v in kwargs.items(): + _set(k, v) + + def set(self, name, value): + """Control then set a metadata field.""" + name = self._convert_name(name) + + if ((name in _ELEMENTSFIELD or name == 'Platform') and + not isinstance(value, (list, tuple))): + if isinstance(value, string_types): + value = [v.strip() for v in value.split(',')] + else: + value = [] + elif (name in _LISTFIELDS and + not isinstance(value, (list, tuple))): + if isinstance(value, string_types): + value = [value] + else: + value = [] + + if logger.isEnabledFor(logging.WARNING): + project_name = self['Name'] + + scheme = get_scheme(self.scheme) + if name in _PREDICATE_FIELDS and value is not None: + for v in value: + # check that the values are valid + if not scheme.is_valid_matcher(v.split(';')[0]): + logger.warning( + "'%s': '%s' is not valid (field '%s')", + project_name, v, name) + # FIXME this rejects UNKNOWN, is that right? + elif name in _VERSIONS_FIELDS and value is not None: + if not scheme.is_valid_constraint_list(value): + logger.warning("'%s': '%s' is not a valid version (field '%s')", + project_name, value, name) + elif name in _VERSION_FIELDS and value is not None: + if not scheme.is_valid_version(value): + logger.warning("'%s': '%s' is not a valid version (field '%s')", + project_name, value, name) + + if name in _UNICODEFIELDS: + if name == 'Description': + value = self._remove_line_prefix(value) + + self._fields[name] = value + + def get(self, name, default=_MISSING): + """Get a metadata field.""" + name = self._convert_name(name) + if name not in self._fields: + if default is _MISSING: + default = self._default_value(name) + return default + if name in _UNICODEFIELDS: + value = self._fields[name] + return value + elif name in _LISTFIELDS: + value = self._fields[name] + if value is None: + return [] + res = [] + for val in value: + if name not in _LISTTUPLEFIELDS: + res.append(val) + else: + # That's for Project-URL + res.append((val[0], val[1])) + return res + + elif name in _ELEMENTSFIELD: + value = self._fields[name] + if isinstance(value, string_types): + return value.split(',') + return self._fields[name] + + def check(self, strict=False): + """Check if the metadata is compliant. 
If strict is True then raise if + no Name or Version are provided""" + self.set_metadata_version() + + # XXX should check the versions (if the file was loaded) + missing, warnings = [], [] + + for attr in ('Name', 'Version'): # required by PEP 345 + if attr not in self: + missing.append(attr) + + if strict and missing != []: + msg = 'missing required metadata: %s' % ', '.join(missing) + raise MetadataMissingError(msg) + + for attr in ('Home-page', 'Author'): + if attr not in self: + missing.append(attr) + + # checking metadata 1.2 (XXX needs to check 1.1, 1.0) + if self['Metadata-Version'] != '1.2': + return missing, warnings + + scheme = get_scheme(self.scheme) + + def are_valid_constraints(value): + for v in value: + if not scheme.is_valid_matcher(v.split(';')[0]): + return False + return True + + for fields, controller in ((_PREDICATE_FIELDS, are_valid_constraints), + (_VERSIONS_FIELDS, + scheme.is_valid_constraint_list), + (_VERSION_FIELDS, + scheme.is_valid_version)): + for field in fields: + value = self.get(field, None) + if value is not None and not controller(value): + warnings.append("Wrong value for '%s': %s" % (field, value)) + + return missing, warnings + + def todict(self, skip_missing=False): + """Return fields as a dict. + + Field names will be converted to use the underscore-lowercase style + instead of hyphen-mixed case (i.e. home_page instead of Home-page). + This is as per https://www.python.org/dev/peps/pep-0566/#id17. + """ + self.set_metadata_version() + + fields = _version2fieldlist(self['Metadata-Version']) + + data = {} + + for field_name in fields: + if not skip_missing or field_name in self._fields: + key = _FIELD2ATTR[field_name] + if key != 'project_url': + data[key] = self[field_name] + else: + data[key] = [','.join(u) for u in self[field_name]] + + return data + + def add_requirements(self, requirements): + if self['Metadata-Version'] == '1.1': + # we can't have 1.1 metadata *and* Setuptools requires + for field in ('Obsoletes', 'Requires', 'Provides'): + if field in self: + del self[field] + self['Requires-Dist'] += requirements + + # Mapping API + # TODO could add iter* variants + + def keys(self): + return list(_version2fieldlist(self['Metadata-Version'])) + + def __iter__(self): + for key in self.keys(): + yield key + + def values(self): + return [self[key] for key in self.keys()] + + def items(self): + return [(key, self[key]) for key in self.keys()] + + def __repr__(self): + return '<%s %s %s>' % (self.__class__.__name__, self.name, + self.version) + + +METADATA_FILENAME = 'pydist.json' +WHEEL_METADATA_FILENAME = 'metadata.json' +LEGACY_METADATA_FILENAME = 'METADATA' + + +class Metadata(object): + """ + The metadata of a release. This implementation uses 2.1 + metadata where possible. If not possible, it wraps a LegacyMetadata + instance which handles the key-value metadata format. 
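check() in action (a sketch; the exact missing-field list and message text may vary by metadata version):

from distlib.metadata import LegacyMetadata, MetadataMissingError

md = LegacyMetadata(mapping={'name': 'demo', 'version': '0.1'})
missing, warnings = md.check()
print(missing)                  # e.g. ['Home-page', 'Author']
try:
    LegacyMetadata(mapping={'name': 'demo'}).check(strict=True)
except MetadataMissingError as e:
    print(e)                    # missing required metadata: Version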
+ """ + + METADATA_VERSION_MATCHER = re.compile(r'^\d+(\.\d+)*$') + + NAME_MATCHER = re.compile('^[0-9A-Z]([0-9A-Z_.-]*[0-9A-Z])?$', re.I) + + FIELDNAME_MATCHER = re.compile('^[A-Z]([0-9A-Z-]*[0-9A-Z])?$', re.I) + + VERSION_MATCHER = PEP440_VERSION_RE + + SUMMARY_MATCHER = re.compile('.{1,2047}') + + METADATA_VERSION = '2.0' + + GENERATOR = 'distlib (%s)' % __version__ + + MANDATORY_KEYS = { + 'name': (), + 'version': (), + 'summary': ('legacy',), + } + + INDEX_KEYS = ('name version license summary description author ' + 'author_email keywords platform home_page classifiers ' + 'download_url') + + DEPENDENCY_KEYS = ('extras run_requires test_requires build_requires ' + 'dev_requires provides meta_requires obsoleted_by ' + 'supports_environments') + + SYNTAX_VALIDATORS = { + 'metadata_version': (METADATA_VERSION_MATCHER, ()), + 'name': (NAME_MATCHER, ('legacy',)), + 'version': (VERSION_MATCHER, ('legacy',)), + 'summary': (SUMMARY_MATCHER, ('legacy',)), + 'dynamic': (FIELDNAME_MATCHER, ('legacy',)), + } + + __slots__ = ('_legacy', '_data', 'scheme') + + def __init__(self, path=None, fileobj=None, mapping=None, + scheme='default'): + if [path, fileobj, mapping].count(None) < 2: + raise TypeError('path, fileobj and mapping are exclusive') + self._legacy = None + self._data = None + self.scheme = scheme + #import pdb; pdb.set_trace() + if mapping is not None: + try: + self._validate_mapping(mapping, scheme) + self._data = mapping + except MetadataUnrecognizedVersionError: + self._legacy = LegacyMetadata(mapping=mapping, scheme=scheme) + self.validate() + else: + data = None + if path: + with open(path, 'rb') as f: + data = f.read() + elif fileobj: + data = fileobj.read() + if data is None: + # Initialised with no args - to be added + self._data = { + 'metadata_version': self.METADATA_VERSION, + 'generator': self.GENERATOR, + } + else: + if not isinstance(data, text_type): + data = data.decode('utf-8') + try: + self._data = json.loads(data) + self._validate_mapping(self._data, scheme) + except ValueError: + # Note: MetadataUnrecognizedVersionError does not + # inherit from ValueError (it's a DistlibException, + # which should not inherit from ValueError). 
+ # The ValueError comes from the json.load - if that + # succeeds and we get a validation error, we want + # that to propagate + self._legacy = LegacyMetadata(fileobj=StringIO(data), + scheme=scheme) + self.validate() + + common_keys = set(('name', 'version', 'license', 'keywords', 'summary')) + + none_list = (None, list) + none_dict = (None, dict) + + mapped_keys = { + 'run_requires': ('Requires-Dist', list), + 'build_requires': ('Setup-Requires-Dist', list), + 'dev_requires': none_list, + 'test_requires': none_list, + 'meta_requires': none_list, + 'extras': ('Provides-Extra', list), + 'modules': none_list, + 'namespaces': none_list, + 'exports': none_dict, + 'commands': none_dict, + 'classifiers': ('Classifier', list), + 'source_url': ('Download-URL', None), + 'metadata_version': ('Metadata-Version', None), + } + + del none_list, none_dict + + def __getattribute__(self, key): + common = object.__getattribute__(self, 'common_keys') + mapped = object.__getattribute__(self, 'mapped_keys') + if key in mapped: + lk, maker = mapped[key] + if self._legacy: + if lk is None: + result = None if maker is None else maker() + else: + result = self._legacy.get(lk) + else: + value = None if maker is None else maker() + if key not in ('commands', 'exports', 'modules', 'namespaces', + 'classifiers'): + result = self._data.get(key, value) + else: + # special cases for PEP 459 + sentinel = object() + result = sentinel + d = self._data.get('extensions') + if d: + if key == 'commands': + result = d.get('python.commands', value) + elif key == 'classifiers': + d = d.get('python.details') + if d: + result = d.get(key, value) + else: + d = d.get('python.exports') + if not d: + d = self._data.get('python.exports') + if d: + result = d.get(key, value) + if result is sentinel: + result = value + elif key not in common: + result = object.__getattribute__(self, key) + elif self._legacy: + result = self._legacy.get(key) + else: + result = self._data.get(key) + return result + + def _validate_value(self, key, value, scheme=None): + if key in self.SYNTAX_VALIDATORS: + pattern, exclusions = self.SYNTAX_VALIDATORS[key] + if (scheme or self.scheme) not in exclusions: + m = pattern.match(value) + if not m: + raise MetadataInvalidError("'%s' is an invalid value for " + "the '%s' property" % (value, + key)) + + def __setattr__(self, key, value): + self._validate_value(key, value) + common = object.__getattribute__(self, 'common_keys') + mapped = object.__getattribute__(self, 'mapped_keys') + if key in mapped: + lk, _ = mapped[key] + if self._legacy: + if lk is None: + raise NotImplementedError + self._legacy[lk] = value + elif key not in ('commands', 'exports', 'modules', 'namespaces', + 'classifiers'): + self._data[key] = value + else: + # special cases for PEP 459 + d = self._data.setdefault('extensions', {}) + if key == 'commands': + d['python.commands'] = value + elif key == 'classifiers': + d = d.setdefault('python.details', {}) + d[key] = value + else: + d = d.setdefault('python.exports', {}) + d[key] = value + elif key not in common: + object.__setattr__(self, key, value) + else: + if key == 'keywords': + if isinstance(value, string_types): + value = value.strip() + if value: + value = value.split() + else: + value = [] + if self._legacy: + self._legacy[key] = value + else: + self._data[key] = value + + @property + def name_and_version(self): + return _get_name_and_version(self.name, self.version, True) + + @property + def provides(self): + if self._legacy: + result = self._legacy['Provides-Dist'] + else: + result = 
self._data.setdefault('provides', []) + s = '%s (%s)' % (self.name, self.version) + if s not in result: + result.append(s) + return result + + @provides.setter + def provides(self, value): + if self._legacy: + self._legacy['Provides-Dist'] = value + else: + self._data['provides'] = value + + def get_requirements(self, reqts, extras=None, env=None): + """ + Base method to get dependencies, given a set of extras + to satisfy and an optional environment context. + :param reqts: A list of sometimes-wanted dependencies, + perhaps dependent on extras and environment. + :param extras: A list of optional components being requested. + :param env: An optional environment for marker evaluation. + """ + if self._legacy: + result = reqts + else: + result = [] + extras = get_extras(extras or [], self.extras) + for d in reqts: + if 'extra' not in d and 'environment' not in d: + # unconditional + include = True + else: + if 'extra' not in d: + # Not extra-dependent - only environment-dependent + include = True + else: + include = d.get('extra') in extras + if include: + # Not excluded because of extras, check environment + marker = d.get('environment') + if marker: + include = interpret(marker, env) + if include: + result.extend(d['requires']) + for key in ('build', 'dev', 'test'): + e = ':%s:' % key + if e in extras: + extras.remove(e) + # A recursive call, but it should terminate since 'test' + # has been removed from the extras + reqts = self._data.get('%s_requires' % key, []) + result.extend(self.get_requirements(reqts, extras=extras, + env=env)) + return result + + @property + def dictionary(self): + if self._legacy: + return self._from_legacy() + return self._data + + @property + def dependencies(self): + if self._legacy: + raise NotImplementedError + else: + return extract_by_key(self._data, self.DEPENDENCY_KEYS) + + @dependencies.setter + def dependencies(self, value): + if self._legacy: + raise NotImplementedError + else: + self._data.update(value) + + def _validate_mapping(self, mapping, scheme): + if mapping.get('metadata_version') != self.METADATA_VERSION: + raise MetadataUnrecognizedVersionError() + missing = [] + for key, exclusions in self.MANDATORY_KEYS.items(): + if key not in mapping: + if scheme not in exclusions: + missing.append(key) + if missing: + msg = 'Missing metadata items: %s' % ', '.join(missing) + raise MetadataMissingError(msg) + for k, v in mapping.items(): + self._validate_value(k, v, scheme) + + def validate(self): + if self._legacy: + missing, warnings = self._legacy.check(True) + if missing or warnings: + logger.warning('Metadata: missing: %s, warnings: %s', + missing, warnings) + else: + self._validate_mapping(self._data, self.scheme) + + def todict(self): + if self._legacy: + return self._legacy.todict(True) + else: + result = extract_by_key(self._data, self.INDEX_KEYS) + return result + + def _from_legacy(self): + assert self._legacy and not self._data + result = { + 'metadata_version': self.METADATA_VERSION, + 'generator': self.GENERATOR, + } + lmd = self._legacy.todict(True) # skip missing ones + for k in ('name', 'version', 'license', 'summary', 'description', + 'classifier'): + if k in lmd: + if k == 'classifier': + nk = 'classifiers' + else: + nk = k + result[nk] = lmd[k] + kw = lmd.get('Keywords', []) + if kw == ['']: + kw = [] + result['keywords'] = kw + keys = (('requires_dist', 'run_requires'), + ('setup_requires_dist', 'build_requires')) + for ok, nk in keys: + if ok in lmd and lmd[ok]: + result[nk] = [{'requires': lmd[ok]}] + result['provides'] = 
self.provides + author = {} + maintainer = {} + return result + + LEGACY_MAPPING = { + 'name': 'Name', + 'version': 'Version', + ('extensions', 'python.details', 'license'): 'License', + 'summary': 'Summary', + 'description': 'Description', + ('extensions', 'python.project', 'project_urls', 'Home'): 'Home-page', + ('extensions', 'python.project', 'contacts', 0, 'name'): 'Author', + ('extensions', 'python.project', 'contacts', 0, 'email'): 'Author-email', + 'source_url': 'Download-URL', + ('extensions', 'python.details', 'classifiers'): 'Classifier', + } + + def _to_legacy(self): + def process_entries(entries): + reqts = set() + for e in entries: + extra = e.get('extra') + env = e.get('environment') + rlist = e['requires'] + for r in rlist: + if not env and not extra: + reqts.add(r) + else: + marker = '' + if extra: + marker = 'extra == "%s"' % extra + if env: + if marker: + marker = '(%s) and %s' % (env, marker) + else: + marker = env + reqts.add(';'.join((r, marker))) + return reqts + + assert self._data and not self._legacy + result = LegacyMetadata() + nmd = self._data + # import pdb; pdb.set_trace() + for nk, ok in self.LEGACY_MAPPING.items(): + if not isinstance(nk, tuple): + if nk in nmd: + result[ok] = nmd[nk] + else: + d = nmd + found = True + for k in nk: + try: + d = d[k] + except (KeyError, IndexError): + found = False + break + if found: + result[ok] = d + r1 = process_entries(self.run_requires + self.meta_requires) + r2 = process_entries(self.build_requires + self.dev_requires) + if self.extras: + result['Provides-Extra'] = sorted(self.extras) + result['Requires-Dist'] = sorted(r1) + result['Setup-Requires-Dist'] = sorted(r2) + # TODO: any other fields wanted + return result + + def write(self, path=None, fileobj=None, legacy=False, skip_unknown=True): + if [path, fileobj].count(None) != 1: + raise ValueError('Exactly one of path and fileobj is needed') + self.validate() + if legacy: + if self._legacy: + legacy_md = self._legacy + else: + legacy_md = self._to_legacy() + if path: + legacy_md.write(path, skip_unknown=skip_unknown) + else: + legacy_md.write_file(fileobj, skip_unknown=skip_unknown) + else: + if self._legacy: + d = self._from_legacy() + else: + d = self._data + if fileobj: + json.dump(d, fileobj, ensure_ascii=True, indent=2, + sort_keys=True) + else: + with codecs.open(path, 'w', 'utf-8') as f: + json.dump(d, f, ensure_ascii=True, indent=2, + sort_keys=True) + + def add_requirements(self, requirements): + if self._legacy: + self._legacy.add_requirements(requirements) + else: + run_requires = self._data.setdefault('run_requires', []) + always = None + for entry in run_requires: + if 'environment' not in entry and 'extra' not in entry: + always = entry + break + if always is None: + always = { 'requires': requirements } + run_requires.insert(0, always) + else: + rset = set(always['requires']) | set(requirements) + always['requires'] = sorted(rset) + + def __repr__(self): + name = self.name or '(no name)' + version = self.version or 'no version' + return '<%s %s %s (%s)>' % (self.__class__.__name__, + self.metadata_version, name, version) diff --git a/venv/lib/python3.10/site-packages/distlib/resources.py b/venv/lib/python3.10/site-packages/distlib/resources.py new file mode 100644 index 0000000..fef52aa --- /dev/null +++ b/venv/lib/python3.10/site-packages/distlib/resources.py @@ -0,0 +1,358 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2013-2017 Vinay Sajip. +# Licensed to the Python Software Foundation under a contributor agreement. 
+# See LICENSE.txt and CONTRIBUTORS.txt. +# +from __future__ import unicode_literals + +import bisect +import io +import logging +import os +import pkgutil +import sys +import types +import zipimport + +from . import DistlibException +from .util import cached_property, get_cache_base, Cache + +logger = logging.getLogger(__name__) + + +cache = None # created when needed + + +class ResourceCache(Cache): + def __init__(self, base=None): + if base is None: + # Use native string to avoid issues on 2.x: see Python #20140. + base = os.path.join(get_cache_base(), str('resource-cache')) + super(ResourceCache, self).__init__(base) + + def is_stale(self, resource, path): + """ + Is the cache stale for the given resource? + + :param resource: The :class:`Resource` being cached. + :param path: The path of the resource in the cache. + :return: True if the cache is stale. + """ + # Cache invalidation is a hard problem :-) + return True + + def get(self, resource): + """ + Get a resource into the cache, + + :param resource: A :class:`Resource` instance. + :return: The pathname of the resource in the cache. + """ + prefix, path = resource.finder.get_cache_info(resource) + if prefix is None: + result = path + else: + result = os.path.join(self.base, self.prefix_to_dir(prefix), path) + dirname = os.path.dirname(result) + if not os.path.isdir(dirname): + os.makedirs(dirname) + if not os.path.exists(result): + stale = True + else: + stale = self.is_stale(resource, path) + if stale: + # write the bytes of the resource to the cache location + with open(result, 'wb') as f: + f.write(resource.bytes) + return result + + +class ResourceBase(object): + def __init__(self, finder, name): + self.finder = finder + self.name = name + + +class Resource(ResourceBase): + """ + A class representing an in-package resource, such as a data file. This is + not normally instantiated by user code, but rather by a + :class:`ResourceFinder` which manages the resource. + """ + is_container = False # Backwards compatibility + + def as_stream(self): + """ + Get the resource as a stream. + + This is not a property to make it obvious that it returns a new stream + each time. + """ + return self.finder.get_stream(self) + + @cached_property + def file_path(self): + global cache + if cache is None: + cache = ResourceCache() + return cache.get(self) + + @cached_property + def bytes(self): + return self.finder.get_bytes(self) + + @cached_property + def size(self): + return self.finder.get_size(self) + + +class ResourceContainer(ResourceBase): + is_container = True # Backwards compatibility + + @cached_property + def resources(self): + return self.finder.get_resources(self) + + +class ResourceFinder(object): + """ + Resource finder for file system resources. 
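A hedged usage sketch of the Resource API above, using the finder() factory defined later in this module. 't32.exe' is a data file distlib itself ships (see the renames further below), so it makes a convenient test subject.

from distlib.resources import finder

f = finder('distlib')                 # the package must be importable
r = f.find('t32.exe')
if r is not None and not r.is_container:
    print(r.size == len(r.bytes))     # True; both come from the finder
    with r.as_stream() as s:
        print(s.read(2))              # b'MZ' for a Windows launcher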
+ """ + + if sys.platform.startswith('java'): + skipped_extensions = ('.pyc', '.pyo', '.class') + else: + skipped_extensions = ('.pyc', '.pyo') + + def __init__(self, module): + self.module = module + self.loader = getattr(module, '__loader__', None) + self.base = os.path.dirname(getattr(module, '__file__', '')) + + def _adjust_path(self, path): + return os.path.realpath(path) + + def _make_path(self, resource_name): + # Issue #50: need to preserve type of path on Python 2.x + # like os.path._get_sep + if isinstance(resource_name, bytes): # should only happen on 2.x + sep = b'/' + else: + sep = '/' + parts = resource_name.split(sep) + parts.insert(0, self.base) + result = os.path.join(*parts) + return self._adjust_path(result) + + def _find(self, path): + return os.path.exists(path) + + def get_cache_info(self, resource): + return None, resource.path + + def find(self, resource_name): + path = self._make_path(resource_name) + if not self._find(path): + result = None + else: + if self._is_directory(path): + result = ResourceContainer(self, resource_name) + else: + result = Resource(self, resource_name) + result.path = path + return result + + def get_stream(self, resource): + return open(resource.path, 'rb') + + def get_bytes(self, resource): + with open(resource.path, 'rb') as f: + return f.read() + + def get_size(self, resource): + return os.path.getsize(resource.path) + + def get_resources(self, resource): + def allowed(f): + return (f != '__pycache__' and not + f.endswith(self.skipped_extensions)) + return set([f for f in os.listdir(resource.path) if allowed(f)]) + + def is_container(self, resource): + return self._is_directory(resource.path) + + _is_directory = staticmethod(os.path.isdir) + + def iterator(self, resource_name): + resource = self.find(resource_name) + if resource is not None: + todo = [resource] + while todo: + resource = todo.pop(0) + yield resource + if resource.is_container: + rname = resource.name + for name in resource.resources: + if not rname: + new_name = name + else: + new_name = '/'.join([rname, name]) + child = self.find(new_name) + if child.is_container: + todo.append(child) + else: + yield child + + +class ZipResourceFinder(ResourceFinder): + """ + Resource finder for resources in .zip files. 
+ """ + def __init__(self, module): + super(ZipResourceFinder, self).__init__(module) + archive = self.loader.archive + self.prefix_len = 1 + len(archive) + # PyPy doesn't have a _files attr on zipimporter, and you can't set one + if hasattr(self.loader, '_files'): + self._files = self.loader._files + else: + self._files = zipimport._zip_directory_cache[archive] + self.index = sorted(self._files) + + def _adjust_path(self, path): + return path + + def _find(self, path): + path = path[self.prefix_len:] + if path in self._files: + result = True + else: + if path and path[-1] != os.sep: + path = path + os.sep + i = bisect.bisect(self.index, path) + try: + result = self.index[i].startswith(path) + except IndexError: + result = False + if not result: + logger.debug('_find failed: %r %r', path, self.loader.prefix) + else: + logger.debug('_find worked: %r %r', path, self.loader.prefix) + return result + + def get_cache_info(self, resource): + prefix = self.loader.archive + path = resource.path[1 + len(prefix):] + return prefix, path + + def get_bytes(self, resource): + return self.loader.get_data(resource.path) + + def get_stream(self, resource): + return io.BytesIO(self.get_bytes(resource)) + + def get_size(self, resource): + path = resource.path[self.prefix_len:] + return self._files[path][3] + + def get_resources(self, resource): + path = resource.path[self.prefix_len:] + if path and path[-1] != os.sep: + path += os.sep + plen = len(path) + result = set() + i = bisect.bisect(self.index, path) + while i < len(self.index): + if not self.index[i].startswith(path): + break + s = self.index[i][plen:] + result.add(s.split(os.sep, 1)[0]) # only immediate children + i += 1 + return result + + def _is_directory(self, path): + path = path[self.prefix_len:] + if path and path[-1] != os.sep: + path += os.sep + i = bisect.bisect(self.index, path) + try: + result = self.index[i].startswith(path) + except IndexError: + result = False + return result + + +_finder_registry = { + type(None): ResourceFinder, + zipimport.zipimporter: ZipResourceFinder +} + +try: + # In Python 3.6, _frozen_importlib -> _frozen_importlib_external + try: + import _frozen_importlib_external as _fi + except ImportError: + import _frozen_importlib as _fi + _finder_registry[_fi.SourceFileLoader] = ResourceFinder + _finder_registry[_fi.FileFinder] = ResourceFinder + # See issue #146 + _finder_registry[_fi.SourcelessFileLoader] = ResourceFinder + del _fi +except (ImportError, AttributeError): + pass + + +def register_finder(loader, finder_maker): + _finder_registry[type(loader)] = finder_maker + + +_finder_cache = {} + + +def finder(package): + """ + Return a resource finder for a package. + :param package: The name of the package. + :return: A :class:`ResourceFinder` instance for the package. 
+ """ + if package in _finder_cache: + result = _finder_cache[package] + else: + if package not in sys.modules: + __import__(package) + module = sys.modules[package] + path = getattr(module, '__path__', None) + if path is None: + raise DistlibException('You cannot get a finder for a module, ' + 'only for a package') + loader = getattr(module, '__loader__', None) + finder_maker = _finder_registry.get(type(loader)) + if finder_maker is None: + raise DistlibException('Unable to locate finder for %r' % package) + result = finder_maker(module) + _finder_cache[package] = result + return result + + +_dummy_module = types.ModuleType(str('__dummy__')) + + +def finder_for_path(path): + """ + Return a resource finder for a path, which should represent a container. + + :param path: The path. + :return: A :class:`ResourceFinder` instance for the path. + """ + result = None + # calls any path hooks, gets importer into cache + pkgutil.get_importer(path) + loader = sys.path_importer_cache.get(path) + finder = _finder_registry.get(type(loader)) + if finder: + module = _dummy_module + module.__file__ = os.path.join(path, '') + module.__loader__ = loader + result = finder(module) + return result diff --git a/venv/lib/python3.10/site-packages/distlib/scripts.py b/venv/lib/python3.10/site-packages/distlib/scripts.py new file mode 100644 index 0000000..d8fdb3a --- /dev/null +++ b/venv/lib/python3.10/site-packages/distlib/scripts.py @@ -0,0 +1,438 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2013-2015 Vinay Sajip. +# Licensed to the Python Software Foundation under a contributor agreement. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +from io import BytesIO +import logging +import os +import re +import struct +import sys +import time +from zipfile import ZipInfo + +from .compat import sysconfig, detect_encoding, ZipFile +from .resources import finder +from .util import (FileOperator, get_export_entry, convert_path, + get_executable, get_platform, in_venv) + +logger = logging.getLogger(__name__) + +_DEFAULT_MANIFEST = ''' + + + + + + + + + + + + +'''.strip() + +# check if Python is called on the first line with this expression +FIRST_LINE_RE = re.compile(b'^#!.*pythonw?[0-9.]*([ \t].*)?$') +SCRIPT_TEMPLATE = r'''# -*- coding: utf-8 -*- +import re +import sys +from %(module)s import %(import_name)s +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(%(func)s()) +''' + + +def enquote_executable(executable): + if ' ' in executable: + # make sure we quote only the executable in case of env + # for example /usr/bin/env "/dir with spaces/bin/jython" + # instead of "/usr/bin/env /dir with spaces/bin/jython" + # otherwise whole + if executable.startswith('/usr/bin/env '): + env, _executable = executable.split(' ', 1) + if ' ' in _executable and not _executable.startswith('"'): + executable = '%s "%s"' % (env, _executable) + else: + if not executable.startswith('"'): + executable = '"%s"' % executable + return executable + +# Keep the old name around (for now), as there is at least one project using it! +_enquote_executable = enquote_executable + +class ScriptMaker(object): + """ + A class to copy or create scripts from source scripts or callable + specifications. 
+ """ + script_template = SCRIPT_TEMPLATE + + executable = None # for shebangs + + def __init__(self, source_dir, target_dir, add_launchers=True, + dry_run=False, fileop=None): + self.source_dir = source_dir + self.target_dir = target_dir + self.add_launchers = add_launchers + self.force = False + self.clobber = False + # It only makes sense to set mode bits on POSIX. + self.set_mode = (os.name == 'posix') or (os.name == 'java' and + os._name == 'posix') + self.variants = set(('', 'X.Y')) + self._fileop = fileop or FileOperator(dry_run) + + self._is_nt = os.name == 'nt' or ( + os.name == 'java' and os._name == 'nt') + self.version_info = sys.version_info + + def _get_alternate_executable(self, executable, options): + if options.get('gui', False) and self._is_nt: # pragma: no cover + dn, fn = os.path.split(executable) + fn = fn.replace('python', 'pythonw') + executable = os.path.join(dn, fn) + return executable + + if sys.platform.startswith('java'): # pragma: no cover + def _is_shell(self, executable): + """ + Determine if the specified executable is a script + (contains a #! line) + """ + try: + with open(executable) as fp: + return fp.read(2) == '#!' + except (OSError, IOError): + logger.warning('Failed to open %s', executable) + return False + + def _fix_jython_executable(self, executable): + if self._is_shell(executable): + # Workaround for Jython is not needed on Linux systems. + import java + + if java.lang.System.getProperty('os.name') == 'Linux': + return executable + elif executable.lower().endswith('jython.exe'): + # Use wrapper exe for Jython on Windows + return executable + return '/usr/bin/env %s' % executable + + def _build_shebang(self, executable, post_interp): + """ + Build a shebang line. In the simple case (on Windows, or a shebang line + which is not too long or contains spaces) use a simple formulation for + the shebang. Otherwise, use /bin/sh as the executable, with a contrived + shebang which allows the script to run either under Python or sh, using + suitable quoting. Thanks to Harald Nordgren for his input. + + See also: http://www.in-ulm.de/~mascheck/various/shebang/#length + https://hg.mozilla.org/mozilla-central/file/tip/mach + """ + if os.name != 'posix': + simple_shebang = True + else: + # Add 3 for '#!' prefix and newline suffix. + shebang_length = len(executable) + len(post_interp) + 3 + if sys.platform == 'darwin': + max_shebang_length = 512 + else: + max_shebang_length = 127 + simple_shebang = ((b' ' not in executable) and + (shebang_length <= max_shebang_length)) + + if simple_shebang: + result = b'#!' 
+ executable + post_interp + b'\n' + else: + result = b'#!/bin/sh\n' + result += b"'''exec' " + executable + post_interp + b' "$0" "$@"\n' + result += b"' '''" + return result + + def _get_shebang(self, encoding, post_interp=b'', options=None): + enquote = True + if self.executable: + executable = self.executable + enquote = False # assume this will be taken care of + elif not sysconfig.is_python_build(): + executable = get_executable() + elif in_venv(): # pragma: no cover + executable = os.path.join(sysconfig.get_path('scripts'), + 'python%s' % sysconfig.get_config_var('EXE')) + else: # pragma: no cover + if os.name == 'nt': + # for Python builds from source on Windows, no Python executables with + # a version suffix are created, so we use python.exe + executable = os.path.join(sysconfig.get_config_var('BINDIR'), + 'python%s' % (sysconfig.get_config_var('EXE'))) + else: + executable = os.path.join( + sysconfig.get_config_var('BINDIR'), + 'python%s%s' % (sysconfig.get_config_var('VERSION'), + sysconfig.get_config_var('EXE'))) + if options: + executable = self._get_alternate_executable(executable, options) + + if sys.platform.startswith('java'): # pragma: no cover + executable = self._fix_jython_executable(executable) + + # Normalise case for Windows - COMMENTED OUT + # executable = os.path.normcase(executable) + # N.B. The normalising operation above has been commented out: See + # issue #124. Although paths in Windows are generally case-insensitive, + # they aren't always. For example, a path containing a ẞ (which is a + # LATIN CAPITAL LETTER SHARP S - U+1E9E) is normcased to ß (which is a + # LATIN SMALL LETTER SHARP S' - U+00DF). The two are not considered by + # Windows as equivalent in path names. + + # If the user didn't specify an executable, it may be necessary to + # cater for executable paths with spaces (not uncommon on Windows) + if enquote: + executable = enquote_executable(executable) + # Issue #51: don't use fsencode, since we later try to + # check that the shebang is decodable using utf-8. + executable = executable.encode('utf-8') + # in case of IronPython, play safe and enable frames support + if (sys.platform == 'cli' and '-X:Frames' not in post_interp + and '-X:FullFrames' not in post_interp): # pragma: no cover + post_interp += b' -X:Frames' + shebang = self._build_shebang(executable, post_interp) + # Python parser starts to read a script using UTF-8 until + # it gets a #coding:xxx cookie. The shebang has to be the + # first line of a file, the #coding:xxx cookie cannot be + # written before. So the shebang has to be decodable from + # UTF-8. + try: + shebang.decode('utf-8') + except UnicodeDecodeError: # pragma: no cover + raise ValueError( + 'The shebang (%r) is not decodable from utf-8' % shebang) + # If the script is encoded to a custom encoding (use a + # #coding:xxx cookie), the shebang has to be decodable from + # the script encoding too. 
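For context, a hedged sketch of driving this shebang machinery through the public make() API, defined further below. The target directory is hypothetical and must exist; the written filenames reflect the default variants ('' and 'X.Y').

from distlib.scripts import ScriptMaker

maker = ScriptMaker(source_dir=None, target_dir='/tmp/bin')  # hypothetical
maker.executable = '/usr/bin/python3'    # forces the shebang being built here
written = maker.make('demo = demo.cli:main')   # export-entry specification
print(written)    # e.g. ['/tmp/bin/demo', '/tmp/bin/demo-3.10']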
+ if encoding != 'utf-8': + try: + shebang.decode(encoding) + except UnicodeDecodeError: # pragma: no cover + raise ValueError( + 'The shebang (%r) is not decodable ' + 'from the script encoding (%r)' % (shebang, encoding)) + return shebang + + def _get_script_text(self, entry): + return self.script_template % dict(module=entry.prefix, + import_name=entry.suffix.split('.')[0], + func=entry.suffix) + + manifest = _DEFAULT_MANIFEST + + def get_manifest(self, exename): + base = os.path.basename(exename) + return self.manifest % base + + def _write_script(self, names, shebang, script_bytes, filenames, ext): + use_launcher = self.add_launchers and self._is_nt + linesep = os.linesep.encode('utf-8') + if not shebang.endswith(linesep): + shebang += linesep + if not use_launcher: + script_bytes = shebang + script_bytes + else: # pragma: no cover + if ext == 'py': + launcher = self._get_launcher('t') + else: + launcher = self._get_launcher('w') + stream = BytesIO() + with ZipFile(stream, 'w') as zf: + source_date_epoch = os.environ.get('SOURCE_DATE_EPOCH') + if source_date_epoch: + date_time = time.gmtime(int(source_date_epoch))[:6] + zinfo = ZipInfo(filename='__main__.py', date_time=date_time) + zf.writestr(zinfo, script_bytes) + else: + zf.writestr('__main__.py', script_bytes) + zip_data = stream.getvalue() + script_bytes = launcher + shebang + zip_data + for name in names: + outname = os.path.join(self.target_dir, name) + if use_launcher: # pragma: no cover + n, e = os.path.splitext(outname) + if e.startswith('.py'): + outname = n + outname = '%s.exe' % outname + try: + self._fileop.write_binary_file(outname, script_bytes) + except Exception: + # Failed writing an executable - it might be in use. + logger.warning('Failed to write executable - trying to ' + 'use .deleteme logic') + dfname = '%s.deleteme' % outname + if os.path.exists(dfname): + os.remove(dfname) # Not allowed to fail here + os.rename(outname, dfname) # nor here + self._fileop.write_binary_file(outname, script_bytes) + logger.debug('Able to replace executable using ' + '.deleteme logic') + try: + os.remove(dfname) + except Exception: + pass # still in use - ignore error + else: + if self._is_nt and not outname.endswith('.' 
+ ext): # pragma: no cover + outname = '%s.%s' % (outname, ext) + if os.path.exists(outname) and not self.clobber: + logger.warning('Skipping existing file %s', outname) + continue + self._fileop.write_binary_file(outname, script_bytes) + if self.set_mode: + self._fileop.set_executable_mode([outname]) + filenames.append(outname) + + variant_separator = '-' + + def get_script_filenames(self, name): + result = set() + if '' in self.variants: + result.add(name) + if 'X' in self.variants: + result.add('%s%s' % (name, self.version_info[0])) + if 'X.Y' in self.variants: + result.add('%s%s%s.%s' % (name, self.variant_separator, + self.version_info[0], self.version_info[1])) + return result + + def _make_script(self, entry, filenames, options=None): + post_interp = b'' + if options: + args = options.get('interpreter_args', []) + if args: + args = ' %s' % ' '.join(args) + post_interp = args.encode('utf-8') + shebang = self._get_shebang('utf-8', post_interp, options=options) + script = self._get_script_text(entry).encode('utf-8') + scriptnames = self.get_script_filenames(entry.name) + if options and options.get('gui', False): + ext = 'pyw' + else: + ext = 'py' + self._write_script(scriptnames, shebang, script, filenames, ext) + + def _copy_script(self, script, filenames): + adjust = False + script = os.path.join(self.source_dir, convert_path(script)) + outname = os.path.join(self.target_dir, os.path.basename(script)) + if not self.force and not self._fileop.newer(script, outname): + logger.debug('not copying %s (up-to-date)', script) + return + + # Always open the file, but ignore failures in dry-run mode -- + # that way, we'll get accurate feedback if we can read the + # script. + try: + f = open(script, 'rb') + except IOError: # pragma: no cover + if not self.dry_run: + raise + f = None + else: + first_line = f.readline() + if not first_line: # pragma: no cover + logger.warning('%s is an empty file (skipping)', script) + return + + match = FIRST_LINE_RE.match(first_line.replace(b'\r\n', b'\n')) + if match: + adjust = True + post_interp = match.group(1) or b'' + + if not adjust: + if f: + f.close() + self._fileop.copy_file(script, outname) + if self.set_mode: + self._fileop.set_executable_mode([outname]) + filenames.append(outname) + else: + logger.info('copying and adjusting %s -> %s', script, + self.target_dir) + if not self._fileop.dry_run: + encoding, lines = detect_encoding(f.readline) + f.seek(0) + shebang = self._get_shebang(encoding, post_interp) + if b'pythonw' in first_line: # pragma: no cover + ext = 'pyw' + else: + ext = 'py' + n = os.path.basename(outname) + self._write_script([n], shebang, f.read(), filenames, ext) + if f: + f.close() + + @property + def dry_run(self): + return self._fileop.dry_run + + @dry_run.setter + def dry_run(self, value): + self._fileop.dry_run = value + + if os.name == 'nt' or (os.name == 'java' and os._name == 'nt'): # pragma: no cover + # Executable launcher support. 
+ # Launchers are from https://bitbucket.org/vinay.sajip/simple_launcher/ + + def _get_launcher(self, kind): + if struct.calcsize('P') == 8: # 64-bit + bits = '64' + else: + bits = '32' + platform_suffix = '-arm' if get_platform() == 'win-arm64' else '' + name = '%s%s%s.exe' % (kind, bits, platform_suffix) + # Issue 31: don't hardcode an absolute package name, but + # determine it relative to the current package + distlib_package = __name__.rsplit('.', 1)[0] + resource = finder(distlib_package).find(name) + if not resource: + msg = ('Unable to find resource %s in package %s' % (name, + distlib_package)) + raise ValueError(msg) + return resource.bytes + + # Public API follows + + def make(self, specification, options=None): + """ + Make a script. + + :param specification: The specification, which is either a valid export + entry specification (to make a script from a + callable) or a filename (to make a script by + copying from a source location). + :param options: A dictionary of options controlling script generation. + :return: A list of all absolute pathnames written to. + """ + filenames = [] + entry = get_export_entry(specification) + if entry is None: + self._copy_script(specification, filenames) + else: + self._make_script(entry, filenames, options=options) + return filenames + + def make_multiple(self, specifications, options=None): + """ + Take a list of specifications and make scripts from them, + :param specifications: A list of specifications. + :return: A list of all absolute pathnames written to, + """ + filenames = [] + for specification in specifications: + filenames.extend(self.make(specification, options)) + return filenames diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/distlib/t32.exe b/venv/lib/python3.10/site-packages/distlib/t32.exe similarity index 100% rename from venv/lib/python3.10/site-packages/pip/_vendor/distlib/t32.exe rename to venv/lib/python3.10/site-packages/distlib/t32.exe diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/distlib/t64-arm.exe b/venv/lib/python3.10/site-packages/distlib/t64-arm.exe similarity index 100% rename from venv/lib/python3.10/site-packages/pip/_vendor/distlib/t64-arm.exe rename to venv/lib/python3.10/site-packages/distlib/t64-arm.exe diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/distlib/t64.exe b/venv/lib/python3.10/site-packages/distlib/t64.exe similarity index 100% rename from venv/lib/python3.10/site-packages/pip/_vendor/distlib/t64.exe rename to venv/lib/python3.10/site-packages/distlib/t64.exe diff --git a/venv/lib/python3.10/site-packages/distlib/util.py b/venv/lib/python3.10/site-packages/distlib/util.py new file mode 100644 index 0000000..04429ad --- /dev/null +++ b/venv/lib/python3.10/site-packages/distlib/util.py @@ -0,0 +1,1949 @@ +# +# Copyright (C) 2012-2021 The Python Software Foundation. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +import codecs +from collections import deque +import contextlib +import csv +from glob import iglob as std_iglob +import io +import json +import logging +import os +import py_compile +import re +import socket +try: + import ssl +except ImportError: # pragma: no cover + ssl = None +import subprocess +import sys +import tarfile +import tempfile +import textwrap + +try: + import threading +except ImportError: # pragma: no cover + import dummy_threading as threading +import time + +from . 
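`make` and `make_multiple` above are ScriptMaker's whole public surface. A typical call, sketched under the assumption that a `mypkg.cli:main` callable exists (the specification string uses the export-entry syntax that `util.get_export_entry`, further down, parses):

```python
from distlib.scripts import ScriptMaker

maker = ScriptMaker(source_dir=None, target_dir='build/bin')  # paths illustrative
maker.variants = {'', 'X.Y'}        # e.g. write both 'hello' and 'hello-3.10'
written = maker.make('hello = mypkg.cli:main')
print(written)                      # absolute paths of every file produced
```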
import DistlibException +from .compat import (string_types, text_type, shutil, raw_input, StringIO, + cache_from_source, urlopen, urljoin, httplib, xmlrpclib, + splittype, HTTPHandler, BaseConfigurator, valid_ident, + Container, configparser, URLError, ZipFile, fsdecode, + unquote, urlparse) + +logger = logging.getLogger(__name__) + +# +# Requirement parsing code as per PEP 508 +# + +IDENTIFIER = re.compile(r'^([\w\.-]+)\s*') +VERSION_IDENTIFIER = re.compile(r'^([\w\.*+-]+)\s*') +COMPARE_OP = re.compile(r'^(<=?|>=?|={2,3}|[~!]=)\s*') +MARKER_OP = re.compile(r'^((<=?)|(>=?)|={2,3}|[~!]=|in|not\s+in)\s*') +OR = re.compile(r'^or\b\s*') +AND = re.compile(r'^and\b\s*') +NON_SPACE = re.compile(r'(\S+)\s*') +STRING_CHUNK = re.compile(r'([\s\w\.{}()*+#:;,/?!~`@$%^&=|<>\[\]-]+)') + + +def parse_marker(marker_string): + """ + Parse a marker string and return a dictionary containing a marker expression. + + The dictionary will contain keys "op", "lhs" and "rhs" for non-terminals in + the expression grammar, or strings. A string contained in quotes is to be + interpreted as a literal string, and a string not contained in quotes is a + variable (such as os_name). + """ + def marker_var(remaining): + # either identifier, or literal string + m = IDENTIFIER.match(remaining) + if m: + result = m.groups()[0] + remaining = remaining[m.end():] + elif not remaining: + raise SyntaxError('unexpected end of input') + else: + q = remaining[0] + if q not in '\'"': + raise SyntaxError('invalid expression: %s' % remaining) + oq = '\'"'.replace(q, '') + remaining = remaining[1:] + parts = [q] + while remaining: + # either a string chunk, or oq, or q to terminate + if remaining[0] == q: + break + elif remaining[0] == oq: + parts.append(oq) + remaining = remaining[1:] + else: + m = STRING_CHUNK.match(remaining) + if not m: + raise SyntaxError('error in string literal: %s' % remaining) + parts.append(m.groups()[0]) + remaining = remaining[m.end():] + else: + s = ''.join(parts) + raise SyntaxError('unterminated string: %s' % s) + parts.append(q) + result = ''.join(parts) + remaining = remaining[1:].lstrip() # skip past closing quote + return result, remaining + + def marker_expr(remaining): + if remaining and remaining[0] == '(': + result, remaining = marker(remaining[1:].lstrip()) + if remaining[0] != ')': + raise SyntaxError('unterminated parenthesis: %s' % remaining) + remaining = remaining[1:].lstrip() + else: + lhs, remaining = marker_var(remaining) + while remaining: + m = MARKER_OP.match(remaining) + if not m: + break + op = m.groups()[0] + remaining = remaining[m.end():] + rhs, remaining = marker_var(remaining) + lhs = {'op': op, 'lhs': lhs, 'rhs': rhs} + result = lhs + return result, remaining + + def marker_and(remaining): + lhs, remaining = marker_expr(remaining) + while remaining: + m = AND.match(remaining) + if not m: + break + remaining = remaining[m.end():] + rhs, remaining = marker_expr(remaining) + lhs = {'op': 'and', 'lhs': lhs, 'rhs': rhs} + return lhs, remaining + + def marker(remaining): + lhs, remaining = marker_and(remaining) + while remaining: + m = OR.match(remaining) + if not m: + break + remaining = remaining[m.end():] + rhs, remaining = marker_and(remaining) + lhs = {'op': 'or', 'lhs': lhs, 'rhs': rhs} + return lhs, remaining + + return marker(marker_string) + + +def parse_requirement(req): + """ + Parse a requirement passed in as a string. Return a Container + whose attributes contain the various parts of the requirement. 
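`parse_marker` returns an `(expression, remaining)` pair; quoted strings keep their quotes so literals stay distinguishable from variables such as `os_name`. For example:

```python
from distlib.util import parse_marker

expr, rest = parse_marker('python_version >= "3.8" and os_name == "posix"')
# expr == {'op': 'and',
#          'lhs': {'op': '>=', 'lhs': 'python_version', 'rhs': '"3.8"'},
#          'rhs': {'op': '==', 'lhs': 'os_name', 'rhs': '"posix"'}}
# rest == ''    (the whole input was consumed)
```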
+ """ + remaining = req.strip() + if not remaining or remaining.startswith('#'): + return None + m = IDENTIFIER.match(remaining) + if not m: + raise SyntaxError('name expected: %s' % remaining) + distname = m.groups()[0] + remaining = remaining[m.end():] + extras = mark_expr = versions = uri = None + if remaining and remaining[0] == '[': + i = remaining.find(']', 1) + if i < 0: + raise SyntaxError('unterminated extra: %s' % remaining) + s = remaining[1:i] + remaining = remaining[i + 1:].lstrip() + extras = [] + while s: + m = IDENTIFIER.match(s) + if not m: + raise SyntaxError('malformed extra: %s' % s) + extras.append(m.groups()[0]) + s = s[m.end():] + if not s: + break + if s[0] != ',': + raise SyntaxError('comma expected in extras: %s' % s) + s = s[1:].lstrip() + if not extras: + extras = None + if remaining: + if remaining[0] == '@': + # it's a URI + remaining = remaining[1:].lstrip() + m = NON_SPACE.match(remaining) + if not m: + raise SyntaxError('invalid URI: %s' % remaining) + uri = m.groups()[0] + t = urlparse(uri) + # there are issues with Python and URL parsing, so this test + # is a bit crude. See bpo-20271, bpo-23505. Python doesn't + # always parse invalid URLs correctly - it should raise + # exceptions for malformed URLs + if not (t.scheme and t.netloc): + raise SyntaxError('Invalid URL: %s' % uri) + remaining = remaining[m.end():].lstrip() + else: + + def get_versions(ver_remaining): + """ + Return a list of operator, version tuples if any are + specified, else None. + """ + m = COMPARE_OP.match(ver_remaining) + versions = None + if m: + versions = [] + while True: + op = m.groups()[0] + ver_remaining = ver_remaining[m.end():] + m = VERSION_IDENTIFIER.match(ver_remaining) + if not m: + raise SyntaxError('invalid version: %s' % ver_remaining) + v = m.groups()[0] + versions.append((op, v)) + ver_remaining = ver_remaining[m.end():] + if not ver_remaining or ver_remaining[0] != ',': + break + ver_remaining = ver_remaining[1:].lstrip() + # Some packages have a trailing comma which would break things + # See issue #148 + if not ver_remaining: + break + m = COMPARE_OP.match(ver_remaining) + if not m: + raise SyntaxError('invalid constraint: %s' % ver_remaining) + if not versions: + versions = None + return versions, ver_remaining + + if remaining[0] != '(': + versions, remaining = get_versions(remaining) + else: + i = remaining.find(')', 1) + if i < 0: + raise SyntaxError('unterminated parenthesis: %s' % remaining) + s = remaining[1:i] + remaining = remaining[i + 1:].lstrip() + # As a special diversion from PEP 508, allow a version number + # a.b.c in parentheses as a synonym for ~= a.b.c (because this + # is allowed in earlier PEPs) + if COMPARE_OP.match(s): + versions, _ = get_versions(s) + else: + m = VERSION_IDENTIFIER.match(s) + if not m: + raise SyntaxError('invalid constraint: %s' % s) + v = m.groups()[0] + s = s[m.end():].lstrip() + if s: + raise SyntaxError('invalid constraint: %s' % s) + versions = [('~=', v)] + + if remaining: + if remaining[0] != ';': + raise SyntaxError('invalid requirement: %s' % remaining) + remaining = remaining[1:].lstrip() + + mark_expr, remaining = parse_marker(remaining) + + if remaining and remaining[0] != '#': + raise SyntaxError('unexpected trailing data: %s' % remaining) + + if not versions: + rs = distname + else: + rs = '%s %s' % (distname, ', '.join(['%s %s' % con for con in versions])) + return Container(name=distname, extras=extras, constraints=versions, + marker=mark_expr, url=uri, requirement=rs) + + +def 
get_resources_dests(resources_root, rules): + """Find destinations for resources files""" + + def get_rel_path(root, path): + # normalizes and returns a lstripped-/-separated path + root = root.replace(os.path.sep, '/') + path = path.replace(os.path.sep, '/') + assert path.startswith(root) + return path[len(root):].lstrip('/') + + destinations = {} + for base, suffix, dest in rules: + prefix = os.path.join(resources_root, base) + for abs_base in iglob(prefix): + abs_glob = os.path.join(abs_base, suffix) + for abs_path in iglob(abs_glob): + resource_file = get_rel_path(resources_root, abs_path) + if dest is None: # remove the entry if it was here + destinations.pop(resource_file, None) + else: + rel_path = get_rel_path(abs_base, abs_path) + rel_dest = dest.replace(os.path.sep, '/').rstrip('/') + destinations[resource_file] = rel_dest + '/' + rel_path + return destinations + + +def in_venv(): + if hasattr(sys, 'real_prefix'): + # virtualenv venvs + result = True + else: + # PEP 405 venvs + result = sys.prefix != getattr(sys, 'base_prefix', sys.prefix) + return result + + +def get_executable(): +# The __PYVENV_LAUNCHER__ dance is apparently no longer needed, as +# changes to the stub launcher mean that sys.executable always points +# to the stub on OS X +# if sys.platform == 'darwin' and ('__PYVENV_LAUNCHER__' +# in os.environ): +# result = os.environ['__PYVENV_LAUNCHER__'] +# else: +# result = sys.executable +# return result + # Avoid normcasing: see issue #143 + # result = os.path.normcase(sys.executable) + result = sys.executable + if not isinstance(result, text_type): + result = fsdecode(result) + return result + + +def proceed(prompt, allowed_chars, error_prompt=None, default=None): + p = prompt + while True: + s = raw_input(p) + p = prompt + if not s and default: + s = default + if s: + c = s[0].lower() + if c in allowed_chars: + break + if error_prompt: + p = '%c: %s\n%s' % (c, error_prompt, prompt) + return c + + +def extract_by_key(d, keys): + if isinstance(keys, string_types): + keys = keys.split() + result = {} + for key in keys: + if key in d: + result[key] = d[key] + return result + +def read_exports(stream): + if sys.version_info[0] >= 3: + # needs to be a text stream + stream = codecs.getreader('utf-8')(stream) + # Try to load as JSON, falling back on legacy format + data = stream.read() + stream = StringIO(data) + try: + jdata = json.load(stream) + result = jdata['extensions']['python.exports']['exports'] + for group, entries in result.items(): + for k, v in entries.items(): + s = '%s = %s' % (k, v) + entry = get_export_entry(s) + assert entry is not None + entries[k] = entry + return result + except Exception: + stream.seek(0, 0) + + def read_stream(cp, stream): + if hasattr(cp, 'read_file'): + cp.read_file(stream) + else: + cp.readfp(stream) + + cp = configparser.ConfigParser() + try: + read_stream(cp, stream) + except configparser.MissingSectionHeaderError: + stream.close() + data = textwrap.dedent(data) + stream = StringIO(data) + read_stream(cp, stream) + + result = {} + for key in cp.sections(): + result[key] = entries = {} + for name, value in cp.items(key): + s = '%s = %s' % (name, value) + entry = get_export_entry(s) + assert entry is not None + #entry.dist = self + entries[name] = entry + return result + + +def write_exports(exports, stream): + if sys.version_info[0] >= 3: + # needs to be a text stream + stream = codecs.getwriter('utf-8')(stream) + cp = configparser.ConfigParser() + for k, v in exports.items(): + # TODO check k, v for valid values + cp.add_section(k) 
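`read_exports` accepts either the JSON metadata layout or, as a fallback, a legacy INI layout. A sketch of the INI form (group names and entries are illustrative):

```python
import io
from distlib.util import read_exports

stream = io.BytesIO(b"[console_scripts]\n"
                    b"hello = mypkg.cli:main\n"
                    b"hello-gui = mypkg.cli:gui_main [gui]\n")
exports = read_exports(stream)
entry = exports['console_scripts']['hello']
# entry.prefix == 'mypkg.cli', entry.suffix == 'main', entry.flags == []
```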
+ for entry in v.values(): + if entry.suffix is None: + s = entry.prefix + else: + s = '%s:%s' % (entry.prefix, entry.suffix) + if entry.flags: + s = '%s [%s]' % (s, ', '.join(entry.flags)) + cp.set(k, entry.name, s) + cp.write(stream) + + +@contextlib.contextmanager +def tempdir(): + td = tempfile.mkdtemp() + try: + yield td + finally: + shutil.rmtree(td) + +@contextlib.contextmanager +def chdir(d): + cwd = os.getcwd() + try: + os.chdir(d) + yield + finally: + os.chdir(cwd) + + +@contextlib.contextmanager +def socket_timeout(seconds=15): + cto = socket.getdefaulttimeout() + try: + socket.setdefaulttimeout(seconds) + yield + finally: + socket.setdefaulttimeout(cto) + + +class cached_property(object): + def __init__(self, func): + self.func = func + #for attr in ('__name__', '__module__', '__doc__'): + # setattr(self, attr, getattr(func, attr, None)) + + def __get__(self, obj, cls=None): + if obj is None: + return self + value = self.func(obj) + object.__setattr__(obj, self.func.__name__, value) + #obj.__dict__[self.func.__name__] = value = self.func(obj) + return value + +def convert_path(pathname): + """Return 'pathname' as a name that will work on the native filesystem. + + The path is split on '/' and put back together again using the current + directory separator. Needed because filenames in the setup script are + always supplied in Unix style, and have to be converted to the local + convention before we can actually use them in the filesystem. Raises + ValueError on non-Unix-ish systems if 'pathname' either starts or + ends with a slash. + """ + if os.sep == '/': + return pathname + if not pathname: + return pathname + if pathname[0] == '/': + raise ValueError("path '%s' cannot be absolute" % pathname) + if pathname[-1] == '/': + raise ValueError("path '%s' cannot end with '/'" % pathname) + + paths = pathname.split('/') + while os.curdir in paths: + paths.remove(os.curdir) + if not paths: + return os.curdir + return os.path.join(*paths) + + +class FileOperator(object): + def __init__(self, dry_run=False): + self.dry_run = dry_run + self.ensured = set() + self._init_record() + + def _init_record(self): + self.record = False + self.files_written = set() + self.dirs_created = set() + + def record_as_written(self, path): + if self.record: + self.files_written.add(path) + + def newer(self, source, target): + """Tell if the target is newer than the source. + + Returns true if 'source' exists and is more recently modified than + 'target', or if 'source' exists and 'target' doesn't. + + Returns false if both exist and 'target' is the same age or younger + than 'source'. Raise PackagingFileError if 'source' does not exist. + + Note that this test is not very accurate: files created in the same + second will have the same "age". + """ + if not os.path.exists(source): + raise DistlibException("file '%r' does not exist" % + os.path.abspath(source)) + if not os.path.exists(target): + return True + + return os.stat(source).st_mtime > os.stat(target).st_mtime + + def copy_file(self, infile, outfile, check=True): + """Copy a file respecting dry-run and force flags. 
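`FileOperator` centralises filesystem writes so that a single `dry_run` flag is honoured everywhere. A hedged usage sketch (paths are illustrative):

```python
from distlib.util import FileOperator

fileop = FileOperator(dry_run=True)       # log intended actions, touch nothing
fileop.copy_file('files/books.csv', 'backup/books.csv')
fileop.dry_run = False                    # from here on, operations really happen
fileop.write_text_file('backup/note.txt', 'generated\n', 'utf-8')
```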
+ """ + self.ensure_dir(os.path.dirname(outfile)) + logger.info('Copying %s to %s', infile, outfile) + if not self.dry_run: + msg = None + if check: + if os.path.islink(outfile): + msg = '%s is a symlink' % outfile + elif os.path.exists(outfile) and not os.path.isfile(outfile): + msg = '%s is a non-regular file' % outfile + if msg: + raise ValueError(msg + ' which would be overwritten') + shutil.copyfile(infile, outfile) + self.record_as_written(outfile) + + def copy_stream(self, instream, outfile, encoding=None): + assert not os.path.isdir(outfile) + self.ensure_dir(os.path.dirname(outfile)) + logger.info('Copying stream %s to %s', instream, outfile) + if not self.dry_run: + if encoding is None: + outstream = open(outfile, 'wb') + else: + outstream = codecs.open(outfile, 'w', encoding=encoding) + try: + shutil.copyfileobj(instream, outstream) + finally: + outstream.close() + self.record_as_written(outfile) + + def write_binary_file(self, path, data): + self.ensure_dir(os.path.dirname(path)) + if not self.dry_run: + if os.path.exists(path): + os.remove(path) + with open(path, 'wb') as f: + f.write(data) + self.record_as_written(path) + + def write_text_file(self, path, data, encoding): + self.write_binary_file(path, data.encode(encoding)) + + def set_mode(self, bits, mask, files): + if os.name == 'posix' or (os.name == 'java' and os._name == 'posix'): + # Set the executable bits (owner, group, and world) on + # all the files specified. + for f in files: + if self.dry_run: + logger.info("changing mode of %s", f) + else: + mode = (os.stat(f).st_mode | bits) & mask + logger.info("changing mode of %s to %o", f, mode) + os.chmod(f, mode) + + set_executable_mode = lambda s, f: s.set_mode(0o555, 0o7777, f) + + def ensure_dir(self, path): + path = os.path.abspath(path) + if path not in self.ensured and not os.path.exists(path): + self.ensured.add(path) + d, f = os.path.split(path) + self.ensure_dir(d) + logger.info('Creating %s' % path) + if not self.dry_run: + os.mkdir(path) + if self.record: + self.dirs_created.add(path) + + def byte_compile(self, path, optimize=False, force=False, prefix=None, hashed_invalidation=False): + dpath = cache_from_source(path, not optimize) + logger.info('Byte-compiling %s to %s', path, dpath) + if not self.dry_run: + if force or self.newer(path, dpath): + if not prefix: + diagpath = None + else: + assert path.startswith(prefix) + diagpath = path[len(prefix):] + compile_kwargs = {} + if hashed_invalidation and hasattr(py_compile, 'PycInvalidationMode'): + compile_kwargs['invalidation_mode'] = py_compile.PycInvalidationMode.CHECKED_HASH + py_compile.compile(path, dpath, diagpath, True, **compile_kwargs) # raise error + self.record_as_written(dpath) + return dpath + + def ensure_removed(self, path): + if os.path.exists(path): + if os.path.isdir(path) and not os.path.islink(path): + logger.debug('Removing directory tree at %s', path) + if not self.dry_run: + shutil.rmtree(path) + if self.record: + if path in self.dirs_created: + self.dirs_created.remove(path) + else: + if os.path.islink(path): + s = 'link' + else: + s = 'file' + logger.debug('Removing %s %s', s, path) + if not self.dry_run: + os.remove(path) + if self.record: + if path in self.files_written: + self.files_written.remove(path) + + def is_writable(self, path): + result = False + while not result: + if os.path.exists(path): + result = os.access(path, os.W_OK) + break + parent = os.path.dirname(path) + if parent == path: + break + path = parent + return result + + def commit(self): + """ + Commit recorded 
changes, turn off recording, return + changes. + """ + assert self.record + result = self.files_written, self.dirs_created + self._init_record() + return result + + def rollback(self): + if not self.dry_run: + for f in list(self.files_written): + if os.path.exists(f): + os.remove(f) + # dirs should all be empty now, except perhaps for + # __pycache__ subdirs + # reverse so that subdirs appear before their parents + dirs = sorted(self.dirs_created, reverse=True) + for d in dirs: + flist = os.listdir(d) + if flist: + assert flist == ['__pycache__'] + sd = os.path.join(d, flist[0]) + os.rmdir(sd) + os.rmdir(d) # should fail if non-empty + self._init_record() + +def resolve(module_name, dotted_path): + if module_name in sys.modules: + mod = sys.modules[module_name] + else: + mod = __import__(module_name) + if dotted_path is None: + result = mod + else: + parts = dotted_path.split('.') + result = getattr(mod, parts.pop(0)) + for p in parts: + result = getattr(result, p) + return result + + +class ExportEntry(object): + def __init__(self, name, prefix, suffix, flags): + self.name = name + self.prefix = prefix + self.suffix = suffix + self.flags = flags + + @cached_property + def value(self): + return resolve(self.prefix, self.suffix) + + def __repr__(self): # pragma: no cover + return '<ExportEntry %s = %s:%s %s>' % (self.name, self.prefix, + self.suffix, self.flags) + + def __eq__(self, other): + if not isinstance(other, ExportEntry): + result = False + else: + result = (self.name == other.name and + self.prefix == other.prefix and + self.suffix == other.suffix and + self.flags == other.flags) + return result + + __hash__ = object.__hash__ + + +ENTRY_RE = re.compile(r'''(?P<name>([^\[]\S*)) + \s*=\s*(?P<callable>(\w+)([:\.]\w+)*) + \s*(\[\s*(?P<flags>[\w-]+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])? + ''', re.VERBOSE) + +def get_export_entry(specification): + m = ENTRY_RE.search(specification) + if not m: + result = None + if '[' in specification or ']' in specification: + raise DistlibException("Invalid specification " + "'%s'" % specification) + else: + d = m.groupdict() + name = d['name'] + path = d['callable'] + colons = path.count(':') + if colons == 0: + prefix, suffix = path, None + else: + if colons != 1: + raise DistlibException("Invalid specification " + "'%s'" % specification) + prefix, suffix = path.split(':') + flags = d['flags'] + if flags is None: + if '[' in specification or ']' in specification: + raise DistlibException("Invalid specification " + "'%s'" % specification) + flags = [] + else: + flags = [f.strip() for f in flags.split(',')] + result = ExportEntry(name, prefix, suffix, flags) + return result + + +def get_cache_base(suffix=None): + """ + Return the default base location for distlib caches. If the directory does + not exist, it is created. Use the suffix provided for the base directory, + and default to '.distlib' if it isn't provided. + + On Windows, if LOCALAPPDATA is defined in the environment, then it is + assumed to be a directory, and will be the parent directory of the result. + On POSIX, and on Windows if LOCALAPPDATA is not defined, the user's home + directory - using os.expanduser('~') - will be the parent directory of + the result. + + The result is just the directory '.distlib' in the parent directory as + determined above, or with the name specified with ``suffix``.
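`get_export_entry` and `resolve` tie the export syntax to real objects: the entry's cached `value` property imports `prefix` and walks `suffix`. For example (`mypkg` is illustrative, so `value` would only resolve if such a package were importable):

```python
from distlib.util import get_export_entry

entry = get_export_entry('hello = mypkg.cli:main [gui, extra]')
(entry.name, entry.prefix, entry.suffix, entry.flags)
# ('hello', 'mypkg.cli', 'main', ['gui', 'extra'])

# entry.value would call resolve('mypkg.cli', 'main'), i.e. import the
# module and fetch the attribute; without a real mypkg it raises ImportError.
```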
+ """ + if suffix is None: + suffix = '.distlib' + if os.name == 'nt' and 'LOCALAPPDATA' in os.environ: + result = os.path.expandvars('$localappdata') + else: + # Assume posix, or old Windows + result = os.path.expanduser('~') + # we use 'isdir' instead of 'exists', because we want to + # fail if there's a file with that name + if os.path.isdir(result): + usable = os.access(result, os.W_OK) + if not usable: + logger.warning('Directory exists but is not writable: %s', result) + else: + try: + os.makedirs(result) + usable = True + except OSError: + logger.warning('Unable to create %s', result, exc_info=True) + usable = False + if not usable: + result = tempfile.mkdtemp() + logger.warning('Default location unusable, using %s', result) + return os.path.join(result, suffix) + + +def path_to_cache_dir(path): + """ + Convert an absolute path to a directory name for use in a cache. + + The algorithm used is: + + #. On Windows, any ``':'`` in the drive is replaced with ``'---'``. + #. Any occurrence of ``os.sep`` is replaced with ``'--'``. + #. ``'.cache'`` is appended. + """ + d, p = os.path.splitdrive(os.path.abspath(path)) + if d: + d = d.replace(':', '---') + p = p.replace(os.sep, '--') + return d + p + '.cache' + + +def ensure_slash(s): + if not s.endswith('/'): + return s + '/' + return s + + +def parse_credentials(netloc): + username = password = None + if '@' in netloc: + prefix, netloc = netloc.rsplit('@', 1) + if ':' not in prefix: + username = prefix + else: + username, password = prefix.split(':', 1) + if username: + username = unquote(username) + if password: + password = unquote(password) + return username, password, netloc + + +def get_process_umask(): + result = os.umask(0o22) + os.umask(result) + return result + +def is_string_sequence(seq): + result = True + i = None + for i, s in enumerate(seq): + if not isinstance(s, string_types): + result = False + break + assert i is not None + return result + +PROJECT_NAME_AND_VERSION = re.compile('([a-z0-9_]+([.-][a-z_][a-z0-9_]*)*)-' + '([a-z0-9_.+-]+)', re.I) +PYTHON_VERSION = re.compile(r'-py(\d\.?\d?)') + + +def split_filename(filename, project_name=None): + """ + Extract name, version, python version from a filename (no extension) + + Return name, version, pyver or None + """ + result = None + pyver = None + filename = unquote(filename).replace(' ', '-') + m = PYTHON_VERSION.search(filename) + if m: + pyver = m.group(1) + filename = filename[:m.start()] + if project_name and len(filename) > len(project_name) + 1: + m = re.match(re.escape(project_name) + r'\b', filename) + if m: + n = m.end() + result = filename[:n], filename[n + 1:], pyver + if result is None: + m = PROJECT_NAME_AND_VERSION.match(filename) + if m: + result = m.group(1), m.group(3), pyver + return result + +# Allow spaces in name because of legacy dists like "Twisted Core" +NAME_VERSION_RE = re.compile(r'(?P<name>[\w .-]+)\s*' + r'\(\s*(?P<ver>[^\s)]+)\)$') + +def parse_name_and_version(p): + """ + A utility method used to get name and version from a string. + + From e.g. a Provides-Dist value. + + :param p: A value in a form 'foo (1.0)' + :return: The name and version as a tuple.
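Doctest-style expectations for the small helpers just defined (POSIX paths assumed):

```python
from distlib.util import path_to_cache_dir, parse_credentials, split_filename

path_to_cache_dir('/home/user/project')
# '--home--user--project.cache'   (a drive ':' would become '---' on Windows)

parse_credentials('alice:s3cret@pypi.example.com')
# ('alice', 's3cret', 'pypi.example.com')

split_filename('Flask-1.1.2-py3', 'Flask')
# ('Flask', '1.1.2', '3')
```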
+ """ + m = NAME_VERSION_RE.match(p) + if not m: + raise DistlibException('Ill-formed name/version string: \'%s\'' % p) + d = m.groupdict() + return d['name'].strip().lower(), d['ver'] + +def get_extras(requested, available): + result = set() + requested = set(requested or []) + available = set(available or []) + if '*' in requested: + requested.remove('*') + result |= available + for r in requested: + if r == '-': + result.add(r) + elif r.startswith('-'): + unwanted = r[1:] + if unwanted not in available: + logger.warning('undeclared extra: %s' % unwanted) + if unwanted in result: + result.remove(unwanted) + else: + if r not in available: + logger.warning('undeclared extra: %s' % r) + result.add(r) + return result +# +# Extended metadata functionality +# + +def _get_external_data(url): + result = {} + try: + # urlopen might fail if it runs into redirections, + # because of Python issue #13696. Fixed in locators + # using a custom redirect handler. + resp = urlopen(url) + headers = resp.info() + ct = headers.get('Content-Type') + if not ct.startswith('application/json'): + logger.debug('Unexpected response for JSON request: %s', ct) + else: + reader = codecs.getreader('utf-8')(resp) + #data = reader.read().decode('utf-8') + #result = json.loads(data) + result = json.load(reader) + except Exception as e: + logger.exception('Failed to get external data for %s: %s', url, e) + return result + +_external_data_base_url = 'https://www.red-dove.com/pypi/projects/' + +def get_project_data(name): + url = '%s/%s/project.json' % (name[0].upper(), name) + url = urljoin(_external_data_base_url, url) + result = _get_external_data(url) + return result + +def get_package_data(name, version): + url = '%s/%s/package-%s.json' % (name[0].upper(), name, version) + url = urljoin(_external_data_base_url, url) + return _get_external_data(url) + + +class Cache(object): + """ + A class implementing a cache for resources that need to live in the file system + e.g. shared libraries. This class was moved from resources to here because it + could be used by other modules, e.g. the wheel module. + """ + + def __init__(self, base): + """ + Initialise an instance. + + :param base: The base directory where the cache should be located. + """ + # we use 'isdir' instead of 'exists', because we want to + # fail if there's a file with that name + if not os.path.isdir(base): # pragma: no cover + os.makedirs(base) + if (os.stat(base).st_mode & 0o77) != 0: + logger.warning('Directory \'%s\' is not private', base) + self.base = os.path.abspath(os.path.normpath(base)) + + def prefix_to_dir(self, prefix): + """ + Converts a resource prefix to a directory name in the cache. + """ + return path_to_cache_dir(prefix) + + def clear(self): + """ + Clear the cache. + """ + not_removed = [] + for fn in os.listdir(self.base): + fn = os.path.join(self.base, fn) + try: + if os.path.islink(fn) or os.path.isfile(fn): + os.remove(fn) + elif os.path.isdir(fn): + shutil.rmtree(fn) + except Exception: + not_removed.append(fn) + return not_removed + + +class EventMixin(object): + """ + A very simple publish/subscribe system. + """ + def __init__(self): + self._subscribers = {} + + def add(self, event, subscriber, append=True): + """ + Add a subscriber for an event. + + :param event: The name of an event. + :param subscriber: The subscriber to be added (and called when the + event is published). + :param append: Whether to append or prepend the subscriber to an + existing subscriber list for the event. 
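`get_extras` resolves requested extras against the declared ones, with `'*'` meaning "everything" and a `-` prefix meaning "remove":

```python
from distlib.util import get_extras

get_extras(['*', '-tests'], ['security', 'socks', 'tests'])
# {'security', 'socks'}   ('*' pulls in all extras, '-tests' then drops one)
```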
+ """ + subs = self._subscribers + if event not in subs: + subs[event] = deque([subscriber]) + else: + sq = subs[event] + if append: + sq.append(subscriber) + else: + sq.appendleft(subscriber) + + def remove(self, event, subscriber): + """ + Remove a subscriber for an event. + + :param event: The name of an event. + :param subscriber: The subscriber to be removed. + """ + subs = self._subscribers + if event not in subs: + raise ValueError('No subscribers: %r' % event) + subs[event].remove(subscriber) + + def get_subscribers(self, event): + """ + Return an iterator for the subscribers for an event. + :param event: The event to return subscribers for. + """ + return iter(self._subscribers.get(event, ())) + + def publish(self, event, *args, **kwargs): + """ + Publish a event and return a list of values returned by its + subscribers. + + :param event: The event to publish. + :param args: The positional arguments to pass to the event's + subscribers. + :param kwargs: The keyword arguments to pass to the event's + subscribers. + """ + result = [] + for subscriber in self.get_subscribers(event): + try: + value = subscriber(event, *args, **kwargs) + except Exception: + logger.exception('Exception during event publication') + value = None + result.append(value) + logger.debug('publish %s: args = %s, kwargs = %s, result = %s', + event, args, kwargs, result) + return result + +# +# Simple sequencing +# +class Sequencer(object): + def __init__(self): + self._preds = {} + self._succs = {} + self._nodes = set() # nodes with no preds/succs + + def add_node(self, node): + self._nodes.add(node) + + def remove_node(self, node, edges=False): + if node in self._nodes: + self._nodes.remove(node) + if edges: + for p in set(self._preds.get(node, ())): + self.remove(p, node) + for s in set(self._succs.get(node, ())): + self.remove(node, s) + # Remove empties + for k, v in list(self._preds.items()): + if not v: + del self._preds[k] + for k, v in list(self._succs.items()): + if not v: + del self._succs[k] + + def add(self, pred, succ): + assert pred != succ + self._preds.setdefault(succ, set()).add(pred) + self._succs.setdefault(pred, set()).add(succ) + + def remove(self, pred, succ): + assert pred != succ + try: + preds = self._preds[succ] + succs = self._succs[pred] + except KeyError: # pragma: no cover + raise ValueError('%r not a successor of anything' % succ) + try: + preds.remove(pred) + succs.remove(succ) + except KeyError: # pragma: no cover + raise ValueError('%r not a successor of %r' % (succ, pred)) + + def is_step(self, step): + return (step in self._preds or step in self._succs or + step in self._nodes) + + def get_steps(self, final): + if not self.is_step(final): + raise ValueError('Unknown: %r' % final) + result = [] + todo = [] + seen = set() + todo.append(final) + while todo: + step = todo.pop(0) + if step in seen: + # if a step was already seen, + # move it to the end (so it will appear earlier + # when reversed on return) ... 
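A minimal use of the `EventMixin` publish/subscribe API defined above:

```python
from distlib.util import EventMixin

class Builder(EventMixin):
    def build(self):
        # publish() returns the list of subscriber return values
        return self.publish('built', target='wheel')

def on_built(event, **kwargs):
    return 'seen %s %r' % (event, kwargs)

b = Builder()
b.add('built', on_built)
b.build()   # ["seen built {'target': 'wheel'}"]
```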
but not for the + # final step, as that would be confusing for + # users + if step != final: + result.remove(step) + result.append(step) + else: + seen.add(step) + result.append(step) + preds = self._preds.get(step, ()) + todo.extend(preds) + return reversed(result) + + @property + def strong_connections(self): + #http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm + index_counter = [0] + stack = [] + lowlinks = {} + index = {} + result = [] + + graph = self._succs + + def strongconnect(node): + # set the depth index for this node to the smallest unused index + index[node] = index_counter[0] + lowlinks[node] = index_counter[0] + index_counter[0] += 1 + stack.append(node) + + # Consider successors + try: + successors = graph[node] + except Exception: + successors = [] + for successor in successors: + if successor not in lowlinks: + # Successor has not yet been visited + strongconnect(successor) + lowlinks[node] = min(lowlinks[node],lowlinks[successor]) + elif successor in stack: + # the successor is in the stack and hence in the current + # strongly connected component (SCC) + lowlinks[node] = min(lowlinks[node],index[successor]) + + # If `node` is a root node, pop the stack and generate an SCC + if lowlinks[node] == index[node]: + connected_component = [] + + while True: + successor = stack.pop() + connected_component.append(successor) + if successor == node: break + component = tuple(connected_component) + # storing the result + result.append(component) + + for node in graph: + if node not in lowlinks: + strongconnect(node) + + return result + + @property + def dot(self): + result = ['digraph G {'] + for succ in self._preds: + preds = self._preds[succ] + for pred in preds: + result.append(' %s -> %s;' % (pred, succ)) + for node in self._nodes: + result.append(' %s;' % node) + result.append('}') + return '\n'.join(result) + +# +# Unarchiving functionality for zip, tar, tgz, tbz, whl +# + +ARCHIVE_EXTENSIONS = ('.tar.gz', '.tar.bz2', '.tar', '.zip', + '.tgz', '.tbz', '.whl') + +def unarchive(archive_filename, dest_dir, format=None, check=True): + + def check_path(path): + if not isinstance(path, text_type): + path = path.decode('utf-8') + p = os.path.abspath(os.path.join(dest_dir, path)) + if not p.startswith(dest_dir) or p[plen] != os.sep: + raise ValueError('path outside destination: %r' % p) + + dest_dir = os.path.abspath(dest_dir) + plen = len(dest_dir) + archive = None + if format is None: + if archive_filename.endswith(('.zip', '.whl')): + format = 'zip' + elif archive_filename.endswith(('.tar.gz', '.tgz')): + format = 'tgz' + mode = 'r:gz' + elif archive_filename.endswith(('.tar.bz2', '.tbz')): + format = 'tbz' + mode = 'r:bz2' + elif archive_filename.endswith('.tar'): + format = 'tar' + mode = 'r' + else: # pragma: no cover + raise ValueError('Unknown format for %r' % archive_filename) + try: + if format == 'zip': + archive = ZipFile(archive_filename, 'r') + if check: + names = archive.namelist() + for name in names: + check_path(name) + else: + archive = tarfile.open(archive_filename, mode) + if check: + names = archive.getnames() + for name in names: + check_path(name) + if format != 'zip' and sys.version_info[0] < 3: + # See Python issue 17153. If the dest path contains Unicode, + # tarfile extraction fails on Python 2.x if a member path name + # contains non-ASCII characters - it leads to an implicit + # bytes -> unicode conversion using ASCII to decode. 
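`Sequencer.get_steps` walks predecessor edges backwards from the final step and returns them in execution order:

```python
from distlib.util import Sequencer

seq = Sequencer()
seq.add('fetch', 'build')     # 'fetch' must run before 'build'
seq.add('build', 'test')
list(seq.get_steps('test'))
# ['fetch', 'build', 'test']
```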
+ for tarinfo in archive.getmembers(): + if not isinstance(tarinfo.name, text_type): + tarinfo.name = tarinfo.name.decode('utf-8') + + # Limit extraction of dangerous items, if this Python + # allows it easily. If not, just trust the input. + # See: https://docs.python.org/3/library/tarfile.html#extraction-filters + def extraction_filter(member, path): + """Run tarfile.tar_filter, but raise the expected ValueError""" + # This is only called if the current Python has tarfile filters + try: + return tarfile.tar_filter(member, path) + except tarfile.FilterError as exc: + raise ValueError(str(exc)) + archive.extraction_filter = extraction_filter + + archive.extractall(dest_dir) + + finally: + if archive: + archive.close() + + +def zip_dir(directory): + """zip a directory tree into a BytesIO object""" + result = io.BytesIO() + dlen = len(directory) + with ZipFile(result, "w") as zf: + for root, dirs, files in os.walk(directory): + for name in files: + full = os.path.join(root, name) + rel = root[dlen:] + dest = os.path.join(rel, name) + zf.write(full, dest) + return result + +# +# Simple progress bar +# + +UNITS = ('', 'K', 'M', 'G','T','P') + + +class Progress(object): + unknown = 'UNKNOWN' + + def __init__(self, minval=0, maxval=100): + assert maxval is None or maxval >= minval + self.min = self.cur = minval + self.max = maxval + self.started = None + self.elapsed = 0 + self.done = False + + def update(self, curval): + assert self.min <= curval + assert self.max is None or curval <= self.max + self.cur = curval + now = time.time() + if self.started is None: + self.started = now + else: + self.elapsed = now - self.started + + def increment(self, incr): + assert incr >= 0 + self.update(self.cur + incr) + + def start(self): + self.update(self.min) + return self + + def stop(self): + if self.max is not None: + self.update(self.max) + self.done = True + + @property + def maximum(self): + return self.unknown if self.max is None else self.max + + @property + def percentage(self): + if self.done: + result = '100 %' + elif self.max is None: + result = ' ?? %' + else: + v = 100.0 * (self.cur - self.min) / (self.max - self.min) + result = '%3d %%' % v + return result + + def format_duration(self, duration): + if (duration <= 0) and self.max is None or self.cur == self.min: + result = '??:??:??' 
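Typical use of `unarchive` together with the `tempdir` context manager from earlier; with `check=True` (the default) any member whose path would escape the destination raises `ValueError`. The archive path is illustrative:

```python
from distlib.util import tempdir, unarchive

with tempdir() as td:
    unarchive('dist/mypkg-1.0.tar.gz', td)   # format inferred from the suffix
    # ... inspect the extracted tree under td before it is removed ...
```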
+ #elif duration < 1: + # result = '--:--:--' + else: + result = time.strftime('%H:%M:%S', time.gmtime(duration)) + return result + + @property + def ETA(self): + if self.done: + prefix = 'Done' + t = self.elapsed + #import pdb; pdb.set_trace() + else: + prefix = 'ETA ' + if self.max is None: + t = -1 + elif self.elapsed == 0 or (self.cur == self.min): + t = 0 + else: + #import pdb; pdb.set_trace() + t = float(self.max - self.min) + t /= self.cur - self.min + t = (t - 1) * self.elapsed + return '%s: %s' % (prefix, self.format_duration(t)) + + @property + def speed(self): + if self.elapsed == 0: + result = 0.0 + else: + result = (self.cur - self.min) / self.elapsed + for unit in UNITS: + if result < 1000: + break + result /= 1000.0 + return '%d %sB/s' % (result, unit) + +# +# Glob functionality +# + +RICH_GLOB = re.compile(r'\{([^}]*)\}') +_CHECK_RECURSIVE_GLOB = re.compile(r'[^/\\,{]\*\*|\*\*[^/\\,}]') +_CHECK_MISMATCH_SET = re.compile(r'^[^{]*\}|\{[^}]*$') + + +def iglob(path_glob): + """Extended globbing function that supports ** and {opt1,opt2,opt3}.""" + if _CHECK_RECURSIVE_GLOB.search(path_glob): + msg = """invalid glob %r: recursive glob "**" must be used alone""" + raise ValueError(msg % path_glob) + if _CHECK_MISMATCH_SET.search(path_glob): + msg = """invalid glob %r: mismatching set marker '{' or '}'""" + raise ValueError(msg % path_glob) + return _iglob(path_glob) + + +def _iglob(path_glob): + rich_path_glob = RICH_GLOB.split(path_glob, 1) + if len(rich_path_glob) > 1: + assert len(rich_path_glob) == 3, rich_path_glob + prefix, set, suffix = rich_path_glob + for item in set.split(','): + for path in _iglob(''.join((prefix, item, suffix))): + yield path + else: + if '**' not in path_glob: + for item in std_iglob(path_glob): + yield item + else: + prefix, radical = path_glob.split('**', 1) + if prefix == '': + prefix = '.' 
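The `Progress` class is a plain model with no rendering of its own; callers poll `percentage`, `ETA` and `speed` as updates arrive. A sketch:

```python
from distlib.util import Progress

p = Progress(maxval=1000).start()
for _ in range(10):
    p.increment(100)                     # pretend another 100 bytes arrived
    print(p.percentage, p.ETA, p.speed)
p.stop()                                 # done; percentage becomes '100 %'
```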
+ if radical == '': + radical = '*' + else: + # we support both + radical = radical.lstrip('/') + radical = radical.lstrip('\\') + for path, dir, files in os.walk(prefix): + path = os.path.normpath(path) + for fn in _iglob(os.path.join(path, radical)): + yield fn + +if ssl: + from .compat import (HTTPSHandler as BaseHTTPSHandler, match_hostname, + CertificateError) + + +# +# HTTPSConnection which verifies certificates/matches domains +# + + class HTTPSConnection(httplib.HTTPSConnection): + ca_certs = None # set this to the path to the certs file (.pem) + check_domain = True # only used if ca_certs is not None + + # noinspection PyPropertyAccess + def connect(self): + sock = socket.create_connection((self.host, self.port), self.timeout) + if getattr(self, '_tunnel_host', False): + self.sock = sock + self._tunnel() + + context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) + if hasattr(ssl, 'OP_NO_SSLv2'): + context.options |= ssl.OP_NO_SSLv2 + if getattr(self, 'cert_file', None): + context.load_cert_chain(self.cert_file, self.key_file) + kwargs = {} + if self.ca_certs: + context.verify_mode = ssl.CERT_REQUIRED + context.load_verify_locations(cafile=self.ca_certs) + if getattr(ssl, 'HAS_SNI', False): + kwargs['server_hostname'] = self.host + + self.sock = context.wrap_socket(sock, **kwargs) + if self.ca_certs and self.check_domain: + try: + match_hostname(self.sock.getpeercert(), self.host) + logger.debug('Host verified: %s', self.host) + except CertificateError: # pragma: no cover + self.sock.shutdown(socket.SHUT_RDWR) + self.sock.close() + raise + + class HTTPSHandler(BaseHTTPSHandler): + def __init__(self, ca_certs, check_domain=True): + BaseHTTPSHandler.__init__(self) + self.ca_certs = ca_certs + self.check_domain = check_domain + + def _conn_maker(self, *args, **kwargs): + """ + This is called to create a connection instance. Normally you'd + pass a connection class to do_open, but it doesn't actually check for + a class, and just expects a callable. As long as we behave just as a + constructor would have, we should be OK. If it ever changes so that + we *must* pass a class, we'll create an UnsafeHTTPSConnection class + which just sets check_domain to False in the class definition, and + choose which one to pass to do_open. + """ + result = HTTPSConnection(*args, **kwargs) + if self.ca_certs: + result.ca_certs = self.ca_certs + result.check_domain = self.check_domain + return result + + def https_open(self, req): + try: + return self.do_open(self._conn_maker, req) + except URLError as e: + if 'certificate verify failed' in str(e.reason): + raise CertificateError('Unable to verify server certificate ' + 'for %s' % req.host) + else: + raise + + # + # To prevent against mixing HTTP traffic with HTTPS (examples: A Man-In-The- + # Middle proxy using HTTP listens on port 443, or an index mistakenly serves + # HTML containing a http://xyz link when it should be https://xyz), + # you can use the following handler class, which does not allow HTTP traffic. + # + # It works by inheriting from HTTPHandler - so build_opener won't add a + # handler for HTTP itself. 
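The extended `iglob` adds `**` recursion and `{a,b}` alternation on top of the standard library's globbing (patterns are illustrative; results depend on the filesystem):

```python
from distlib.util import iglob

list(iglob('src/**/*.py'))               # walk src/ recursively for .py files
list(iglob('docs/{api,guide}/*.rst'))    # docs/api/*.rst plus docs/guide/*.rst
```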
+ # + class HTTPSOnlyHandler(HTTPSHandler, HTTPHandler): + def http_open(self, req): + raise URLError('Unexpected HTTP request on what should be a secure ' + 'connection: %s' % req) + +# +# XML-RPC with timeouts +# +class Transport(xmlrpclib.Transport): + def __init__(self, timeout, use_datetime=0): + self.timeout = timeout + xmlrpclib.Transport.__init__(self, use_datetime) + + def make_connection(self, host): + h, eh, x509 = self.get_host_info(host) + if not self._connection or host != self._connection[0]: + self._extra_headers = eh + self._connection = host, httplib.HTTPConnection(h) + return self._connection[1] + +if ssl: + class SafeTransport(xmlrpclib.SafeTransport): + def __init__(self, timeout, use_datetime=0): + self.timeout = timeout + xmlrpclib.SafeTransport.__init__(self, use_datetime) + + def make_connection(self, host): + h, eh, kwargs = self.get_host_info(host) + if not kwargs: + kwargs = {} + kwargs['timeout'] = self.timeout + if not self._connection or host != self._connection[0]: + self._extra_headers = eh + self._connection = host, httplib.HTTPSConnection(h, None, + **kwargs) + return self._connection[1] + + +class ServerProxy(xmlrpclib.ServerProxy): + def __init__(self, uri, **kwargs): + self.timeout = timeout = kwargs.pop('timeout', None) + # The above classes only come into play if a timeout + # is specified + if timeout is not None: + # scheme = splittype(uri) # deprecated as of Python 3.8 + scheme = urlparse(uri)[0] + use_datetime = kwargs.get('use_datetime', 0) + if scheme == 'https': + tcls = SafeTransport + else: + tcls = Transport + kwargs['transport'] = t = tcls(timeout, use_datetime=use_datetime) + self.transport = t + xmlrpclib.ServerProxy.__init__(self, uri, **kwargs) + +# +# CSV functionality. This is provided because on 2.x, the csv module can't +# handle Unicode. However, we need to deal with Unicode in e.g. RECORD files. +# + +def _csv_open(fn, mode, **kwargs): + if sys.version_info[0] < 3: + mode += 'b' + else: + kwargs['newline'] = '' + # Python 3 determines encoding from locale. 
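The `Transport`/`SafeTransport` subclasses exist only to thread a timeout through to the underlying HTTP(S) connection, which stock `xmlrpclib` does not support; `ServerProxy` picks the right one from the URL scheme. A sketch (URL illustrative):

```python
from distlib.util import ServerProxy

proxy = ServerProxy('https://pypi.org/pypi', timeout=10)
# Calls through proxy now use an HTTPSConnection created with
# timeout=10 instead of blocking indefinitely.
```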
Force 'utf-8' + # file encoding to match other forced utf-8 encoding + kwargs['encoding'] = 'utf-8' + return open(fn, mode, **kwargs) + + +class CSVBase(object): + defaults = { + 'delimiter': str(','), # The strs are used because we need native + 'quotechar': str('"'), # str in the csv API (2.x won't take + 'lineterminator': str('\n') # Unicode) + } + + def __enter__(self): + return self + + def __exit__(self, *exc_info): + self.stream.close() + + +class CSVReader(CSVBase): + def __init__(self, **kwargs): + if 'stream' in kwargs: + stream = kwargs['stream'] + if sys.version_info[0] >= 3: + # needs to be a text stream + stream = codecs.getreader('utf-8')(stream) + self.stream = stream + else: + self.stream = _csv_open(kwargs['path'], 'r') + self.reader = csv.reader(self.stream, **self.defaults) + + def __iter__(self): + return self + + def next(self): + result = next(self.reader) + if sys.version_info[0] < 3: + for i, item in enumerate(result): + if not isinstance(item, text_type): + result[i] = item.decode('utf-8') + return result + + __next__ = next + +class CSVWriter(CSVBase): + def __init__(self, fn, **kwargs): + self.stream = _csv_open(fn, 'w') + self.writer = csv.writer(self.stream, **self.defaults) + + def writerow(self, row): + if sys.version_info[0] < 3: + r = [] + for item in row: + if isinstance(item, text_type): + item = item.encode('utf-8') + r.append(item) + row = r + self.writer.writerow(row) + +# +# Configurator functionality +# + +class Configurator(BaseConfigurator): + + value_converters = dict(BaseConfigurator.value_converters) + value_converters['inc'] = 'inc_convert' + + def __init__(self, config, base=None): + super(Configurator, self).__init__(config) + self.base = base or os.getcwd() + + def configure_custom(self, config): + def convert(o): + if isinstance(o, (list, tuple)): + result = type(o)([convert(i) for i in o]) + elif isinstance(o, dict): + if '()' in o: + result = self.configure_custom(o) + else: + result = {} + for k in o: + result[k] = convert(o[k]) + else: + result = self.convert(o) + return result + + c = config.pop('()') + if not callable(c): + c = self.resolve(c) + props = config.pop('.', None) + # Check for valid identifiers + args = config.pop('[]', ()) + if args: + args = tuple([convert(o) for o in args]) + items = [(k, convert(config[k])) for k in config if valid_ident(k)] + kwargs = dict(items) + result = c(*args, **kwargs) + if props: + for n, v in props.items(): + setattr(result, n, convert(v)) + return result + + def __getitem__(self, key): + result = self.config[key] + if isinstance(result, dict) and '()' in result: + self.config[key] = result = self.configure_custom(result) + return result + + def inc_convert(self, value): + """Default converter for the inc:// protocol.""" + if not os.path.isabs(value): + value = os.path.join(self.base, value) + with codecs.open(value, 'r', encoding='utf-8') as f: + result = json.load(f) + return result + + +class SubprocessMixin(object): + """ + Mixin for running subprocesses and capturing their output + """ + def __init__(self, verbose=False, progress=None): + self.verbose = verbose + self.progress = progress + + def reader(self, stream, context): + """ + Read lines from a subprocess' output stream and either pass to a progress + callable (if specified) or write progress information to sys.stderr. 
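`CSVReader`/`CSVWriter` exist so RECORD-style files are always UTF-8 text with `\n` line endings on both Python 2 and 3. A round-trip sketch (row contents illustrative):

```python
from distlib.util import CSVReader, CSVWriter

with CSVWriter('RECORD') as w:
    w.writerow(('mypkg/__init__.py', 'sha256=abc123', '42'))

with CSVReader(path='RECORD') as r:
    for path, digest, size in r:
        print(path, digest, size)
```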
+ """ + progress = self.progress + verbose = self.verbose + while True: + s = stream.readline() + if not s: + break + if progress is not None: + progress(s, context) + else: + if not verbose: + sys.stderr.write('.') + else: + sys.stderr.write(s.decode('utf-8')) + sys.stderr.flush() + stream.close() + + def run_command(self, cmd, **kwargs): + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, + stderr=subprocess.PIPE, **kwargs) + t1 = threading.Thread(target=self.reader, args=(p.stdout, 'stdout')) + t1.start() + t2 = threading.Thread(target=self.reader, args=(p.stderr, 'stderr')) + t2.start() + p.wait() + t1.join() + t2.join() + if self.progress is not None: + self.progress('done.', 'main') + elif self.verbose: + sys.stderr.write('done.\n') + return p + + +def normalize_name(name): + """Normalize a python package name a la PEP 503""" + # https://www.python.org/dev/peps/pep-0503/#normalized-names + return re.sub('[-_.]+', '-', name).lower() + +# def _get_pypirc_command(): + # """ + # Get the distutils command for interacting with PyPI configurations. + # :return: the command. + # """ + # from distutils.core import Distribution + # from distutils.config import PyPIRCCommand + # d = Distribution() + # return PyPIRCCommand(d) + +class PyPIRCFile(object): + + DEFAULT_REPOSITORY = 'https://upload.pypi.org/legacy/' + DEFAULT_REALM = 'pypi' + + def __init__(self, fn=None, url=None): + if fn is None: + fn = os.path.join(os.path.expanduser('~'), '.pypirc') + self.filename = fn + self.url = url + + def read(self): + result = {} + + if os.path.exists(self.filename): + repository = self.url or self.DEFAULT_REPOSITORY + + config = configparser.RawConfigParser() + config.read(self.filename) + sections = config.sections() + if 'distutils' in sections: + # let's get the list of servers + index_servers = config.get('distutils', 'index-servers') + _servers = [server.strip() for server in + index_servers.split('\n') + if server.strip() != ''] + if _servers == []: + # nothing set, let's try to get the default pypi + if 'pypi' in sections: + _servers = ['pypi'] + else: + for server in _servers: + result = {'server': server} + result['username'] = config.get(server, 'username') + + # optional params + for key, default in (('repository', self.DEFAULT_REPOSITORY), + ('realm', self.DEFAULT_REALM), + ('password', None)): + if config.has_option(server, key): + result[key] = config.get(server, key) + else: + result[key] = default + + # work around people having "repository" for the "pypi" + # section of their config set to the HTTP (rather than + # HTTPS) URL + if (server == 'pypi' and + repository in (self.DEFAULT_REPOSITORY, 'pypi')): + result['repository'] = self.DEFAULT_REPOSITORY + elif (result['server'] != repository and + result['repository'] != repository): + result = {} + elif 'server-login' in sections: + # old format + server = 'server-login' + if config.has_option(server, 'repository'): + repository = config.get(server, 'repository') + else: + repository = self.DEFAULT_REPOSITORY + result = { + 'username': config.get(server, 'username'), + 'password': config.get(server, 'password'), + 'repository': repository, + 'server': server, + 'realm': self.DEFAULT_REALM + } + return result + + def update(self, username, password): + # import pdb; pdb.set_trace() + config = configparser.RawConfigParser() + fn = self.filename + config.read(fn) + if not config.has_section('pypi'): + config.add_section('pypi') + config.set('pypi', 'username', username) + config.set('pypi', 'password', password) + with open(fn, 'w') as f: + 
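`normalize_name` implements PEP 503 normalization: runs of `-`, `_` and `.` collapse to a single `-`, lower-cased:

```python
from distlib.util import normalize_name

normalize_name('Friendly-Bard')    # 'friendly-bard'
normalize_name('my__pkg..Name')    # 'my-pkg-name'
```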
config.write(f) + +def _load_pypirc(index): + """ + Read the PyPI access configuration as supported by distutils. + """ + return PyPIRCFile(url=index.url).read() + +def _store_pypirc(index): + PyPIRCFile().update(index.username, index.password) + +# +# get_platform()/get_host_platform() copied from Python 3.10.a0 source, with some minor +# tweaks +# + +def get_host_platform(): + """Return a string that identifies the current platform. This is used mainly to + distinguish platform-specific build directories and platform-specific built + distributions. Typically includes the OS name and version and the + architecture (as supplied by 'os.uname()'), although the exact information + included depends on the OS; eg. on Linux, the kernel version isn't + particularly important. + + Examples of returned values: + linux-i586 + linux-alpha (?) + solaris-2.6-sun4u + + Windows will return one of: + win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc) + win32 (all others - specifically, sys.platform is returned) + + For other non-POSIX platforms, currently just returns 'sys.platform'. + + """ + if os.name == 'nt': + if 'amd64' in sys.version.lower(): + return 'win-amd64' + if '(arm)' in sys.version.lower(): + return 'win-arm32' + if '(arm64)' in sys.version.lower(): + return 'win-arm64' + return sys.platform + + # Set for cross builds explicitly + if "_PYTHON_HOST_PLATFORM" in os.environ: + return os.environ["_PYTHON_HOST_PLATFORM"] + + if os.name != 'posix' or not hasattr(os, 'uname'): + # XXX what about the architecture? NT is Intel or Alpha, + # Mac OS is M68k or PPC, etc. + return sys.platform + + # Try to distinguish various flavours of Unix + + (osname, host, release, version, machine) = os.uname() + + # Convert the OS name to lowercase, remove '/' characters, and translate + # spaces (for "Power Macintosh") + osname = osname.lower().replace('/', '') + machine = machine.replace(' ', '_').replace('/', '-') + + if osname[:5] == 'linux': + # At least on Linux/Intel, 'machine' is the processor -- + # i386, etc. + # XXX what about Alpha, SPARC, etc? + return "%s-%s" % (osname, machine) + + elif osname[:5] == 'sunos': + if release[0] >= '5': # SunOS 5 == Solaris 2 + osname = 'solaris' + release = '%d.%s' % (int(release[0]) - 3, release[2:]) + # We can't use 'platform.architecture()[0]' because a + # bootstrap problem. We use a dict to get an error + # if some suspicious happens. 
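`PyPIRCFile.read` understands the classic distutils `~/.pypirc` layout. Assuming a file like the commented example, it would return roughly the dict shown (all values illustrative):

```python
from distlib.util import PyPIRCFile

# ~/.pypirc:
#   [distutils]
#   index-servers = pypi
#   [pypi]
#   username = alice
#   password = hunter2
PyPIRCFile().read()
# {'server': 'pypi', 'username': 'alice', 'password': 'hunter2',
#  'repository': 'https://upload.pypi.org/legacy/', 'realm': 'pypi'}
```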
+ bitness = {2147483647:'32bit', 9223372036854775807:'64bit'} + machine += '.%s' % bitness[sys.maxsize] + # fall through to standard osname-release-machine representation + elif osname[:3] == 'aix': + from _aix_support import aix_platform + return aix_platform() + elif osname[:6] == 'cygwin': + osname = 'cygwin' + rel_re = re.compile (r'[\d.]+', re.ASCII) + m = rel_re.match(release) + if m: + release = m.group() + elif osname[:6] == 'darwin': + import _osx_support + try: + from distutils import sysconfig + except ImportError: + import sysconfig + osname, release, machine = _osx_support.get_platform_osx( + sysconfig.get_config_vars(), + osname, release, machine) + + return '%s-%s-%s' % (osname, release, machine) + + +_TARGET_TO_PLAT = { + 'x86' : 'win32', + 'x64' : 'win-amd64', + 'arm' : 'win-arm32', +} + + +def get_platform(): + if os.name != 'nt': + return get_host_platform() + cross_compilation_target = os.environ.get('VSCMD_ARG_TGT_ARCH') + if cross_compilation_target not in _TARGET_TO_PLAT: + return get_host_platform() + return _TARGET_TO_PLAT[cross_compilation_target] diff --git a/venv/lib/python3.10/site-packages/distlib/version.py b/venv/lib/python3.10/site-packages/distlib/version.py new file mode 100644 index 0000000..5de88ef --- /dev/null +++ b/venv/lib/python3.10/site-packages/distlib/version.py @@ -0,0 +1,748 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2012-2017 The Python Software Foundation. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +""" +Implementation of a flexible versioning scheme providing support for PEP-440, +setuptools-compatible and semantic versioning. +""" + +import logging +import re + +from .compat import string_types +from .util import parse_requirement + +__all__ = ['NormalizedVersion', 'NormalizedMatcher', + 'LegacyVersion', 'LegacyMatcher', + 'SemanticVersion', 'SemanticMatcher', + 'UnsupportedVersionError', 'get_scheme'] + +logger = logging.getLogger(__name__) + + +class UnsupportedVersionError(ValueError): + """This is an unsupported version.""" + pass + + +class Version(object): + def __init__(self, s): + self._string = s = s.strip() + self._parts = parts = self.parse(s) + assert isinstance(parts, tuple) + assert len(parts) > 0 + + def parse(self, s): + raise NotImplementedError('please implement in a subclass') + + def _check_compatible(self, other): + if type(self) != type(other): + raise TypeError('cannot compare %r and %r' % (self, other)) + + def __eq__(self, other): + self._check_compatible(other) + return self._parts == other._parts + + def __ne__(self, other): + return not self.__eq__(other) + + def __lt__(self, other): + self._check_compatible(other) + return self._parts < other._parts + + def __gt__(self, other): + return not (self.__lt__(other) or self.__eq__(other)) + + def __le__(self, other): + return self.__lt__(other) or self.__eq__(other) + + def __ge__(self, other): + return self.__gt__(other) or self.__eq__(other) + + # See http://docs.python.org/reference/datamodel#object.__hash__ + def __hash__(self): + return hash(self._parts) + + def __repr__(self): + return "%s('%s')" % (self.__class__.__name__, self._string) + + def __str__(self): + return self._string + + @property + def is_prerelease(self): + raise NotImplementedError('Please implement in subclasses.') + + +class Matcher(object): + version_class = None + + # value is either a callable or the name of a method + _operators = { + '<': lambda v, c, p: v < c, + '>': lambda v, c, p: v > c, + '<=': lambda v, c, p: v == c or v < c, + '>=': lambda v, c, p: v == c or v > c, + '==': 
lambda v, c, p: v == c, + '===': lambda v, c, p: v == c, + # by default, compatible => >=. + '~=': lambda v, c, p: v == c or v > c, + '!=': lambda v, c, p: v != c, + } + + # this is a method only to support alternative implementations + # via overriding + def parse_requirement(self, s): + return parse_requirement(s) + + def __init__(self, s): + if self.version_class is None: + raise ValueError('Please specify a version class') + self._string = s = s.strip() + r = self.parse_requirement(s) + if not r: + raise ValueError('Not valid: %r' % s) + self.name = r.name + self.key = self.name.lower() # for case-insensitive comparisons + clist = [] + if r.constraints: + # import pdb; pdb.set_trace() + for op, s in r.constraints: + if s.endswith('.*'): + if op not in ('==', '!='): + raise ValueError('\'.*\' not allowed for ' + '%r constraints' % op) + # Could be a partial version (e.g. for '2.*') which + # won't parse as a version, so keep it as a string + vn, prefix = s[:-2], True + # Just to check that vn is a valid version + self.version_class(vn) + else: + # Should parse as a version, so we can create an + # instance for the comparison + vn, prefix = self.version_class(s), False + clist.append((op, vn, prefix)) + self._parts = tuple(clist) + + def match(self, version): + """ + Check if the provided version matches the constraints. + + :param version: The version to match against this instance. + :type version: String or :class:`Version` instance. + """ + if isinstance(version, string_types): + version = self.version_class(version) + for operator, constraint, prefix in self._parts: + f = self._operators.get(operator) + if isinstance(f, string_types): + f = getattr(self, f) + if not f: + msg = ('%r not implemented ' + 'for %s' % (operator, self.__class__.__name__)) + raise NotImplementedError(msg) + if not f(version, constraint, prefix): + return False + return True + + @property + def exact_version(self): + result = None + if len(self._parts) == 1 and self._parts[0][0] in ('==', '==='): + result = self._parts[0][1] + return result + + def _check_compatible(self, other): + if type(self) != type(other) or self.name != other.name: + raise TypeError('cannot compare %s and %s' % (self, other)) + + def __eq__(self, other): + self._check_compatible(other) + return self.key == other.key and self._parts == other._parts + + def __ne__(self, other): + return not self.__eq__(other) + + # See http://docs.python.org/reference/datamodel#object.__hash__ + def __hash__(self): + return hash(self.key) + hash(self._parts) + + def __repr__(self): + return "%s(%r)" % (self.__class__.__name__, self._string) + + def __str__(self): + return self._string + + +PEP440_VERSION_RE = re.compile(r'^v?(\d+!)?(\d+(\.\d+)*)((a|alpha|b|beta|c|rc|pre|preview)(\d+)?)?' + r'(\.(post|r|rev)(\d+)?)?([._-]?(dev)(\d+)?)?' 
+ r'(\+([a-zA-Z\d]+(\.[a-zA-Z\d]+)?))?$', re.I) + + +def _pep_440_key(s): + s = s.strip() + m = PEP440_VERSION_RE.match(s) + if not m: + raise UnsupportedVersionError('Not a valid version: %s' % s) + groups = m.groups() + nums = tuple(int(v) for v in groups[1].split('.')) + while len(nums) > 1 and nums[-1] == 0: + nums = nums[:-1] + + if not groups[0]: + epoch = 0 + else: + epoch = int(groups[0][:-1]) + pre = groups[4:6] + post = groups[7:9] + dev = groups[10:12] + local = groups[13] + if pre == (None, None): + pre = () + else: + if pre[1] is None: + pre = pre[0], 0 + else: + pre = pre[0], int(pre[1]) + if post == (None, None): + post = () + else: + if post[1] is None: + post = post[0], 0 + else: + post = post[0], int(post[1]) + if dev == (None, None): + dev = () + else: + if dev[1] is None: + dev = dev[0], 0 + else: + dev = dev[0], int(dev[1]) + if local is None: + local = () + else: + parts = [] + for part in local.split('.'): + # to ensure that numeric compares as > lexicographic, avoid + # comparing them directly, but encode a tuple which ensures + # correct sorting + if part.isdigit(): + part = (1, int(part)) + else: + part = (0, part) + parts.append(part) + local = tuple(parts) + if not pre: + # either before pre-release, or final release and after + if not post and dev: + # before pre-release + pre = ('a', -1) # to sort before a0 + else: + pre = ('z',) # to sort after all pre-releases + # now look at the state of post and dev. + if not post: + post = ('_',) # sort before 'a' + if not dev: + dev = ('final',) + + #print('%s -> %s' % (s, m.groups())) + return epoch, nums, pre, post, dev, local + + +_normalized_key = _pep_440_key + + +class NormalizedVersion(Version): + """A rational version. + + Good: + 1.2 # equivalent to "1.2.0" + 1.2.0 + 1.2a1 + 1.2.3a2 + 1.2.3b1 + 1.2.3c1 + 1.2.3.4 + TODO: fill this out + + Bad: + 1 # minimum two numbers + 1.2a # release level must have a release serial + 1.2.3b + """ + def parse(self, s): + result = _normalized_key(s) + # _normalized_key loses trailing zeroes in the release + # clause, since that's needed to ensure that X.Y == X.Y.0 == X.Y.0.0 + # However, PEP 440 prefix matching needs it: for example, + # (~= 1.4.5.0) matches differently to (~= 1.4.5.0.0). + m = PEP440_VERSION_RE.match(s) # must succeed + groups = m.groups() + self._release_clause = tuple(int(v) for v in groups[1].split('.')) + return result + + PREREL_TAGS = set(['a', 'b', 'c', 'rc', 'dev']) + + @property + def is_prerelease(self): + return any(t[0] in self.PREREL_TAGS for t in self._parts if t) + + +def _match_prefix(x, y): + x = str(x) + y = str(y) + if x == y: + return True + if not x.startswith(y): + return False + n = len(y) + return x[n] == '.' + + +class NormalizedMatcher(Matcher): + version_class = NormalizedVersion + + # value is either a callable or the name of a method + _operators = { + '~=': '_match_compatible', + '<': '_match_lt', + '>': '_match_gt', + '<=': '_match_le', + '>=': '_match_ge', + '==': '_match_eq', + '===': '_match_arbitrary', + '!=': '_match_ne', + } + + def _adjust_local(self, version, constraint, prefix): + if prefix: + strip_local = '+' not in constraint and version._parts[-1] + else: + # both constraint and version are + # NormalizedVersion instances. + # If constraint does not have a local component, + # ensure the version doesn't, either. 
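# ---- editorial aside (illustrative sketch, not part of the patch) ----
# The public surface of the classes above, assuming distlib is importable:
from distlib.version import (NormalizedVersion, NormalizedMatcher,
                             UnsupportedVersionError)

assert NormalizedVersion('1.2') == NormalizedVersion('1.2.0')   # trailing zeros dropped
assert NormalizedVersion('1.2.3a2').is_prerelease

m = NormalizedMatcher('requests (>=2.0, <3.0)')
assert m.match('2.31.0') and not m.match('3.0.0')

try:
    NormalizedVersion('not-a-version')
except UnsupportedVersionError:
    pass   # invalid strings are rejected with UnsupportedVersionError
# ----------------------------------------------------------------------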
+ strip_local = not constraint._parts[-1] and version._parts[-1] + if strip_local: + s = version._string.split('+', 1)[0] + version = self.version_class(s) + return version, constraint + + def _match_lt(self, version, constraint, prefix): + version, constraint = self._adjust_local(version, constraint, prefix) + if version >= constraint: + return False + release_clause = constraint._release_clause + pfx = '.'.join([str(i) for i in release_clause]) + return not _match_prefix(version, pfx) + + def _match_gt(self, version, constraint, prefix): + version, constraint = self._adjust_local(version, constraint, prefix) + if version <= constraint: + return False + release_clause = constraint._release_clause + pfx = '.'.join([str(i) for i in release_clause]) + return not _match_prefix(version, pfx) + + def _match_le(self, version, constraint, prefix): + version, constraint = self._adjust_local(version, constraint, prefix) + return version <= constraint + + def _match_ge(self, version, constraint, prefix): + version, constraint = self._adjust_local(version, constraint, prefix) + return version >= constraint + + def _match_eq(self, version, constraint, prefix): + version, constraint = self._adjust_local(version, constraint, prefix) + if not prefix: + result = (version == constraint) + else: + result = _match_prefix(version, constraint) + return result + + def _match_arbitrary(self, version, constraint, prefix): + return str(version) == str(constraint) + + def _match_ne(self, version, constraint, prefix): + version, constraint = self._adjust_local(version, constraint, prefix) + if not prefix: + result = (version != constraint) + else: + result = not _match_prefix(version, constraint) + return result + + def _match_compatible(self, version, constraint, prefix): + version, constraint = self._adjust_local(version, constraint, prefix) + if version == constraint: + return True + if version < constraint: + return False +# if not prefix: +# return True + release_clause = constraint._release_clause + if len(release_clause) > 1: + release_clause = release_clause[:-1] + pfx = '.'.join([str(i) for i in release_clause]) + return _match_prefix(version, pfx) + +_REPLACEMENTS = ( + (re.compile('[.+-]$'), ''), # remove trailing puncts + (re.compile(r'^[.](\d)'), r'0.\1'), # .N -> 0.N at start + (re.compile('^[.-]'), ''), # remove leading puncts + (re.compile(r'^\((.*)\)$'), r'\1'), # remove parentheses + (re.compile(r'^v(ersion)?\s*(\d+)'), r'\2'), # remove leading v(ersion) + (re.compile(r'^r(ev)?\s*(\d+)'), r'\2'), # remove leading v(ersion) + (re.compile('[.]{2,}'), '.'), # multiple runs of '.' + (re.compile(r'\b(alfa|apha)\b'), 'alpha'), # misspelt alpha + (re.compile(r'\b(pre-alpha|prealpha)\b'), + 'pre.alpha'), # standardise + (re.compile(r'\(beta\)$'), 'beta'), # remove parentheses +) + +_SUFFIX_REPLACEMENTS = ( + (re.compile('^[:~._+-]+'), ''), # remove leading puncts + (re.compile('[,*")([\\]]'), ''), # remove unwanted chars + (re.compile('[~:+_ -]'), '.'), # replace illegal chars + (re.compile('[.]{2,}'), '.'), # multiple runs of '.' + (re.compile(r'\.$'), ''), # trailing '.' +) + +_NUMERIC_PREFIX = re.compile(r'(\d+(\.\d+)*)') + + +def _suggest_semantic_version(s): + """ + Try to suggest a semantic form for a version for which + _suggest_normalized_version couldn't come up with anything. + """ + result = s.strip().lower() + for pat, repl in _REPLACEMENTS: + result = pat.sub(repl, result) + if not result: + result = '0.0.0' + + # Now look for numeric prefix, and separate it out from + # the rest. 
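# ---- editorial aside (illustrative sketch, not part of the patch) ----
# _suggest_semantic_version() (continued below) coerces irregular strings
# toward semver; given the substitutions above, the output here should
# plausibly be '2.0.0+alpha'.
from distlib.version import _suggest_semantic_version
print(_suggest_semantic_version('v2 alpha'))
# ----------------------------------------------------------------------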
+ #import pdb; pdb.set_trace() + m = _NUMERIC_PREFIX.match(result) + if not m: + prefix = '0.0.0' + suffix = result + else: + prefix = m.groups()[0].split('.') + prefix = [int(i) for i in prefix] + while len(prefix) < 3: + prefix.append(0) + if len(prefix) == 3: + suffix = result[m.end():] + else: + suffix = '.'.join([str(i) for i in prefix[3:]]) + result[m.end():] + prefix = prefix[:3] + prefix = '.'.join([str(i) for i in prefix]) + suffix = suffix.strip() + if suffix: + #import pdb; pdb.set_trace() + # massage the suffix. + for pat, repl in _SUFFIX_REPLACEMENTS: + suffix = pat.sub(repl, suffix) + + if not suffix: + result = prefix + else: + sep = '-' if 'dev' in suffix else '+' + result = prefix + sep + suffix + if not is_semver(result): + result = None + return result + + +def _suggest_normalized_version(s): + """Suggest a normalized version close to the given version string. + + If you have a version string that isn't rational (i.e. NormalizedVersion + doesn't like it) then you might be able to get an equivalent (or close) + rational version from this function. + + This does a number of simple normalizations to the given string, based + on observation of versions currently in use on PyPI. Given a dump of + those version during PyCon 2009, 4287 of them: + - 2312 (53.93%) match NormalizedVersion without change + with the automatic suggestion + - 3474 (81.04%) match when using this suggestion method + + @param s {str} An irrational version string. + @returns A rational version string, or None, if couldn't determine one. + """ + try: + _normalized_key(s) + return s # already rational + except UnsupportedVersionError: + pass + + rs = s.lower() + + # part of this could use maketrans + for orig, repl in (('-alpha', 'a'), ('-beta', 'b'), ('alpha', 'a'), + ('beta', 'b'), ('rc', 'c'), ('-final', ''), + ('-pre', 'c'), + ('-release', ''), ('.release', ''), ('-stable', ''), + ('+', '.'), ('_', '.'), (' ', ''), ('.final', ''), + ('final', '')): + rs = rs.replace(orig, repl) + + # if something ends with dev or pre, we add a 0 + rs = re.sub(r"pre$", r"pre0", rs) + rs = re.sub(r"dev$", r"dev0", rs) + + # if we have something like "b-2" or "a.2" at the end of the + # version, that is probably beta, alpha, etc + # let's remove the dash or dot + rs = re.sub(r"([abc]|rc)[\-\.](\d+)$", r"\1\2", rs) + + # 1.0-dev-r371 -> 1.0.dev371 + # 0.1-dev-r79 -> 0.1.dev79 + rs = re.sub(r"[\-\.](dev)[\-\.]?r?(\d+)$", r".\1\2", rs) + + # Clean: 2.0.a.3, 2.0.b1, 0.9.0~c1 + rs = re.sub(r"[.~]?([abc])\.?", r"\1", rs) + + # Clean: v0.3, v1.0 + if rs.startswith('v'): + rs = rs[1:] + + # Clean leading '0's on numbers. + #TODO: unintended side-effect on, e.g., "2003.05.09" + # PyPI stats: 77 (~2%) better + rs = re.sub(r"\b0+(\d+)(?!\d)", r"\1", rs) + + # Clean a/b/c with no version. E.g. "1.0a" -> "1.0a0". Setuptools infers + # zero. 
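# ---- editorial aside (illustrative sketch, not part of the patch) ----
# The normalization rules above in action; the expected outputs mirror
# the inline comments in the source.
from distlib.version import _suggest_normalized_version
print(_suggest_normalized_version('1.0-dev-r371'))   # -> '1.0.dev371'
print(_suggest_normalized_version('v0.3'))           # -> '0.3'
print(_suggest_normalized_version('1.0'))            # -> '1.0' (already rational)
# ----------------------------------------------------------------------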
+ # PyPI stats: 245 (7.56%) better + rs = re.sub(r"(\d+[abc])$", r"\g<1>0", rs) + + # the 'dev-rNNN' tag is a dev tag + rs = re.sub(r"\.?(dev-r|dev\.r)\.?(\d+)$", r".dev\2", rs) + + # clean the - when used as a pre delimiter + rs = re.sub(r"-(a|b|c)(\d+)$", r"\1\2", rs) + + # a terminal "dev" or "devel" can be changed into ".dev0" + rs = re.sub(r"[\.\-](dev|devel)$", r".dev0", rs) + + # a terminal "dev" can be changed into ".dev0" + rs = re.sub(r"(?![\.\-])dev$", r".dev0", rs) + + # a terminal "final" or "stable" can be removed + rs = re.sub(r"(final|stable)$", "", rs) + + # The 'r' and the '-' tags are post release tags + # 0.4a1.r10 -> 0.4a1.post10 + # 0.9.33-17222 -> 0.9.33.post17222 + # 0.9.33-r17222 -> 0.9.33.post17222 + rs = re.sub(r"\.?(r|-|-r)\.?(\d+)$", r".post\2", rs) + + # Clean 'r' instead of 'dev' usage: + # 0.9.33+r17222 -> 0.9.33.dev17222 + # 1.0dev123 -> 1.0.dev123 + # 1.0.git123 -> 1.0.dev123 + # 1.0.bzr123 -> 1.0.dev123 + # 0.1a0dev.123 -> 0.1a0.dev123 + # PyPI stats: ~150 (~4%) better + rs = re.sub(r"\.?(dev|git|bzr)\.?(\d+)$", r".dev\2", rs) + + # Clean '.pre' (normalized from '-pre' above) instead of 'c' usage: + # 0.2.pre1 -> 0.2c1 + # 0.2-c1 -> 0.2c1 + # 1.0preview123 -> 1.0c123 + # PyPI stats: ~21 (0.62%) better + rs = re.sub(r"\.?(pre|preview|-c)(\d+)$", r"c\g<2>", rs) + + # Tcl/Tk uses "px" for their post release markers + rs = re.sub(r"p(\d+)$", r".post\1", rs) + + try: + _normalized_key(rs) + except UnsupportedVersionError: + rs = None + return rs + +# +# Legacy version processing (distribute-compatible) +# + +_VERSION_PART = re.compile(r'([a-z]+|\d+|[\.-])', re.I) +_VERSION_REPLACE = { + 'pre': 'c', + 'preview': 'c', + '-': 'final-', + 'rc': 'c', + 'dev': '@', + '': None, + '.': None, +} + + +def _legacy_key(s): + def get_parts(s): + result = [] + for p in _VERSION_PART.split(s.lower()): + p = _VERSION_REPLACE.get(p, p) + if p: + if '0' <= p[:1] <= '9': + p = p.zfill(8) + else: + p = '*' + p + result.append(p) + result.append('*final') + return result + + result = [] + for p in get_parts(s): + if p.startswith('*'): + if p < '*final': + while result and result[-1] == '*final-': + result.pop() + while result and result[-1] == '00000000': + result.pop() + result.append(p) + return tuple(result) + + +class LegacyVersion(Version): + def parse(self, s): + return _legacy_key(s) + + @property + def is_prerelease(self): + result = False + for x in self._parts: + if (isinstance(x, string_types) and x.startswith('*') and + x < '*final'): + result = True + break + return result + + +class LegacyMatcher(Matcher): + version_class = LegacyVersion + + _operators = dict(Matcher._operators) + _operators['~='] = '_match_compatible' + + numeric_re = re.compile(r'^(\d+(\.\d+)*)') + + def _match_compatible(self, version, constraint, prefix): + if version < constraint: + return False + m = self.numeric_re.match(str(constraint)) + if not m: + logger.warning('Cannot compute compatible match for version %s ' + ' and constraint %s', version, constraint) + return True + s = m.groups()[0] + if '.' in s: + s = s.rsplit('.', 1)[0] + return _match_prefix(version, s) + +# +# Semantic versioning +# + +_SEMVER_RE = re.compile(r'^(\d+)\.(\d+)\.(\d+)' + r'(-[a-z0-9]+(\.[a-z0-9-]+)*)?' 
+ r'(\+[a-z0-9]+(\.[a-z0-9-]+)*)?$', re.I) + + +def is_semver(s): + return _SEMVER_RE.match(s) + + +def _semantic_key(s): + def make_tuple(s, absent): + if s is None: + result = (absent,) + else: + parts = s[1:].split('.') + # We can't compare ints and strings on Python 3, so fudge it + # by zero-filling numeric values so simulate a numeric comparison + result = tuple([p.zfill(8) if p.isdigit() else p for p in parts]) + return result + + m = is_semver(s) + if not m: + raise UnsupportedVersionError(s) + groups = m.groups() + major, minor, patch = [int(i) for i in groups[:3]] + # choose the '|' and '*' so that versions sort correctly + pre, build = make_tuple(groups[3], '|'), make_tuple(groups[5], '*') + return (major, minor, patch), pre, build + + +class SemanticVersion(Version): + def parse(self, s): + return _semantic_key(s) + + @property + def is_prerelease(self): + return self._parts[1][0] != '|' + + +class SemanticMatcher(Matcher): + version_class = SemanticVersion + + +class VersionScheme(object): + def __init__(self, key, matcher, suggester=None): + self.key = key + self.matcher = matcher + self.suggester = suggester + + def is_valid_version(self, s): + try: + self.matcher.version_class(s) + result = True + except UnsupportedVersionError: + result = False + return result + + def is_valid_matcher(self, s): + try: + self.matcher(s) + result = True + except UnsupportedVersionError: + result = False + return result + + def is_valid_constraint_list(self, s): + """ + Used for processing some metadata fields + """ + # See issue #140. Be tolerant of a single trailing comma. + if s.endswith(','): + s = s[:-1] + return self.is_valid_matcher('dummy_name (%s)' % s) + + def suggest(self, s): + if self.suggester is None: + result = None + else: + result = self.suggester(s) + return result + +_SCHEMES = { + 'normalized': VersionScheme(_normalized_key, NormalizedMatcher, + _suggest_normalized_version), + 'legacy': VersionScheme(_legacy_key, LegacyMatcher, lambda self, s: s), + 'semantic': VersionScheme(_semantic_key, SemanticMatcher, + _suggest_semantic_version), +} + +_SCHEMES['default'] = _SCHEMES['normalized'] + + +def get_scheme(name): + if name not in _SCHEMES: + raise ValueError('unknown scheme name: %r' % name) + return _SCHEMES[name] diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/distlib/w32.exe b/venv/lib/python3.10/site-packages/distlib/w32.exe similarity index 100% rename from venv/lib/python3.10/site-packages/pip/_vendor/distlib/w32.exe rename to venv/lib/python3.10/site-packages/distlib/w32.exe diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/distlib/w64-arm.exe b/venv/lib/python3.10/site-packages/distlib/w64-arm.exe similarity index 100% rename from venv/lib/python3.10/site-packages/pip/_vendor/distlib/w64-arm.exe rename to venv/lib/python3.10/site-packages/distlib/w64-arm.exe diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/distlib/w64.exe b/venv/lib/python3.10/site-packages/distlib/w64.exe similarity index 100% rename from venv/lib/python3.10/site-packages/pip/_vendor/distlib/w64.exe rename to venv/lib/python3.10/site-packages/distlib/w64.exe diff --git a/venv/lib/python3.10/site-packages/distlib/wheel.py b/venv/lib/python3.10/site-packages/distlib/wheel.py new file mode 100644 index 0000000..028c2d9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/distlib/wheel.py @@ -0,0 +1,1082 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2013-2020 Vinay Sajip. +# Licensed to the Python Software Foundation under a contributor agreement. 
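# ---- editorial aside (illustrative sketch, not part of the patch) ----
# version.py ends with a small scheme registry; get_scheme() ties keys,
# matcher classes and suggesters together ('default' aliases 'normalized').
from distlib.version import get_scheme
scheme = get_scheme('default')
assert scheme.is_valid_version('1.0.post1')
assert not scheme.is_valid_version('not a version')
print(scheme.suggest('v1.0 beta'))   # a PEP 440-ish suggestion, likely '1.0b0'
# ----------------------------------------------------------------------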
+# See LICENSE.txt and CONTRIBUTORS.txt.
+#
+from __future__ import unicode_literals
+
+import base64
+import codecs
+import datetime
+from email import message_from_file
+import hashlib
+import json
+import logging
+import os
+import posixpath
+import re
+import shutil
+import sys
+import tempfile
+import zipfile
+
+from . import __version__, DistlibException
+from .compat import sysconfig, ZipFile, fsdecode, text_type, filter
+from .database import InstalledDistribution
+from .metadata import (Metadata, METADATA_FILENAME, WHEEL_METADATA_FILENAME,
+                       LEGACY_METADATA_FILENAME)
+from .util import (FileOperator, convert_path, CSVReader, CSVWriter, Cache,
+                   cached_property, get_cache_base, read_exports, tempdir,
+                   get_platform)
+from .version import NormalizedVersion, UnsupportedVersionError
+
+logger = logging.getLogger(__name__)
+
+cache = None    # created when needed
+
+if hasattr(sys, 'pypy_version_info'):  # pragma: no cover
+    IMP_PREFIX = 'pp'
+elif sys.platform.startswith('java'):  # pragma: no cover
+    IMP_PREFIX = 'jy'
+elif sys.platform == 'cli':  # pragma: no cover
+    IMP_PREFIX = 'ip'
+else:
+    IMP_PREFIX = 'cp'
+
+VER_SUFFIX = sysconfig.get_config_var('py_version_nodot')
+if not VER_SUFFIX:   # pragma: no cover
+    VER_SUFFIX = '%s%s' % sys.version_info[:2]
+PYVER = 'py' + VER_SUFFIX
+IMPVER = IMP_PREFIX + VER_SUFFIX
+
+ARCH = get_platform().replace('-', '_').replace('.', '_')
+
+ABI = sysconfig.get_config_var('SOABI')
+if ABI and ABI.startswith('cpython-'):
+    ABI = ABI.replace('cpython-', 'cp').split('-')[0]
+else:
+    def _derive_abi():
+        parts = ['cp', VER_SUFFIX]
+        if sysconfig.get_config_var('Py_DEBUG'):
+            parts.append('d')
+        if IMP_PREFIX == 'cp':
+            vi = sys.version_info[:2]
+            if vi < (3, 8):
+                wpm = sysconfig.get_config_var('WITH_PYMALLOC')
+                if wpm is None:
+                    wpm = True
+                if wpm:
+                    parts.append('m')
+                if vi < (3, 3):
+                    us = sysconfig.get_config_var('Py_UNICODE_SIZE')
+                    if us == 4 or (us is None and sys.maxunicode == 0x10FFFF):
+                        parts.append('u')
+        return ''.join(parts)
+    ABI = _derive_abi()
+    del _derive_abi
+
+FILENAME_RE = re.compile(r'''
+(?P<nm>[^-]+)
+-(?P<vn>\d+[^-]*)
+(-(?P<bn>\d+[^-]*))?
+-(?P<py>\w+\d+(\.\w+\d+)*)
+-(?P<bi>\w+)
+-(?P<ar>\w+(\.\w+)*)
+\.whl$
+''', re.IGNORECASE | re.VERBOSE)
+
+NAME_VERSION_RE = re.compile(r'''
+(?P<nm>[^-]+)
+-(?P<vn>\d+[^-]*)
+(-(?P<bn>\d+[^-]*))?$
+''', re.IGNORECASE | re.VERBOSE)
+
+SHEBANG_RE = re.compile(br'\s*#![^\r\n]*')
+SHEBANG_DETAIL_RE = re.compile(br'^(\s*#!("[^"]+"|\S+))\s+(.*)$')
+SHEBANG_PYTHON = b'#!python'
+SHEBANG_PYTHONW = b'#!pythonw'
+
+if os.sep == '/':
+    to_posix = lambda o: o
+else:
+    to_posix = lambda o: o.replace(os.sep, '/')
+
+if sys.version_info[0] < 3:
+    import imp
+else:
+    imp = None
+    import importlib.machinery
+    import importlib.util
+
+def _get_suffixes():
+    if imp:
+        return [s[0] for s in imp.get_suffixes()]
+    else:
+        return importlib.machinery.EXTENSION_SUFFIXES
+
+def _load_dynamic(name, path):
+    # https://docs.python.org/3/library/importlib.html#importing-a-source-file-directly
+    if imp:
+        return imp.load_dynamic(name, path)
+    else:
+        spec = importlib.util.spec_from_file_location(name, path)
+        module = importlib.util.module_from_spec(spec)
+        sys.modules[name] = module
+        spec.loader.exec_module(module)
+        return module
+
+class Mounter(object):
+    def __init__(self):
+        self.impure_wheels = {}
+        self.libs = {}
+
+    def add(self, pathname, extensions):
+        self.impure_wheels[pathname] = extensions
+        self.libs.update(extensions)
+
+    def remove(self, pathname):
+        extensions = self.impure_wheels.pop(pathname)
+        for k, v in extensions:
+            if k in self.libs:
+                del self.libs[k]
+
+    def find_module(self, fullname, path=None):
+        if fullname in self.libs:
+            result = self
+        else:
+            result = None
+        return result
+
+    def load_module(self, fullname):
+        if fullname in sys.modules:
+            result = sys.modules[fullname]
+        else:
+            if fullname not in self.libs:
+                raise ImportError('unable to find extension for %s' % fullname)
+            result = _load_dynamic(fullname, self.libs[fullname])
+            result.__loader__ = self
+            parts = fullname.rsplit('.', 1)
+            if len(parts) > 1:
+                result.__package__ = parts[0]
+        return result
+
+_hook = Mounter()
+
+
+class Wheel(object):
+    """
+    Class to build and install from Wheel files (PEP 427).
+    """
+
+    wheel_version = (1, 1)
+    hash_kind = 'sha256'
+
+    def __init__(self, filename=None, sign=False, verify=False):
+        """
+        Initialise an instance using a (valid) filename.
+        """
+        self.sign = sign
+        self.should_verify = verify
+        self.buildver = ''
+        self.pyver = [PYVER]
+        self.abi = ['none']
+        self.arch = ['any']
+        self.dirname = os.getcwd()
+        if filename is None:
+            self.name = 'dummy'
+            self.version = '0.1'
+            self._filename = self.filename
+        else:
+            m = NAME_VERSION_RE.match(filename)
+            if m:
+                info = m.groupdict('')
+                self.name = info['nm']
+                # Reinstate the local version separator
+                self.version = info['vn'].replace('_', '-')
+                self.buildver = info['bn']
+                self._filename = self.filename
+            else:
+                dirname, filename = os.path.split(filename)
+                m = FILENAME_RE.match(filename)
+                if not m:
+                    raise DistlibException('Invalid name or '
+                                           'filename: %r' % filename)
+                if dirname:
+                    self.dirname = os.path.abspath(dirname)
+                self._filename = filename
+                info = m.groupdict('')
+                self.name = info['nm']
+                self.version = info['vn']
+                self.buildver = info['bn']
+                self.pyver = info['py'].split('.')
+                self.abi = info['bi'].split('.')
+                self.arch = info['ar'].split('.')
+
+    @property
+    def filename(self):
+        """
+        Build and return a filename from the various components.
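# ---- editorial aside (illustrative sketch, not part of the patch) ----
# How Wheel parses and rebuilds names (PEP 427); the default py tag
# depends on the running interpreter, so the first output is an example.
from distlib.wheel import Wheel
w = Wheel('demo-0.1')                     # name-version form, default tags
print(w.filename)                         # e.g. 'demo-0.1-py310-none-any.whl'
w2 = Wheel('demo-0.1-py3-none-any.whl')   # full filename form
print(w2.name, w2.version, w2.pyver)      # demo 0.1 ['py3']
# ----------------------------------------------------------------------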
+ """ + if self.buildver: + buildver = '-' + self.buildver + else: + buildver = '' + pyver = '.'.join(self.pyver) + abi = '.'.join(self.abi) + arch = '.'.join(self.arch) + # replace - with _ as a local version separator + version = self.version.replace('-', '_') + return '%s-%s%s-%s-%s-%s.whl' % (self.name, version, buildver, + pyver, abi, arch) + + @property + def exists(self): + path = os.path.join(self.dirname, self.filename) + return os.path.isfile(path) + + @property + def tags(self): + for pyver in self.pyver: + for abi in self.abi: + for arch in self.arch: + yield pyver, abi, arch + + @cached_property + def metadata(self): + pathname = os.path.join(self.dirname, self.filename) + name_ver = '%s-%s' % (self.name, self.version) + info_dir = '%s.dist-info' % name_ver + wrapper = codecs.getreader('utf-8') + with ZipFile(pathname, 'r') as zf: + wheel_metadata = self.get_wheel_metadata(zf) + wv = wheel_metadata['Wheel-Version'].split('.', 1) + file_version = tuple([int(i) for i in wv]) + # if file_version < (1, 1): + # fns = [WHEEL_METADATA_FILENAME, METADATA_FILENAME, + # LEGACY_METADATA_FILENAME] + # else: + # fns = [WHEEL_METADATA_FILENAME, METADATA_FILENAME] + fns = [WHEEL_METADATA_FILENAME, LEGACY_METADATA_FILENAME] + result = None + for fn in fns: + try: + metadata_filename = posixpath.join(info_dir, fn) + with zf.open(metadata_filename) as bf: + wf = wrapper(bf) + result = Metadata(fileobj=wf) + if result: + break + except KeyError: + pass + if not result: + raise ValueError('Invalid wheel, because metadata is ' + 'missing: looked in %s' % ', '.join(fns)) + return result + + def get_wheel_metadata(self, zf): + name_ver = '%s-%s' % (self.name, self.version) + info_dir = '%s.dist-info' % name_ver + metadata_filename = posixpath.join(info_dir, 'WHEEL') + with zf.open(metadata_filename) as bf: + wf = codecs.getreader('utf-8')(bf) + message = message_from_file(wf) + return dict(message) + + @cached_property + def info(self): + pathname = os.path.join(self.dirname, self.filename) + with ZipFile(pathname, 'r') as zf: + result = self.get_wheel_metadata(zf) + return result + + def process_shebang(self, data): + m = SHEBANG_RE.match(data) + if m: + end = m.end() + shebang, data_after_shebang = data[:end], data[end:] + # Preserve any arguments after the interpreter + if b'pythonw' in shebang.lower(): + shebang_python = SHEBANG_PYTHONW + else: + shebang_python = SHEBANG_PYTHON + m = SHEBANG_DETAIL_RE.match(shebang) + if m: + args = b' ' + m.groups()[-1] + else: + args = b'' + shebang = shebang_python + args + data = shebang + data_after_shebang + else: + cr = data.find(b'\r') + lf = data.find(b'\n') + if cr < 0 or cr > lf: + term = b'\n' + else: + if data[cr:cr + 2] == b'\r\n': + term = b'\r\n' + else: + term = b'\r' + data = SHEBANG_PYTHON + term + data + return data + + def get_hash(self, data, hash_kind=None): + if hash_kind is None: + hash_kind = self.hash_kind + try: + hasher = getattr(hashlib, hash_kind) + except AttributeError: + raise DistlibException('Unsupported hash algorithm: %r' % hash_kind) + result = hasher(data).digest() + result = base64.urlsafe_b64encode(result).rstrip(b'=').decode('ascii') + return hash_kind, result + + def write_record(self, records, record_path, archive_record_path): + records = list(records) # make a copy, as mutated + records.append((archive_record_path, '', '')) + with CSVWriter(record_path) as writer: + for row in records: + writer.writerow(row) + + def write_records(self, info, libdir, archive_paths): + records = [] + distinfo, info_dir = info + hasher 
= getattr(hashlib, self.hash_kind) + for ap, p in archive_paths: + with open(p, 'rb') as f: + data = f.read() + digest = '%s=%s' % self.get_hash(data) + size = os.path.getsize(p) + records.append((ap, digest, size)) + + p = os.path.join(distinfo, 'RECORD') + ap = to_posix(os.path.join(info_dir, 'RECORD')) + self.write_record(records, p, ap) + archive_paths.append((ap, p)) + + def build_zip(self, pathname, archive_paths): + with ZipFile(pathname, 'w', zipfile.ZIP_DEFLATED) as zf: + for ap, p in archive_paths: + logger.debug('Wrote %s to %s in wheel', p, ap) + zf.write(p, ap) + + def build(self, paths, tags=None, wheel_version=None): + """ + Build a wheel from files in specified paths, and use any specified tags + when determining the name of the wheel. + """ + if tags is None: + tags = {} + + libkey = list(filter(lambda o: o in paths, ('purelib', 'platlib')))[0] + if libkey == 'platlib': + is_pure = 'false' + default_pyver = [IMPVER] + default_abi = [ABI] + default_arch = [ARCH] + else: + is_pure = 'true' + default_pyver = [PYVER] + default_abi = ['none'] + default_arch = ['any'] + + self.pyver = tags.get('pyver', default_pyver) + self.abi = tags.get('abi', default_abi) + self.arch = tags.get('arch', default_arch) + + libdir = paths[libkey] + + name_ver = '%s-%s' % (self.name, self.version) + data_dir = '%s.data' % name_ver + info_dir = '%s.dist-info' % name_ver + + archive_paths = [] + + # First, stuff which is not in site-packages + for key in ('data', 'headers', 'scripts'): + if key not in paths: + continue + path = paths[key] + if os.path.isdir(path): + for root, dirs, files in os.walk(path): + for fn in files: + p = fsdecode(os.path.join(root, fn)) + rp = os.path.relpath(p, path) + ap = to_posix(os.path.join(data_dir, key, rp)) + archive_paths.append((ap, p)) + if key == 'scripts' and not p.endswith('.exe'): + with open(p, 'rb') as f: + data = f.read() + data = self.process_shebang(data) + with open(p, 'wb') as f: + f.write(data) + + # Now, stuff which is in site-packages, other than the + # distinfo stuff. + path = libdir + distinfo = None + for root, dirs, files in os.walk(path): + if root == path: + # At the top level only, save distinfo for later + # and skip it for now + for i, dn in enumerate(dirs): + dn = fsdecode(dn) + if dn.endswith('.dist-info'): + distinfo = os.path.join(root, dn) + del dirs[i] + break + assert distinfo, '.dist-info directory expected, not found' + + for fn in files: + # comment out next suite to leave .pyc files in + if fsdecode(fn).endswith(('.pyc', '.pyo')): + continue + p = os.path.join(root, fn) + rp = to_posix(os.path.relpath(p, path)) + archive_paths.append((rp, p)) + + # Now distinfo. Assumed to be flat, i.e. os.listdir is enough. + files = os.listdir(distinfo) + for fn in files: + if fn not in ('RECORD', 'INSTALLER', 'SHARED', 'WHEEL'): + p = fsdecode(os.path.join(distinfo, fn)) + ap = to_posix(os.path.join(info_dir, fn)) + archive_paths.append((ap, p)) + + wheel_metadata = [ + 'Wheel-Version: %d.%d' % (wheel_version or self.wheel_version), + 'Generator: distlib %s' % __version__, + 'Root-Is-Purelib: %s' % is_pure, + ] + for pyver, abi, arch in self.tags: + wheel_metadata.append('Tag: %s-%s-%s' % (pyver, abi, arch)) + p = os.path.join(distinfo, 'WHEEL') + with open(p, 'w') as f: + f.write('\n'.join(wheel_metadata)) + ap = to_posix(os.path.join(info_dir, 'WHEEL')) + archive_paths.append((ap, p)) + + # sort the entries by archive path. Not needed by any spec, but it + # keeps the archive listing and RECORD tidier than they would otherwise + # be. 
Use the number of path segments to keep directory entries together,
+        # and keep the dist-info stuff at the end.
+        def sorter(t):
+            ap = t[0]
+            n = ap.count('/')
+            if '.dist-info' in ap:
+                n += 10000
+            return (n, ap)
+        archive_paths = sorted(archive_paths, key=sorter)
+
+        # Now, at last, RECORD.
+        # Paths in here are archive paths - nothing else makes sense.
+        self.write_records((distinfo, info_dir), libdir, archive_paths)
+        # Now, ready to build the zip file
+        pathname = os.path.join(self.dirname, self.filename)
+        self.build_zip(pathname, archive_paths)
+        return pathname
+
+    def skip_entry(self, arcname):
+        """
+        Determine whether an archive entry should be skipped when verifying
+        or installing.
+        """
+        # The signature file won't be in RECORD,
+        # and we don't currently do anything with it
+        # We also skip directories, as they won't be in RECORD
+        # either. See:
+        #
+        # https://github.com/pypa/wheel/issues/294
+        # https://github.com/pypa/wheel/issues/287
+        # https://github.com/pypa/wheel/pull/289
+        #
+        return arcname.endswith(('/', '/RECORD.jws'))
+
+    def install(self, paths, maker, **kwargs):
+        """
+        Install a wheel to the specified paths. If kwarg ``warner`` is
+        specified, it should be a callable, which will be called with two
+        tuples indicating the wheel version of this software and the wheel
+        version in the file, if there is a discrepancy in the versions.
+        This can be used to issue any warnings or to raise any exceptions.
+        If kwarg ``lib_only`` is True, only the purelib/platlib files are
+        installed, and the headers, scripts, data and dist-info metadata are
+        not written. If kwarg ``bytecode_hashed_invalidation`` is True, written
+        bytecode will try to use file-hash based invalidation (PEP-552) on
+        supported interpreter versions (CPython 2.7+).
+
+        The return value is a :class:`InstalledDistribution` instance unless
+        ``lib_only`` is True, in which case the return value is ``None``.
+        """
+
+        dry_run = maker.dry_run
+        warner = kwargs.get('warner')
+        lib_only = kwargs.get('lib_only', False)
+        bc_hashed_invalidation = kwargs.get('bytecode_hashed_invalidation', False)
+
+        pathname = os.path.join(self.dirname, self.filename)
+        name_ver = '%s-%s' % (self.name, self.version)
+        data_dir = '%s.data' % name_ver
+        info_dir = '%s.dist-info' % name_ver
+
+        metadata_name = posixpath.join(info_dir, LEGACY_METADATA_FILENAME)
+        wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
+        record_name = posixpath.join(info_dir, 'RECORD')
+
+        wrapper = codecs.getreader('utf-8')
+
+        with ZipFile(pathname, 'r') as zf:
+            with zf.open(wheel_metadata_name) as bwf:
+                wf = wrapper(bwf)
+                message = message_from_file(wf)
+            wv = message['Wheel-Version'].split('.', 1)
+            file_version = tuple([int(i) for i in wv])
+            if (file_version != self.wheel_version) and warner:
+                warner(self.wheel_version, file_version)
+
+            if message['Root-Is-Purelib'] == 'true':
+                libdir = paths['purelib']
+            else:
+                libdir = paths['platlib']
+
+            records = {}
+            with zf.open(record_name) as bf:
+                with CSVReader(stream=bf) as reader:
+                    for row in reader:
+                        p = row[0]
+                        records[p] = row
+
+            data_pfx = posixpath.join(data_dir, '')
+            info_pfx = posixpath.join(info_dir, '')
+            script_pfx = posixpath.join(data_dir, 'scripts', '')
+
+            # make a new instance rather than a copy of maker's,
+            # as we mutate it
+            fileop = FileOperator(dry_run=dry_run)
+            fileop.record = True    # so we can rollback if needed
+
+            bc = not sys.dont_write_bytecode    # Double negatives. Lovely!
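# ---- editorial aside (illustrative sketch, not part of the patch) ----
# The documented way to drive install() (cf. the distlib docs); the wheel
# path and the /tmp/demo prefix are hypothetical.
from distlib.scripts import ScriptMaker
from distlib.wheel import Wheel
paths = {k: '/tmp/demo/' + k for k in
         ('prefix', 'purelib', 'platlib', 'scripts', 'headers', 'data')}
maker = ScriptMaker(None, None)   # source/target dirs are set by install()
dist = Wheel('demo-0.1-py3-none-any.whl').install(paths, maker)
# ----------------------------------------------------------------------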
+ + outfiles = [] # for RECORD writing + + # for script copying/shebang processing + workdir = tempfile.mkdtemp() + # set target dir later + # we default add_launchers to False, as the + # Python Launcher should be used instead + maker.source_dir = workdir + maker.target_dir = None + try: + for zinfo in zf.infolist(): + arcname = zinfo.filename + if isinstance(arcname, text_type): + u_arcname = arcname + else: + u_arcname = arcname.decode('utf-8') + if self.skip_entry(u_arcname): + continue + row = records[u_arcname] + if row[2] and str(zinfo.file_size) != row[2]: + raise DistlibException('size mismatch for ' + '%s' % u_arcname) + if row[1]: + kind, value = row[1].split('=', 1) + with zf.open(arcname) as bf: + data = bf.read() + _, digest = self.get_hash(data, kind) + if digest != value: + raise DistlibException('digest mismatch for ' + '%s' % arcname) + + if lib_only and u_arcname.startswith((info_pfx, data_pfx)): + logger.debug('lib_only: skipping %s', u_arcname) + continue + is_script = (u_arcname.startswith(script_pfx) + and not u_arcname.endswith('.exe')) + + if u_arcname.startswith(data_pfx): + _, where, rp = u_arcname.split('/', 2) + outfile = os.path.join(paths[where], convert_path(rp)) + else: + # meant for site-packages. + if u_arcname in (wheel_metadata_name, record_name): + continue + outfile = os.path.join(libdir, convert_path(u_arcname)) + if not is_script: + with zf.open(arcname) as bf: + fileop.copy_stream(bf, outfile) + # Issue #147: permission bits aren't preserved. Using + # zf.extract(zinfo, libdir) should have worked, but didn't, + # see https://www.thetopsites.net/article/53834422.shtml + # So ... manually preserve permission bits as given in zinfo + if os.name == 'posix': + # just set the normal permission bits + os.chmod(outfile, (zinfo.external_attr >> 16) & 0x1FF) + outfiles.append(outfile) + # Double check the digest of the written file + if not dry_run and row[1]: + with open(outfile, 'rb') as bf: + data = bf.read() + _, newdigest = self.get_hash(data, kind) + if newdigest != digest: + raise DistlibException('digest mismatch ' + 'on write for ' + '%s' % outfile) + if bc and outfile.endswith('.py'): + try: + pyc = fileop.byte_compile(outfile, + hashed_invalidation=bc_hashed_invalidation) + outfiles.append(pyc) + except Exception: + # Don't give up if byte-compilation fails, + # but log it and perhaps warn the user + logger.warning('Byte-compilation failed', + exc_info=True) + else: + fn = os.path.basename(convert_path(arcname)) + workname = os.path.join(workdir, fn) + with zf.open(arcname) as bf: + fileop.copy_stream(bf, workname) + + dn, fn = os.path.split(outfile) + maker.target_dir = dn + filenames = maker.make(fn) + fileop.set_executable_mode(filenames) + outfiles.extend(filenames) + + if lib_only: + logger.debug('lib_only: returning None') + dist = None + else: + # Generate scripts + + # Try to get pydist.json so we can see if there are + # any commands to generate. If this fails (e.g. because + # of a legacy wheel), log a warning but don't give up. 
+ commands = None + file_version = self.info['Wheel-Version'] + if file_version == '1.0': + # Use legacy info + ep = posixpath.join(info_dir, 'entry_points.txt') + try: + with zf.open(ep) as bwf: + epdata = read_exports(bwf) + commands = {} + for key in ('console', 'gui'): + k = '%s_scripts' % key + if k in epdata: + commands['wrap_%s' % key] = d = {} + for v in epdata[k].values(): + s = '%s:%s' % (v.prefix, v.suffix) + if v.flags: + s += ' [%s]' % ','.join(v.flags) + d[v.name] = s + except Exception: + logger.warning('Unable to read legacy script ' + 'metadata, so cannot generate ' + 'scripts') + else: + try: + with zf.open(metadata_name) as bwf: + wf = wrapper(bwf) + commands = json.load(wf).get('extensions') + if commands: + commands = commands.get('python.commands') + except Exception: + logger.warning('Unable to read JSON metadata, so ' + 'cannot generate scripts') + if commands: + console_scripts = commands.get('wrap_console', {}) + gui_scripts = commands.get('wrap_gui', {}) + if console_scripts or gui_scripts: + script_dir = paths.get('scripts', '') + if not os.path.isdir(script_dir): + raise ValueError('Valid script path not ' + 'specified') + maker.target_dir = script_dir + for k, v in console_scripts.items(): + script = '%s = %s' % (k, v) + filenames = maker.make(script) + fileop.set_executable_mode(filenames) + + if gui_scripts: + options = {'gui': True } + for k, v in gui_scripts.items(): + script = '%s = %s' % (k, v) + filenames = maker.make(script, options) + fileop.set_executable_mode(filenames) + + p = os.path.join(libdir, info_dir) + dist = InstalledDistribution(p) + + # Write SHARED + paths = dict(paths) # don't change passed in dict + del paths['purelib'] + del paths['platlib'] + paths['lib'] = libdir + p = dist.write_shared_locations(paths, dry_run) + if p: + outfiles.append(p) + + # Write RECORD + dist.write_installed_files(outfiles, paths['prefix'], + dry_run) + return dist + except Exception: # pragma: no cover + logger.exception('installation failed.') + fileop.rollback() + raise + finally: + shutil.rmtree(workdir) + + def _get_dylib_cache(self): + global cache + if cache is None: + # Use native string to avoid issues on 2.x: see Python #20140. + base = os.path.join(get_cache_base(), str('dylib-cache'), + '%s.%s' % sys.version_info[:2]) + cache = Cache(base) + return cache + + def _get_extensions(self): + pathname = os.path.join(self.dirname, self.filename) + name_ver = '%s-%s' % (self.name, self.version) + info_dir = '%s.dist-info' % name_ver + arcname = posixpath.join(info_dir, 'EXTENSIONS') + wrapper = codecs.getreader('utf-8') + result = [] + with ZipFile(pathname, 'r') as zf: + try: + with zf.open(arcname) as bf: + wf = wrapper(bf) + extensions = json.load(wf) + cache = self._get_dylib_cache() + prefix = cache.prefix_to_dir(pathname) + cache_base = os.path.join(cache.base, prefix) + if not os.path.isdir(cache_base): + os.makedirs(cache_base) + for name, relpath in extensions.items(): + dest = os.path.join(cache_base, convert_path(relpath)) + if not os.path.exists(dest): + extract = True + else: + file_time = os.stat(dest).st_mtime + file_time = datetime.datetime.fromtimestamp(file_time) + info = zf.getinfo(relpath) + wheel_time = datetime.datetime(*info.date_time) + extract = wheel_time > file_time + if extract: + zf.extract(relpath, cache_base) + result.append((name, dest)) + except KeyError: + pass + return result + + def is_compatible(self): + """ + Determine if a wheel is compatible with the running system. 
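# ---- editorial aside (illustrative sketch, not part of the patch) ----
# is_compatible() guards mount() (defined just below), which makes a
# wheel importable without installing it; the filename is hypothetical.
from distlib.wheel import Wheel
w = Wheel('demo-0.1-py3-none-any.whl')
if w.is_compatible():
    w.mount()      # wheel goes onto sys.path; extensions get an import hook
    # ... import and use the packaged modules ...
    w.unmount()
# ----------------------------------------------------------------------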
+ """ + return is_compatible(self) + + def is_mountable(self): + """ + Determine if a wheel is asserted as mountable by its metadata. + """ + return True # for now - metadata details TBD + + def mount(self, append=False): + pathname = os.path.abspath(os.path.join(self.dirname, self.filename)) + if not self.is_compatible(): + msg = 'Wheel %s not compatible with this Python.' % pathname + raise DistlibException(msg) + if not self.is_mountable(): + msg = 'Wheel %s is marked as not mountable.' % pathname + raise DistlibException(msg) + if pathname in sys.path: + logger.debug('%s already in path', pathname) + else: + if append: + sys.path.append(pathname) + else: + sys.path.insert(0, pathname) + extensions = self._get_extensions() + if extensions: + if _hook not in sys.meta_path: + sys.meta_path.append(_hook) + _hook.add(pathname, extensions) + + def unmount(self): + pathname = os.path.abspath(os.path.join(self.dirname, self.filename)) + if pathname not in sys.path: + logger.debug('%s not in path', pathname) + else: + sys.path.remove(pathname) + if pathname in _hook.impure_wheels: + _hook.remove(pathname) + if not _hook.impure_wheels: + if _hook in sys.meta_path: + sys.meta_path.remove(_hook) + + def verify(self): + pathname = os.path.join(self.dirname, self.filename) + name_ver = '%s-%s' % (self.name, self.version) + data_dir = '%s.data' % name_ver + info_dir = '%s.dist-info' % name_ver + + metadata_name = posixpath.join(info_dir, LEGACY_METADATA_FILENAME) + wheel_metadata_name = posixpath.join(info_dir, 'WHEEL') + record_name = posixpath.join(info_dir, 'RECORD') + + wrapper = codecs.getreader('utf-8') + + with ZipFile(pathname, 'r') as zf: + with zf.open(wheel_metadata_name) as bwf: + wf = wrapper(bwf) + message = message_from_file(wf) + wv = message['Wheel-Version'].split('.', 1) + file_version = tuple([int(i) for i in wv]) + # TODO version verification + + records = {} + with zf.open(record_name) as bf: + with CSVReader(stream=bf) as reader: + for row in reader: + p = row[0] + records[p] = row + + for zinfo in zf.infolist(): + arcname = zinfo.filename + if isinstance(arcname, text_type): + u_arcname = arcname + else: + u_arcname = arcname.decode('utf-8') + # See issue #115: some wheels have .. in their entries, but + # in the filename ... e.g. __main__..py ! So the check is + # updated to look for .. in the directory portions + p = u_arcname.split('/') + if '..' in p: + raise DistlibException('invalid entry in ' + 'wheel: %r' % u_arcname) + + if self.skip_entry(u_arcname): + continue + row = records[u_arcname] + if row[2] and str(zinfo.file_size) != row[2]: + raise DistlibException('size mismatch for ' + '%s' % u_arcname) + if row[1]: + kind, value = row[1].split('=', 1) + with zf.open(arcname) as bf: + data = bf.read() + _, digest = self.get_hash(data, kind) + if digest != value: + raise DistlibException('digest mismatch for ' + '%s' % arcname) + + def update(self, modifier, dest_dir=None, **kwargs): + """ + Update the contents of a wheel in a generic way. The modifier should + be a callable which expects a dictionary argument: its keys are + archive-entry paths, and its values are absolute filesystem paths + where the contents the corresponding archive entries can be found. The + modifier is free to change the contents of the files pointed to, add + new entries and remove entries, before returning. This method will + extract the entire contents of the wheel to a temporary location, call + the modifier, and then use the passed (and possibly updated) + dictionary to write a new wheel. 
If ``dest_dir`` is specified, the new + wheel is written there -- otherwise, the original wheel is overwritten. + + The modifier should return True if it updated the wheel, else False. + This method returns the same value the modifier returns. + """ + + def get_version(path_map, info_dir): + version = path = None + key = '%s/%s' % (info_dir, LEGACY_METADATA_FILENAME) + if key not in path_map: + key = '%s/PKG-INFO' % info_dir + if key in path_map: + path = path_map[key] + version = Metadata(path=path).version + return version, path + + def update_version(version, path): + updated = None + try: + v = NormalizedVersion(version) + i = version.find('-') + if i < 0: + updated = '%s+1' % version + else: + parts = [int(s) for s in version[i + 1:].split('.')] + parts[-1] += 1 + updated = '%s+%s' % (version[:i], + '.'.join(str(i) for i in parts)) + except UnsupportedVersionError: + logger.debug('Cannot update non-compliant (PEP-440) ' + 'version %r', version) + if updated: + md = Metadata(path=path) + md.version = updated + legacy = path.endswith(LEGACY_METADATA_FILENAME) + md.write(path=path, legacy=legacy) + logger.debug('Version updated from %r to %r', version, + updated) + + pathname = os.path.join(self.dirname, self.filename) + name_ver = '%s-%s' % (self.name, self.version) + info_dir = '%s.dist-info' % name_ver + record_name = posixpath.join(info_dir, 'RECORD') + with tempdir() as workdir: + with ZipFile(pathname, 'r') as zf: + path_map = {} + for zinfo in zf.infolist(): + arcname = zinfo.filename + if isinstance(arcname, text_type): + u_arcname = arcname + else: + u_arcname = arcname.decode('utf-8') + if u_arcname == record_name: + continue + if '..' in u_arcname: + raise DistlibException('invalid entry in ' + 'wheel: %r' % u_arcname) + zf.extract(zinfo, workdir) + path = os.path.join(workdir, convert_path(u_arcname)) + path_map[u_arcname] = path + + # Remember the version. + original_version, _ = get_version(path_map, info_dir) + # Files extracted. Call the modifier. + modified = modifier(path_map, **kwargs) + if modified: + # Something changed - need to build a new wheel. + current_version, path = get_version(path_map, info_dir) + if current_version and (current_version == original_version): + # Add or update local version to signify changes. + update_version(current_version, path) + # Decide where the new wheel goes. + if dest_dir is None: + fd, newpath = tempfile.mkstemp(suffix='.whl', + prefix='wheel-update-', + dir=workdir) + os.close(fd) + else: + if not os.path.isdir(dest_dir): + raise DistlibException('Not a directory: %r' % dest_dir) + newpath = os.path.join(dest_dir, self.filename) + archive_paths = list(path_map.items()) + distinfo = os.path.join(workdir, info_dir) + info = distinfo, info_dir + self.write_records(info, workdir, archive_paths) + self.build_zip(newpath, archive_paths) + if dest_dir is None: + shutil.copyfile(newpath, pathname) + return modified + +def _get_glibc_version(): + import platform + ver = platform.libc_ver() + result = [] + if ver[0] == 'glibc': + for s in ver[1].split('.'): + result.append(int(s) if s.isdigit() else 0) + result = tuple(result) + return result + +def compatible_tags(): + """ + Return (pyver, abi, arch) tuples compatible with this Python. 
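# ---- editorial aside (illustrative sketch, not part of the patch) ----
# COMPATIBLE_TAGS (built below) is what the module-level is_compatible()
# checks wheel filenames against; the results shown assume CPython 3.10.
from distlib.wheel import is_compatible
print(is_compatible('demo-0.1-py3-none-any.whl'))              # True
print(is_compatible('demo-0.1-cp27-cp27mu-linux_x86_64.whl'))  # False
# ----------------------------------------------------------------------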
+ """ + versions = [VER_SUFFIX] + major = VER_SUFFIX[0] + for minor in range(sys.version_info[1] - 1, - 1, -1): + versions.append(''.join([major, str(minor)])) + + abis = [] + for suffix in _get_suffixes(): + if suffix.startswith('.abi'): + abis.append(suffix.split('.', 2)[1]) + abis.sort() + if ABI != 'none': + abis.insert(0, ABI) + abis.append('none') + result = [] + + arches = [ARCH] + if sys.platform == 'darwin': + m = re.match(r'(\w+)_(\d+)_(\d+)_(\w+)$', ARCH) + if m: + name, major, minor, arch = m.groups() + minor = int(minor) + matches = [arch] + if arch in ('i386', 'ppc'): + matches.append('fat') + if arch in ('i386', 'ppc', 'x86_64'): + matches.append('fat3') + if arch in ('ppc64', 'x86_64'): + matches.append('fat64') + if arch in ('i386', 'x86_64'): + matches.append('intel') + if arch in ('i386', 'x86_64', 'intel', 'ppc', 'ppc64'): + matches.append('universal') + while minor >= 0: + for match in matches: + s = '%s_%s_%s_%s' % (name, major, minor, match) + if s != ARCH: # already there + arches.append(s) + minor -= 1 + + # Most specific - our Python version, ABI and arch + for abi in abis: + for arch in arches: + result.append((''.join((IMP_PREFIX, versions[0])), abi, arch)) + # manylinux + if abi != 'none' and sys.platform.startswith('linux'): + arch = arch.replace('linux_', '') + parts = _get_glibc_version() + if len(parts) == 2: + if parts >= (2, 5): + result.append((''.join((IMP_PREFIX, versions[0])), abi, + 'manylinux1_%s' % arch)) + if parts >= (2, 12): + result.append((''.join((IMP_PREFIX, versions[0])), abi, + 'manylinux2010_%s' % arch)) + if parts >= (2, 17): + result.append((''.join((IMP_PREFIX, versions[0])), abi, + 'manylinux2014_%s' % arch)) + result.append((''.join((IMP_PREFIX, versions[0])), abi, + 'manylinux_%s_%s_%s' % (parts[0], parts[1], + arch))) + + # where no ABI / arch dependency, but IMP_PREFIX dependency + for i, version in enumerate(versions): + result.append((''.join((IMP_PREFIX, version)), 'none', 'any')) + if i == 0: + result.append((''.join((IMP_PREFIX, version[0])), 'none', 'any')) + + # no IMP_PREFIX, ABI or arch dependency + for i, version in enumerate(versions): + result.append((''.join(('py', version)), 'none', 'any')) + if i == 0: + result.append((''.join(('py', version[0])), 'none', 'any')) + + return set(result) + + +COMPATIBLE_TAGS = compatible_tags() + +del compatible_tags + + +def is_compatible(wheel, tags=None): + if not isinstance(wheel, Wheel): + wheel = Wheel(wheel) # assume it's a filename + result = False + if tags is None: + tags = COMPATIBLE_TAGS + for ver, abi, arch in tags: + if ver in wheel.pyver and abi in wheel.abi and arch in wheel.arch: + result = True + break + return result diff --git a/venv/lib/python3.10/site-packages/distutils-precedence.pth b/venv/lib/python3.10/site-packages/distutils-precedence.pth index 7f009fe..6de4198 100644 --- a/venv/lib/python3.10/site-packages/distutils-precedence.pth +++ b/venv/lib/python3.10/site-packages/distutils-precedence.pth @@ -1 +1 @@ -import os; var = 'SETUPTOOLS_USE_DISTUTILS'; enabled = os.environ.get(var, 'local') == 'local'; enabled and __import__('_distutils_hack').add_shim(); +import os; var = 'SETUPTOOLS_USE_DISTUTILS'; enabled = os.environ.get(var, 'stdlib') == 'local'; enabled and __import__('_distutils_hack').add_shim(); diff --git a/venv/lib/python3.10/site-packages/pip-22.3.1.dist-info/top_level.txt b/venv/lib/python3.10/site-packages/execnet-2.0.2.dist-info/INSTALLER similarity index 100% rename from venv/lib/python3.10/site-packages/pip-22.3.1.dist-info/top_level.txt 
rename to venv/lib/python3.10/site-packages/execnet-2.0.2.dist-info/INSTALLER diff --git a/venv/lib/python3.10/site-packages/execnet-2.0.2.dist-info/METADATA b/venv/lib/python3.10/site-packages/execnet-2.0.2.dist-info/METADATA new file mode 100644 index 0000000..be274ee --- /dev/null +++ b/venv/lib/python3.10/site-packages/execnet-2.0.2.dist-info/METADATA @@ -0,0 +1,79 @@ +Metadata-Version: 2.1 +Name: execnet +Version: 2.0.2 +Summary: execnet: rapid multi-Python deployment +Project-URL: Homepage, https://execnet.readthedocs.io/en/latest/ +Author: holger krekel and others +License-Expression: MIT +License-File: LICENSE +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: MacOS :: MacOS X +Classifier: Operating System :: Microsoft :: Windows +Classifier: Operating System :: POSIX +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Software Development :: Libraries +Classifier: Topic :: System :: Distributed Computing +Classifier: Topic :: System :: Networking +Requires-Python: >=3.7 +Provides-Extra: testing +Requires-Dist: hatch; extra == 'testing' +Requires-Dist: pre-commit; extra == 'testing' +Requires-Dist: pytest; extra == 'testing' +Requires-Dist: tox; extra == 'testing' +Description-Content-Type: text/x-rst + +execnet: distributed Python deployment and communication +======================================================== + +Important +--------- + +.. image:: https://img.shields.io/pypi/v/execnet.svg + :target: https://pypi.org/project/execnet/ + +.. image:: https://anaconda.org/conda-forge/execnet/badges/version.svg + :target: https://anaconda.org/conda-forge/execnet + +.. image:: https://img.shields.io/pypi/pyversions/execnet.svg + :target: https://pypi.org/project/execnet/ + +.. image:: https://github.com/pytest-dev/execnet/workflows/test/badge.svg + :target: https://github.com/pytest-dev/execnet/actions?query=workflow%3Atest + +.. image:: https://img.shields.io/badge/code%20style-black-000000.svg + :target: https://github.com/python/black + +.. _execnet: http://codespeak.net/execnet + +execnet_ provides carefully tested means to ad-hoc interact with Python +interpreters across version, platform and network barriers. It provides +a minimal and fast API targeting the following uses: + +* distribute tasks to local or remote processes +* write and deploy hybrid multi-process applications +* write scripts to administer multiple hosts + +Features +-------- + +* zero-install bootstrapping: no remote installation required! + +* flexible communication: send/receive as well as + callback/queue mechanisms supported + +* simple serialization of python builtin types (no pickling) + +* grouped creation and robust termination of processes + +* interoperable between Windows and Unix-ish systems. + +* integrates with different threading models, including standard + os threads, eventlet and gevent based systems. 
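# ---- editorial aside (illustrative sketch, not part of the patch) ----
# The canonical execnet hello-world from its documentation: run code in a
# fresh local Python subprocess and talk to it over a channel.
import execnet

gw = execnet.makegateway()    # local 'popen' gateway
ch = gw.remote_exec("channel.send(channel.receive() + 1)")
ch.send(41)
print(ch.receive())           # -> 42
gw.exit()
# ----------------------------------------------------------------------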
diff --git a/venv/lib/python3.10/site-packages/execnet-2.0.2.dist-info/RECORD b/venv/lib/python3.10/site-packages/execnet-2.0.2.dist-info/RECORD new file mode 100644 index 0000000..73360b2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/execnet-2.0.2.dist-info/RECORD @@ -0,0 +1,41 @@ +execnet-2.0.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +execnet-2.0.2.dist-info/METADATA,sha256=PYur7wZUvw8vi_xO1eaHG1AENGFP3b4GvGPdSF4-xPw,2923 +execnet-2.0.2.dist-info/RECORD,, +execnet-2.0.2.dist-info/WHEEL,sha256=9QBuHhg6FNW7lppboF2vKVbCGTVzsFykgRQjjlajrhA,87 +execnet-2.0.2.dist-info/licenses/LICENSE,sha256=6J7tEHTTqUMZi6E5uAhE9bRFuGC7p0qK6twGEFZhZOo,1054 +execnet/__init__.py,sha256=fK5acxR6_nWp1VEGiFmH9iOuTL9s6Cki4qMlOBbaUhY,958 +execnet/__pycache__/__init__.cpython-310.pyc,, +execnet/__pycache__/_version.cpython-310.pyc,, +execnet/__pycache__/gateway.cpython-310.pyc,, +execnet/__pycache__/gateway_base.cpython-310.pyc,, +execnet/__pycache__/gateway_bootstrap.cpython-310.pyc,, +execnet/__pycache__/gateway_io.cpython-310.pyc,, +execnet/__pycache__/gateway_socket.cpython-310.pyc,, +execnet/__pycache__/multi.cpython-310.pyc,, +execnet/__pycache__/rsync.cpython-310.pyc,, +execnet/__pycache__/rsync_remote.cpython-310.pyc,, +execnet/__pycache__/xspec.cpython-310.pyc,, +execnet/_version.py,sha256=-VCumP-Qccyvy1Pv2Xu0ibn3uXUGNwumtYyTQEiAOWI,160 +execnet/gateway.py,sha256=0cZ7FqS-H58XZzir5UVtmwyTPs_FxYeex7I5sxMsvkw,7115 +execnet/gateway_base.py,sha256=f50WnQuLbN-cOHGBQ7MqmB3e2iMyiP2lBXMu2v6sSSw,51114 +execnet/gateway_bootstrap.py,sha256=-E80SLYvDrwMUt4iyycjbXFUqbMcgrmL2o9iGTqljjs,2990 +execnet/gateway_io.py,sha256=PeDpwLbXYSQNGuiSh2Y68OoaAgfdcw4vZWU6Dn1bvUw,6836 +execnet/gateway_socket.py,sha256=5i7-8OShRwCjf7ymsCW0C_U0vtIcnw44JFriZbXS1fk,2501 +execnet/multi.py,sha256=e3Atqix0ICNWE4FOQU5y_ZNpw2_ifurKHJiyE8XMnC8,10291 +execnet/rsync.py,sha256=WpiYLL0X1URVzPfYcMoeNZNX-1F2dFlHKSZHEDayL54,7611 +execnet/rsync_remote.py,sha256=diItTUXYUCt_FHqYynqhv9ftWqqD3jy9JJcDyZIoL-c,3828 +execnet/script/__init__.py,sha256=MsSFjiLMLJZ7QhUPpVBWKiyDnCzryquRyr329NoCACI,2 +execnet/script/__pycache__/__init__.cpython-310.pyc,, +execnet/script/__pycache__/loop_socketserver.cpython-310.pyc,, +execnet/script/__pycache__/quitserver.cpython-310.pyc,, +execnet/script/__pycache__/shell.cpython-310.pyc,, +execnet/script/__pycache__/socketserver.cpython-310.pyc,, +execnet/script/__pycache__/socketserverservice.cpython-310.pyc,, +execnet/script/__pycache__/xx.cpython-310.pyc,, +execnet/script/loop_socketserver.py,sha256=TYYvmXrA7wYilcAJ9tobC_k-czXvxflBKvl1MX5caM4,418 +execnet/script/quitserver.py,sha256=OPJhcFDrjY7lUzV5UVuIb3nC6i1MXld8WcuGRZhNBWo,306 +execnet/script/shell.py,sha256=9mYjbqXii1oH7a4I9MbeSG64DauFEVYLfOjsmI1K5Tc,2480 +execnet/script/socketserver.py,sha256=YL6UoJmcn5nADPwrSwsf39B2vKK5thtvifuxbI77N1U,3696 +execnet/script/socketserverservice.py,sha256=fQEV2Kb8akMubMS12O-hz4KjrUx-9JTtyuKKHvM3iX0,3044 +execnet/script/xx.py,sha256=iSdw0F2i3oQsON8_obWF_z0wtT9B_QFobA0sBf_K4ic,186 +execnet/xspec.py,sha256=t8bK0Ua1mZ1oIbXYnVDvqGm9IGrfRqHkABMKxoWUsX8,1788 diff --git a/venv/lib/python3.10/site-packages/execnet-2.0.2.dist-info/WHEEL b/venv/lib/python3.10/site-packages/execnet-2.0.2.dist-info/WHEEL new file mode 100644 index 0000000..ba1a8af --- /dev/null +++ b/venv/lib/python3.10/site-packages/execnet-2.0.2.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: hatchling 1.18.0 +Root-Is-Purelib: true +Tag: py3-none-any diff --git 
a/venv/lib/python3.10/site-packages/execnet-2.0.2.dist-info/licenses/LICENSE b/venv/lib/python3.10/site-packages/execnet-2.0.2.dist-info/licenses/LICENSE new file mode 100644 index 0000000..ff33b8f --- /dev/null +++ b/venv/lib/python3.10/site-packages/execnet-2.0.2.dist-info/licenses/LICENSE @@ -0,0 +1,18 @@ + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. diff --git a/venv/lib/python3.10/site-packages/execnet/__init__.py b/venv/lib/python3.10/site-packages/execnet/__init__.py new file mode 100644 index 0000000..c403e2b --- /dev/null +++ b/venv/lib/python3.10/site-packages/execnet/__init__.py @@ -0,0 +1,44 @@ +""" +execnet +------- + +pure python lib for connecting to local and remote Python Interpreters. + +(c) 2012, Holger Krekel and others +""" +from ._version import version as __version__ +from .gateway_base import DataFormatError +from .gateway_base import dump +from .gateway_base import dumps +from .gateway_base import load +from .gateway_base import loads +from .gateway_base import RemoteError +from .gateway_base import TimeoutError +from .gateway_bootstrap import HostNotFound +from .multi import default_group +from .multi import Group +from .multi import makegateway +from .multi import MultiChannel +from .multi import set_execmodel +from .rsync import RSync +from .xspec import XSpec + + +__all__ = [ + "__version__", + "makegateway", + "set_execmodel", + "HostNotFound", + "RemoteError", + "TimeoutError", + "XSpec", + "Group", + "MultiChannel", + "RSync", + "default_group", + "dumps", + "loads", + "load", + "dump", + "DataFormatError", +] diff --git a/venv/lib/python3.10/site-packages/execnet/_version.py b/venv/lib/python3.10/site-packages/execnet/_version.py new file mode 100644 index 0000000..c6ecda4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/execnet/_version.py @@ -0,0 +1,4 @@ +# file generated by setuptools_scm +# don't change, don't track in version control +__version__ = version = '2.0.2' +__version_tuple__ = version_tuple = (2, 0, 2) diff --git a/venv/lib/python3.10/site-packages/execnet/gateway.py b/venv/lib/python3.10/site-packages/execnet/gateway.py new file mode 100644 index 0000000..6e0b8a7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/execnet/gateway.py @@ -0,0 +1,216 @@ +""" +gateway code for initiating popen, socket and ssh connections. +(c) 2004-2013, Holger Krekel and others +""" +import inspect +import linecache +import os +import sys +import textwrap +import types + +import execnet + +from . 
import gateway_base +from .gateway_base import Message + +importdir = os.path.dirname(os.path.dirname(execnet.__file__)) + + +class Gateway(gateway_base.BaseGateway): + """Gateway to a local or remote Python Interpreter.""" + + def __init__(self, io, spec): + super().__init__(io=io, id=spec.id, _startcount=1) + self.spec = spec + self._initreceive() + + @property + def remoteaddress(self): + return self._io.remoteaddress + + def __repr__(self): + """return string representing gateway type and status.""" + try: + r = self.hasreceiver() and "receive-live" or "not-receiving" + i = len(self._channelfactory.channels()) + except AttributeError: + r = "uninitialized" + i = "no" + return "<{} id={!r} {}, {} model, {} active channels>".format( + self.__class__.__name__, self.id, r, self.execmodel.backend, i + ) + + def exit(self): + """trigger gateway exit. Defer waiting for finishing + of receiver-thread and subprocess activity to when + group.terminate() is called. + """ + self._trace("gateway.exit() called") + if self not in self._group: + self._trace("gateway already unregistered with group") + return + self._group._unregister(self) + try: + self._trace("--> sending GATEWAY_TERMINATE") + self._send(Message.GATEWAY_TERMINATE) + self._trace("--> io.close_write") + self._io.close_write() + except (ValueError, EOFError, OSError): + v = sys.exc_info()[1] + self._trace("io-error: could not send termination sequence") + self._trace(" exception: %r" % v) + + def reconfigure(self, py2str_as_py3str=True, py3str_as_py2str=False): + """ + set the string coercion for this gateway + the default is to try to convert py2 str as py3 str, + but not to try and convert py3 str to py2 str + """ + self._strconfig = (py2str_as_py3str, py3str_as_py2str) + data = gateway_base.dumps_internal(self._strconfig) + self._send(Message.RECONFIGURE, data=data) + + def _rinfo(self, update=False): + """return some sys/env information from remote.""" + if update or not hasattr(self, "_cache_rinfo"): + ch = self.remote_exec(rinfo_source) + try: + self._cache_rinfo = RInfo(ch.receive()) + finally: + ch.waitclose() + return self._cache_rinfo + + def hasreceiver(self): + """return True if gateway is able to receive data.""" + return self._receivepool.active_count() > 0 + + def remote_status(self): + """return information object about remote execution status.""" + channel = self.newchannel() + self._send(Message.STATUS, channel.id) + statusdict = channel.receive() + # the other side didn't actually instantiate a channel + # so we just delete the internal id/channel mapping + self._channelfactory._local_close(channel.id) + return RemoteStatus(statusdict) + + def remote_exec(self, source, **kwargs): + """return channel object and connect it to a remote + execution thread where the given ``source`` executes. + + * ``source`` is a string: execute source string remotely + with a ``channel`` put into the global namespace. + * ``source`` is a pure function: serialize source and + call function with ``**kwargs``, adding a + ``channel`` object to the keyword arguments. + * ``source`` is a pure module: execute source of module + with a ``channel`` in its global namespace + + In all cases the binding ``__name__='__channelexec__'`` + will be available in the global namespace of the remotely + executing code. 
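+
+        A sketch of the pure-function form (names illustrative; the
+        function is shipped as source, so it may only use builtins,
+        its ``channel`` argument and the passed keyword arguments)::
+
+            def square(channel, x):
+                channel.send(x * x)
+
+            ch = gw.remote_exec(square, x=7)
+            assert ch.receive() == 49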
+ """ + call_name = None + file_name = None + if isinstance(source, types.ModuleType): + file_name = inspect.getsourcefile(source) + linecache.updatecache(file_name) + source = inspect.getsource(source) + elif isinstance(source, types.FunctionType): + call_name = source.__name__ + file_name = inspect.getsourcefile(source) + source = _source_of_function(source) + else: + source = textwrap.dedent(str(source)) + + if not call_name and kwargs: + raise TypeError("can't pass kwargs to non-function remote_exec") + + channel = self.newchannel() + self._send( + Message.CHANNEL_EXEC, + channel.id, + gateway_base.dumps_internal((source, file_name, call_name, kwargs)), + ) + return channel + + def remote_init_threads(self, num=None): + """DEPRECATED. Is currently a NO-OPERATION already.""" + print("WARNING: remote_init_threads()" " is a no-operation in execnet-1.2") + + +class RInfo: + def __init__(self, kwargs): + self.__dict__.update(kwargs) + + def __repr__(self): + info = ", ".join("%s=%s" % item for item in sorted(self.__dict__.items())) + return "" % info + + +RemoteStatus = RInfo + + +def rinfo_source(channel): + import sys + import os + + channel.send( + dict( + executable=sys.executable, + version_info=sys.version_info[:5], + platform=sys.platform, + cwd=os.getcwd(), + pid=os.getpid(), + ) + ) + + +def _find_non_builtin_globals(source, codeobj): + import ast + import builtins + + vars = dict.fromkeys(codeobj.co_varnames) + return [ + node.id + for node in ast.walk(ast.parse(source)) + if isinstance(node, ast.Name) + and node.id not in vars + and node.id not in builtins.__dict__ + ] + + +def _source_of_function(function): + if function.__name__ == "": + raise ValueError("can't evaluate lambda functions'") + # XXX: we dont check before remote instantiation + # if arguments are used properly + try: + sig = inspect.getfullargspec(function) + except AttributeError: + args = inspect.getargspec(function)[0] + else: + args = sig.args + if not args or args[0] != "channel": + raise ValueError("expected first function argument to be `channel`") + + closure = function.__closure__ + codeobj = function.__code__ + + if closure is not None: + raise ValueError("functions with closures can't be passed") + + try: + source = inspect.getsource(function) + except OSError: + raise ValueError("can't find source file for %s" % function) + + source = textwrap.dedent(source) # just for inner functions + + used_globals = _find_non_builtin_globals(source, codeobj) + if used_globals: + raise ValueError("the use of non-builtin globals isn't supported", used_globals) + + leading_ws = "\n" * (codeobj.co_firstlineno - 1) + return leading_ws + source diff --git a/venv/lib/python3.10/site-packages/execnet/gateway_base.py b/venv/lib/python3.10/site-packages/execnet/gateway_base.py new file mode 100644 index 0000000..83c23e9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/execnet/gateway_base.py @@ -0,0 +1,1640 @@ +""" +base execnet gateway code send to the other side for bootstrapping. 
+ +NOTE: aims to be compatible with Python 2.5-3.X, Jython and IronPython + +:copyright: 2004-2015 +:authors: + - Holger Krekel + - Armin Rigo + - Benjamin Peterson + - Ronny Pfannschmidt + - many others +""" +from __future__ import annotations + +import abc +import os +import struct +import sys +import traceback +import weakref +from _thread import interrupt_main +from io import BytesIO +from typing import Callable + + +class ExecModel(metaclass=abc.ABCMeta): + @property + @abc.abstractmethod + def backend(self): + raise NotImplementedError() + + def __repr__(self): + return "<ExecModel %r>" % self.backend + + @property + @abc.abstractmethod + def queue(self): + raise NotImplementedError() + + @property + @abc.abstractmethod + def subprocess(self): + raise NotImplementedError() + + @property + @abc.abstractmethod + def socket(self): + raise NotImplementedError() + + @abc.abstractmethod + def start(self, func, args=()): + raise NotImplementedError() + + @abc.abstractmethod + def get_ident(self): + raise NotImplementedError() + + @abc.abstractmethod + def sleep(self, delay): + raise NotImplementedError() + + @abc.abstractmethod + def fdopen(self, fd, mode, bufsize=1): + raise NotImplementedError() + + @abc.abstractmethod + def Lock(self): + raise NotImplementedError() + + @abc.abstractmethod + def RLock(self): + raise NotImplementedError() + + @abc.abstractmethod + def Event(self): + raise NotImplementedError() + + +class ThreadExecModel(ExecModel): + backend = "thread" + + @property + def queue(self): + import queue + + return queue + + @property + def subprocess(self): + import subprocess + + return subprocess + + @property + def socket(self): + import socket + + return socket + + def get_ident(self): + import _thread + + return _thread.get_ident() + + def sleep(self, delay): + import time + + time.sleep(delay) + + def start(self, func, args=()): + import _thread + + return _thread.start_new_thread(func, args) + + def fdopen(self, fd, mode, bufsize=1): + import os + + return os.fdopen(fd, mode, bufsize, encoding="utf-8") + + def Lock(self): + import threading + + return threading.RLock() + + def RLock(self): + import threading + + return threading.RLock() + + def Event(self): + import threading + + return threading.Event() + + +class EventletExecModel(ExecModel): + backend = "eventlet" + + @property + def queue(self): + import eventlet + + return eventlet.queue + + @property + def subprocess(self): + import eventlet.green.subprocess + + return eventlet.green.subprocess + + @property + def socket(self): + import eventlet.green.socket + + return eventlet.green.socket + + def get_ident(self): + import eventlet.green.thread + + return eventlet.green.thread.get_ident() + + def sleep(self, delay): + import eventlet + + eventlet.sleep(delay) + + def start(self, func, args=()): + import eventlet + + return eventlet.spawn_n(func, *args) + + def fdopen(self, fd, mode, bufsize=1): + import eventlet.green.os + + return eventlet.green.os.fdopen(fd, mode, bufsize) + + def Lock(self): + import eventlet.green.threading + + return eventlet.green.threading.RLock() + + def RLock(self): + import eventlet.green.threading + + return eventlet.green.threading.RLock() + + def Event(self): + import eventlet.green.threading + + return eventlet.green.threading.Event() + + +class GeventExecModel(ExecModel): + backend = "gevent" + + @property + def queue(self): + import gevent.queue + + return gevent.queue + + @property + def subprocess(self): + import gevent.subprocess + + return gevent.subprocess + + @property + def socket(self): + 
import gevent + + return gevent.socket + + def get_ident(self): + import gevent.thread + + return gevent.thread.get_ident() + + def sleep(self, delay): + import gevent + + gevent.sleep(delay) + + def start(self, func, args=()): + import gevent + + return gevent.spawn(func, *args) + + def fdopen(self, fd, mode, bufsize=1): + # XXX + import gevent.fileobject + + return gevent.fileobject.FileObjectThread(fd, mode, bufsize) + + def Lock(self): + import gevent.lock + + return gevent.lock.RLock() + + def RLock(self): + import gevent.lock + + return gevent.lock.RLock() + + def Event(self): + import gevent.event + + return gevent.event.Event() + + +def get_execmodel(backend): + if hasattr(backend, "backend"): + return backend + if backend == "thread": + return ThreadExecModel() + elif backend == "eventlet": + return EventletExecModel() + elif backend == "gevent": + return GeventExecModel() + else: + raise ValueError(f"unknown execmodel {backend!r}") + + +class Reply: + """reply instances provide access to the result + of a function execution that got dispatched + through WorkerPool.spawn() + """ + + def __init__(self, task, threadmodel): + self.task = task + self._result_ready = threadmodel.Event() + self.running = True + + def get(self, timeout=None): + """get the result object from an asynchronous function execution. + if the function execution raised an exception, + then calling get() will reraise that exception + including its traceback. + """ + self.waitfinish(timeout) + try: + return self._result + except AttributeError: + raise self._excinfo[1].with_traceback(self._excinfo[2]) + + def waitfinish(self, timeout=None): + if not self._result_ready.wait(timeout): + raise OSError(f"timeout waiting for {self.task!r}") + + def run(self): + func, args, kwargs = self.task + try: + try: + self._result = func(*args, **kwargs) + except BaseException: + # sys may be already None when shutting down the interpreter + if sys is not None: + self._excinfo = sys.exc_info() + finally: + self._result_ready.set() + self.running = False + + +class WorkerPool: + """A WorkerPool allows to spawn function executions + to threads, returning a reply object on which you + can ask for the result (and get exceptions reraised). + + This implementation allows the main thread to integrate + itself into performing function execution through + calling integrate_as_primary_thread() which will return + when the pool received a trigger_shutdown(). + """ + + def __init__(self, execmodel, hasprimary=False): + """by default allow unlimited number of spawns.""" + self.execmodel = execmodel + self._running_lock = self.execmodel.Lock() + self._running = set() + self._shuttingdown = False + self._waitall_events = [] + if hasprimary: + if self.execmodel.backend != "thread": + raise ValueError("hasprimary=True requires thread model") + self._primary_thread_task_ready = self.execmodel.Event() + else: + self._primary_thread_task_ready = None + + def integrate_as_primary_thread(self): + """integrate the thread with which we are called as a primary + thread for executing functions triggered with spawn(). 
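+
+        A sketch of the resulting control flow (illustrative)::
+
+            pool = WorkerPool(get_execmodel("thread"), hasprimary=True)
+            pool.spawn(pool.trigger_shutdown)   # queued for the primary thread
+            pool.integrate_as_primary_thread()  # runs the task, then returns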
+ """ + assert self.execmodel.backend == "thread", self.execmodel + primary_thread_task_ready = self._primary_thread_task_ready + # interacts with code at REF1 + while 1: + primary_thread_task_ready.wait() + reply = self._primary_thread_task + if reply is None: # trigger_shutdown() woke us up + break + self._perform_spawn(reply) + # we are concurrent with trigger_shutdown and spawn + with self._running_lock: + if self._shuttingdown: + break + primary_thread_task_ready.clear() + + def trigger_shutdown(self): + with self._running_lock: + self._shuttingdown = True + if self._primary_thread_task_ready is not None: + self._primary_thread_task = None + self._primary_thread_task_ready.set() + + def active_count(self): + return len(self._running) + + def _perform_spawn(self, reply): + reply.run() + with self._running_lock: + self._running.remove(reply) + if not self._running: + while self._waitall_events: + waitall_event = self._waitall_events.pop() + waitall_event.set() + + def _try_send_to_primary_thread(self, reply): + # REF1 in 'thread' model we give priority to running in main thread + # note that we should be called with _running_lock hold + primary_thread_task_ready = self._primary_thread_task_ready + if primary_thread_task_ready is not None: + if not primary_thread_task_ready.is_set(): + self._primary_thread_task = reply + # wake up primary thread + primary_thread_task_ready.set() + return True + return False + + def spawn(self, func, *args, **kwargs): + """return Reply object for the asynchronous dispatch + of the given func(*args, **kwargs). + """ + reply = Reply((func, args, kwargs), self.execmodel) + with self._running_lock: + if self._shuttingdown: + raise ValueError("pool is shutting down") + self._running.add(reply) + if not self._try_send_to_primary_thread(reply): + self.execmodel.start(self._perform_spawn, (reply,)) + return reply + + def terminate(self, timeout=None): + """trigger shutdown and wait for completion of all executions.""" + self.trigger_shutdown() + return self.waitall(timeout=timeout) + + def waitall(self, timeout=None): + """wait until all active spawns have finished executing.""" + with self._running_lock: + if not self._running: + return True + # if a Reply still runs, we let run_and_release + # signal us -- note that we are still holding the + # _running_lock to avoid race conditions + my_waitall_event = self.execmodel.Event() + self._waitall_events.append(my_waitall_event) + return my_waitall_event.wait(timeout=timeout) + + +sysex = (KeyboardInterrupt, SystemExit) + + +DEBUG = os.environ.get("EXECNET_DEBUG") +pid = os.getpid() +if DEBUG == "2": + + def trace(*msg): + try: + line = " ".join(map(str, msg)) + sys.stderr.write(f"[{pid}] {line}\n") + sys.stderr.flush() + except Exception: + pass # nothing we can do, likely interpreter-shutdown + +elif DEBUG: + import tempfile + import os + + fn = os.path.join(tempfile.gettempdir(), "execnet-debug-%d" % pid) + # sys.stderr.write("execnet-debug at %r" % (fn,)) + debugfile = open(fn, "w") + + def trace(*msg): + try: + line = " ".join(map(str, msg)) + debugfile.write(line + "\n") + debugfile.flush() + except Exception: + try: + v = sys.exc_info()[1] + sys.stderr.write(f"[{pid}] exception during tracing: {v!r}\n") + except Exception: + pass # nothing we can do, likely interpreter-shutdown + +else: + notrace = trace = lambda *msg: None + + +class Popen2IO: + error = (IOError, OSError, EOFError) + + def __init__(self, outfile, infile, execmodel): + # we need raw byte streams + self.outfile, self.infile = outfile, infile + 
if sys.platform == "win32": + import msvcrt + + try: + msvcrt.setmode(infile.fileno(), os.O_BINARY) + msvcrt.setmode(outfile.fileno(), os.O_BINARY) + except (AttributeError, OSError): + pass + self._read = getattr(infile, "buffer", infile).read + self._write = getattr(outfile, "buffer", outfile).write + self.execmodel = execmodel + + def read(self, numbytes): + """Read exactly 'numbytes' bytes from the pipe.""" + # a file in non-blocking mode may return less bytes, so we loop + buf = b"" + while numbytes > len(buf): + data = self._read(numbytes - len(buf)) + if not data: + raise EOFError("expected %d bytes, got %d" % (numbytes, len(buf))) + buf += data + return buf + + def write(self, data): + """write out all data bytes.""" + assert isinstance(data, bytes) + self._write(data) + self.outfile.flush() + + def close_read(self): + self.infile.close() + + def close_write(self): + self.outfile.close() + + +class Message: + """encapsulates Messages and their wire protocol.""" + + # message code -> name, handler + _types: dict[int, tuple[str, Callable[[Message, BaseGateway], None]]] = {} + + def __init__(self, msgcode, channelid=0, data=b""): + self.msgcode = msgcode + self.channelid = channelid + self.data = data + + @staticmethod + def from_io(io): + try: + header = io.read(9) # type 1, channel 4, payload 4 + if not header: + raise EOFError("empty read") + except EOFError: + e = sys.exc_info()[1] + raise EOFError("couldn't load message header, " + e.args[0]) + msgtype, channel, payload = struct.unpack("!bii", header) + return Message(msgtype, channel, io.read(payload)) + + def to_io(self, io): + header = struct.pack("!bii", self.msgcode, self.channelid, len(self.data)) + io.write(header + self.data) + + def received(self, gateway): + handler = self._types[self.msgcode][1] + handler(self, gateway) + + def __repr__(self): + name = self._types[self.msgcode][0] + return "<Message {} channel={} lendata={}>".format( + name, self.channelid, len(self.data) + ) + + def _status(message, gateway): + # we use the channelid to send back information + # but don't instantiate a channel object + d = { + "numchannels": len(gateway._channelfactory._channels), + "numexecuting": gateway._execpool.active_count(), + "execmodel": gateway.execmodel.backend, + } + gateway._send(Message.CHANNEL_DATA, message.channelid, dumps_internal(d)) + gateway._send(Message.CHANNEL_CLOSE, message.channelid) + + STATUS = 0 + _types[STATUS] = ("STATUS", _status) + + def _reconfigure(message, gateway): + if message.channelid == 0: + target = gateway + else: + target = gateway._channelfactory.new(message.channelid) + target._strconfig = loads_internal(message.data, gateway) + + RECONFIGURE = 1 + _types[RECONFIGURE] = ("RECONFIGURE", _reconfigure) + + def _gateway_terminate(message, gateway): + raise GatewayReceivedTerminate(gateway) + + GATEWAY_TERMINATE = 2 + _types[GATEWAY_TERMINATE] = ("GATEWAY_TERMINATE", _gateway_terminate) + + def _channel_exec(message, gateway): + channel = gateway._channelfactory.new(message.channelid) + gateway._local_schedulexec(channel=channel, sourcetask=message.data) + + CHANNEL_EXEC = 3 + _types[CHANNEL_EXEC] = ("CHANNEL_EXEC", _channel_exec) + + def _channel_data(message, gateway): + gateway._channelfactory._local_receive(message.channelid, message.data) + + CHANNEL_DATA = 4 + _types[CHANNEL_DATA] = ("CHANNEL_DATA", _channel_data) + + def _channel_close(message, gateway): + gateway._channelfactory._local_close(message.channelid) + + CHANNEL_CLOSE = 5 + _types[CHANNEL_CLOSE] = ("CHANNEL_CLOSE", _channel_close) + + def 
_channel_close_error(message, gateway): + remote_error = RemoteError(loads_internal(message.data)) + gateway._channelfactory._local_close(message.channelid, remote_error) + + CHANNEL_CLOSE_ERROR = 6 + _types[CHANNEL_CLOSE_ERROR] = ("CHANNEL_CLOSE_ERROR", _channel_close_error) + + def _channel_last_message(message, gateway): + gateway._channelfactory._local_close(message.channelid, sendonly=True) + + CHANNEL_LAST_MESSAGE = 7 + _types[CHANNEL_LAST_MESSAGE] = ("CHANNEL_LAST_MESSAGE", _channel_last_message) + + +class GatewayReceivedTerminate(Exception): + """Receiverthread got termination message.""" + + +def geterrortext(excinfo, format_exception=traceback.format_exception, sysex=sysex): + try: + l = format_exception(*excinfo) + errortext = "".join(l) + except sysex: + raise + except BaseException: + errortext = f"{excinfo[0].__name__}: {excinfo[1]}" + return errortext + + +class RemoteError(Exception): + """Exception containing a stringified error from the other side.""" + + def __init__(self, formatted): + super().__init__() + self.formatted = formatted + + def __str__(self): + return self.formatted + + def __repr__(self): + return f"{self.__class__.__name__}: {self.formatted}" + + def warn(self): + if self.formatted != INTERRUPT_TEXT: + # XXX do this better + sys.stderr.write(f"[{os.getpid()}] Warning: unhandled {self!r}\n") + + +class TimeoutError(IOError): + """Exception indicating that a timeout was reached.""" + + +NO_ENDMARKER_WANTED = object() + + +class Channel: + "Communication channel between two Python Interpreter execution points." + RemoteError = RemoteError + TimeoutError = TimeoutError + _INTERNALWAKEUP = 1000 + _executing = False + + def __init__(self, gateway, id): + assert isinstance(id, int) + assert not isinstance(gateway, type) + self.gateway = gateway + # XXX: defaults copied from Unserializer + self._strconfig = getattr(gateway, "_strconfig", (True, False)) + self.id = id + self._items = self.gateway.execmodel.queue.Queue() + self._closed = False + self._receiveclosed = self.gateway.execmodel.Event() + self._remoteerrors = [] + + def _trace(self, *msg): + self.gateway._trace(self.id, *msg) + + def setcallback(self, callback, endmarker=NO_ENDMARKER_WANTED): + """set a callback function for receiving items. + + All already queued items will immediately trigger the callback. + Afterwards the callback will execute in the receiver thread + for each received data item and calls to ``receive()`` will + raise an error. + If an endmarker is specified the callback will eventually + be called with the endmarker when the channel closes. 
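+
+        For instance (endmarker object illustrative)::
+
+            END = object()
+            received = []
+            channel.setcallback(received.append, endmarker=END)
+            # each incoming item is appended from the receiver thread;
+            # END is appended once the channel closes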
+ """ + _callbacks = self.gateway._channelfactory._callbacks + with self.gateway._receivelock: + if self._items is None: + raise OSError(f"{self!r} has callback already registered") + items = self._items + self._items = None + while 1: + try: + olditem = items.get(block=False) + except self.gateway.execmodel.queue.Empty: + if not (self._closed or self._receiveclosed.is_set()): + _callbacks[self.id] = (callback, endmarker, self._strconfig) + break + else: + if olditem is ENDMARKER: + items.put(olditem) # for other receivers + if endmarker is not NO_ENDMARKER_WANTED: + callback(endmarker) + break + else: + callback(olditem) + + def __repr__(self): + flag = self.isclosed() and "closed" or "open" + return "" % (self.id, flag) + + def __del__(self): + if self.gateway is None: # can be None in tests + return + self._trace("channel.__del__") + # no multithreading issues here, because we have the last ref to 'self' + if self._closed: + # state transition "closed" --> "deleted" + for error in self._remoteerrors: + error.warn() + elif self._receiveclosed.is_set(): + # state transition "sendonly" --> "deleted" + # the remote channel is already in "deleted" state, nothing to do + pass + else: + # state transition "opened" --> "deleted" + # check if we are in the middle of interpreter shutdown + # in which case the process will go away and we probably + # don't need to try to send a closing or last message + # (and often it won't work anymore to send things out) + if Message is not None: + if self._items is None: # has_callback + msgcode = Message.CHANNEL_LAST_MESSAGE + else: + msgcode = Message.CHANNEL_CLOSE + try: + self.gateway._send(msgcode, self.id) + except (OSError, ValueError): # ignore problems with sending + pass + + def _getremoteerror(self): + try: + return self._remoteerrors.pop(0) + except IndexError: + try: + return self.gateway._error + except AttributeError: + pass + return None + + # + # public API for channel objects + # + def isclosed(self): + """return True if the channel is closed. A closed + channel may still hold items. + """ + return self._closed + + def makefile(self, mode="w", proxyclose=False): + """return a file-like object. + mode can be 'w' or 'r' for writeable/readable files. + if proxyclose is true file.close() will also close the channel. + """ + if mode == "w": + return ChannelFileWrite(channel=self, proxyclose=proxyclose) + elif mode == "r": + return ChannelFileRead(channel=self, proxyclose=proxyclose) + raise ValueError(f"mode {mode!r} not available") + + def close(self, error=None): + """close down this channel with an optional error message. + Note that closing of a channel tied to remote_exec happens + automatically at the end of execution and cannot + be done explicitly. + """ + if self._executing: + raise OSError("cannot explicitly close channel within remote_exec") + if self._closed: + self.gateway._trace(self, "ignoring redundant call to close()") + if not self._closed: + # state transition "opened/sendonly" --> "closed" + # threads warning: the channel might be closed under our feet, + # but it's never damaging to send too many CHANNEL_CLOSE messages + # however, if the other side triggered a close already, we + # do not send back a closed message. 
+ if not self._receiveclosed.is_set(): + put = self.gateway._send + if error is not None: + put(Message.CHANNEL_CLOSE_ERROR, self.id, dumps_internal(error)) + else: + put(Message.CHANNEL_CLOSE, self.id) + self._trace("sent channel close message") + if isinstance(error, RemoteError): + self._remoteerrors.append(error) + self._closed = True # --> "closed" + self._receiveclosed.set() + queue = self._items + if queue is not None: + queue.put(ENDMARKER) + self.gateway._channelfactory._no_longer_opened(self.id) + + def waitclose(self, timeout=None): + """wait until this channel is closed (or the remote side + otherwise signalled that no more data was being sent). + The channel may still hold receivable items, but not receive + any more after waitclose() has returned. Exceptions from executing + code on the other side are reraised as local channel.RemoteErrors. + EOFError is raised if the reading-connection was prematurely closed, + which often indicates a dying process. + self.TimeoutError is raised after the specified number of seconds + (default is None, i.e. wait indefinitely). + """ + # wait for non-"opened" state + self._receiveclosed.wait(timeout=timeout) + if not self._receiveclosed.is_set(): + raise self.TimeoutError("Timeout after %r seconds" % timeout) + error = self._getremoteerror() + if error: + raise error + + def send(self, item): + """sends the given item to the other side of the channel, + possibly blocking if the sender queue is full. + The item must be a simple python type and will be + copied to the other side by value. IOError is + raised if the write pipe was prematurely closed. + """ + if self.isclosed(): + raise OSError(f"cannot send to {self!r}") + self.gateway._send(Message.CHANNEL_DATA, self.id, dumps_internal(item)) + + def receive(self, timeout=None): + """receive a data item that was sent from the other side. + timeout: None [default] waits indefinitely. A positive number + indicates the number of seconds after which a channel.TimeoutError + exception will be raised if no item was received. + Note that exceptions from the remotely executing code will be + reraised as channel.RemoteError exceptions containing + a textual representation of the remote traceback. 
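+
+        For instance (timeout value illustrative)::
+
+            try:
+                item = channel.receive(timeout=10)
+            except channel.TimeoutError:
+                ...  # nothing arrived within 10 seconds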
+ """ + itemqueue = self._items + if itemqueue is None: + raise OSError("cannot receive(), channel has receiver callback") + try: + x = itemqueue.get(timeout=timeout) + except self.gateway.execmodel.queue.Empty: + raise self.TimeoutError("no item after %r seconds" % timeout) + if x is ENDMARKER: + itemqueue.put(x) # for other receivers + raise self._getremoteerror() or EOFError() + else: + return x + + def __iter__(self): + return self + + def next(self): + try: + return self.receive() + except EOFError: + raise StopIteration + + __next__ = next + + def reconfigure(self, py2str_as_py3str=True, py3str_as_py2str=False): + """ + set the string coercion for this channel + the default is to try to convert py2 str as py3 str, + but not to try and convert py3 str to py2 str + """ + self._strconfig = (py2str_as_py3str, py3str_as_py2str) + data = dumps_internal(self._strconfig) + self.gateway._send(Message.RECONFIGURE, self.id, data=data) + + +ENDMARKER = object() +INTERRUPT_TEXT = "keyboard-interrupted" + + +class ChannelFactory: + def __init__(self, gateway, startcount=1): + self._channels = weakref.WeakValueDictionary() + self._callbacks = {} + self._writelock = gateway.execmodel.Lock() + self.gateway = gateway + self.count = startcount + self.finished = False + self._list = list # needed during interp-shutdown + + def new(self, id=None): + """create a new Channel with 'id' (or create new id if None).""" + with self._writelock: + if self.finished: + raise OSError(f"connection already closed: {self.gateway}") + if id is None: + id = self.count + self.count += 2 + try: + channel = self._channels[id] + except KeyError: + channel = self._channels[id] = Channel(self.gateway, id) + return channel + + def channels(self): + return self._list(self._channels.values()) + + # + # internal methods, called from the receiver thread + # + def _no_longer_opened(self, id): + try: + del self._channels[id] + except KeyError: + pass + try: + callback, endmarker, strconfig = self._callbacks.pop(id) + except KeyError: + pass + else: + if endmarker is not NO_ENDMARKER_WANTED: + callback(endmarker) + + def _local_close(self, id, remoteerror=None, sendonly=False): + channel = self._channels.get(id) + if channel is None: + # channel already in "deleted" state + if remoteerror: + remoteerror.warn() + self._no_longer_opened(id) + else: + # state transition to "closed" state + if remoteerror: + channel._remoteerrors.append(remoteerror) + queue = channel._items + if queue is not None: + queue.put(ENDMARKER) + self._no_longer_opened(id) + if not sendonly: # otherwise #--> "sendonly" + channel._closed = True # --> "closed" + channel._receiveclosed.set() + + def _local_receive(self, id, data): + # executes in receiver thread + channel = self._channels.get(id) + try: + callback, endmarker, strconfig = self._callbacks[id] + except KeyError: + queue = channel and channel._items + if queue is None: + pass # drop data + else: + item = loads_internal(data, channel) + queue.put(item) + else: + try: + data = loads_internal(data, channel, strconfig) + callback(data) # even if channel may be already closed + except Exception: + excinfo = sys.exc_info() + self.gateway._trace("exception during callback: %s" % excinfo[1]) + errortext = self.gateway._geterrortext(excinfo) + self.gateway._send( + Message.CHANNEL_CLOSE_ERROR, id, dumps_internal(errortext) + ) + self._local_close(id, errortext) + + def _finished_receiving(self): + with self._writelock: + self.finished = True + for id in self._list(self._channels): + self._local_close(id, 
sendonly=True) + for id in self._list(self._callbacks): + self._no_longer_opened(id) + + +class ChannelFile: + def __init__(self, channel, proxyclose=True): + self.channel = channel + self._proxyclose = proxyclose + + def isatty(self): + return False + + def close(self): + if self._proxyclose: + self.channel.close() + + def __repr__(self): + state = self.channel.isclosed() and "closed" or "open" + return "<ChannelFile %d %s>" % (self.channel.id, state) + + +class ChannelFileWrite(ChannelFile): + def write(self, out): + self.channel.send(out) + + def flush(self): + pass + + +class ChannelFileRead(ChannelFile): + def __init__(self, channel, proxyclose=True): + super().__init__(channel, proxyclose) + self._buffer = None + + def read(self, n): + try: + if self._buffer is None: + self._buffer = self.channel.receive() + while len(self._buffer) < n: + self._buffer += self.channel.receive() + except EOFError: + self.close() + if self._buffer is None: + ret = "" + else: + ret = self._buffer[:n] + self._buffer = self._buffer[n:] + return ret + + def readline(self): + if self._buffer is not None: + i = self._buffer.find("\n") + if i != -1: + return self.read(i + 1) + line = self.read(len(self._buffer) + 1) + else: + line = self.read(1) + while line and line[-1] != "\n": + c = self.read(1) + if not c: + break + line += c + return line + + +class BaseGateway: + exc_info = sys.exc_info + _sysex = sysex + id = "<worker>" + + def __init__(self, io, id, _startcount=2): + self.execmodel = io.execmodel + self._io = io + self.id = id + self._strconfig = (Unserializer.py2str_as_py3str, Unserializer.py3str_as_py2str) + self._channelfactory = ChannelFactory(self, _startcount) + self._receivelock = self.execmodel.RLock() + # globals may be None at process-termination + self.__trace = trace + self._geterrortext = geterrortext + self._receivepool = WorkerPool(self.execmodel) + + def _trace(self, *msg): + self.__trace(self.id, *msg) + + def _initreceive(self): + self._receivepool.spawn(self._thread_receiver) + + def _thread_receiver(self): + def log(*msg): + self._trace("[receiver-thread]", *msg) + + log("RECEIVERTHREAD: starting to run") + io = self._io + try: + while 1: + msg = Message.from_io(io) + log("received", msg) + with self._receivelock: + msg.received(self) + del msg + except (KeyboardInterrupt, GatewayReceivedTerminate): + pass + except EOFError: + log("EOF without prior gateway termination message") + self._error = self.exc_info()[1] + except Exception: + log(self._geterrortext(self.exc_info())) + log("finishing receiving thread") + # wake up and terminate any execution waiting to receive + self._channelfactory._finished_receiving() + log("terminating execution") + self._terminate_execution() + log("closing read") + self._io.close_read() + log("closing write") + self._io.close_write() + log("terminating our receive pseudo pool") + self._receivepool.trigger_shutdown() + + def _terminate_execution(self): + pass + + def _send(self, msgcode, channelid=0, data=b""): + message = Message(msgcode, channelid, data) + try: + message.to_io(self._io) + self._trace("sent", message) + except (OSError, ValueError): + e = sys.exc_info()[1] + self._trace("failed to send", message, e) + # ValueError might be because the IO is already closed + raise OSError("cannot send (already closed?)") + + def _local_schedulexec(self, channel, sourcetask): + channel.close("execution disallowed") + + # _____________________________________________________________________ + # + # High Level Interface + # 
_____________________________________________________________________ + # + def newchannel(self): + """return a new independent channel.""" + return self._channelfactory.new() + + def join(self, timeout=None): + """Wait for receiverthread to terminate.""" + self._trace("waiting for receiver thread to finish") + self._receivepool.waitall() + + +class WorkerGateway(BaseGateway): + def _local_schedulexec(self, channel, sourcetask): + sourcetask = loads_internal(sourcetask) + self._execpool.spawn(self.executetask, (channel, sourcetask)) + + def _terminate_execution(self): + # called from receiverthread + self._trace("shutting down execution pool") + self._execpool.trigger_shutdown() + if not self._execpool.waitall(5.0): + self._trace("execution ongoing after 5 secs," " trying interrupt_main") + # We try hard to terminate execution based on the assumption + # that there is only one gateway object running per-process. + if sys.platform != "win32": + self._trace("sending ourselves a SIGINT") + os.kill(os.getpid(), 2) # send ourselves a SIGINT + elif interrupt_main is not None: + self._trace("calling interrupt_main()") + interrupt_main() + if not self._execpool.waitall(10.0): + self._trace( + "execution did not finish in another 10 secs, " "calling os._exit()" + ) + os._exit(1) + + def serve(self): + def trace(msg): + self._trace("[serve] " + msg) + + hasprimary = self.execmodel.backend == "thread" + self._execpool = WorkerPool(self.execmodel, hasprimary=hasprimary) + trace("spawning receiver thread") + self._initreceive() + try: + if hasprimary: + # this will return when we are in shutdown + trace("integrating as primary thread") + self._execpool.integrate_as_primary_thread() + trace("joining receiver thread") + self.join() + except KeyboardInterrupt: + # in the worker we can't really do anything sensible + trace("swallowing keyboardinterrupt, serve finished") + + def executetask(self, item): + try: + channel, (source, file_name, call_name, kwargs) = item + loc = {"channel": channel, "__name__": "__channelexec__"} + self._trace(f"execution starts[{channel.id}]: {repr(source)[:50]}") + channel._executing = True + try: + co = compile(source + "\n", file_name or "<remote exec>", "exec") + exec(co, loc) + if call_name: + self._trace("calling %s(**%60r)" % (call_name, kwargs)) + function = loc[call_name] + function(channel, **kwargs) + finally: + channel._executing = False + self._trace("execution finished") + except KeyboardInterrupt: + channel.close(INTERRUPT_TEXT) + raise + except BaseException: + excinfo = self.exc_info() + if not isinstance(excinfo[1], EOFError): + if not channel.gateway._channelfactory.finished: + self._trace(f"got exception: {excinfo[1]!r}") + errortext = self._geterrortext(excinfo) + channel.close(errortext) + return + self._trace("ignoring EOFError because receiving finished") + channel.close() + + +# +# Cross-Python pickling code, tested from test_serializer.py +# + + +class DataFormatError(Exception): + pass + + +class DumpError(DataFormatError): + """Error while serializing an object.""" + + +class LoadError(DataFormatError): + """Error while unserializing an object.""" + + +def bchr(n): + return bytes([n]) + + +DUMPFORMAT_VERSION = bchr(2) + +FOUR_BYTE_INT_MAX = 2147483647 + +FLOAT_FORMAT = "!d" +FLOAT_FORMAT_SIZE = struct.calcsize(FLOAT_FORMAT) +COMPLEX_FORMAT = "!dd" +COMPLEX_FORMAT_SIZE = struct.calcsize(COMPLEX_FORMAT) + + +class _Stop(Exception): + pass + + +class opcode: + """container for name -> num mappings.""" + + BUILDTUPLE = b"@" + BYTES = b"A" + CHANNEL = b"B" + FALSE = 
b"C" + FLOAT = b"D" + FROZENSET = b"E" + INT = b"F" + LONG = b"G" + LONGINT = b"H" + LONGLONG = b"I" + NEWDICT = b"J" + NEWLIST = b"K" + NONE = b"L" + PY2STRING = b"M" + PY3STRING = b"N" + SET = b"O" + SETITEM = b"P" + STOP = b"Q" + TRUE = b"R" + UNICODE = b"S" + COMPLEX = b"T" + + +class Unserializer: + num2func: dict[bytes, Callable[[Unserializer], None]] = {} + py2str_as_py3str = True # True + py3str_as_py2str = False # false means py2 will get unicode + + def __init__(self, stream, channel_or_gateway=None, strconfig=None): + gateway = getattr(channel_or_gateway, "gateway", channel_or_gateway) + strconfig = getattr(channel_or_gateway, "_strconfig", strconfig) + if strconfig: + self.py2str_as_py3str, self.py3str_as_py2str = strconfig + self.stream = stream + self.channelfactory = getattr(gateway, "_channelfactory", gateway) + + def load(self, versioned=False): + if versioned: + ver = self.stream.read(1) + if ver != DUMPFORMAT_VERSION: + raise LoadError("wrong dumpformat version %r" % ver) + self.stack = [] + try: + while True: + opcode = self.stream.read(1) + if not opcode: + raise EOFError + try: + loader = self.num2func[opcode] + except KeyError: + raise LoadError( + "unknown opcode %r - " "wire protocol corruption?" % (opcode,) + ) + loader(self) + except _Stop: + if len(self.stack) != 1: + raise LoadError("internal unserialization error") + return self.stack.pop(0) + else: + raise LoadError("didn't get STOP") + + def load_none(self): + self.stack.append(None) + + num2func[opcode.NONE] = load_none + + def load_true(self): + self.stack.append(True) + + num2func[opcode.TRUE] = load_true + + def load_false(self): + self.stack.append(False) + + num2func[opcode.FALSE] = load_false + + def load_int(self): + i = self._read_int4() + self.stack.append(i) + + num2func[opcode.INT] = load_int + + def load_longint(self): + s = self._read_byte_string() + self.stack.append(int(s)) + + num2func[opcode.LONGINT] = load_longint + + load_long = load_int + num2func[opcode.LONG] = load_long + load_longlong = load_longint + num2func[opcode.LONGLONG] = load_longlong + + def load_float(self): + binary = self.stream.read(FLOAT_FORMAT_SIZE) + self.stack.append(struct.unpack(FLOAT_FORMAT, binary)[0]) + + num2func[opcode.FLOAT] = load_float + + def load_complex(self): + binary = self.stream.read(COMPLEX_FORMAT_SIZE) + self.stack.append(complex(*struct.unpack(COMPLEX_FORMAT, binary))) + + num2func[opcode.COMPLEX] = load_complex + + def _read_int4(self): + return struct.unpack("!i", self.stream.read(4))[0] + + def _read_byte_string(self): + length = self._read_int4() + as_bytes = self.stream.read(length) + return as_bytes + + def load_py3string(self): + as_bytes = self._read_byte_string() + if self.py3str_as_py2str: + # XXX Should we try to decode into latin-1? 
+ self.stack.append(as_bytes) + else: + self.stack.append(as_bytes.decode("utf-8")) + + num2func[opcode.PY3STRING] = load_py3string + + def load_py2string(self): + as_bytes = self._read_byte_string() + if self.py2str_as_py3str: + s = as_bytes.decode("latin-1") + else: + s = as_bytes + self.stack.append(s) + + num2func[opcode.PY2STRING] = load_py2string + + def load_bytes(self): + s = self._read_byte_string() + self.stack.append(s) + + num2func[opcode.BYTES] = load_bytes + + def load_unicode(self): + self.stack.append(self._read_byte_string().decode("utf-8")) + + num2func[opcode.UNICODE] = load_unicode + + def load_newlist(self): + length = self._read_int4() + self.stack.append([None] * length) + + num2func[opcode.NEWLIST] = load_newlist + + def load_setitem(self): + if len(self.stack) < 3: + raise LoadError("not enough items for setitem") + value = self.stack.pop() + key = self.stack.pop() + self.stack[-1][key] = value + + num2func[opcode.SETITEM] = load_setitem + + def load_newdict(self): + self.stack.append({}) + + num2func[opcode.NEWDICT] = load_newdict + + def _load_collection(self, type_): + length = self._read_int4() + if length: + res = type_(self.stack[-length:]) + del self.stack[-length:] + self.stack.append(res) + else: + self.stack.append(type_()) + + def load_buildtuple(self): + self._load_collection(tuple) + + num2func[opcode.BUILDTUPLE] = load_buildtuple + + def load_set(self): + self._load_collection(set) + + num2func[opcode.SET] = load_set + + def load_frozenset(self): + self._load_collection(frozenset) + + num2func[opcode.FROZENSET] = load_frozenset + + def load_stop(self): + raise _Stop + + num2func[opcode.STOP] = load_stop + + def load_channel(self): + id = self._read_int4() + newchannel = self.channelfactory.new(id) + self.stack.append(newchannel) + + num2func[opcode.CHANNEL] = load_channel + + +def dumps(obj): + """return a serialized bytestring of the given obj. + + The obj and all contained objects must be of a builtin + python type (so nested dicts, sets, etc. are all ok but + not user-level instances). + """ + return _Serializer().save(obj, versioned=True) + + +def dump(byteio, obj): + """write a serialized bytestring of the given obj to the given stream.""" + _Serializer(write=byteio.write).save(obj, versioned=True) + + +def loads(bytestring, py2str_as_py3str=False, py3str_as_py2str=False): + """return the object as deserialized from the given bytestring. + + py2str_as_py3str: if true then string (str) objects previously + dumped on Python2 will be loaded as Python3 + strings which really are text objects. + py3str_as_py2str: if true then string (str) objects previously + dumped on Python3 will be loaded as Python2 + strings instead of unicode objects. + + if the bytestring was dumped with an incompatible protocol + version or if the bytestring is corrupted, the + ``execnet.DataFormatError`` will be raised. + """ + io = BytesIO(bytestring) + return load( + io, py2str_as_py3str=py2str_as_py3str, py3str_as_py2str=py3str_as_py2str + ) + + +def load(io, py2str_as_py3str=False, py3str_as_py2str=False): + """deserialize an object from the specified stream. 
+ + Behaviour and parameters are otherwise the same as with ``loads`` + """ + strconfig = (py2str_as_py3str, py3str_as_py2str) + return Unserializer(io, strconfig=strconfig).load(versioned=True) + + +def loads_internal(bytestring, channelfactory=None, strconfig=None): + io = BytesIO(bytestring) + return Unserializer(io, channelfactory, strconfig).load() + + +def dumps_internal(obj): + return _Serializer().save(obj) + + +class _Serializer: + _dispatch: dict[type, Callable[[_Serializer, object], None]] = {} + + def __init__(self, write=None): + if write is None: + self._streamlist = [] + write = self._streamlist.append + self._write = write + + def save(self, obj, versioned=False): + # calling here is not re-entrant but multiple instances + # may write to the same stream because of the common platform + # atomic-write guarantee (concurrent writes each happen atomically) + if versioned: + self._write(DUMPFORMAT_VERSION) + self._save(obj) + self._write(opcode.STOP) + try: + streamlist = self._streamlist + except AttributeError: + return None + return b"".join(streamlist) + + def _save(self, obj): + tp = type(obj) + try: + dispatch = self._dispatch[tp] + except KeyError: + methodname = "save_" + tp.__name__ + meth = getattr(self.__class__, methodname, None) + if meth is None: + raise DumpError(f"can't serialize {tp}") + dispatch = self._dispatch[tp] = meth + dispatch(self, obj) + + def save_NoneType(self, non): + self._write(opcode.NONE) + + def save_bool(self, boolean): + if boolean: + self._write(opcode.TRUE) + else: + self._write(opcode.FALSE) + + def save_bytes(self, bytes_): + self._write(opcode.BYTES) + self._write_byte_sequence(bytes_) + + def save_str(self, s): + self._write(opcode.PY3STRING) + self._write_unicode_string(s) + + def _write_unicode_string(self, s): + try: + as_bytes = s.encode("utf-8") + except UnicodeEncodeError: + raise DumpError("strings must be utf-8 encodable") + self._write_byte_sequence(as_bytes) + + def _write_byte_sequence(self, bytes_): + self._write_int4(len(bytes_), "string is too long") + self._write(bytes_) + + def _save_integral(self, i, short_op, long_op): + if i <= FOUR_BYTE_INT_MAX: + self._write(short_op) + self._write_int4(i) + else: + self._write(long_op) + self._write_byte_sequence(str(i).rstrip("L").encode("ascii")) + + def save_int(self, i): + self._save_integral(i, opcode.INT, opcode.LONGINT) + + def save_long(self, l): + self._save_integral(l, opcode.LONG, opcode.LONGLONG) + + def save_float(self, flt): + self._write(opcode.FLOAT) + self._write(struct.pack(FLOAT_FORMAT, flt)) + + def save_complex(self, cpx): + self._write(opcode.COMPLEX) + self._write(struct.pack(COMPLEX_FORMAT, cpx.real, cpx.imag)) + + def _write_int4(self, i, error="int must be less than %i" % (FOUR_BYTE_INT_MAX,)): + if i > FOUR_BYTE_INT_MAX: + raise DumpError(error) + self._write(struct.pack("!i", i)) + + def save_list(self, L): + self._write(opcode.NEWLIST) + self._write_int4(len(L), "list is too long") + for i, item in enumerate(L): + self._write_setitem(i, item) + + def _write_setitem(self, key, value): + self._save(key) + self._save(value) + self._write(opcode.SETITEM) + + def save_dict(self, d): + self._write(opcode.NEWDICT) + for key, value in d.items(): + self._write_setitem(key, value) + + def save_tuple(self, tup): + for item in tup: + self._save(item) + self._write(opcode.BUILDTUPLE) + self._write_int4(len(tup), "tuple is too long") + + def _write_set(self, s, op): + for item in s: + self._save(item) + self._write(op) + self._write_int4(len(s), "set is too long") + 
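+    # Illustrative round-trip through this module's public helpers
+    # (not part of the upstream class):
+    #
+    #     blob = dumps({"nums": [1, 2, 3], "ok": True})
+    #     assert loads(blob) == {"nums": [1, 2, 3], "ok": True}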
+ def save_set(self, s): + self._write_set(s, opcode.SET) + + def save_frozenset(self, s): + self._write_set(s, opcode.FROZENSET) + + def save_Channel(self, channel): + self._write(opcode.CHANNEL) + self._write_int4(channel.id) + + +def init_popen_io(execmodel): + if not hasattr(os, "dup"): # jython + io = Popen2IO(sys.stdout, sys.stdin, execmodel) + import tempfile + + sys.stdin = tempfile.TemporaryFile("r") + sys.stdout = tempfile.TemporaryFile("w") + else: + try: + devnull = os.devnull + except AttributeError: + if os.name == "nt": + devnull = "NUL" + else: + devnull = "/dev/null" + # stdin + stdin = execmodel.fdopen(os.dup(0), "r", 1) + fd = os.open(devnull, os.O_RDONLY) + os.dup2(fd, 0) + os.close(fd) + + # stdout + stdout = execmodel.fdopen(os.dup(1), "w", 1) + fd = os.open(devnull, os.O_WRONLY) + os.dup2(fd, 1) + + # stderr for win32 + if os.name == "nt": + sys.stderr = execmodel.fdopen(os.dup(2), "w", 1) + os.dup2(fd, 2) + os.close(fd) + io = Popen2IO(stdout, stdin, execmodel) + sys.stdin = execmodel.fdopen(0, "r", 1) + sys.stdout = execmodel.fdopen(1, "w", 1) + return io + + +def serve(io, id): + trace(f"creating workergateway on {io!r}") + WorkerGateway(io=io, id=id, _startcount=2).serve() diff --git a/venv/lib/python3.10/site-packages/execnet/gateway_bootstrap.py b/venv/lib/python3.10/site-packages/execnet/gateway_bootstrap.py new file mode 100644 index 0000000..ba5bf10 --- /dev/null +++ b/venv/lib/python3.10/site-packages/execnet/gateway_bootstrap.py @@ -0,0 +1,109 @@ +""" +code to initialize the remote side of a gateway once the io is created +""" +import inspect +import os + +import execnet + +from . import gateway_base +from .gateway import Gateway + +importdir = os.path.dirname(os.path.dirname(execnet.__file__)) + + +class HostNotFound(Exception): + pass + + +def bootstrap_import(io, spec): + # only insert the importdir into the path if we must. 
This prevents + # bugs where backports expect to be shadowed by the standard library on + # newer versions of python but would instead shadow the standard library + sendexec( + io, + "import sys", + "if %r not in sys.path:" % importdir, + " sys.path.insert(0, %r)" % importdir, + "from execnet.gateway_base import serve, init_popen_io, get_execmodel", + "sys.stdout.write('1')", + "sys.stdout.flush()", + "execmodel = get_execmodel(%r)" % spec.execmodel, + "serve(init_popen_io(execmodel), id='%s-worker')" % spec.id, + ) + s = io.read(1) + assert s == b"1", repr(s) + + +def bootstrap_exec(io, spec): + try: + sendexec( + io, + inspect.getsource(gateway_base), + "execmodel = get_execmodel(%r)" % spec.execmodel, + "io = init_popen_io(execmodel)", + "io.write('1'.encode('ascii'))", + "serve(io, id='%s-worker')" % spec.id, + ) + s = io.read(1) + assert s == b"1" + except EOFError: + ret = io.wait() + if ret == 255: + raise HostNotFound(io.remoteaddress) + + +def bootstrap_socket(io, id): + # XXX: switch to spec + from execnet.gateway_socket import SocketIO + + sendexec( + io, + inspect.getsource(gateway_base), + "import socket", + inspect.getsource(SocketIO), + "try: execmodel", + "except NameError:", + " execmodel = get_execmodel('thread')", + "io = SocketIO(clientsock, execmodel)", + "io.write('1'.encode('ascii'))", + "serve(io, id='%s-worker')" % id, + ) + s = io.read(1) + assert s == b"1" + + +def sendexec(io, *sources): + source = "\n".join(sources) + io.write((repr(source) + "\n").encode("utf-8")) + + +def fix_pid_for_jython_popen(gw): + """ + fix for jython 2.5.1 + """ + spec, io = gw.spec, gw._io + if spec.popen and not spec.via: + # XXX: handle the case of remote being jython + # and not having the popen pid + if io.popen.pid is None: + io.popen.pid = gw.remote_exec( + "import os; channel.send(os.getpid())" + ).receive() + + +def bootstrap(io, spec): + if spec.popen: + if spec.via or spec.python: + bootstrap_exec(io, spec) + else: + bootstrap_import(io, spec) + elif spec.ssh or spec.vagrant_ssh: + bootstrap_exec(io, spec) + elif spec.socket: + bootstrap_socket(io, spec) + else: + raise ValueError("unknown gateway type, can't bootstrap") + gw = Gateway(io, spec) + fix_pid_for_jython_popen(gw) + return gw diff --git a/venv/lib/python3.10/site-packages/execnet/gateway_io.py b/venv/lib/python3.10/site-packages/execnet/gateway_io.py new file mode 100644 index 0000000..c631f8d --- /dev/null +++ b/venv/lib/python3.10/site-packages/execnet/gateway_io.py @@ -0,0 +1,231 @@ +""" +execnet io initialization code + +creates io instances used for gateway io +""" +import os +import shlex +import sys + +try: + from execnet.gateway_base import Popen2IO, Message +except ImportError: + from __main__ import Popen2IO, Message # type: ignore[no-redef] + +from functools import partial + + +class Popen2IOMaster(Popen2IO): + def __init__(self, args, execmodel): + PIPE = execmodel.subprocess.PIPE + self.popen = p = execmodel.subprocess.Popen(args, stdout=PIPE, stdin=PIPE) + super().__init__(p.stdin, p.stdout, execmodel=execmodel) + + def wait(self): + try: + return self.popen.wait() + except OSError: + pass # subprocess probably dead already + + def kill(self): + killpopen(self.popen) + + +def killpopen(popen): + try: + popen.kill() + except OSError as e: + sys.stderr.write("ERROR killing: %s\n" % e) + sys.stderr.flush() + + +popen_bootstrapline = "import sys;exec(eval(sys.stdin.readline()))" + + +def shell_split_path(path): + """ + Use shell lexer to split the given path into a list of components, + taking care to 
handle Windows' '\' correctly. + """ + if sys.platform.startswith("win"): + # replace \\ by / otherwise shlex will strip them out + path = path.replace("\\", "/") + return shlex.split(path) + + +def popen_args(spec): + args = shell_split_path(spec.python) if spec.python else [sys.executable] + args.append("-u") + if spec.dont_write_bytecode: + args.append("-B") + args.extend(["-c", popen_bootstrapline]) + return args + + +def ssh_args(spec): + # NOTE: If changing this, you need to sync those changes to vagrant_args + # as well, or, take some time to further refactor the commonalities of + # ssh_args and vagrant_args. + remotepython = spec.python or "python" + args = ["ssh", "-C"] + if spec.ssh_config is not None: + args.extend(["-F", str(spec.ssh_config)]) + + args.extend(spec.ssh.split()) + remotecmd = f'{remotepython} -c "{popen_bootstrapline}"' + args.append(remotecmd) + return args + + +def vagrant_ssh_args(spec): + # This is the vagrant-wrapped version of SSH. Unfortunately the + # command lines are incompatible to just channel through ssh_args + # due to ordering/templating issues. + # NOTE: This should be kept in sync with the ssh_args behaviour. + # spec.vagrant is identical to spec.ssh in that they both carry + # the remote host "address". + remotepython = spec.python or "python" + args = ["vagrant", "ssh", spec.vagrant_ssh, "--", "-C"] + if spec.ssh_config is not None: + args.extend(["-F", str(spec.ssh_config)]) + remotecmd = f'{remotepython} -c "{popen_bootstrapline}"' + args.extend([remotecmd]) + return args + + +def create_io(spec, execmodel): + if spec.popen: + args = popen_args(spec) + return Popen2IOMaster(args, execmodel) + if spec.ssh: + args = ssh_args(spec) + io = Popen2IOMaster(args, execmodel) + io.remoteaddress = spec.ssh + return io + if spec.vagrant_ssh: + args = vagrant_ssh_args(spec) + io = Popen2IOMaster(args, execmodel) + io.remoteaddress = spec.vagrant_ssh + return io + + +# +# Proxy Gateway handling code +# +# master: proxy initiator +# forwarder: forwards between master and sub +# sub: sub process that is proxied to the initiator + +RIO_KILL = 1 +RIO_WAIT = 2 +RIO_REMOTEADDRESS = 3 +RIO_CLOSE_WRITE = 4 + + +class ProxyIO: + """A Proxy IO object allows to instantiate a Gateway + through another "via" gateway. A master:ProxyIO object + provides an IO object effectively connected to the sub + via the forwarder. To achieve this, master:ProxyIO interacts + with forwarder:serve_proxy_io() which itself + instantiates and interacts with the sub. 
+ """ + + def __init__(self, proxy_channel, execmodel): + # after exchanging the control channel we use proxy_channel + # for messaging IO + self.controlchan = proxy_channel.gateway.newchannel() + proxy_channel.send(self.controlchan) + self.iochan = proxy_channel + self.iochan_file = self.iochan.makefile("r") + self.execmodel = execmodel + + def read(self, nbytes): + return self.iochan_file.read(nbytes) + + def write(self, data): + return self.iochan.send(data) + + def _controll(self, event): + self.controlchan.send(event) + return self.controlchan.receive() + + def close_write(self): + self._controll(RIO_CLOSE_WRITE) + + def kill(self): + self._controll(RIO_KILL) + + def wait(self): + return self._controll(RIO_WAIT) + + @property + def remoteaddress(self): + return self._controll(RIO_REMOTEADDRESS) + + def __repr__(self): + return f"" + + +class PseudoSpec: + def __init__(self, vars): + self.__dict__.update(vars) + + def __getattr__(self, name): + return None + + +def serve_proxy_io(proxy_channelX): + execmodel = proxy_channelX.gateway.execmodel + log = partial( + proxy_channelX.gateway._trace, "serve_proxy_io:%s" % proxy_channelX.id + ) + spec = PseudoSpec(proxy_channelX.receive()) + # create sub IO object which we will proxy back to our proxy initiator + sub_io = create_io(spec, execmodel) + control_chan = proxy_channelX.receive() + log("got control chan", control_chan) + + # read data from master, forward it to the sub + # XXX writing might block, thus blocking the receiver thread + def forward_to_sub(data): + log("forward data to sub, size %s" % len(data)) + sub_io.write(data) + + proxy_channelX.setcallback(forward_to_sub) + + def control(data): + if data == RIO_WAIT: + control_chan.send(sub_io.wait()) + elif data == RIO_KILL: + control_chan.send(sub_io.kill()) + elif data == RIO_REMOTEADDRESS: + control_chan.send(sub_io.remoteaddress) + elif data == RIO_CLOSE_WRITE: + control_chan.send(sub_io.close_write()) + + control_chan.setcallback(control) + + # write data to the master coming from the sub + forward_to_master_file = proxy_channelX.makefile("w") + + # read bootstrap byte from sub, send it on to master + log("reading bootstrap byte from sub", spec.id) + initial = sub_io.read(1) + assert initial == b"1", initial + log("forwarding bootstrap byte from sub", spec.id) + forward_to_master_file.write(initial) + + # enter message forwarding loop + while True: + try: + message = Message.from_io(sub_io) + except EOFError: + log("EOF from sub, terminating proxying loop", spec.id) + break + message.to_io(forward_to_master_file) + # proxy_channelX will be closed from remote_exec's finalization code + + +if __name__ == "__channelexec__": + serve_proxy_io(channel) # type: ignore[name-defined] diff --git a/venv/lib/python3.10/site-packages/execnet/gateway_socket.py b/venv/lib/python3.10/site-packages/execnet/gateway_socket.py new file mode 100644 index 0000000..4379e01 --- /dev/null +++ b/venv/lib/python3.10/site-packages/execnet/gateway_socket.py @@ -0,0 +1,89 @@ +import sys + +from execnet.gateway_bootstrap import HostNotFound + + +class SocketIO: + def __init__(self, sock, execmodel): + self.sock = sock + self.execmodel = execmodel + socket = execmodel.socket + try: + # IPTOS_LOWDELAY + sock.setsockopt(socket.SOL_IP, socket.IP_TOS, 0x10) + sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1) + except (AttributeError, OSError): + sys.stderr.write("WARNING: cannot set socketoption") + + def read(self, numbytes): + "Read exactly 'bytes' bytes from the socket." 
+ buf = b"" + while len(buf) < numbytes: + t = self.sock.recv(numbytes - len(buf)) + if not t: + raise EOFError + buf += t + return buf + + def write(self, data): + self.sock.sendall(data) + + def close_read(self): + try: + self.sock.shutdown(0) + except self.execmodel.socket.error: + pass + + def close_write(self): + try: + self.sock.shutdown(1) + except self.execmodel.socket.error: + pass + + def wait(self): + pass + + def kill(self): + pass + + +def start_via(gateway, hostport=None): + """return a host, port tuple, + after instantiating a socketserver on the given gateway + """ + if hostport is None: + host, port = ("localhost", 0) + else: + host, port = hostport + + from execnet.script import socketserver + + # execute the above socketserverbootstrap on the other side + channel = gateway.remote_exec(socketserver) + channel.send((host, port)) + (realhost, realport) = channel.receive() + # self._trace("new_remote received" + # "port=%r, hostname = %r" %(realport, hostname)) + if not realhost or realhost == "0.0.0.0": + realhost = "localhost" + return realhost, realport + + +def create_io(spec, group, execmodel): + assert not spec.python, "socket: specifying python executables not yet supported" + gateway_id = spec.installvia + if gateway_id: + host, port = start_via(group[gateway_id]) + else: + host, port = spec.socket.split(":") + port = int(port) + + socket = execmodel.socket + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + io = SocketIO(sock, execmodel) + io.remoteaddress = "%s:%d" % (host, port) + try: + sock.connect((host, port)) + except execmodel.socket.gaierror: + raise HostNotFound(str(sys.exc_info()[1])) + return io diff --git a/venv/lib/python3.10/site-packages/execnet/multi.py b/venv/lib/python3.10/site-packages/execnet/multi.py new file mode 100644 index 0000000..64e9501 --- /dev/null +++ b/venv/lib/python3.10/site-packages/execnet/multi.py @@ -0,0 +1,316 @@ +""" +Managing Gateway Groups and interactions with multiple channels. + +(c) 2008-2014, Holger Krekel and others +""" +import atexit +import sys +from functools import partial +from threading import Lock + +from . import gateway_bootstrap +from . import gateway_io +from .gateway_base import get_execmodel +from .gateway_base import trace +from .gateway_base import WorkerPool +from .xspec import XSpec + +NO_ENDMARKER_WANTED = object() + + +class Group: + """Gateway Groups.""" + + defaultspec = "popen" + + def __init__(self, xspecs=(), execmodel="thread"): + """initialize group and make gateways as specified. + execmodel can be 'thread' or 'eventlet'. + """ + self._gateways = [] + self._autoidcounter = 0 + self._autoidlock = Lock() + self._gateways_to_join = [] + # we use the same execmodel for all of the Gateway objects + # we spawn on our side. Probably we should not allow different + # execmodels between different groups but not clear. + # Note that "other side" execmodels may differ and is typically + # specified by the spec passed to makegateway. + self.set_execmodel(execmodel) + for xspec in xspecs: + self.makegateway(xspec) + atexit.register(self._cleanup_atexit) + + @property + def execmodel(self): + return self._execmodel + + @property + def remote_execmodel(self): + return self._remote_execmodel + + def set_execmodel(self, execmodel, remote_execmodel=None): + """Set the execution model for local and remote site. + + execmodel can be one of "thread" or "eventlet" (XXX gevent). + It determines the execution model for any newly created gateway. 
+        If remote_execmodel is not specified it takes on the value
+        of execmodel.
+
+        NOTE: Execution models can only be set before any gateway is created.
+
+        """
+        if self._gateways:
+            raise ValueError(
+                "can not set execution models if " "gateways have been created already"
+            )
+        if remote_execmodel is None:
+            remote_execmodel = execmodel
+        self._execmodel = get_execmodel(execmodel)
+        self._remote_execmodel = get_execmodel(remote_execmodel)
+
+    def __repr__(self):
+        idgateways = [gw.id for gw in self]
+        return "<Group %r>" % idgateways
+
+    def __getitem__(self, key):
+        if isinstance(key, int):
+            return self._gateways[key]
+        for gw in self._gateways:
+            if gw == key or gw.id == key:
+                return gw
+        raise KeyError(key)
+
+    def __contains__(self, key):
+        try:
+            self[key]
+            return True
+        except KeyError:
+            return False
+
+    def __len__(self):
+        return len(self._gateways)
+
+    def __iter__(self):
+        return iter(list(self._gateways))
+
+    def makegateway(self, spec=None):
+        """create and configure a gateway to a Python interpreter.
+        The ``spec`` string encodes the target gateway type
+        and configuration information. The general format is::
+
+            key1=value1//key2=value2//...
+
+        If you leave out the ``=value`` part a True value is assumed.
+        Valid types: ``popen``, ``ssh=hostname``, ``socket=host:port``.
+        Valid configuration::
+
+            id=<string>     specifies the gateway id
+            python=<path>   specifies which python interpreter to execute
+            execmodel=model 'thread', 'eventlet', 'gevent' model for execution
+            chdir=<path>    specifies to which directory to change
+            nice=<int>      specifies process priority of new process
+            env:NAME=value  specifies a remote environment variable setting.
+
+        If no spec is given, self.defaultspec is used.
+        """
+        if not spec:
+            spec = self.defaultspec
+        if not isinstance(spec, XSpec):
+            spec = XSpec(spec)
+        self.allocate_id(spec)
+        if spec.execmodel is None:
+            spec.execmodel = self.remote_execmodel.backend
+        if spec.via:
+            assert not spec.socket
+            master = self[spec.via]
+            proxy_channel = master.remote_exec(gateway_io)
+            proxy_channel.send(vars(spec))
+            proxy_io_master = gateway_io.ProxyIO(proxy_channel, self.execmodel)
+            gw = gateway_bootstrap.bootstrap(proxy_io_master, spec)
+        elif spec.popen or spec.ssh or spec.vagrant_ssh:
+            io = gateway_io.create_io(spec, execmodel=self.execmodel)
+            gw = gateway_bootstrap.bootstrap(io, spec)
+        elif spec.socket:
+            from . import gateway_socket
+
+            io = gateway_socket.create_io(spec, self, execmodel=self.execmodel)
+            gw = gateway_bootstrap.bootstrap(io, spec)
+        else:
+            raise ValueError(f"no gateway type found for {spec._spec!r}")
+        gw.spec = spec
+        self._register(gw)
+        if spec.chdir or spec.nice or spec.env:
+            channel = gw.remote_exec(
+                """
+                import os
+                path, nice, env = channel.receive()
+                if path:
+                    if not os.path.exists(path):
+                        os.mkdir(path)
+                    os.chdir(path)
+                if nice and hasattr(os, 'nice'):
+                    os.nice(nice)
+                if env:
+                    for name, value in env.items():
+                        os.environ[name] = value
+            """
+            )
+            nice = spec.nice and int(spec.nice) or 0
+            channel.send((spec.chdir, nice, spec.env))
+            channel.waitclose()
+        return gw
+
+    def allocate_id(self, spec):
+        """(re-entrant) allocate id for the given xspec object."""
+        if spec.id is None:
+            with self._autoidlock:
+                id = "gw" + str(self._autoidcounter)
+                self._autoidcounter += 1
+                if id in self:
+                    raise ValueError(f"already have gateway with id {id!r}")
+                spec.id = id
+
+    def _register(self, gateway):
+        assert not hasattr(gateway, "_group")
+        assert gateway.id
+        assert gateway.id not in self
+        self._gateways.append(gateway)
+        gateway._group = self
+
+    def _unregister(self, gateway):
+        self._gateways.remove(gateway)
+        self._gateways_to_join.append(gateway)
+
+    def _cleanup_atexit(self):
+        trace(f"=== atexit cleanup {self!r} ===")
+        self.terminate(timeout=1.0)
+
+    def terminate(self, timeout=None):
+        """trigger exit of member gateways and wait for termination
+        of member gateways and associated subprocesses. After waiting
+        timeout seconds try to kill local sub processes of popen-
+        and ssh-gateways. Timeout defaults to None meaning
+        open-ended waiting and no kill attempts.
+        """
+
+        while self:
+            vias = {}
+            for gw in self:
+                if gw.spec.via:
+                    vias[gw.spec.via] = True
+            for gw in self:
+                if gw.id not in vias:
+                    gw.exit()
+
+            def join_wait(gw):
+                gw.join()
+                gw._io.wait()
+
+            def kill(gw):
+                trace("Gateways did not come down after timeout: %r" % gw)
+                gw._io.kill()
+
+            safe_terminate(
+                self.execmodel,
+                timeout,
+                [
+                    (partial(join_wait, gw), partial(kill, gw))
+                    for gw in self._gateways_to_join
+                ],
+            )
+            self._gateways_to_join[:] = []
+
+    def remote_exec(self, source, **kwargs):
+        """remote_exec source on all member gateways and return
+        MultiChannel connecting to all sub processes.
+ """ + channels = [] + for gw in self: + channels.append(gw.remote_exec(source, **kwargs)) + return MultiChannel(channels) + + +class MultiChannel: + def __init__(self, channels): + self._channels = channels + + def __len__(self): + return len(self._channels) + + def __iter__(self): + return iter(self._channels) + + def __getitem__(self, key): + return self._channels[key] + + def __contains__(self, chan): + return chan in self._channels + + def send_each(self, item): + for ch in self._channels: + ch.send(item) + + def receive_each(self, withchannel=False): + assert not hasattr(self, "_queue") + l = [] + for ch in self._channels: + obj = ch.receive() + if withchannel: + l.append((ch, obj)) + else: + l.append(obj) + return l + + def make_receive_queue(self, endmarker=NO_ENDMARKER_WANTED): + try: + return self._queue + except AttributeError: + self._queue = None + for ch in self._channels: + if self._queue is None: + self._queue = ch.gateway.execmodel.queue.Queue() + + def putreceived(obj, channel=ch): + self._queue.put((channel, obj)) + + if endmarker is NO_ENDMARKER_WANTED: + ch.setcallback(putreceived) + else: + ch.setcallback(putreceived, endmarker=endmarker) + return self._queue + + def waitclose(self): + first = None + for ch in self._channels: + try: + ch.waitclose() + except ch.RemoteError: + if first is None: + first = sys.exc_info() + if first: + raise first[1].with_traceback(first[2]) + + +def safe_terminate(execmodel, timeout, list_of_paired_functions): + workerpool = WorkerPool(execmodel) + + def termkill(termfunc, killfunc): + termreply = workerpool.spawn(termfunc) + try: + termreply.get(timeout=timeout) + except OSError: + killfunc() + + replylist = [] + for termfunc, killfunc in list_of_paired_functions: + reply = workerpool.spawn(termkill, termfunc, killfunc) + replylist.append(reply) + for reply in replylist: + reply.get() + workerpool.waitall(timeout=timeout) + + +default_group = Group() +makegateway = default_group.makegateway +set_execmodel = default_group.set_execmodel diff --git a/venv/lib/python3.10/site-packages/execnet/rsync.py b/venv/lib/python3.10/site-packages/execnet/rsync.py new file mode 100644 index 0000000..1484d49 --- /dev/null +++ b/venv/lib/python3.10/site-packages/execnet/rsync.py @@ -0,0 +1,215 @@ +""" +1:N rsync implementation on top of execnet. + +(c) 2006-2009, Armin Rigo, Holger Krekel, Maciej Fijalkowski +""" +import os +import stat +from hashlib import md5 +from queue import Queue + +import execnet.rsync_remote + + +class RSync: + """This class allows to send a directory structure (recursively) + to one or multiple remote filesystems. + + There is limited support for symlinks, which means that symlinks + pointing to the sourcetree will be send "as is" while external + symlinks will be just copied (regardless of existence of such + a path on remote side). + """ + + def __init__(self, sourcedir, callback=None, verbose=True): + self._sourcedir = str(sourcedir) + self._verbose = verbose + assert callback is None or hasattr(callback, "__call__") + self._callback = callback + self._channels = {} + self._receivequeue = Queue() + self._links = [] + + def filter(self, path): + return True + + def _end_of_channel(self, channel): + if channel in self._channels: + # too early! 
we must have got an error
+            channel.waitclose()
+            # or else we raise one
+            raise OSError(f"connection unexpectedly closed: {channel.gateway} ")
+
+    def _process_link(self, channel):
+        for link in self._links:
+            channel.send(link)
+        # completion marker, this host is done
+        channel.send(42)
+
+    def _done(self, channel):
+        """Call all callbacks"""
+        finishedcallback = self._channels.pop(channel)
+        if finishedcallback:
+            finishedcallback()
+        channel.waitclose()
+
+    def _list_done(self, channel):
+        # sum up all to send
+        if self._callback:
+            s = sum([self._paths[i] for i in self._to_send[channel]])
+            self._callback("list", s, channel)
+
+    def _send_item(self, channel, data):
+        """Send one item"""
+        modified_rel_path, checksum = data
+        modifiedpath = os.path.join(self._sourcedir, *modified_rel_path)
+        try:
+            f = open(modifiedpath, "rb")
+            data = f.read()
+        except OSError:
+            data = None
+
+        # provide info to progress callback function
+        modified_rel_path = "/".join(modified_rel_path)
+        if data is not None:
+            self._paths[modified_rel_path] = len(data)
+        else:
+            self._paths[modified_rel_path] = 0
+        if channel not in self._to_send:
+            self._to_send[channel] = []
+        self._to_send[channel].append(modified_rel_path)
+        # print "sending", modified_rel_path, data and len(data) or 0, checksum
+
+        if data is not None:
+            f.close()
+            if checksum is not None and checksum == md5(data).digest():
+                data = None  # not really modified
+            else:
+                self._report_send_file(channel.gateway, modified_rel_path)
+        channel.send(data)
+
+    def _report_send_file(self, gateway, modified_rel_path):
+        if self._verbose:
+            print(f"{gateway} <= {modified_rel_path}")
+
+    def send(self, raises=True):
+        """Sends a sourcedir to all added targets. Flag indicates
+        whether to raise an error or return in case of lack of
+        targets
+        """
+        if not self._channels:
+            if raises:
+                raise OSError(
+                    "no targets available, maybe you " "are trying to call send() twice?"
+                )
+            return
+        # normalize a trailing '/' away
+        self._sourcedir = os.path.dirname(os.path.join(self._sourcedir, "x"))
+        # send directory structure and file timestamps/sizes
+        self._send_directory_structure(self._sourcedir)
+
+        # paths and to_send are only used for doing
+        # progress-related callbacks
+        self._paths = {}
+        self._to_send = {}
+
+        # send modified file to clients
+        while self._channels:
+            channel, req = self._receivequeue.get()
+            if req is None:
+                self._end_of_channel(channel)
+            else:
+                command, data = req
+                if command == "links":
+                    self._process_link(channel)
+                elif command == "done":
+                    self._done(channel)
+                elif command == "ack":
+                    if self._callback:
+                        self._callback("ack", self._paths[data], channel)
+                elif command == "list_done":
+                    self._list_done(channel)
+                elif command == "send":
+                    self._send_item(channel, data)
+                    del data
+                else:
+                    raise ValueError("Unknown command %s" % command)
+
+    def add_target(self, gateway, destdir, finishedcallback=None, **options):
+        """Adds a remote target specified via a gateway
+        and a remote destination directory.
+ """ + for name in options: + assert name in ("delete",) + + def itemcallback(req): + self._receivequeue.put((channel, req)) + + channel = gateway.remote_exec(execnet.rsync_remote) + channel.reconfigure(py2str_as_py3str=False, py3str_as_py2str=False) + channel.setcallback(itemcallback, endmarker=None) + channel.send((str(destdir), options)) + self._channels[channel] = finishedcallback + + def _broadcast(self, msg): + for channel in self._channels: + channel.send(msg) + + def _send_link(self, linktype, basename, linkpoint): + self._links.append((linktype, basename, linkpoint)) + + def _send_directory(self, path): + # dir: send a list of entries + names = [] + subpaths = [] + for name in os.listdir(path): + p = os.path.join(path, name) + if self.filter(p): + names.append(name) + subpaths.append(p) + mode = os.lstat(path).st_mode + self._broadcast([mode] + names) + for p in subpaths: + self._send_directory_structure(p) + + def _send_link_structure(self, path): + sourcedir = self._sourcedir + basename = path[len(self._sourcedir) + 1 :] + linkpoint = os.readlink(path) + # On Windows, readlink returns an extended path (//?/) for + # absolute links, but relpath doesn't like mixing extended + # and non-extended paths. So fix it up ourselves. + if ( + os.path.__name__ == "ntpath" + and linkpoint.startswith("\\\\?\\") + and not self._sourcedir.startswith("\\\\?\\") + ): + sourcedir = "\\\\?\\" + self._sourcedir + try: + relpath = os.path.relpath(linkpoint, sourcedir) + except ValueError: + relpath = None + if relpath not in (None, os.curdir, os.pardir) and not relpath.startswith( + os.pardir + os.sep + ): + self._send_link("linkbase", basename, relpath) + else: + # relative or absolute link, just send it + self._send_link("link", basename, linkpoint) + self._broadcast(None) + + def _send_directory_structure(self, path): + try: + st = os.lstat(path) + except OSError: + self._broadcast((None, 0, 0)) + return + if stat.S_ISREG(st.st_mode): + # regular file: send a mode/timestamp/size pair + self._broadcast((st.st_mode, st.st_mtime, st.st_size)) + elif stat.S_ISDIR(st.st_mode): + self._send_directory(path) + elif stat.S_ISLNK(st.st_mode): + self._send_link_structure(path) + else: + raise ValueError(f"cannot sync {path!r}") diff --git a/venv/lib/python3.10/site-packages/execnet/rsync_remote.py b/venv/lib/python3.10/site-packages/execnet/rsync_remote.py new file mode 100644 index 0000000..4ac1880 --- /dev/null +++ b/venv/lib/python3.10/site-packages/execnet/rsync_remote.py @@ -0,0 +1,117 @@ +""" +(c) 2006-2013, Armin Rigo, Holger Krekel, Maciej Fijalkowski +""" + + +def serve_rsync(channel): + import os + import stat + import shutil + from hashlib import md5 + + destdir, options = channel.receive() + modifiedfiles = [] + + def remove(path): + assert path.startswith(destdir) + try: + os.unlink(path) + except OSError: + # assume it's a dir + shutil.rmtree(path, True) + + def receive_directory_structure(path, relcomponents): + try: + st = os.lstat(path) + except OSError: + st = None + msg = channel.receive() + if isinstance(msg, list): + if st and not stat.S_ISDIR(st.st_mode): + os.unlink(path) + st = None + if not st: + os.makedirs(path) + mode = msg.pop(0) + if mode: + # Ensure directories are writable, otherwise a + # permission denied error (EACCES) would be raised + # when attempting to receive read-only directory + # structures. 
+ os.chmod(path, mode | 0o700) + entrynames = {} + for entryname in msg: + destpath = os.path.join(path, entryname) + receive_directory_structure(destpath, relcomponents + [entryname]) + entrynames[entryname] = True + if options.get("delete"): + for othername in os.listdir(path): + if othername not in entrynames: + otherpath = os.path.join(path, othername) + remove(otherpath) + elif msg is not None: + assert isinstance(msg, tuple) + checksum = None + if st: + if stat.S_ISREG(st.st_mode): + msg_mode, msg_mtime, msg_size = msg + if msg_size != st.st_size: + pass + elif msg_mtime != st.st_mtime: + f = open(path, "rb") + checksum = md5(f.read()).digest() + f.close() + elif msg_mode and msg_mode != st.st_mode: + os.chmod(path, msg_mode | 0o700) + return + else: + return # already fine + else: + remove(path) + channel.send(("send", (relcomponents, checksum))) + modifiedfiles.append((path, msg)) + + receive_directory_structure(destdir, []) + + STRICT_CHECK = False # seems most useful this way for py.test + channel.send(("list_done", None)) + + for path, (mode, time, size) in modifiedfiles: + data = channel.receive() + channel.send(("ack", path[len(destdir) + 1 :])) + if data is not None: + if STRICT_CHECK and len(data) != size: + raise OSError(f"file modified during rsync: {path!r}") + f = open(path, "wb") + f.write(data) + f.close() + try: + if mode: + os.chmod(path, mode) + os.utime(path, (time, time)) + except OSError: + pass + del data + channel.send(("links", None)) + + msg = channel.receive() + while msg != 42: + # we get symlink + _type, relpath, linkpoint = msg + path = os.path.join(destdir, relpath) + try: + remove(path) + except OSError: + pass + if _type == "linkbase": + src = os.path.join(destdir, linkpoint) + else: + assert _type == "link", _type + src = linkpoint + os.symlink(src, path) + msg = channel.receive() + channel.send(("done", None)) + + +if __name__ == "__channelexec__": + serve_rsync(channel) # type: ignore[name-defined] diff --git a/venv/lib/python3.10/site-packages/execnet/script/__init__.py b/venv/lib/python3.10/site-packages/execnet/script/__init__.py new file mode 100644 index 0000000..792d600 --- /dev/null +++ b/venv/lib/python3.10/site-packages/execnet/script/__init__.py @@ -0,0 +1 @@ +# diff --git a/venv/lib/python3.10/site-packages/execnet/script/loop_socketserver.py b/venv/lib/python3.10/site-packages/execnet/script/loop_socketserver.py new file mode 100644 index 0000000..a4688a8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/execnet/script/loop_socketserver.py @@ -0,0 +1,14 @@ +import os +import subprocess +import sys + +if __name__ == "__main__": + directory = os.path.dirname(os.path.abspath(sys.argv[0])) + script = os.path.join(directory, "socketserver.py") + while 1: + cmdlist = ["python", script] + cmdlist.extend(sys.argv[1:]) + text = "starting subcommand: " + " ".join(cmdlist) + print(text) + process = subprocess.Popen(cmdlist) + process.wait() diff --git a/venv/lib/python3.10/site-packages/execnet/script/quitserver.py b/venv/lib/python3.10/site-packages/execnet/script/quitserver.py new file mode 100644 index 0000000..9d5cbb1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/execnet/script/quitserver.py @@ -0,0 +1,17 @@ +""" + + send a "quit" signal to a remote server + +""" +from __future__ import annotations + +import socket +import sys + + +host, port = sys.argv[1].split(":") +hostport = (host, int(port)) + +sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) +sock.connect(hostport) +sock.sendall(b'"raise KeyboardInterrupt"\n') diff 
--git a/venv/lib/python3.10/site-packages/execnet/script/shell.py b/venv/lib/python3.10/site-packages/execnet/script/shell.py
new file mode 100644
index 0000000..f47cd4d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/execnet/script/shell.py
@@ -0,0 +1,88 @@
+#! /usr/bin/env python
+"""
+a remote python shell
+
+for injection into startserver.py
+"""
+import os
+import select
+import socket
+import sys
+from threading import Thread
+from traceback import print_exc
+
+
+def clientside():
+    print("client side starting")
+    host, port = sys.argv[1].split(":")
+    port = int(port)
+    myself = open(os.path.abspath(sys.argv[0])).read()
+    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+    sock.connect((host, port))
+    sock.sendall((repr(myself) + "\n").encode())
+    print("sent boot string")
+    inputlist = [sock, sys.stdin]
+    try:
+        while 1:
+            r, w, e = select.select(inputlist, [], [])
+            if sys.stdin in r:
+                line = input()
+                sock.sendall((line + "\n").encode())
+            if sock in r:
+                line = sock.recv(4096).decode()
+                sys.stdout.write(line)
+                sys.stdout.flush()
+    except BaseException:
+        print_exc()
+        sys.exit(1)
+
+
+class promptagent(Thread):
+    def __init__(self, clientsock):
+        print("server side starting")
+        super().__init__()
+        self.clientsock = clientsock
+
+    def run(self):
+        print("Entering thread prompt loop")
+        clientfile = self.clientsock.makefile("w")
+
+        filein = self.clientsock.makefile("r")
+        loc = self.clientsock.getsockname()
+
+        while 1:
+            try:
+                clientfile.write("%s %s >>> " % loc)
+                clientfile.flush()
+                line = filein.readline()
+                if not line:
+                    raise EOFError("nothing")
+                if line.strip():
+                    oldout, olderr = sys.stdout, sys.stderr
+                    sys.stdout, sys.stderr = clientfile, clientfile
+                    try:
+                        try:
+                            exec(compile(line + "\n", "", "single"))
+                        except BaseException:
+                            print_exc()
+                    finally:
+                        sys.stdout = oldout
+                        sys.stderr = olderr
+                clientfile.flush()
+            except EOFError:
+                sys.stderr.write("connection close, prompt thread returns")
+                break
+
+        self.clientsock.close()
+
+
+sock = globals().get("clientsock")
+if sock is not None:
+    prompter = promptagent(sock)
+    prompter.start()
+    print("promptagent - thread started")
+else:
+    clientside()
diff --git a/venv/lib/python3.10/site-packages/execnet/script/socketserver.py b/venv/lib/python3.10/site-packages/execnet/script/socketserver.py
new file mode 100644
index 0000000..034b434
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/execnet/script/socketserver.py
@@ -0,0 +1,133 @@
+#! /usr/bin/env python
+"""
+    start socket based minimal readline exec server
+
+    it can be executed in 2 modes of operation
+
+    1. as normal script, that listens for new connections
+
+    2.
via existing_gateway.remote_exec (as imported module) + +""" +# this part of the program only executes on the server side +# +import os +import sys + +progname = "socket_readline_exec_server-1.2" + + +def get_fcntl(): + try: + import fcntl + except ImportError: + fcntl = None + return fcntl + + +fcntl = get_fcntl() + +debug = 0 + +if debug: # and not os.isatty(sys.stdin.fileno()) + f = open("/tmp/execnet-socket-pyout.log", "w") + old = sys.stdout, sys.stderr + sys.stdout = sys.stderr = f + + +def print_(*args): + print(" ".join(str(arg) for arg in args)) + + +exec( + """def exec_(source, locs): + exec(source, locs)""" +) + + +def exec_from_one_connection(serversock): + print_(progname, "Entering Accept loop", serversock.getsockname()) + clientsock, address = serversock.accept() + print_(progname, "got new connection from %s %s" % address) + clientfile = clientsock.makefile("rb") + print_("reading line") + # rstrip so that we can use \r\n for telnet testing + source = clientfile.readline().rstrip() + clientfile.close() + g = {"clientsock": clientsock, "address": address, "execmodel": execmodel} + source = eval(source) + if source: + co = compile(source + "\n", "", "exec") + print_(progname, "compiled source, executing") + try: + exec_(co, g) # noqa + finally: + print_(progname, "finished executing code") + # background thread might hold a reference to this (!?) + # clientsock.close() + + +def bind_and_listen(hostport, execmodel): + socket = execmodel.socket + if isinstance(hostport, str): + host, port = hostport.split(":") + hostport = (host, int(port)) + serversock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + # set close-on-exec + if hasattr(fcntl, "FD_CLOEXEC"): + old = fcntl.fcntl(serversock.fileno(), fcntl.F_GETFD) + fcntl.fcntl(serversock.fileno(), fcntl.F_SETFD, old | fcntl.FD_CLOEXEC) + # allow the address to be re-used in a reasonable amount of time + if os.name == "posix" and sys.platform != "cygwin": + serversock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + + serversock.bind(hostport) + serversock.listen(5) + return serversock + + +def startserver(serversock, loop=False): + execute_path = os.getcwd() + try: + while 1: + try: + exec_from_one_connection(serversock) + except (KeyboardInterrupt, SystemExit): + raise + except BaseException: + if debug: + import traceback + + traceback.print_exc() + else: + excinfo = sys.exc_info() + print_("got exception", excinfo[1]) + os.chdir(execute_path) + if not loop: + break + finally: + print_("leaving socketserver execloop") + serversock.shutdown(2) + + +if __name__ == "__main__": + import sys + + if len(sys.argv) > 1: + hostport = sys.argv[1] + else: + hostport = ":8888" + from execnet.gateway_base import get_execmodel + + execmodel = get_execmodel("thread") + serversock = bind_and_listen(hostport, execmodel) + startserver(serversock, loop=True) + +elif __name__ == "__channelexec__": + chan = globals()["channel"] + execmodel = chan.gateway.execmodel + bindname = chan.receive() + sock = bind_and_listen(bindname, execmodel) + port = sock.getsockname() + chan.send(port) + startserver(sock) diff --git a/venv/lib/python3.10/site-packages/execnet/script/socketserverservice.py b/venv/lib/python3.10/site-packages/execnet/script/socketserverservice.py new file mode 100644 index 0000000..3d64f13 --- /dev/null +++ b/venv/lib/python3.10/site-packages/execnet/script/socketserverservice.py @@ -0,0 +1,91 @@ +""" +A windows service wrapper for the py.execnet socketserver. 
+
+To use, run:
+    python socketserverservice.py register
+    net start ExecNetSocketServer
+"""
+import socketserver
+import sys
+import threading
+
+import servicemanager
+import win32event
+import win32evtlogutil
+import win32service
+import win32serviceutil
+
+
+appname = "ExecNetSocketServer"
+
+
+class SocketServerService(win32serviceutil.ServiceFramework):
+    _svc_name_ = appname
+    _svc_display_name_ = "%s" % appname
+    _svc_deps_ = ["EventLog"]
+
+    def __init__(self, args):
+        # The exe-file has messages for the Event Log Viewer.
+        # Register the exe-file as event source.
+        #
+        # Probably it would be better if this is done at installation time,
+        # so that it also could be removed if the service is uninstalled.
+        # Unfortunately it cannot be done in the 'if __name__ == "__main__"'
+        # block below, because the 'frozen' exe-file does not run this code.
+        #
+        win32evtlogutil.AddSourceToRegistry(
+            self._svc_display_name_, servicemanager.__file__, "Application"
+        )
+        super().__init__(args)
+        self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)
+        self.WAIT_TIME = 1000  # in milliseconds
+
+    def SvcStop(self):
+        self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
+        win32event.SetEvent(self.hWaitStop)
+
+    def SvcDoRun(self):
+        # Redirect stdout and stderr to prevent "IOError: [Errno 9]
+        # Bad file descriptor". Windows services don't have functional
+        # output streams.
+        sys.stdout = sys.stderr = open("nul", "w")
+
+        # Write a 'started' event to the event log...
+        win32evtlogutil.ReportEvent(
+            self._svc_display_name_,
+            servicemanager.PYS_SERVICE_STARTED,
+            0,  # category
+            servicemanager.EVENTLOG_INFORMATION_TYPE,
+            (self._svc_name_, ""),
+        )
+        print("Begin: %s" % self._svc_display_name_)
+
+        hostport = ":8888"
+        print("Starting py.execnet SocketServer on %s" % hostport)
+        serversock = socketserver.bind_and_listen(hostport)
+        thread = threading.Thread(
+            target=socketserver.startserver, args=(serversock,), kwargs={"loop": True}
+        )
+        thread.daemon = True
+        thread.start()
+
+        # wait to be stopped or self.WAIT_TIME to pass
+        while True:
+            result = win32event.WaitForSingleObject(self.hWaitStop, self.WAIT_TIME)
+            if result == win32event.WAIT_OBJECT_0:
+                break
+
+        # write a 'stopped' event to the event log.
+        win32evtlogutil.ReportEvent(
+            self._svc_display_name_,
+            servicemanager.PYS_SERVICE_STOPPED,
+            0,  # category
+            servicemanager.EVENTLOG_INFORMATION_TYPE,
+            (self._svc_name_, ""),
+        )
+        print("End: %s" % appname)
+
+
+if __name__ == "__main__":
+    # Note that this code will not be run in the 'frozen' exe-file!!!
+    win32serviceutil.HandleCommandLine(SocketServerService)
diff --git a/venv/lib/python3.10/site-packages/execnet/script/xx.py b/venv/lib/python3.10/site-packages/execnet/script/xx.py
new file mode 100644
index 0000000..687cc81
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/execnet/script/xx.py
@@ -0,0 +1,12 @@
+import sys
+
+import register
+import rlcompleter2
+
+rlcompleter2.setup()
+
+try:
+    hostport = sys.argv[1]
+except BaseException:
+    hostport = ":8888"
+gw = register.ServerGateway(hostport)
diff --git a/venv/lib/python3.10/site-packages/execnet/xspec.py b/venv/lib/python3.10/site-packages/execnet/xspec.py
new file mode 100644
index 0000000..4d33ad6
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/execnet/xspec.py
@@ -0,0 +1,59 @@
+"""
+(c) 2008-2013, holger krekel
+"""
+
+
+class XSpec:
+    """Execution Specification: key1=value1//key2=value2 ...
+    * keys need to be unique within the specification scope
+    * neither key nor value are allowed to contain "//"
+    * keys are not allowed to contain "="
+    * keys are not allowed to start with underscore
+    * if no "=value" is given, assume a boolean True value
+    """
+
+    # XXX allow customization, e.g. to only allow specific key names
+    popen = (
+        ssh
+    ) = socket = python = chdir = nice = dont_write_bytecode = execmodel = None
+
+    def __init__(self, string):
+        self._spec = string
+        self.env = {}
+        for keyvalue in string.split("//"):
+            i = keyvalue.find("=")
+            if i == -1:
+                key, value = keyvalue, True
+            else:
+                key, value = keyvalue[:i], keyvalue[i + 1 :]
+            if key[0] == "_":
+                raise AttributeError("%r not a valid XSpec key" % key)
+            if key in self.__dict__:
+                raise ValueError(f"duplicate key: {key!r} in {string!r}")
+            if key.startswith("env:"):
+                self.env[key[4:]] = value
+            else:
+                setattr(self, key, value)
+
+    def __getattr__(self, name):
+        if name[0] == "_":
+            raise AttributeError(name)
+        return None
+
+    def __repr__(self):
+        return f"<XSpec {self._spec!r}>"
+
+    def __str__(self):
+        return self._spec
+
+    def __hash__(self):
+        return hash(self._spec)
+
+    def __eq__(self, other):
+        return self._spec == getattr(other, "_spec", None)
+
+    def __ne__(self, other):
+        return self._spec != getattr(other, "_spec", None)
+
+    def _samefilesystem(self):
+        return self.popen is not None and self.chdir is None
diff --git a/venv/lib/python3.10/site-packages/filelock-3.12.2.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/filelock-3.12.2.dist-info/INSTALLER
new file mode 100644
index 0000000..a1b589e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/filelock-3.12.2.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/venv/lib/python3.10/site-packages/filelock-3.12.2.dist-info/METADATA b/venv/lib/python3.10/site-packages/filelock-3.12.2.dist-info/METADATA
new file mode 100644
index 0000000..694ba40
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/filelock-3.12.2.dist-info/METADATA
@@ -0,0 +1,55 @@
+Metadata-Version: 2.1
+Name: filelock
+Version: 3.12.2
+Summary: A platform independent file lock.
+Project-URL: Documentation, https://py-filelock.readthedocs.io
+Project-URL: Homepage, https://github.com/tox-dev/py-filelock
+Project-URL: Source, https://github.com/tox-dev/py-filelock
+Project-URL: Tracker, https://github.com/tox-dev/py-filelock/issues
+Maintainer-email: Bernát Gábor
+License-Expression: Unlicense
+License-File: LICENSE
+Keywords: application,cache,directory,log,user
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: The Unlicense (Unlicense)
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Topic :: Internet
+Classifier: Topic :: Software Development :: Libraries
+Classifier: Topic :: System
+Requires-Python: >=3.7
+Provides-Extra: docs
+Requires-Dist: furo>=2023.5.20; extra == 'docs'
+Requires-Dist: sphinx-autodoc-typehints!=1.23.4,>=1.23; extra == 'docs'
+Requires-Dist: sphinx>=7.0.1; extra == 'docs'
+Provides-Extra: testing
+Requires-Dist: covdefaults>=2.3; extra == 'testing'
+Requires-Dist: coverage>=7.2.7; extra == 'testing'
+Requires-Dist: diff-cover>=7.5; extra == 'testing'
+Requires-Dist: pytest-cov>=4.1; extra == 'testing'
+Requires-Dist: pytest-mock>=3.10; extra == 'testing'
+Requires-Dist: pytest-timeout>=2.1; extra == 'testing'
+Requires-Dist: pytest>=7.3.1; extra == 'testing'
+Description-Content-Type: text/markdown
+
+# py-filelock
+
+[![PyPI](https://img.shields.io/pypi/v/filelock)](https://pypi.org/project/filelock/)
+[![Supported Python
+versions](https://img.shields.io/pypi/pyversions/filelock.svg)](https://pypi.org/project/filelock/)
+[![Documentation
+status](https://readthedocs.org/projects/py-filelock/badge/?version=latest)](https://py-filelock.readthedocs.io/en/latest/?badge=latest)
+[![Code style:
+black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black)
+[![Downloads](https://pepy.tech/badge/filelock/month)](https://pepy.tech/project/filelock)
+[![check](https://github.com/tox-dev/py-filelock/actions/workflows/check.yml/badge.svg)](https://github.com/tox-dev/py-filelock/actions/workflows/check.yml)
+
+For more information check out the [official documentation](https://py-filelock.readthedocs.io/en/latest/index.html).
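The README above stops at a pointer to the docs; for orientation, here is a minimal usage sketch consistent with the API of `filelock/_api.py` shown further down (the lock-file name `app.lock` is an illustrative placeholder):

```python
from filelock import FileLock, Timeout

# FileLock is an alias for the best lock type available on the platform
# (fcntl-based on Unix, msvcrt-based on Windows, soft lock as fallback).
lock = FileLock("app.lock", timeout=5)  # "app.lock" is a placeholder name

try:
    with lock:  # waits up to 5 seconds, then raises Timeout
        print("lock held; safe to modify the shared resource")
except Timeout:
    print("another process currently holds app.lock")
```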
diff --git a/venv/lib/python3.10/site-packages/filelock-3.12.2.dist-info/RECORD b/venv/lib/python3.10/site-packages/filelock-3.12.2.dist-info/RECORD new file mode 100644 index 0000000..7de6507 --- /dev/null +++ b/venv/lib/python3.10/site-packages/filelock-3.12.2.dist-info/RECORD @@ -0,0 +1,22 @@ +filelock-3.12.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +filelock-3.12.2.dist-info/METADATA,sha256=XziDNuweWluDKuB0HgIgQPYZr2D1UfRluRfZ7RNypsw,2724 +filelock-3.12.2.dist-info/RECORD,, +filelock-3.12.2.dist-info/WHEEL,sha256=9QBuHhg6FNW7lppboF2vKVbCGTVzsFykgRQjjlajrhA,87 +filelock-3.12.2.dist-info/licenses/LICENSE,sha256=iNm062BXnBkew5HKBMFhMFctfu3EqG2qWL8oxuFMm80,1210 +filelock/__init__.py,sha256=nCvrEw6t391LA0d_TsybAbRF3HI4g6lYoHil-xghyJs,1230 +filelock/__pycache__/__init__.cpython-310.pyc,, +filelock/__pycache__/_api.cpython-310.pyc,, +filelock/__pycache__/_error.cpython-310.pyc,, +filelock/__pycache__/_soft.cpython-310.pyc,, +filelock/__pycache__/_unix.cpython-310.pyc,, +filelock/__pycache__/_util.cpython-310.pyc,, +filelock/__pycache__/_windows.cpython-310.pyc,, +filelock/__pycache__/version.cpython-310.pyc,, +filelock/_api.py,sha256=iUUv2QVWTX4g3v2LSH2m8iF-ZlyP1UkezsfCSvXgil0,10125 +filelock/_error.py,sha256=-5jMcjTu60YAvAO1UbqDD1GIEjVkwr8xCFwDBtMeYDg,787 +filelock/_soft.py,sha256=FlmkORe37IXz0voO2JPmdDjk2W5BH5B5LSDqnQ7ZOTU,1638 +filelock/_unix.py,sha256=T-g81COqIF-yEJKKyxax_8joejxw7JVYWDPrpy2Cq2I,2062 +filelock/_util.py,sha256=Y3CMudAij-xLOWdIMxWhWEaOTCI_BICW0spcv_LFp4Y,1410 +filelock/_windows.py,sha256=3wpFAtTliqodzqLXk8h1EX_T_zyd32t_roJqKVr0pm0,2100 +filelock/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +filelock/version.py,sha256=Vk4x7NmWnlU1UDYhJpyZCmorJtPQLx9a4YEOiepQZgM,162 diff --git a/venv/lib/python3.10/site-packages/filelock-3.12.2.dist-info/WHEEL b/venv/lib/python3.10/site-packages/filelock-3.12.2.dist-info/WHEEL new file mode 100644 index 0000000..ba1a8af --- /dev/null +++ b/venv/lib/python3.10/site-packages/filelock-3.12.2.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: hatchling 1.18.0 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/venv/lib/python3.10/site-packages/filelock-3.12.2.dist-info/licenses/LICENSE b/venv/lib/python3.10/site-packages/filelock-3.12.2.dist-info/licenses/LICENSE new file mode 100644 index 0000000..cf1ab25 --- /dev/null +++ b/venv/lib/python3.10/site-packages/filelock-3.12.2.dist-info/licenses/LICENSE @@ -0,0 +1,24 @@ +This is free and unencumbered software released into the public domain. + +Anyone is free to copy, modify, publish, use, compile, sell, or +distribute this software, either in source code form or as a compiled +binary, for any purpose, commercial or non-commercial, and by any +means. + +In jurisdictions that recognize copyright laws, the author or authors +of this software dedicate any and all copyright interest in the +software to the public domain. We make this dedication for the benefit +of the public at large and to the detriment of our heirs and +successors. We intend this dedication to be an overt act of +relinquishment in perpetuity of all present and future rights to this +software under copyright law. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
+
+For more information, please refer to <https://unlicense.org>
diff --git a/venv/lib/python3.10/site-packages/filelock/__init__.py b/venv/lib/python3.10/site-packages/filelock/__init__.py
new file mode 100644
index 0000000..99654ea
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/filelock/__init__.py
@@ -0,0 +1,51 @@
+"""
+A platform independent file lock that supports the with-statement.
+
+.. autodata:: filelock.__version__
+   :no-value:
+
+"""
+from __future__ import annotations
+
+import sys
+import warnings
+from typing import TYPE_CHECKING
+
+from ._api import AcquireReturnProxy, BaseFileLock
+from ._error import Timeout
+from ._soft import SoftFileLock
+from ._unix import UnixFileLock, has_fcntl
+from ._windows import WindowsFileLock
+from .version import version
+
+#: version of the project as a string
+__version__: str = version
+
+
+if sys.platform == "win32":  # pragma: win32 cover
+    _FileLock: type[BaseFileLock] = WindowsFileLock
+else:  # pragma: win32 no cover
+    if has_fcntl:  # noqa: PLR5501
+        _FileLock: type[BaseFileLock] = UnixFileLock
+    else:
+        _FileLock = SoftFileLock
+        if warnings is not None:
+            warnings.warn("only soft file lock is available", stacklevel=2)
+
+if TYPE_CHECKING:  # noqa: SIM108
+    FileLock = SoftFileLock
+else:
+    #: Alias for the lock, which should be used for the current platform.
+    FileLock = _FileLock
+
+
+__all__ = [
+    "__version__",
+    "FileLock",
+    "SoftFileLock",
+    "Timeout",
+    "UnixFileLock",
+    "WindowsFileLock",
+    "BaseFileLock",
+    "AcquireReturnProxy",
+]
diff --git a/venv/lib/python3.10/site-packages/filelock/_api.py b/venv/lib/python3.10/site-packages/filelock/_api.py
new file mode 100644
index 0000000..7754f08
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/filelock/_api.py
@@ -0,0 +1,281 @@
+from __future__ import annotations
+
+import contextlib
+import logging
+import os
+import time
+import warnings
+from abc import ABC, abstractmethod
+from dataclasses import dataclass
+from threading import local
+from typing import TYPE_CHECKING, Any
+
+from ._error import Timeout
+
+if TYPE_CHECKING:
+    from types import TracebackType
+
+_LOGGER = logging.getLogger("filelock")
+
+
+# This is a helper class which is returned by :meth:`BaseFileLock.acquire` and wraps the lock to make sure __enter__
+# is not called twice when entering the with statement. If we would simply return *self*, the lock would be acquired
+# again in the *__enter__* method of the BaseFileLock, but not released again automatically. issue #37 (memory leak)
+class AcquireReturnProxy:
+    """A context aware object that will release the lock file when exiting."""
+
+    def __init__(self, lock: BaseFileLock) -> None:
+        self.lock = lock
+
+    def __enter__(self) -> BaseFileLock:
+        return self.lock
+
+    def __exit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc_value: BaseException | None,
+        traceback: TracebackType | None,
+    ) -> None:
+        self.lock.release()
+
+
+@dataclass
+class FileLockContext:
+    """A dataclass which holds the context for a ``BaseFileLock`` object."""
+
+    # The context is held in a separate class to allow optional use of thread local storage via the
+    # ThreadLocalFileContext class.
+
+    #: The path to the lock file.
+    lock_file: str
+
+    #: The default timeout value.
+ timeout: float + + #: The mode for the lock files + mode: int + + #: The file descriptor for the *_lock_file* as it is returned by the os.open() function, not None when lock held + lock_file_fd: int | None = None + + #: The lock counter is used for implementing the nested locking mechanism. + lock_counter: int = 0 # When the lock is acquired is increased and the lock is only released, when this value is 0 + + +class ThreadLocalFileContext(FileLockContext, local): + """A thread local version of the ``FileLockContext`` class.""" + + +class BaseFileLock(ABC, contextlib.ContextDecorator): + """Abstract base class for a file lock object.""" + + def __init__( + self, + lock_file: str | os.PathLike[Any], + timeout: float = -1, + mode: int = 0o644, + thread_local: bool = True, # noqa: FBT001, FBT002 + ) -> None: + """ + Create a new lock object. + + :param lock_file: path to the file + :param timeout: default timeout when acquiring the lock, in seconds. It will be used as fallback value in + the acquire method, if no timeout value (``None``) is given. If you want to disable the timeout, set it + to a negative value. A timeout of 0 means, that there is exactly one attempt to acquire the file lock. + :param mode: file permissions for the lockfile. + :param thread_local: Whether this object's internal context should be thread local or not. + If this is set to ``False`` then the lock will be reentrant across threads. + """ + self._is_thread_local = thread_local + + # Create the context. Note that external code should not work with the context directly and should instead use + # properties of this class. + kwargs: dict[str, Any] = { + "lock_file": os.fspath(lock_file), + "timeout": timeout, + "mode": mode, + } + self._context: FileLockContext = (ThreadLocalFileContext if thread_local else FileLockContext)(**kwargs) + + def is_thread_local(self) -> bool: + """:return: a flag indicating if this lock is thread local or not""" + return self._is_thread_local + + @property + def lock_file(self) -> str: + """:return: path to the lock file""" + return self._context.lock_file + + @property + def timeout(self) -> float: + """ + :return: the default timeout value, in seconds + + .. versionadded:: 2.0.0 + """ + return self._context.timeout + + @timeout.setter + def timeout(self, value: float | str) -> None: + """ + Change the default timeout value. + + :param value: the new value, in seconds + """ + self._context.timeout = float(value) + + @abstractmethod + def _acquire(self) -> None: + """If the file lock could be acquired, self._context.lock_file_fd holds the file descriptor of the lock file.""" + raise NotImplementedError + + @abstractmethod + def _release(self) -> None: + """Releases the lock and sets self._context.lock_file_fd to None.""" + raise NotImplementedError + + @property + def is_locked(self) -> bool: + """ + + :return: A boolean indicating if the lock file is holding the lock currently. + + .. versionchanged:: 2.0.0 + + This was previously a method and is now a property. + """ + return self._context.lock_file_fd is not None + + @property + def lock_counter(self) -> int: + """:return: The number of times this lock has been acquired (but not yet released).""" + return self._context.lock_counter + + def acquire( + self, + timeout: float | None = None, + poll_interval: float = 0.05, + *, + poll_intervall: float | None = None, + blocking: bool = True, + ) -> AcquireReturnProxy: + """ + Try to acquire the file lock. 
+ + :param timeout: maximum wait time for acquiring the lock, ``None`` means use the default :attr:`~timeout` is and + if ``timeout < 0``, there is no timeout and this method will block until the lock could be acquired + :param poll_interval: interval of trying to acquire the lock file + :param poll_intervall: deprecated, kept for backwards compatibility, use ``poll_interval`` instead + :param blocking: defaults to True. If False, function will return immediately if it cannot obtain a lock on the + first attempt. Otherwise, this method will block until the timeout expires or the lock is acquired. + :raises Timeout: if fails to acquire lock within the timeout period + :return: a context object that will unlock the file when the context is exited + + .. code-block:: python + + # You can use this method in the context manager (recommended) + with lock.acquire(): + pass + + # Or use an equivalent try-finally construct: + lock.acquire() + try: + pass + finally: + lock.release() + + .. versionchanged:: 2.0.0 + + This method returns now a *proxy* object instead of *self*, + so that it can be used in a with statement without side effects. + + """ + # Use the default timeout, if no timeout is provided. + if timeout is None: + timeout = self._context.timeout + + if poll_intervall is not None: + msg = "use poll_interval instead of poll_intervall" + warnings.warn(msg, DeprecationWarning, stacklevel=2) + poll_interval = poll_intervall + + # Increment the number right at the beginning. We can still undo it, if something fails. + self._context.lock_counter += 1 + + lock_id = id(self) + lock_filename = self.lock_file + start_time = time.perf_counter() + try: + while True: + if not self.is_locked: + _LOGGER.debug("Attempting to acquire lock %s on %s", lock_id, lock_filename) + self._acquire() + if self.is_locked: + _LOGGER.debug("Lock %s acquired on %s", lock_id, lock_filename) + break + if blocking is False: + _LOGGER.debug("Failed to immediately acquire lock %s on %s", lock_id, lock_filename) + raise Timeout(lock_filename) # noqa: TRY301 + if 0 <= timeout < time.perf_counter() - start_time: + _LOGGER.debug("Timeout on acquiring lock %s on %s", lock_id, lock_filename) + raise Timeout(lock_filename) # noqa: TRY301 + msg = "Lock %s not acquired on %s, waiting %s seconds ..." + _LOGGER.debug(msg, lock_id, lock_filename, poll_interval) + time.sleep(poll_interval) + except BaseException: # Something did go wrong, so decrement the counter. + self._context.lock_counter = max(0, self._context.lock_counter - 1) + raise + return AcquireReturnProxy(lock=self) + + def release(self, force: bool = False) -> None: # noqa: FBT001, FBT002 + """ + Releases the file lock. Please note, that the lock is only completely released, if the lock counter is 0. Also + note, that the lock file itself is not automatically deleted. + + :param force: If true, the lock counter is ignored and the lock is released in every case/ + """ + if self.is_locked: + self._context.lock_counter -= 1 + + if self._context.lock_counter == 0 or force: + lock_id, lock_filename = id(self), self.lock_file + + _LOGGER.debug("Attempting to release lock %s on %s", lock_id, lock_filename) + self._release() + self._context.lock_counter = 0 + _LOGGER.debug("Lock %s released on %s", lock_id, lock_filename) + + def __enter__(self) -> BaseFileLock: + """ + Acquire the lock. 
+ + :return: the lock object + """ + self.acquire() + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: TracebackType | None, + ) -> None: + """ + Release the lock. + + :param exc_type: the exception type if raised + :param exc_value: the exception value if raised + :param traceback: the exception traceback if raised + """ + self.release() + + def __del__(self) -> None: + """Called when the lock object is deleted.""" + self.release(force=True) + + +__all__ = [ + "BaseFileLock", + "AcquireReturnProxy", +] diff --git a/venv/lib/python3.10/site-packages/filelock/_error.py b/venv/lib/python3.10/site-packages/filelock/_error.py new file mode 100644 index 0000000..f7ff08c --- /dev/null +++ b/venv/lib/python3.10/site-packages/filelock/_error.py @@ -0,0 +1,30 @@ +from __future__ import annotations + +from typing import Any + + +class Timeout(TimeoutError): # noqa: N818 + """Raised when the lock could not be acquired in *timeout* seconds.""" + + def __init__(self, lock_file: str) -> None: + super().__init__() + self._lock_file = lock_file + + def __reduce__(self) -> str | tuple[Any, ...]: + return self.__class__, (self._lock_file,) # Properly pickle the exception + + def __str__(self) -> str: + return f"The file lock '{self._lock_file}' could not be acquired." + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.lock_file!r})" + + @property + def lock_file(self) -> str: + """:return: The path of the file lock.""" + return self._lock_file + + +__all__ = [ + "Timeout", +] diff --git a/venv/lib/python3.10/site-packages/filelock/_soft.py b/venv/lib/python3.10/site-packages/filelock/_soft.py new file mode 100644 index 0000000..b99912b --- /dev/null +++ b/venv/lib/python3.10/site-packages/filelock/_soft.py @@ -0,0 +1,46 @@ +from __future__ import annotations + +import os +import sys +from contextlib import suppress +from errno import EACCES, EEXIST +from pathlib import Path + +from ._api import BaseFileLock +from ._util import raise_on_not_writable_file + + +class SoftFileLock(BaseFileLock): + """Simply watches the existence of the lock file.""" + + def _acquire(self) -> None: + raise_on_not_writable_file(self.lock_file) + # first check for exists and read-only mode as the open will mask this case as EEXIST + flags = ( + os.O_WRONLY # open for writing only + | os.O_CREAT + | os.O_EXCL # together with above raise EEXIST if the file specified by filename exists + | os.O_TRUNC # truncate the file to zero byte + ) + try: + file_handler = os.open(self.lock_file, flags, self._context.mode) + except OSError as exception: # re-raise unless expected exception + if not ( + exception.errno == EEXIST # lock already exist + or (exception.errno == EACCES and sys.platform == "win32") # has no access to this lock + ): # pragma: win32 no cover + raise + else: + self._context.lock_file_fd = file_handler + + def _release(self) -> None: + assert self._context.lock_file_fd is not None # noqa: S101 + os.close(self._context.lock_file_fd) # the lock file is definitely not None + self._context.lock_file_fd = None + with suppress(OSError): # the file is already deleted and that's what we want + Path(self.lock_file).unlink() + + +__all__ = [ + "SoftFileLock", +] diff --git a/venv/lib/python3.10/site-packages/filelock/_unix.py b/venv/lib/python3.10/site-packages/filelock/_unix.py new file mode 100644 index 0000000..40cec0a --- /dev/null +++ b/venv/lib/python3.10/site-packages/filelock/_unix.py @@ -0,0 +1,63 @@ +from __future__ import 
+
+import os
+import sys
+from contextlib import suppress
+from errno import ENOSYS
+from typing import cast
+
+from ._api import BaseFileLock
+
+#: a flag to indicate if the fcntl API is available
+has_fcntl = False
+if sys.platform == "win32":  # pragma: win32 cover
+
+    class UnixFileLock(BaseFileLock):
+        """Uses the :func:`fcntl.flock` to hard lock the lock file on unix systems."""
+
+        def _acquire(self) -> None:
+            raise NotImplementedError
+
+        def _release(self) -> None:
+            raise NotImplementedError
+
+else:  # pragma: win32 no cover
+    try:
+        import fcntl
+    except ImportError:
+        pass
+    else:
+        has_fcntl = True
+
+    class UnixFileLock(BaseFileLock):
+        """Uses the :func:`fcntl.flock` to hard lock the lock file on unix systems."""
+
+        def _acquire(self) -> None:
+            open_flags = os.O_RDWR | os.O_CREAT | os.O_TRUNC
+            fd = os.open(self.lock_file, open_flags, self._context.mode)
+            with suppress(PermissionError):  # This lock is not owned by this UID
+                os.fchmod(fd, self._context.mode)
+            try:
+                fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
+            except OSError as exception:
+                os.close(fd)
+                if exception.errno == ENOSYS:  # flock is not implemented
+                    msg = "FileSystem does not appear to support flock; use SoftFileLock instead"
+                    raise NotImplementedError(msg) from exception
+            else:
+                self._context.lock_file_fd = fd
+
+        def _release(self) -> None:
+            # Do not remove the lockfile:
+            # https://github.com/tox-dev/py-filelock/issues/31
+            # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
+            fd = cast(int, self._context.lock_file_fd)
+            self._context.lock_file_fd = None
+            fcntl.flock(fd, fcntl.LOCK_UN)
+            os.close(fd)
+
+
+__all__ = [
+    "has_fcntl",
+    "UnixFileLock",
+]
diff --git a/venv/lib/python3.10/site-packages/filelock/_util.py b/venv/lib/python3.10/site-packages/filelock/_util.py
new file mode 100644
index 0000000..3d95731
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/filelock/_util.py
@@ -0,0 +1,43 @@
+from __future__ import annotations
+
+import os
+import stat
+import sys
+from errno import EACCES, EISDIR
+
+
+def raise_on_not_writable_file(filename: str) -> None:
+    """
+    Raise an exception if attempting to open the file for writing would fail.
+    This is done so files that will never be writable can be separated from
+    files that are writable but currently locked.
+    :param filename: file to check
+    :raises OSError: as if the file was opened for writing.
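+
+    A hypothetical sketch of the resulting behaviour (file names are made up)::
+
+        raise_on_not_writable_file("missing.lock")    # returns silently: os.stat fails
+        raise_on_not_writable_file("readonly.lock")   # PermissionError if S_IWUSR is clear
+        raise_on_not_writable_file("some_directory")  # IsADirectoryError on Linux / macOS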
+    """
+    try:  # use stat to check for existence + write permission without a race condition
+        file_stat = os.stat(filename)  # noqa: PTH116
+    except OSError:
+        return  # swallow "does not exist" and other errors
+
+    if file_stat.st_mtime != 0:  # if os.stat succeeds but the modification time is zero, the result is invalid - ignore it
+        if not (file_stat.st_mode & stat.S_IWUSR):
+            raise PermissionError(EACCES, "Permission denied", filename)
+
+        if stat.S_ISDIR(file_stat.st_mode):
+            if sys.platform == "win32":  # pragma: win32 cover
+                # On Windows, this is PermissionError
+                raise PermissionError(EACCES, "Permission denied", filename)
+            else:  # pragma: win32 no cover # noqa: RET506
+                # On Linux / macOS, this is IsADirectoryError
+                raise IsADirectoryError(EISDIR, "Is a directory", filename)
+
+
+__all__ = [
+    "raise_on_not_writable_file",
+]
diff --git a/venv/lib/python3.10/site-packages/filelock/_windows.py b/venv/lib/python3.10/site-packages/filelock/_windows.py
new file mode 100644
index 0000000..41683f4
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/filelock/_windows.py
@@ -0,0 +1,66 @@
+from __future__ import annotations
+
+import os
+import sys
+from contextlib import suppress
+from errno import EACCES
+from pathlib import Path
+from typing import cast
+
+from ._api import BaseFileLock
+from ._util import raise_on_not_writable_file
+
+if sys.platform == "win32":  # pragma: win32 cover
+    import msvcrt
+
+    class WindowsFileLock(BaseFileLock):
+        """Uses the :func:`msvcrt.locking` function to hard lock the lock file on Windows systems."""
+
+        def _acquire(self) -> None:
+            raise_on_not_writable_file(self.lock_file)
+            flags = (
+                os.O_RDWR  # open for read and write
+                | os.O_CREAT  # create the file if it does not exist
+                | os.O_TRUNC  # truncate the file if it is not empty
+            )
+            try:
+                fd = os.open(self.lock_file, flags, self._context.mode)
+            except OSError as exception:
+                if exception.errno != EACCES:  # has no access to this lock
+                    raise
+            else:
+                try:
+                    msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
+                except OSError as exception:
+                    os.close(fd)  # close the file first
+                    if exception.errno != EACCES:  # file is already locked
+                        raise
+                else:
+                    self._context.lock_file_fd = fd
+
+        def _release(self) -> None:
+            fd = cast(int, self._context.lock_file_fd)
+            self._context.lock_file_fd = None
+            msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
+            os.close(fd)
+
+            with suppress(OSError):  # Probably another instance of the application had acquired the file lock.
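+                # Best-effort cleanup: if the lock file has already been
+                # removed, the suppress(OSError) above swallows the error.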
+ Path(self.lock_file).unlink() + +else: # pragma: win32 no cover + + class WindowsFileLock(BaseFileLock): + """Uses the :func:`msvcrt.locking` function to hard lock the lock file on Windows systems.""" + + def _acquire(self) -> None: + raise NotImplementedError + + def _release(self) -> None: + raise NotImplementedError + + +__all__ = [ + "WindowsFileLock", +] diff --git a/venv/lib/python3.10/site-packages/setuptools/_vendor/jaraco/__init__.py b/venv/lib/python3.10/site-packages/filelock/py.typed similarity index 100% rename from venv/lib/python3.10/site-packages/setuptools/_vendor/jaraco/__init__.py rename to venv/lib/python3.10/site-packages/filelock/py.typed diff --git a/venv/lib/python3.10/site-packages/filelock/version.py b/venv/lib/python3.10/site-packages/filelock/version.py new file mode 100644 index 0000000..1579fac --- /dev/null +++ b/venv/lib/python3.10/site-packages/filelock/version.py @@ -0,0 +1,4 @@ +# file generated by setuptools_scm +# don't change, don't track in version control +__version__ = version = '3.12.2' +__version_tuple__ = version_tuple = (3, 12, 2) diff --git a/venv/lib/python3.10/site-packages/iniconfig-2.0.0.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/iniconfig-2.0.0.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/venv/lib/python3.10/site-packages/iniconfig-2.0.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/lib/python3.10/site-packages/iniconfig-2.0.0.dist-info/METADATA b/venv/lib/python3.10/site-packages/iniconfig-2.0.0.dist-info/METADATA new file mode 100644 index 0000000..3ea1e01 --- /dev/null +++ b/venv/lib/python3.10/site-packages/iniconfig-2.0.0.dist-info/METADATA @@ -0,0 +1,80 @@ +Metadata-Version: 2.1 +Name: iniconfig +Version: 2.0.0 +Summary: brain-dead simple config-ini parsing +Project-URL: Homepage, https://github.com/pytest-dev/iniconfig +Author-email: Ronny Pfannschmidt , Holger Krekel +License-Expression: MIT +License-File: LICENSE +Classifier: Development Status :: 4 - Beta +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: MacOS :: MacOS X +Classifier: Operating System :: Microsoft :: Windows +Classifier: Operating System :: POSIX +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Topic :: Software Development :: Libraries +Classifier: Topic :: Utilities +Requires-Python: >=3.7 +Description-Content-Type: text/x-rst + +iniconfig: brain-dead simple parsing of ini files +======================================================= + +iniconfig is a small and simple INI-file parser module +having a unique set of features: + +* maintains order of sections and entries +* supports multi-line values with or without line-continuations +* supports "#" comments everywhere +* raises errors with proper line-numbers +* no bells and whistles like automatic substitutions +* iniconfig raises an Error if two sections have the same name. + +If you encounter issues or have feature wishes please report them to: + + https://github.com/RonnyPfannschmidt/iniconfig/issues + +Basic Example +=================================== + +If you have an ini file like this: + +.. 
code-block:: ini + + # content of example.ini + [section1] # comment + name1=value1 # comment + name1b=value1,value2 # comment + + [section2] + name2= + line1 + line2 + +then you can do: + +.. code-block:: pycon + + >>> import iniconfig + >>> ini = iniconfig.IniConfig("example.ini") + >>> ini['section1']['name1'] # raises KeyError if not exists + 'value1' + >>> ini.get('section1', 'name1b', [], lambda x: x.split(",")) + ['value1', 'value2'] + >>> ini.get('section1', 'notexist', [], lambda x: x.split(",")) + [] + >>> [x.name for x in list(ini)] + ['section1', 'section2'] + >>> list(list(ini)[0].items()) + [('name1', 'value1'), ('name1b', 'value1,value2')] + >>> 'section1' in ini + True + >>> 'inexistendsection' in ini + False diff --git a/venv/lib/python3.10/site-packages/iniconfig-2.0.0.dist-info/RECORD b/venv/lib/python3.10/site-packages/iniconfig-2.0.0.dist-info/RECORD new file mode 100644 index 0000000..398d2bf --- /dev/null +++ b/venv/lib/python3.10/site-packages/iniconfig-2.0.0.dist-info/RECORD @@ -0,0 +1,14 @@ +iniconfig-2.0.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +iniconfig-2.0.0.dist-info/METADATA,sha256=2KcBd5DEFiZclO-ruP_qzN71qcTL0hNsCw5MCDIPN6I,2599 +iniconfig-2.0.0.dist-info/RECORD,, +iniconfig-2.0.0.dist-info/WHEEL,sha256=hKi7AIIx6qfnsRbr087vpeJnrVUuDokDHZacPPMW7-Y,87 +iniconfig-2.0.0.dist-info/licenses/LICENSE,sha256=KvaAw570k_uCgwNW0dPfGstaBgM8ui3sehniHKp3qGY,1061 +iniconfig/__init__.py,sha256=ALJSNenAgTD7RNj820NggEQuyaZp2QseTCThGJPavk0,5473 +iniconfig/__pycache__/__init__.cpython-310.pyc,, +iniconfig/__pycache__/_parse.cpython-310.pyc,, +iniconfig/__pycache__/_version.cpython-310.pyc,, +iniconfig/__pycache__/exceptions.cpython-310.pyc,, +iniconfig/_parse.py,sha256=OWGLbmE8GjxcoMWTvnGbck1RoNsTm5bt5ficIRZqWJ8,2436 +iniconfig/_version.py,sha256=WM8rOXoL5t25aMQJp4qbU2XP09nrDtmDnrAGhHSk0Wk,160 +iniconfig/exceptions.py,sha256=3V2JS5rndwiYUh84PNYS_1zd8H8IB-Rar81ARAA7E9s,501 +iniconfig/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 diff --git a/venv/lib/python3.10/site-packages/iniconfig-2.0.0.dist-info/WHEEL b/venv/lib/python3.10/site-packages/iniconfig-2.0.0.dist-info/WHEEL new file mode 100644 index 0000000..8d5c0ce --- /dev/null +++ b/venv/lib/python3.10/site-packages/iniconfig-2.0.0.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: hatchling 1.12.2 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/venv/lib/python3.10/site-packages/iniconfig-2.0.0.dist-info/licenses/LICENSE b/venv/lib/python3.10/site-packages/iniconfig-2.0.0.dist-info/licenses/LICENSE new file mode 100644 index 0000000..31ecdfb --- /dev/null +++ b/venv/lib/python3.10/site-packages/iniconfig-2.0.0.dist-info/licenses/LICENSE @@ -0,0 +1,19 @@ + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. + diff --git a/venv/lib/python3.10/site-packages/iniconfig/__init__.py b/venv/lib/python3.10/site-packages/iniconfig/__init__.py new file mode 100644 index 0000000..c18a8e4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/iniconfig/__init__.py @@ -0,0 +1,216 @@ +""" brain-dead simple parser for ini-style files. +(C) Ronny Pfannschmidt, Holger Krekel -- MIT licensed +""" +from __future__ import annotations +from typing import ( + Callable, + Iterator, + Mapping, + Optional, + Tuple, + TypeVar, + Union, + TYPE_CHECKING, + NoReturn, + NamedTuple, + overload, + cast, +) + +import os + +if TYPE_CHECKING: + from typing_extensions import Final + +__all__ = ["IniConfig", "ParseError", "COMMENTCHARS", "iscommentline"] + +from .exceptions import ParseError +from . import _parse +from ._parse import COMMENTCHARS, iscommentline + +_D = TypeVar("_D") +_T = TypeVar("_T") + + +class SectionWrapper: + config: Final[IniConfig] + name: Final[str] + + def __init__(self, config: IniConfig, name: str) -> None: + self.config = config + self.name = name + + def lineof(self, name: str) -> int | None: + return self.config.lineof(self.name, name) + + @overload + def get(self, key: str) -> str | None: + ... + + @overload + def get( + self, + key: str, + convert: Callable[[str], _T], + ) -> _T | None: + ... + + @overload + def get( + self, + key: str, + default: None, + convert: Callable[[str], _T], + ) -> _T | None: + ... + + @overload + def get(self, key: str, default: _D, convert: None = None) -> str | _D: + ... + + @overload + def get( + self, + key: str, + default: _D, + convert: Callable[[str], _T], + ) -> _T | _D: + ... 
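+
+    # A hedged usage sketch for the overloads above (key names are hypothetical):
+    #   section.get("timeout")                   -> the raw string, or None
+    #   section.get("timeout", 30, convert=int)  -> int(value), or 30 when missing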
+ + # TODO: investigate possible mypy bug wrt matching the passed over data + def get( # type: ignore [misc] + self, + key: str, + default: _D | None = None, + convert: Callable[[str], _T] | None = None, + ) -> _D | _T | str | None: + return self.config.get(self.name, key, convert=convert, default=default) + + def __getitem__(self, key: str) -> str: + return self.config.sections[self.name][key] + + def __iter__(self) -> Iterator[str]: + section: Mapping[str, str] = self.config.sections.get(self.name, {}) + + def lineof(key: str) -> int: + return self.config.lineof(self.name, key) # type: ignore[return-value] + + yield from sorted(section, key=lineof) + + def items(self) -> Iterator[tuple[str, str]]: + for name in self: + yield name, self[name] + + +class IniConfig: + path: Final[str] + sections: Final[Mapping[str, Mapping[str, str]]] + + def __init__( + self, + path: str | os.PathLike[str], + data: str | None = None, + encoding: str = "utf-8", + ) -> None: + self.path = os.fspath(path) + if data is None: + with open(self.path, encoding=encoding) as fp: + data = fp.read() + + tokens = _parse.parse_lines(self.path, data.splitlines(True)) + + self._sources = {} + sections_data: dict[str, dict[str, str]] + self.sections = sections_data = {} + + for lineno, section, name, value in tokens: + if section is None: + raise ParseError(self.path, lineno, "no section header defined") + self._sources[section, name] = lineno + if name is None: + if section in self.sections: + raise ParseError( + self.path, lineno, f"duplicate section {section!r}" + ) + sections_data[section] = {} + else: + if name in self.sections[section]: + raise ParseError(self.path, lineno, f"duplicate name {name!r}") + assert value is not None + sections_data[section][name] = value + + def lineof(self, section: str, name: str | None = None) -> int | None: + lineno = self._sources.get((section, name)) + return None if lineno is None else lineno + 1 + + @overload + def get( + self, + section: str, + name: str, + ) -> str | None: + ... + + @overload + def get( + self, + section: str, + name: str, + convert: Callable[[str], _T], + ) -> _T | None: + ... + + @overload + def get( + self, + section: str, + name: str, + default: None, + convert: Callable[[str], _T], + ) -> _T | None: + ... + + @overload + def get( + self, section: str, name: str, default: _D, convert: None = None + ) -> str | _D: + ... + + @overload + def get( + self, + section: str, + name: str, + default: _D, + convert: Callable[[str], _T], + ) -> _T | _D: + ... 
+ + def get( # type: ignore + self, + section: str, + name: str, + default: _D | None = None, + convert: Callable[[str], _T] | None = None, + ) -> _D | _T | str | None: + try: + value: str = self.sections[section][name] + except KeyError: + return default + else: + if convert is not None: + return convert(value) + else: + return value + + def __getitem__(self, name: str) -> SectionWrapper: + if name not in self.sections: + raise KeyError(name) + return SectionWrapper(self, name) + + def __iter__(self) -> Iterator[SectionWrapper]: + for name in sorted(self.sections, key=self.lineof): # type: ignore + yield SectionWrapper(self, name) + + def __contains__(self, arg: str) -> bool: + return arg in self.sections diff --git a/venv/lib/python3.10/site-packages/iniconfig/_parse.py b/venv/lib/python3.10/site-packages/iniconfig/_parse.py new file mode 100644 index 0000000..2d03437 --- /dev/null +++ b/venv/lib/python3.10/site-packages/iniconfig/_parse.py @@ -0,0 +1,82 @@ +from __future__ import annotations +from .exceptions import ParseError + +from typing import NamedTuple + + +COMMENTCHARS = "#;" + + +class _ParsedLine(NamedTuple): + lineno: int + section: str | None + name: str | None + value: str | None + + +def parse_lines(path: str, line_iter: list[str]) -> list[_ParsedLine]: + result: list[_ParsedLine] = [] + section = None + for lineno, line in enumerate(line_iter): + name, data = _parseline(path, line, lineno) + # new value + if name is not None and data is not None: + result.append(_ParsedLine(lineno, section, name, data)) + # new section + elif name is not None and data is None: + if not name: + raise ParseError(path, lineno, "empty section name") + section = name + result.append(_ParsedLine(lineno, section, None, None)) + # continuation + elif name is None and data is not None: + if not result: + raise ParseError(path, lineno, "unexpected value continuation") + last = result.pop() + if last.name is None: + raise ParseError(path, lineno, "unexpected value continuation") + + if last.value: + last = last._replace(value=f"{last.value}\n{data}") + else: + last = last._replace(value=data) + result.append(last) + return result + + +def _parseline(path: str, line: str, lineno: int) -> tuple[str | None, str | None]: + # blank lines + if iscommentline(line): + line = "" + else: + line = line.rstrip() + if not line: + return None, None + # section + if line[0] == "[": + realline = line + for c in COMMENTCHARS: + line = line.split(c)[0].rstrip() + if line[-1] == "]": + return line[1:-1], None + return None, realline.strip() + # value + elif not line[0].isspace(): + try: + name, value = line.split("=", 1) + if ":" in name: + raise ValueError() + except ValueError: + try: + name, value = line.split(":", 1) + except ValueError: + raise ParseError(path, lineno, "unexpected line: %r" % line) + return name.strip(), value.strip() + # continuation + else: + return None, line.strip() + + +def iscommentline(line: str) -> bool: + c = line.lstrip()[:1] + return c in COMMENTCHARS diff --git a/venv/lib/python3.10/site-packages/iniconfig/_version.py b/venv/lib/python3.10/site-packages/iniconfig/_version.py new file mode 100644 index 0000000..dd1883d --- /dev/null +++ b/venv/lib/python3.10/site-packages/iniconfig/_version.py @@ -0,0 +1,4 @@ +# file generated by setuptools_scm +# don't change, don't track in version control +__version__ = version = '2.0.0' +__version_tuple__ = version_tuple = (2, 0, 0) diff --git a/venv/lib/python3.10/site-packages/iniconfig/exceptions.py 
b/venv/lib/python3.10/site-packages/iniconfig/exceptions.py new file mode 100644 index 0000000..bc898e6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/iniconfig/exceptions.py @@ -0,0 +1,20 @@ +from __future__ import annotations +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from typing_extensions import Final + + +class ParseError(Exception): + path: Final[str] + lineno: Final[int] + msg: Final[str] + + def __init__(self, path: str, lineno: int, msg: str) -> None: + super().__init__(path, lineno, msg) + self.path = path + self.lineno = lineno + self.msg = msg + + def __str__(self) -> str: + return f"{self.path}:{self.lineno + 1}: {self.msg}" diff --git a/venv/lib/python3.10/site-packages/wheel-0.38.4.virtualenv b/venv/lib/python3.10/site-packages/iniconfig/py.typed similarity index 100% rename from venv/lib/python3.10/site-packages/wheel-0.38.4.virtualenv rename to venv/lib/python3.10/site-packages/iniconfig/py.typed diff --git a/venv/lib/python3.10/site-packages/nose_parameterized/__init__.py b/venv/lib/python3.10/site-packages/nose_parameterized/__init__.py new file mode 100644 index 0000000..992578c --- /dev/null +++ b/venv/lib/python3.10/site-packages/nose_parameterized/__init__.py @@ -0,0 +1,11 @@ +from .parameterized import parameterized, param + +import os +import warnings +if not os.environ.get("NOSE_PARAMETERIZED_NO_WARN"): + warnings.warn( + "The 'nose-parameterized' package has been renamed 'parameterized'. " + "For the two step migration instructions, see: " + "https://github.com/wolever/parameterized#migrating-from-nose-parameterized-to-parameterized " + "(set NOSE_PARAMETERIZED_NO_WARN=1 to suppress this warning)" + ) diff --git a/venv/lib/python3.10/site-packages/nose_parameterized/compat.py b/venv/lib/python3.10/site-packages/nose_parameterized/compat.py new file mode 100644 index 0000000..ab54197 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nose_parameterized/compat.py @@ -0,0 +1,13 @@ +""" +A stripped down version of six.py, containing only the bits we actually need. +Kept minimal so that OS package maintainers don't need to patch out six.py. +""" + +import sys + +PY3 = sys.version_info[0] == 3 + +if PY3: + string_types = str, +else: + string_types = basestring, diff --git a/venv/lib/python3.10/site-packages/nose_parameterized/parameterized.py b/venv/lib/python3.10/site-packages/nose_parameterized/parameterized.py new file mode 100644 index 0000000..e31320e --- /dev/null +++ b/venv/lib/python3.10/site-packages/nose_parameterized/parameterized.py @@ -0,0 +1,410 @@ +import re +import sys +import inspect +import warnings +from functools import wraps +from collections import namedtuple + +try: + from collections import OrderedDict as MaybeOrderedDict +except ImportError: + MaybeOrderedDict = dict + +from unittest import TestCase + +PY3 = sys.version_info[0] == 3 +PY2 = sys.version_info[0] == 2 + + +if PY3: + def new_instancemethod(f, *args): + return f + + # Python 3 doesn't have an InstanceType, so just use a dummy type. + class InstanceType(): + pass + lzip = lambda *a: list(zip(*a)) + text_type = str + string_types = str, + bytes_type = bytes +else: + import new + new_instancemethod = new.instancemethod + from types import InstanceType + lzip = zip + text_type = unicode + bytes_type = str + string_types = basestring, + +_param = namedtuple("param", "args kwargs") + +class param(_param): + """ Represents a single parameter to a test case. 
+
+        For example::
+
+            >>> p = param("foo", bar=16)
+            >>> p
+            param("foo", bar=16)
+            >>> p.args
+            ('foo', )
+            >>> p.kwargs
+            {'bar': 16}
+
+        Intended to be used as an argument to ``@parameterized``::
+
+            @parameterized([
+                param("foo", bar=16),
+            ])
+            def test_stuff(foo, bar=16):
+                pass
+    """
+
+    def __new__(cls, *args, **kwargs):
+        return _param.__new__(cls, args, kwargs)
+
+    @classmethod
+    def explicit(cls, args=None, kwargs=None):
+        """ Creates a ``param`` by explicitly specifying ``args`` and
+            ``kwargs``::
+
+                >>> param.explicit([1,2,3])
+                param(*(1, 2, 3))
+                >>> param.explicit(kwargs={"foo": 42})
+                param(*(), **{"foo": 42})
+        """
+        args = args or ()
+        kwargs = kwargs or {}
+        return cls(*args, **kwargs)
+
+    @classmethod
+    def from_decorator(cls, args):
+        """ Returns an instance of ``param()`` for ``@parameterized`` argument
+            ``args``::
+
+                >>> param.from_decorator((42, ))
+                param(args=(42, ), kwargs={})
+                >>> param.from_decorator("foo")
+                param(args=("foo", ), kwargs={})
+        """
+        if isinstance(args, param):
+            return args
+        if isinstance(args, string_types):
+            args = (args, )
+        return cls(*args)
+
+    def __repr__(self):
+        return "param(*%r, **%r)" %self
+
+
+class QuietOrderedDict(MaybeOrderedDict):
+    """ When OrderedDict is available, use it to make sure that the kwargs in
+        doc strings are consistently ordered. """
+    __str__ = dict.__str__
+    __repr__ = dict.__repr__
+
+
+def parameterized_argument_value_pairs(func, p):
+    """Return tuples of parameterized arguments and their values.
+
+    This is useful if you are writing your own doc_func
+    function and need to know the values for each parameter name::
+
+        >>> def func(a, foo=None, bar=42, **kwargs): pass
+        >>> p = param(1, foo=7, extra=99)
+        >>> parameterized_argument_value_pairs(func, p)
+        [("a", 1), ("foo", 7), ("bar", 42), ("**kwargs", {"extra": 99})]
+
+    If the function's first argument is named ``self`` then it will be
+    ignored::
+
+        >>> def func(self, a): pass
+        >>> p = param(1)
+        >>> parameterized_argument_value_pairs(func, p)
+        [("a", 1)]
+
+    Additionally, empty ``*args`` or ``**kwargs`` will be ignored::
+
+        >>> def func(foo, *args): pass
+        >>> p = param(1)
+        >>> parameterized_argument_value_pairs(func, p)
+        [("foo", 1)]
+        >>> p = param(1, 16)
+        >>> parameterized_argument_value_pairs(func, p)
+        [("foo", 1), ("*args", (16, ))]
+    """
+    argspec = inspect.getargspec(func)
+    arg_offset = 1 if argspec.args[:1] == ["self"] else 0
+
+    named_args = argspec.args[arg_offset:]
+
+    result = lzip(named_args, p.args)
+    named_args = argspec.args[len(result) + arg_offset:]
+    varargs = p.args[len(result):]
+
+    result.extend([
+        (name, p.kwargs.get(name, default))
+        for (name, default)
+        in zip(named_args, argspec.defaults or [])
+    ])
+
+    seen_arg_names = set([ n for (n, _) in result ])
+    keywords = QuietOrderedDict(sorted([
+        (name, p.kwargs[name])
+        for name in p.kwargs
+        if name not in seen_arg_names
+    ]))
+
+    if varargs:
+        result.append(("*%s" %(argspec.varargs, ), tuple(varargs)))
+
+    if keywords:
+        result.append(("**%s" %(argspec.keywords, ), keywords))
+
+    return result
+
+def short_repr(x, n=64):
+    """ A shortened repr of ``x`` which is guaranteed to be ``unicode``::
+
+        >>> short_repr("foo")
+        u"foo"
+        >>> short_repr("123456789", n=4)
+        u"12...89"
+    """
+
+    x_repr = repr(x)
+    if isinstance(x_repr, bytes_type):
+        try:
+            x_repr = text_type(x_repr, "utf-8")
+        except UnicodeDecodeError:
+            x_repr = text_type(x_repr, "latin1")
+    if len(x_repr) > n:
+        x_repr = x_repr[:n//2] + "..." + x_repr[len(x_repr) - n//2:]
+    return x_repr
+
+def default_doc_func(func, num, p):
+    if func.__doc__ is None:
+        return None
+
+    all_args_with_values = parameterized_argument_value_pairs(func, p)
+
+    # Assumes that the function passed is a bound method.
+    descs = ["%s=%s" %(n, short_repr(v)) for n, v in all_args_with_values]
+
+    # The documentation might be a multiline string, so split it
+    # and just work with the first string, ignoring the period
+    # at the end if there is one.
+    first, nl, rest = func.__doc__.lstrip().partition("\n")
+    suffix = ""
+    if first.endswith("."):
+        suffix = "."
+        first = first[:-1]
+    args = "%s[with %s]" %(len(first) and " " or "", ", ".join(descs))
+    return "".join([first.rstrip(), args, suffix, nl, rest])
+
+def default_name_func(func, num, p):
+    base_name = func.__name__
+    name_suffix = "_%s" %(num, )
+    if len(p.args) > 0 and isinstance(p.args[0], string_types):
+        name_suffix += "_" + parameterized.to_safe_name(p.args[0])
+    return base_name + name_suffix
+
+class parameterized(object):
+    """ Parameterize a test case::
+
+        class TestInt(object):
+            @parameterized([
+                ("A", 10),
+                ("F", 15),
+                param("10", 42, base=42)
+            ])
+            def test_int(self, input, expected, base=16):
+                actual = int(input, base=base)
+                assert_equal(actual, expected)
+
+        @parameterized([
+            (2, 3, 5),
+            (3, 5, 8),
+        ])
+        def test_add(a, b, expected):
+            assert_equal(a + b, expected)
+    """
+
+    def __init__(self, input, doc_func=None):
+        self.get_input = self.input_as_callable(input)
+        self.doc_func = doc_func or default_doc_func
+
+    def __call__(self, test_func):
+        self.assert_not_in_testcase_subclass()
+
+        @wraps(test_func)
+        def wrapper(test_self=None):
+            f = test_func
+            if test_self is not None:
+                # If we are a test method (which we assume to be true if we
+                # are being passed a "self" argument), we first need to create
+                # an instance method, attach it to the instance of the test
+                # class, then pull it back off to turn it into a bound method.
+                # If we don't do this, Nose gets cranky.
+                f = self.make_bound_method(test_self, test_func)
+            # Note: because nose is so very picky, the more obvious
+            # ``return self.yield_nose_tuples(f)`` won't work here.
+            for nose_tuple in self.yield_nose_tuples(f, wrapper):
+                yield nose_tuple
+
+        test_func.__name__ = "_helper_for_%s" %(test_func.__name__, )
+        wrapper.parameterized_input = self.get_input()
+        wrapper.parameterized_func = test_func
+        return wrapper
+
+    def yield_nose_tuples(self, func, wrapper):
+        original_doc = wrapper.__doc__
+        for num, args in enumerate(wrapper.parameterized_input):
+            p = param.from_decorator(args)
+            # ... then yield that as a tuple. If those steps aren't
+            # followed precisely, Nose gets upset and doesn't run the test
+            # or doesn't run setup methods.
+            nose_tuple = self.param_as_nose_tuple(func, num, p)
+            nose_func = nose_tuple[0]
+            try:
+                wrapper.__doc__ = nose_func.__doc__
+                yield nose_tuple
+            finally:
+                wrapper.__doc__ = original_doc
+
+    def param_as_nose_tuple(self, func, num, p):
+        if p.kwargs:
+            nose_func = wraps(func)(lambda args, kwargs: func(*args, **kwargs))
+            nose_args = (p.args, p.kwargs)
+        else:
+            nose_func = wraps(func)(lambda *args: func(*args))
+            nose_args = p.args
+        nose_func.__doc__ = self.doc_func(func, num, p)
+        return (nose_func, ) + nose_args
+
+    def make_bound_method(self, instance, func):
+        cls = type(instance)
+        if issubclass(cls, InstanceType):
+            raise TypeError((
+                "@parameterized can't be used with old-style classes, but "
+                "%r has an old-style class. Consider using a new-style "
+                "class, or '@parameterized.expand' "
+                "(see http://stackoverflow.com/q/54867/71522 for more "
+                "information on old-style classes)."
+            ) %(instance, ))
+        im_f = new_instancemethod(func, None, cls)
+        setattr(cls, func.__name__, im_f)
+        return getattr(instance, func.__name__)
+
+    def assert_not_in_testcase_subclass(self):
+        parent_classes = self._terrible_magic_get_defining_classes()
+        if any(issubclass(cls, TestCase) for cls in parent_classes):
+            raise Exception("Warning: '@parameterized' tests won't work "
+                            "inside subclasses of 'TestCase' - use "
+                            "'@parameterized.expand' instead")
+
+    def _terrible_magic_get_defining_classes(self):
+        """ Returns the set of parent classes of the class currently being defined.
+            Will likely only work if called from the ``parameterized`` decorator.
+            This function is entirely @brandon_rhodes's fault, as he suggested
+            the implementation: http://stackoverflow.com/a/8793684/71522
+        """
+        stack = inspect.stack()
+        if len(stack) <= 4:
+            return []
+        frame = stack[4]
+        code_context = frame[4] and frame[4][0].strip()
+        if not (code_context and code_context.startswith("class ")):
+            return []
+        _, _, parents = code_context.partition("(")
+        parents, _, _ = parents.partition(")")
+        return eval("[" + parents + "]", frame[0].f_globals, frame[0].f_locals)
+
+    @classmethod
+    def input_as_callable(cls, input):
+        if callable(input):
+            return lambda: cls.check_input_values(input())
+        input_values = cls.check_input_values(input)
+        return lambda: input_values
+
+    @classmethod
+    def check_input_values(cls, input_values):
+        # Explicitly convert non-list inputs to a list so that:
+        # 1. A helpful exception will be raised if they aren't iterable, and
+        # 2. Generators are unwrapped exactly once (otherwise `nosetests
+        #    --processes=n` has issues; see:
+        #    https://github.com/wolever/nose-parameterized/pull/31)
+        if not isinstance(input_values, list):
+            input_values = list(input_values)
+        return input_values
+
+    @classmethod
+    def expand(cls, input, name_func=None, doc_func=None, **legacy):
+        """ A "brute force" method of parameterizing test cases. Creates new
+            test cases and injects them into the namespace that the wrapped
+            function is being defined in. Useful for parameterizing tests in
+            subclasses of 'UnitTest', where Nose test generators don't work.
+
+            >>> @parameterized.expand([("foo", 1, 2)])
+            ... def test_add1(name, input, expected):
+            ...     actual = add1(input)
+            ...     assert_equal(actual, expected)
+            ...
+            >>> locals()
+            ... 'test_add1_foo_0': ...
+            >>>
+        """
+
+        if "testcase_func_name" in legacy:
+            warnings.warn("testcase_func_name= is deprecated; use name_func=",
+                          DeprecationWarning, stacklevel=2)
+            if not name_func:
+                name_func = legacy["testcase_func_name"]
+
+        if "testcase_func_doc" in legacy:
+            warnings.warn("testcase_func_doc= is deprecated; use doc_func=",
+                          DeprecationWarning, stacklevel=2)
+            if not doc_func:
+                doc_func = legacy["testcase_func_doc"]
+
+        doc_func = doc_func or default_doc_func
+        name_func = name_func or default_name_func
+
+        def parameterized_expand_wrapper(f, instance=None):
+            stack = inspect.stack()
+            frame = stack[1]
+            frame_locals = frame[0].f_locals
+
+            parameters = cls.input_as_callable(input)()
+            for num, args in enumerate(parameters):
+                p = param.from_decorator(args)
+                name = name_func(f, num, p)
+                frame_locals[name] = cls.param_as_standalone_func(p, f, name)
+                frame_locals[name].__doc__ = doc_func(f, num, p)
+
+            f.__test__ = False
+        return parameterized_expand_wrapper
+
+    @classmethod
+    def param_as_standalone_func(cls, p, func, name):
+        @wraps(func)
+        def standalone_func(*a):
+            return func(*(a + p.args), **p.kwargs)
+        standalone_func.__name__ = name
+
+        # place_as is used by py.test to determine what source file should be
+        # used for this test.
+        standalone_func.place_as = func
+
+        # Remove __wrapped__ because py.test will try to look at __wrapped__
+        # to determine which parameters should be used with this test case,
+        # and obviously we don't need it to do any parameterization.
+        try:
+            del standalone_func.__wrapped__
+        except AttributeError:
+            pass
+        return standalone_func
+
+    @classmethod
+    def to_safe_name(cls, s):
+        return str(re.sub("[^a-zA-Z0-9_]+", "_", s))
diff --git a/venv/lib/python3.10/site-packages/nose_parameterized/test.py b/venv/lib/python3.10/site-packages/nose_parameterized/test.py
new file mode 100644
index 0000000..93e15ea
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/nose_parameterized/test.py
@@ -0,0 +1,281 @@
+# coding=utf-8
+
+import inspect
+from unittest import TestCase
+from nose.tools import assert_equal
+from nose.plugins.skip import SkipTest
+
+from .parameterized import (
+    PY3, PY2, parameterized, param, parameterized_argument_value_pairs,
+    short_repr,
+)
+
+def assert_contains(haystack, needle):
+    if needle not in haystack:
+        raise AssertionError("%r not in %r" %(needle, haystack))
+
+def detect_runner(candidates):
+    for x in reversed(inspect.stack()):
+        frame = x[0]
+        for mod in candidates:
+            frame_mod = frame.f_globals.get("__name__", "")
+            if frame_mod == mod or frame_mod.startswith(mod + "."):
+                return mod
+    return ""
+
+runner = detect_runner(["nose", "nose2", "unittest", "unittest2"])
+UNITTEST = runner.startswith("unittest")
+NOSE2 = (runner == "nose2")
+
+SKIP_FLAGS = {
+    "generator": UNITTEST,
+    # nose2 doesn't run tests on old-style classes under Py2, so don't expect
+    # these tests to run under nose2.
+    "py2nose2": (PY2 and NOSE2),
+}
+
+missing_tests = set()
+
+def expect(skip, tests=None):
+    if tests is None:
+        tests = skip
+        skip = None
+    if any(SKIP_FLAGS.get(f) for f in (skip or "").split()):
+        return
+    missing_tests.update(tests)
+
+
+if not (PY2 and NOSE2):
+    missing_tests.update([
+    ])
+
+test_params = [
+    (42, ),
+    "foo0",
+    param("foo1"),
+    param("foo2", bar=42),
+]
+
+expect("generator", [
+    "test_naked_function('foo0', bar=None)",
+    "test_naked_function('foo1', bar=None)",
+    "test_naked_function('foo2', bar=42)",
+    "test_naked_function(42, bar=None)",
+])
+
+@parameterized(test_params)
+def test_naked_function(foo, bar=None):
+    missing_tests.remove("test_naked_function(%r, bar=%r)" %(foo, bar))
+
+
+class TestParameterized(object):
+    expect("generator", [
+        "test_instance_method('foo0', bar=None)",
+        "test_instance_method('foo1', bar=None)",
+        "test_instance_method('foo2', bar=42)",
+        "test_instance_method(42, bar=None)",
+    ])
+
+    @parameterized(test_params)
+    def test_instance_method(self, foo, bar=None):
+        missing_tests.remove("test_instance_method(%r, bar=%r)" %(foo, bar))
+
+
+def custom_naming_func(custom_tag):
+    def custom_naming_func(testcase_func, param_num, param):
+        return testcase_func.__name__ + ('_%s_name_' % custom_tag) + str(param.args[0])
+
+    return custom_naming_func
+
+
+class TestParameterizedOnTestCase(TestCase):
+    expect([
+        "test_on_TestCase('foo0', bar=None)",
+        "test_on_TestCase('foo1', bar=None)",
+        "test_on_TestCase('foo2', bar=42)",
+        "test_on_TestCase(42, bar=None)",
+    ])
+
+    @parameterized.expand(test_params)
+    def test_on_TestCase(self, foo, bar=None):
+        missing_tests.remove("test_on_TestCase(%r, bar=%r)" %(foo, bar))
+
+    expect([
+        "test_on_TestCase2_custom_name_42(42, bar=None)",
+        "test_on_TestCase2_custom_name_foo0('foo0', bar=None)",
+        "test_on_TestCase2_custom_name_foo1('foo1', bar=None)",
+        "test_on_TestCase2_custom_name_foo2('foo2', bar=42)",
+    ])
+
+    @parameterized.expand(test_params,
+                          name_func=custom_naming_func("custom"))
+    def test_on_TestCase2(self, foo, bar=None):
+        stack = inspect.stack()
+        frame = stack[1]
+        frame_locals = frame[0].f_locals
+        nose_test_method_name = frame_locals['a'][0]._testMethodName
+        expected_name = "test_on_TestCase2_custom_name_" + str(foo)
+        assert_equal(nose_test_method_name, expected_name,
+                     "Test Method name '%s' did not get customized to expected: '%s'" %
+                     (nose_test_method_name, expected_name))
+        missing_tests.remove("%s(%r, bar=%r)" %(expected_name, foo, bar))
+
+
+class TestParameterizedExpandDocstring(TestCase):
+    def _assert_docstring(self, expected_docstring, rstrip=False):
+        """ Checks the current test method's docstring. Must be called directly
+            from the test method. """
+        stack = inspect.stack()
+        f_locals = stack[3][0].f_locals
+        test_method = (
+            f_locals.get("testMethod") or  # Py27
+            f_locals.get("function")       # Py33
+        )
+        if test_method is None:
+            raise AssertionError("uh oh, unittest changed a local variable name")
+        actual_docstring = test_method.__doc__
+        if rstrip:
+            actual_docstring = actual_docstring.rstrip()
+        assert_equal(actual_docstring, expected_docstring)
+
+    @parameterized.expand([param("foo")],
+                          doc_func=lambda f, n, p: "stuff")
+    def test_custom_doc_func(self, foo, bar=None):
+        """Documentation"""
+        self._assert_docstring("stuff")
+
+    @parameterized.expand([param("foo")])
+    def test_single_line_docstring(self, foo):
+        """Documentation."""
+        self._assert_docstring("Documentation [with foo=%r]." %(foo, ))
+
+    @parameterized.expand([param("foo")])
+    def test_empty_docstring(self, foo):
+        ""
+        self._assert_docstring("[with foo=%r]" %(foo, ))
+
+    @parameterized.expand([param("foo")])
+    def test_multiline_documentation(self, foo):
+        """Documentation.
+
+        More"""
+        self._assert_docstring(
+            "Documentation [with foo=%r].\n\n"
+            "        More" %(foo, )
+        )
+
+    @parameterized.expand([param("foo")])
+    def test_unicode_docstring(self, foo):
+        u"""Döcumentation."""
+        self._assert_docstring(u"Döcumentation [with foo=%r]." %(foo, ))
+
+    @parameterized.expand([param("foo", )])
+    def test_default_values_get_correct_value(self, foo, bar=12):
+        """Documentation"""
+        self._assert_docstring("Documentation [with foo=%r, bar=%r]" %(foo, bar))
+
+    @parameterized.expand([param("foo", )])
+    def test_with_leading_newline(self, foo, bar=12):
+        """
+        Documentation
+        """
+        self._assert_docstring("Documentation [with foo=%r, bar=%r]" %(foo, bar), rstrip=True)
+
+
+def test_warns_when_using_parameterized_with_TestCase():
+    try:
+        class TestTestCaseWarnsOnBadUseOfParameterized(TestCase):
+            @parameterized([42])
+            def test_in_subclass_of_TestCase(self, foo):
+                pass
+    except Exception as e:
+        assert_contains(str(e), "parameterized.expand")
+    else:
+        raise AssertionError("Expected exception not raised")
+
+expect("generator", [
+    "test_wrapped_iterable_input('foo')",
+])
+@parameterized(lambda: iter(["foo"]))
+def test_wrapped_iterable_input(foo):
+    missing_tests.remove("test_wrapped_iterable_input(%r)" %(foo, ))
+
+def test_helpful_error_on_non_iterable_input():
+    try:
+        for _ in parameterized(lambda: 42)(lambda: None)():
+            pass
+    except Exception as e:
+        assert_contains(str(e), "is not iterable")
+    else:
+        raise AssertionError("Expected exception not raised")
+
+
+def tearDownModule():
+    missing = sorted(list(missing_tests))
+    assert_equal(missing, [])
+
+def test_old_style_classes():
+    if PY3:
+        raise SkipTest("Py3 doesn't have old-style classes")
+    class OldStyleClass:
+        @parameterized(["foo"])
+        def parameterized_method(self, param):
+            pass
+    try:
+        list(OldStyleClass().parameterized_method())
+    except TypeError as e:
+        assert_contains(str(e), "new-style")
+        assert_contains(str(e), "parameterized.expand")
+        assert_contains(str(e), "OldStyleClass")
+    else:
+        raise AssertionError("expected TypeError not raised by old-style class")
+
+
+class TestOldStyleClass:
+    expect("py2nose2 generator", [
+        "test_on_old_style_class('foo')",
+        "test_on_old_style_class('bar')",
+    ])
+
+    @parameterized.expand(["foo", "bar"])
+    def test_old_style_classes(self, param):
+        missing_tests.remove("test_on_old_style_class(%r)" %(param, ))
+
+
+@parameterized([
+    ("", param(), []),
+    ("*a, **kw", param(), []),
+    ("*a, **kw", param(1, foo=42), [("*a", (1, )), ("**kw", {"foo": 42})]),
+    ("foo", param(1), [("foo", 1)]),
+    ("foo, *a", param(1), [("foo", 1)]),
+    ("foo, *a", param(1, 9), [("foo", 1), ("*a", (9, ))]),
+    ("foo, *a, **kw", param(1, bar=9), [("foo", 1), ("**kw", {"bar": 9})]),
+    ("x=9", param(), [("x", 9)]),
+    ("x=9", param(1), [("x", 1)]),
+    ("x, y=9, *a, **kw", param(1), [("x", 1), ("y", 9)]),
+    ("x, y=9, *a, **kw", param(1, 2), [("x", 1), ("y", 2)]),
+    ("x, y=9, *a, **kw", param(1, 2, 3), [("x", 1), ("y", 2), ("*a", (3, ))]),
+    ("x, y=9, *a, **kw", param(1, y=2), [("x", 1), ("y", 2)]),
+    ("x, y=9, *a, **kw", param(1, z=2), [("x", 1), ("y", 9), ("**kw", {"z": 2})]),
+    ("x, y=9, *a, **kw", param(1, 2, 3, z=3), [("x", 1), ("y", 2), ("*a", (3, )), ("**kw", {"z": 3})]),
+])
+def test_parameterized_argument_value_pairs(func_params, p, expected):
+    helper = eval("lambda %s: None" %(func_params, ))
+    actual = parameterized_argument_value_pairs(helper, p)
+    assert_equal(actual, expected)
+
+
+@parameterized([
+    ("abcd", "'abcd'"),
+    ("123456789", "'12...89'"),
+    (123456789, "123...789")  # number types do not have quotes, so we can repr more
+])
+def test_short_repr(input, expected, n=6):
+    assert_equal(short_repr(input, n=n), expected)
+
+@parameterized([
+    ("foo", ),
+])
+def test_with_docstring(input):
+    """ Docstring! """
+    pass
diff --git a/venv/lib/python3.10/site-packages/packaging-23.1.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/packaging-23.1.dist-info/INSTALLER
new file mode 100644
index 0000000..a1b589e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/packaging-23.1.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/venv/lib/python3.10/site-packages/packaging-23.1.dist-info/LICENSE b/venv/lib/python3.10/site-packages/packaging-23.1.dist-info/LICENSE
new file mode 100644
index 0000000..6f62d44
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/packaging-23.1.dist-info/LICENSE
@@ -0,0 +1,3 @@
+This software is made available under the terms of *either* of the licenses
+found in LICENSE.APACHE or LICENSE.BSD. Contributions to this software is made
+under the terms of *both* these licenses.
diff --git a/venv/lib/python3.10/site-packages/packaging-23.1.dist-info/LICENSE.APACHE b/venv/lib/python3.10/site-packages/packaging-23.1.dist-info/LICENSE.APACHE
new file mode 100644
index 0000000..f433b1a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/packaging-23.1.dist-info/LICENSE.APACHE
@@ -0,0 +1,177 @@
+
+                              Apache License
+                        Version 2.0, January 2004
+                     http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS diff --git a/venv/lib/python3.10/site-packages/packaging-23.1.dist-info/LICENSE.BSD b/venv/lib/python3.10/site-packages/packaging-23.1.dist-info/LICENSE.BSD new file mode 100644 index 0000000..42ce7b7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/packaging-23.1.dist-info/LICENSE.BSD @@ -0,0 +1,23 @@ +Copyright (c) Donald Stufft and individual contributors. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/venv/lib/python3.10/site-packages/packaging-23.1.dist-info/METADATA b/venv/lib/python3.10/site-packages/packaging-23.1.dist-info/METADATA new file mode 100644 index 0000000..c43882a --- /dev/null +++ b/venv/lib/python3.10/site-packages/packaging-23.1.dist-info/METADATA @@ -0,0 +1,99 @@ +Metadata-Version: 2.1 +Name: packaging +Version: 23.1 +Summary: Core utilities for Python packages +Author-email: Donald Stufft +Requires-Python: >=3.7 +Description-Content-Type: text/x-rst +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: Apache Software License +Classifier: License :: OSI Approved :: BSD License +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Typing :: Typed +Project-URL: Documentation, https://packaging.pypa.io/ +Project-URL: Source, https://github.com/pypa/packaging + +packaging +========= + +.. start-intro + +Reusable core utilities for various Python Packaging +`interoperability specifications `_. + +This library provides utilities that implement the interoperability +specifications which have clearly one correct behaviour (eg: :pep:`440`) +or benefit greatly from having a single shared implementation (eg: :pep:`425`). + +.. end-intro + +The ``packaging`` project includes the following: version handling, specifiers, +markers, requirements, tags, utilities. + +Documentation +------------- + +The `documentation`_ provides information and the API for the following: + +- Version Handling +- Specifiers +- Markers +- Requirements +- Tags +- Utilities + +Installation +------------ + +Use ``pip`` to install these utilities:: + + pip install packaging + +Discussion +---------- + +If you run into bugs, you can file them in our `issue tracker`_. + +You can also join ``#pypa`` on Freenode to ask questions or get involved. + + +.. _`documentation`: https://packaging.pypa.io/ +.. _`issue tracker`: https://github.com/pypa/packaging/issues + + +Code of Conduct +--------------- + +Everyone interacting in the packaging project's codebases, issue trackers, chat +rooms, and mailing lists is expected to follow the `PSF Code of Conduct`_. + +.. _PSF Code of Conduct: https://github.com/pypa/.github/blob/main/CODE_OF_CONDUCT.md + +Contributing +------------ + +The ``CONTRIBUTING.rst`` file outlines how to contribute to this project as +well as how to report a potential security issue. The documentation for this +project also covers information about `project development`_ and `security`_. + +.. _`project development`: https://packaging.pypa.io/en/latest/development/ +.. _`security`: https://packaging.pypa.io/en/latest/security/ + +Project History +--------------- + +Please review the ``CHANGELOG.rst`` file or the `Changelog documentation`_ for +recent changes and project history. + +.. 
_`Changelog documentation`: https://packaging.pypa.io/en/latest/changelog/ + diff --git a/venv/lib/python3.10/site-packages/packaging-23.1.dist-info/RECORD b/venv/lib/python3.10/site-packages/packaging-23.1.dist-info/RECORD new file mode 100644 index 0000000..cb52e4f --- /dev/null +++ b/venv/lib/python3.10/site-packages/packaging-23.1.dist-info/RECORD @@ -0,0 +1,36 @@ +packaging-23.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +packaging-23.1.dist-info/LICENSE,sha256=ytHvW9NA1z4HS6YU0m996spceUDD2MNIUuZcSQlobEg,197 +packaging-23.1.dist-info/LICENSE.APACHE,sha256=DVQuDIgE45qn836wDaWnYhSdxoLXgpRRKH4RuTjpRZQ,10174 +packaging-23.1.dist-info/LICENSE.BSD,sha256=tw5-m3QvHMb5SLNMFqo5_-zpQZY2S8iP8NIYDwAo-sU,1344 +packaging-23.1.dist-info/METADATA,sha256=JnduJDlxs2IVeB-nIqAC3-HyNcPhP_MADd9_k_MjmaI,3082 +packaging-23.1.dist-info/RECORD,, +packaging-23.1.dist-info/WHEEL,sha256=rSgq_JpHF9fHR1lx53qwg_1-2LypZE_qmcuXbVUq948,81 +packaging/__init__.py,sha256=kYVZSmXT6CWInT4UJPDtrSQBAZu8fMuFBxpv5GsDTLk,501 +packaging/__pycache__/__init__.cpython-310.pyc,, +packaging/__pycache__/_elffile.cpython-310.pyc,, +packaging/__pycache__/_manylinux.cpython-310.pyc,, +packaging/__pycache__/_musllinux.cpython-310.pyc,, +packaging/__pycache__/_parser.cpython-310.pyc,, +packaging/__pycache__/_structures.cpython-310.pyc,, +packaging/__pycache__/_tokenizer.cpython-310.pyc,, +packaging/__pycache__/markers.cpython-310.pyc,, +packaging/__pycache__/metadata.cpython-310.pyc,, +packaging/__pycache__/requirements.cpython-310.pyc,, +packaging/__pycache__/specifiers.cpython-310.pyc,, +packaging/__pycache__/tags.cpython-310.pyc,, +packaging/__pycache__/utils.cpython-310.pyc,, +packaging/__pycache__/version.cpython-310.pyc,, +packaging/_elffile.py,sha256=hbmK8OD6Z7fY6hwinHEUcD1by7czkGiNYu7ShnFEk2k,3266 +packaging/_manylinux.py,sha256=ESGrDEVmBc8jYTtdZRAWiLk72lOzAKWeezFgoJ_MuBc,8926 +packaging/_musllinux.py,sha256=mvPk7FNjjILKRLIdMxR7IvJ1uggLgCszo-L9rjfpi0M,2524 +packaging/_parser.py,sha256=KJQkBh_Xbfb-qsB560YIEItrTpCZaOh4_YMfBtd5XIY,10194 +packaging/_structures.py,sha256=q3eVNmbWJGG_S0Dit_S3Ao8qQqz_5PYTXFAKBZe5yr4,1431 +packaging/_tokenizer.py,sha256=alCtbwXhOFAmFGZ6BQ-wCTSFoRAJ2z-ysIf7__MTJ_k,5292 +packaging/markers.py,sha256=eH-txS2zq1HdNpTd9LcZUcVIwewAiNU0grmq5wjKnOk,8208 +packaging/metadata.py,sha256=PjELMLxKG_iu3HWjKAOdKhuNrHfWgpdTF2Q4nObsZeM,16397 +packaging/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +packaging/requirements.py,sha256=hJzvtJyAvENc_VfwfhnOZV1851-VW8JCGh-R96NE4Pc,3287 +packaging/specifiers.py,sha256=ZOpqL_w_Kj6ZF_OWdliQUzhEyHlDbi6989kr-sF5GHs,39206 +packaging/tags.py,sha256=_1gLX8h1SgpjAdYCP9XqU37zRjXtU5ZliGy3IM-WcSM,18106 +packaging/utils.py,sha256=es0cCezKspzriQ-3V88h3yJzxz028euV2sUwM61kE-o,4355 +packaging/version.py,sha256=2NH3E57hzRhn0BV9boUBvgPsxlTqLJeI0EpYQoNvGi0,16326 diff --git a/venv/lib/python3.10/site-packages/packaging-23.1.dist-info/WHEEL b/venv/lib/python3.10/site-packages/packaging-23.1.dist-info/WHEEL new file mode 100644 index 0000000..db4a255 --- /dev/null +++ b/venv/lib/python3.10/site-packages/packaging-23.1.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: flit 3.8.0 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/venv/lib/python3.10/site-packages/packaging/__init__.py b/venv/lib/python3.10/site-packages/packaging/__init__.py new file mode 100644 index 0000000..13cadc7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/packaging/__init__.py @@ -0,0 +1,15 @@ +# This file is dual licensed under the terms of the Apache License, 
Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+__title__ = "packaging"
+__summary__ = "Core utilities for Python packages"
+__uri__ = "https://github.com/pypa/packaging"
+
+__version__ = "23.1"
+
+__author__ = "Donald Stufft and individual contributors"
+__email__ = "donald@stufft.io"
+
+__license__ = "BSD-2-Clause or Apache-2.0"
+__copyright__ = "2014-2019 %s" % __author__
diff --git a/venv/lib/python3.10/site-packages/packaging/_elffile.py b/venv/lib/python3.10/site-packages/packaging/_elffile.py
new file mode 100644
index 0000000..6fb19b3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/packaging/_elffile.py
@@ -0,0 +1,108 @@
+"""
+ELF file parser.
+
+This provides a class ``ELFFile`` that parses an ELF executable in a similar
+interface to ``ZipFile``. Only the read interface is implemented.
+
+Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca
+ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html
+"""
+
+import enum
+import os
+import struct
+from typing import IO, Optional, Tuple
+
+
+class ELFInvalid(ValueError):
+    pass
+
+
+class EIClass(enum.IntEnum):
+    C32 = 1
+    C64 = 2
+
+
+class EIData(enum.IntEnum):
+    Lsb = 1
+    Msb = 2
+
+
+class EMachine(enum.IntEnum):
+    I386 = 3
+    S390 = 22
+    Arm = 40
+    X8664 = 62
+    AArc64 = 183
+
+
+class ELFFile:
+    """
+    Representation of an ELF executable.
+    """
+
+    def __init__(self, f: IO[bytes]) -> None:
+        self._f = f
+
+        try:
+            ident = self._read("16B")
+        except struct.error:
+            raise ELFInvalid("unable to parse identification")
+        magic = bytes(ident[:4])
+        if magic != b"\x7fELF":
+            raise ELFInvalid(f"invalid magic: {magic!r}")
+
+        self.capacity = ident[4]  # Format for program header (bitness).
+        self.encoding = ident[5]  # Data structure encoding (endianness).
+
+        try:
+            # e_fmt: Format for program header.
+            # p_fmt: Format for section header.
+            # p_idx: Indexes to find p_type, p_offset, and p_filesz.
+            e_fmt, self._p_fmt, self._p_idx = {
+                (1, 1): ("<HHIIIIIHHH", "<IIIIIIII", (0, 1, 4)),  # 32-bit LSB.
+                (1, 2): (">HHIIIIIHHH", ">IIIIIIII", (0, 1, 4)),  # 32-bit MSB.
+                (2, 1): ("<HHIQQQIHHH", "<IIQQQQQQ", (0, 2, 5)),  # 64-bit LSB.
+                (2, 2): (">HHIQQQIHHH", ">IIQQQQQQ", (0, 2, 5)),  # 64-bit MSB.
+            }[(self.capacity, self.encoding)]
+        except KeyError:
+            raise ELFInvalid(
+                f"unrecognized capacity ({self.capacity}) or "
+                f"encoding ({self.encoding})"
+            )
+
+        try:
+            (
+                _,
+                self.machine,  # Architecture type.
+                _,
+                _,
+                self._e_phoff,  # Offset of program header.
+                _,
+                self.flags,  # Processor-specific flags.
+                _,
+                self._e_phentsize,  # Size of section.
+                self._e_phnum,  # Number of sections.
+            ) = self._read(e_fmt)
+        except struct.error as e:
+            raise ELFInvalid("unable to parse machine and section information") from e
+
+    def _read(self, fmt: str) -> Tuple[int, ...]:
+        return struct.unpack(fmt, self._f.read(struct.calcsize(fmt)))
+
+    @property
+    def interpreter(self) -> Optional[str]:
+        """
+        The path recorded in the ``PT_INTERP`` section header.
+        """
+        for index in range(self._e_phnum):
+            self._f.seek(self._e_phoff + self._e_phentsize * index)
+            try:
+                data = self._read(self._p_fmt)
+            except struct.error:
+                continue
+            if data[self._p_idx[0]] != 3:  # Not PT_INTERP.
+ continue + self._f.seek(data[self._p_idx[1]]) + return os.fsdecode(self._f.read(data[self._p_idx[2]])).strip("\0") + return None diff --git a/venv/lib/python3.10/site-packages/wheel/vendored/packaging/_manylinux.py b/venv/lib/python3.10/site-packages/packaging/_manylinux.py similarity index 59% rename from venv/lib/python3.10/site-packages/wheel/vendored/packaging/_manylinux.py rename to venv/lib/python3.10/site-packages/packaging/_manylinux.py index 4934ba8..449c655 100644 --- a/venv/lib/python3.10/site-packages/wheel/vendored/packaging/_manylinux.py +++ b/venv/lib/python3.10/site-packages/packaging/_manylinux.py @@ -1,123 +1,60 @@ -from __future__ import annotations - import collections +import contextlib import functools import os import re -import struct import sys import warnings -from typing import IO, Iterator, NamedTuple - - -# Python does not provide platform information at sufficient granularity to -# identify the architecture of the running executable in some cases, so we -# determine it dynamically by reading the information from the running -# process. This only applies on Linux, which uses the ELF format. -class _ELFFileHeader: - # https://en.wikipedia.org/wiki/Executable_and_Linkable_Format#File_header - class _InvalidELFFileHeader(ValueError): - """ - An invalid ELF file header was found. - """ - - ELF_MAGIC_NUMBER = 0x7F454C46 - ELFCLASS32 = 1 - ELFCLASS64 = 2 - ELFDATA2LSB = 1 - ELFDATA2MSB = 2 - EM_386 = 3 - EM_S390 = 22 - EM_ARM = 40 - EM_X86_64 = 62 - EF_ARM_ABIMASK = 0xFF000000 - EF_ARM_ABI_VER5 = 0x05000000 - EF_ARM_ABI_FLOAT_HARD = 0x00000400 - - def __init__(self, file: IO[bytes]) -> None: - def unpack(fmt: str) -> int: - try: - data = file.read(struct.calcsize(fmt)) - result: tuple[int, ...] = struct.unpack(fmt, data) - except struct.error: - raise _ELFFileHeader._InvalidELFFileHeader() - return result[0] - - self.e_ident_magic = unpack(">I") - if self.e_ident_magic != self.ELF_MAGIC_NUMBER: - raise _ELFFileHeader._InvalidELFFileHeader() - self.e_ident_class = unpack("B") - if self.e_ident_class not in {self.ELFCLASS32, self.ELFCLASS64}: - raise _ELFFileHeader._InvalidELFFileHeader() - self.e_ident_data = unpack("B") - if self.e_ident_data not in {self.ELFDATA2LSB, self.ELFDATA2MSB}: - raise _ELFFileHeader._InvalidELFFileHeader() - self.e_ident_version = unpack("B") - self.e_ident_osabi = unpack("B") - self.e_ident_abiversion = unpack("B") - self.e_ident_pad = file.read(7) - format_h = "H" - format_i = "I" - format_q = "Q" - format_p = format_i if self.e_ident_class == self.ELFCLASS32 else format_q - self.e_type = unpack(format_h) - self.e_machine = unpack(format_h) - self.e_version = unpack(format_i) - self.e_entry = unpack(format_p) - self.e_phoff = unpack(format_p) - self.e_shoff = unpack(format_p) - self.e_flags = unpack(format_i) - self.e_ehsize = unpack(format_h) - self.e_phentsize = unpack(format_h) - self.e_phnum = unpack(format_h) - self.e_shentsize = unpack(format_h) - self.e_shnum = unpack(format_h) - self.e_shstrndx = unpack(format_h) - - -def _get_elf_header() -> _ELFFileHeader | None: +from typing import Dict, Generator, Iterator, NamedTuple, Optional, Tuple + +from ._elffile import EIClass, EIData, ELFFile, EMachine + +EF_ARM_ABIMASK = 0xFF000000 +EF_ARM_ABI_VER5 = 0x05000000 +EF_ARM_ABI_FLOAT_HARD = 0x00000400 + + +# `os.PathLike` not a generic type until Python 3.9, so sticking with `str` +# as the type for `path` until then. 
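# --- Editor's sketch (illustration only, not part of the vendored diff). ---
# A minimal example of the ELFFile read interface from _elffile.py above,
# assuming a Linux host whose interpreter is an ELF binary. `packaging._elffile`
# is a private module, imported here purely for demonstration.
import sys
from packaging._elffile import ELFFile, ELFInvalid

try:
    with open(sys.executable, "rb") as f:
        elf = ELFFile(f)
        print(elf.machine)      # e.g. 62 (EMachine.X8664) on x86-64
        print(elf.interpreter)  # e.g. "/lib64/ld-linux-x86-64.so.2"
except (OSError, ELFInvalid):
    print("interpreter is not an ELF executable (macOS, Windows, ...)")
# --- End editor's sketch. ---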
+@contextlib.contextmanager +def _parse_elf(path: str) -> Generator[Optional[ELFFile], None, None]: try: - with open(sys.executable, "rb") as f: - elf_header = _ELFFileHeader(f) - except (OSError, TypeError, _ELFFileHeader._InvalidELFFileHeader): - return None - return elf_header + with open(path, "rb") as f: + yield ELFFile(f) + except (OSError, TypeError, ValueError): + yield None -def _is_linux_armhf() -> bool: +def _is_linux_armhf(executable: str) -> bool: # hard-float ABI can be detected from the ELF header of the running # process # https://static.docs.arm.com/ihi0044/g/aaelf32.pdf - elf_header = _get_elf_header() - if elf_header is None: - return False - result = elf_header.e_ident_class == elf_header.ELFCLASS32 - result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB - result &= elf_header.e_machine == elf_header.EM_ARM - result &= ( - elf_header.e_flags & elf_header.EF_ARM_ABIMASK - ) == elf_header.EF_ARM_ABI_VER5 - result &= ( - elf_header.e_flags & elf_header.EF_ARM_ABI_FLOAT_HARD - ) == elf_header.EF_ARM_ABI_FLOAT_HARD - return result - - -def _is_linux_i686() -> bool: - elf_header = _get_elf_header() - if elf_header is None: - return False - result = elf_header.e_ident_class == elf_header.ELFCLASS32 - result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB - result &= elf_header.e_machine == elf_header.EM_386 - return result + with _parse_elf(executable) as f: + return ( + f is not None + and f.capacity == EIClass.C32 + and f.encoding == EIData.Lsb + and f.machine == EMachine.Arm + and f.flags & EF_ARM_ABIMASK == EF_ARM_ABI_VER5 + and f.flags & EF_ARM_ABI_FLOAT_HARD == EF_ARM_ABI_FLOAT_HARD + ) + + +def _is_linux_i686(executable: str) -> bool: + with _parse_elf(executable) as f: + return ( + f is not None + and f.capacity == EIClass.C32 + and f.encoding == EIData.Lsb + and f.machine == EMachine.I386 + ) -def _have_compatible_abi(arch: str) -> bool: +def _have_compatible_abi(executable: str, arch: str) -> bool: if arch == "armv7l": - return _is_linux_armhf() + return _is_linux_armhf(executable) if arch == "i686": - return _is_linux_i686() + return _is_linux_i686(executable) return arch in {"x86_64", "aarch64", "ppc64", "ppc64le", "s390x"} @@ -126,7 +63,7 @@ def _have_compatible_abi(arch: str) -> bool: # For now, guess what the highest minor version might be, assume it will # be 50 for testing. Once this actually happens, update the dictionary # with the actual value. -_LAST_GLIBC_MINOR: dict[int, int] = collections.defaultdict(lambda: 50) +_LAST_GLIBC_MINOR: Dict[int, int] = collections.defaultdict(lambda: 50) class _GLibCVersion(NamedTuple): @@ -134,7 +71,7 @@ class _GLibCVersion(NamedTuple): minor: int -def _glibc_version_string_confstr() -> str | None: +def _glibc_version_string_confstr() -> Optional[str]: """ Primary implementation of glibc_version_string using os.confstr. """ @@ -143,17 +80,17 @@ def _glibc_version_string_confstr() -> str | None: # platform module. # https://github.com/python/cpython/blob/fcf1d003bf4f0100c/Lib/platform.py#L175-L183 try: - # os.confstr("CS_GNU_LIBC_VERSION") returns a string like "glibc 2.17". - version_string = os.confstr("CS_GNU_LIBC_VERSION") + # Should be a string like "glibc 2.17". + version_string: str = getattr(os, "confstr")("CS_GNU_LIBC_VERSION") assert version_string is not None - _, version = version_string.split() + _, version = version_string.rsplit() except (AssertionError, AttributeError, OSError, ValueError): # os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)... 
        return None
    return version


-def _glibc_version_string_ctypes() -> str | None:
+def _glibc_version_string_ctypes() -> Optional[str]:
     """
     Fallback implementation of glibc_version_string using ctypes.
     """
@@ -197,12 +134,12 @@ def _glibc_version_string_ctypes() -> str | None:
     return version_str


-def _glibc_version_string() -> str | None:
+def _glibc_version_string() -> Optional[str]:
     """Returns glibc version string, or None if not using glibc."""
     return _glibc_version_string_confstr() or _glibc_version_string_ctypes()


-def _parse_glibc_version(version_str: str) -> tuple[int, int]:
+def _parse_glibc_version(version_str: str) -> Tuple[int, int]:
     """Parse glibc version.

     We use a regexp instead of str.split because we want to discard any
@@ -213,8 +150,8 @@ def _parse_glibc_version(version_str: str) -> tuple[int, int]:
     m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str)
     if not m:
         warnings.warn(
-            "Expected glibc version with 2 components major.minor,"
-            " got: %s" % version_str,
+            f"Expected glibc version with 2 components major.minor,"
+            f" got: {version_str}",
             RuntimeWarning,
         )
         return -1, -1
@@ -222,7 +159,7 @@
 @functools.lru_cache()
-def _get_glibc_version() -> tuple[int, int]:
+def _get_glibc_version() -> Tuple[int, int]:
     version_str = _glibc_version_string()
     if version_str is None:
         return (-1, -1)
@@ -267,7 +204,7 @@ def _is_compatible(name: str, arch: str, version: _GLibCVersion) -> bool:

 def platform_tags(linux: str, arch: str) -> Iterator[str]:
-    if not _have_compatible_abi(arch):
+    if not _have_compatible_abi(sys.executable, arch):
         return
     # Oldest glibc to be supported regardless of architecture is (2, 17).
     too_old_glibc2 = _GLibCVersion(2, 16)
diff --git a/venv/lib/python3.10/site-packages/packaging/_musllinux.py b/venv/lib/python3.10/site-packages/packaging/_musllinux.py
new file mode 100644
index 0000000..706ba60
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/packaging/_musllinux.py
@@ -0,0 +1,80 @@
+"""PEP 656 support.
+
+This module implements logic to detect if the currently running Python is
+linked against musl, and what musl version is used.
+"""
+
+import functools
+import re
+import subprocess
+import sys
+from typing import Iterator, NamedTuple, Optional
+
+from ._elffile import ELFFile
+
+
+class _MuslVersion(NamedTuple):
+    major: int
+    minor: int
+
+
+def _parse_musl_version(output: str) -> Optional[_MuslVersion]:
+    lines = [n for n in (n.strip() for n in output.splitlines()) if n]
+    if len(lines) < 2 or lines[0][:4] != "musl":
+        return None
+    m = re.match(r"Version (\d+)\.(\d+)", lines[1])
+    if not m:
+        return None
+    return _MuslVersion(major=int(m.group(1)), minor=int(m.group(2)))
+
+
+@functools.lru_cache()
+def _get_musl_version(executable: str) -> Optional[_MuslVersion]:
+    """Detect currently-running musl runtime version.
+
+    This is done by checking the specified executable's dynamic linking
+    information, and invoking the loader to parse its output for a version
+    string.
If the loader is musl, the output would be something like:: + + musl libc (x86_64) + Version 1.2.2 + Dynamic Program Loader + """ + try: + with open(executable, "rb") as f: + ld = ELFFile(f).interpreter + except (OSError, TypeError, ValueError): + return None + if ld is None or "musl" not in ld: + return None + proc = subprocess.run([ld], stderr=subprocess.PIPE, universal_newlines=True) + return _parse_musl_version(proc.stderr) + + +def platform_tags(arch: str) -> Iterator[str]: + """Generate musllinux tags compatible to the current platform. + + :param arch: Should be the part of platform tag after the ``linux_`` + prefix, e.g. ``x86_64``. The ``linux_`` prefix is assumed as a + prerequisite for the current platform to be musllinux-compatible. + + :returns: An iterator of compatible musllinux tags. + """ + sys_musl = _get_musl_version(sys.executable) + if sys_musl is None: # Python not dynamically linked against musl. + return + for minor in range(sys_musl.minor, -1, -1): + yield f"musllinux_{sys_musl.major}_{minor}_{arch}" + + +if __name__ == "__main__": # pragma: no cover + import sysconfig + + plat = sysconfig.get_platform() + assert plat.startswith("linux-"), "not linux" + + print("plat:", plat) + print("musl:", _get_musl_version(sys.executable)) + print("tags:", end=" ") + for t in platform_tags(re.sub(r"[.-]", "_", plat.split("-", 1)[-1])): + print(t, end="\n ") diff --git a/venv/lib/python3.10/site-packages/packaging/_parser.py b/venv/lib/python3.10/site-packages/packaging/_parser.py new file mode 100644 index 0000000..5a18b75 --- /dev/null +++ b/venv/lib/python3.10/site-packages/packaging/_parser.py @@ -0,0 +1,353 @@ +"""Handwritten parser of dependency specifiers. + +The docstring for each __parse_* function contains ENBF-inspired grammar representing +the implementation. +""" + +import ast +from typing import Any, List, NamedTuple, Optional, Tuple, Union + +from ._tokenizer import DEFAULT_RULES, Tokenizer + + +class Node: + def __init__(self, value: str) -> None: + self.value = value + + def __str__(self) -> str: + return self.value + + def __repr__(self) -> str: + return f"<{self.__class__.__name__}('{self}')>" + + def serialize(self) -> str: + raise NotImplementedError + + +class Variable(Node): + def serialize(self) -> str: + return str(self) + + +class Value(Node): + def serialize(self) -> str: + return f'"{self}"' + + +class Op(Node): + def serialize(self) -> str: + return str(self) + + +MarkerVar = Union[Variable, Value] +MarkerItem = Tuple[MarkerVar, Op, MarkerVar] +# MarkerAtom = Union[MarkerItem, List["MarkerAtom"]] +# MarkerList = List[Union["MarkerList", MarkerAtom, str]] +# mypy does not support recursive type definition +# https://github.com/python/mypy/issues/731 +MarkerAtom = Any +MarkerList = List[Any] + + +class ParsedRequirement(NamedTuple): + name: str + url: str + extras: List[str] + specifier: str + marker: Optional[MarkerList] + + +# -------------------------------------------------------------------------------------- +# Recursive descent parser for dependency specifier +# -------------------------------------------------------------------------------------- +def parse_requirement(source: str) -> ParsedRequirement: + return _parse_requirement(Tokenizer(source, rules=DEFAULT_RULES)) + + +def _parse_requirement(tokenizer: Tokenizer) -> ParsedRequirement: + """ + requirement = WS? IDENTIFIER WS? extras WS? 
requirement_details + """ + tokenizer.consume("WS") + + name_token = tokenizer.expect( + "IDENTIFIER", expected="package name at the start of dependency specifier" + ) + name = name_token.text + tokenizer.consume("WS") + + extras = _parse_extras(tokenizer) + tokenizer.consume("WS") + + url, specifier, marker = _parse_requirement_details(tokenizer) + tokenizer.expect("END", expected="end of dependency specifier") + + return ParsedRequirement(name, url, extras, specifier, marker) + + +def _parse_requirement_details( + tokenizer: Tokenizer, +) -> Tuple[str, str, Optional[MarkerList]]: + """ + requirement_details = AT URL (WS requirement_marker?)? + | specifier WS? (requirement_marker)? + """ + + specifier = "" + url = "" + marker = None + + if tokenizer.check("AT"): + tokenizer.read() + tokenizer.consume("WS") + + url_start = tokenizer.position + url = tokenizer.expect("URL", expected="URL after @").text + if tokenizer.check("END", peek=True): + return (url, specifier, marker) + + tokenizer.expect("WS", expected="whitespace after URL") + + # The input might end after whitespace. + if tokenizer.check("END", peek=True): + return (url, specifier, marker) + + marker = _parse_requirement_marker( + tokenizer, span_start=url_start, after="URL and whitespace" + ) + else: + specifier_start = tokenizer.position + specifier = _parse_specifier(tokenizer) + tokenizer.consume("WS") + + if tokenizer.check("END", peek=True): + return (url, specifier, marker) + + marker = _parse_requirement_marker( + tokenizer, + span_start=specifier_start, + after=( + "version specifier" + if specifier + else "name and no valid version specifier" + ), + ) + + return (url, specifier, marker) + + +def _parse_requirement_marker( + tokenizer: Tokenizer, *, span_start: int, after: str +) -> MarkerList: + """ + requirement_marker = SEMICOLON marker WS? + """ + + if not tokenizer.check("SEMICOLON"): + tokenizer.raise_syntax_error( + f"Expected end or semicolon (after {after})", + span_start=span_start, + ) + tokenizer.read() + + marker = _parse_marker(tokenizer) + tokenizer.consume("WS") + + return marker + + +def _parse_extras(tokenizer: Tokenizer) -> List[str]: + """ + extras = (LEFT_BRACKET wsp* extras_list? wsp* RIGHT_BRACKET)? + """ + if not tokenizer.check("LEFT_BRACKET", peek=True): + return [] + + with tokenizer.enclosing_tokens( + "LEFT_BRACKET", + "RIGHT_BRACKET", + around="extras", + ): + tokenizer.consume("WS") + extras = _parse_extras_list(tokenizer) + tokenizer.consume("WS") + + return extras + + +def _parse_extras_list(tokenizer: Tokenizer) -> List[str]: + """ + extras_list = identifier (wsp* ',' wsp* identifier)* + """ + extras: List[str] = [] + + if not tokenizer.check("IDENTIFIER"): + return extras + + extras.append(tokenizer.read().text) + + while True: + tokenizer.consume("WS") + if tokenizer.check("IDENTIFIER", peek=True): + tokenizer.raise_syntax_error("Expected comma between extra names") + elif not tokenizer.check("COMMA"): + break + + tokenizer.read() + tokenizer.consume("WS") + + extra_token = tokenizer.expect("IDENTIFIER", expected="extra name after comma") + extras.append(extra_token.text) + + return extras + + +def _parse_specifier(tokenizer: Tokenizer) -> str: + """ + specifier = LEFT_PARENTHESIS WS? version_many WS? RIGHT_PARENTHESIS + | WS? version_many WS? 
+ """ + with tokenizer.enclosing_tokens( + "LEFT_PARENTHESIS", + "RIGHT_PARENTHESIS", + around="version specifier", + ): + tokenizer.consume("WS") + parsed_specifiers = _parse_version_many(tokenizer) + tokenizer.consume("WS") + + return parsed_specifiers + + +def _parse_version_many(tokenizer: Tokenizer) -> str: + """ + version_many = (SPECIFIER (WS? COMMA WS? SPECIFIER)*)? + """ + parsed_specifiers = "" + while tokenizer.check("SPECIFIER"): + span_start = tokenizer.position + parsed_specifiers += tokenizer.read().text + if tokenizer.check("VERSION_PREFIX_TRAIL", peek=True): + tokenizer.raise_syntax_error( + ".* suffix can only be used with `==` or `!=` operators", + span_start=span_start, + span_end=tokenizer.position + 1, + ) + if tokenizer.check("VERSION_LOCAL_LABEL_TRAIL", peek=True): + tokenizer.raise_syntax_error( + "Local version label can only be used with `==` or `!=` operators", + span_start=span_start, + span_end=tokenizer.position, + ) + tokenizer.consume("WS") + if not tokenizer.check("COMMA"): + break + parsed_specifiers += tokenizer.read().text + tokenizer.consume("WS") + + return parsed_specifiers + + +# -------------------------------------------------------------------------------------- +# Recursive descent parser for marker expression +# -------------------------------------------------------------------------------------- +def parse_marker(source: str) -> MarkerList: + return _parse_marker(Tokenizer(source, rules=DEFAULT_RULES)) + + +def _parse_marker(tokenizer: Tokenizer) -> MarkerList: + """ + marker = marker_atom (BOOLOP marker_atom)+ + """ + expression = [_parse_marker_atom(tokenizer)] + while tokenizer.check("BOOLOP"): + token = tokenizer.read() + expr_right = _parse_marker_atom(tokenizer) + expression.extend((token.text, expr_right)) + return expression + + +def _parse_marker_atom(tokenizer: Tokenizer) -> MarkerAtom: + """ + marker_atom = WS? LEFT_PARENTHESIS WS? marker WS? RIGHT_PARENTHESIS WS? + | WS? marker_item WS? + """ + + tokenizer.consume("WS") + if tokenizer.check("LEFT_PARENTHESIS", peek=True): + with tokenizer.enclosing_tokens( + "LEFT_PARENTHESIS", + "RIGHT_PARENTHESIS", + around="marker expression", + ): + tokenizer.consume("WS") + marker: MarkerAtom = _parse_marker(tokenizer) + tokenizer.consume("WS") + else: + marker = _parse_marker_item(tokenizer) + tokenizer.consume("WS") + return marker + + +def _parse_marker_item(tokenizer: Tokenizer) -> MarkerItem: + """ + marker_item = WS? marker_var WS? marker_op WS? marker_var WS? 
+ """ + tokenizer.consume("WS") + marker_var_left = _parse_marker_var(tokenizer) + tokenizer.consume("WS") + marker_op = _parse_marker_op(tokenizer) + tokenizer.consume("WS") + marker_var_right = _parse_marker_var(tokenizer) + tokenizer.consume("WS") + return (marker_var_left, marker_op, marker_var_right) + + +def _parse_marker_var(tokenizer: Tokenizer) -> MarkerVar: + """ + marker_var = VARIABLE | QUOTED_STRING + """ + if tokenizer.check("VARIABLE"): + return process_env_var(tokenizer.read().text.replace(".", "_")) + elif tokenizer.check("QUOTED_STRING"): + return process_python_str(tokenizer.read().text) + else: + tokenizer.raise_syntax_error( + message="Expected a marker variable or quoted string" + ) + + +def process_env_var(env_var: str) -> Variable: + if ( + env_var == "platform_python_implementation" + or env_var == "python_implementation" + ): + return Variable("platform_python_implementation") + else: + return Variable(env_var) + + +def process_python_str(python_str: str) -> Value: + value = ast.literal_eval(python_str) + return Value(str(value)) + + +def _parse_marker_op(tokenizer: Tokenizer) -> Op: + """ + marker_op = IN | NOT IN | OP + """ + if tokenizer.check("IN"): + tokenizer.read() + return Op("in") + elif tokenizer.check("NOT"): + tokenizer.read() + tokenizer.expect("WS", expected="whitespace after 'not'") + tokenizer.expect("IN", expected="'in' after 'not'") + return Op("not in") + elif tokenizer.check("OP"): + return Op(tokenizer.read().text) + else: + return tokenizer.raise_syntax_error( + "Expected marker operator, one of " + "<=, <, !=, ==, >=, >, ~=, ===, in, not in" + ) diff --git a/venv/lib/python3.10/site-packages/packaging/_structures.py b/venv/lib/python3.10/site-packages/packaging/_structures.py new file mode 100644 index 0000000..90a6465 --- /dev/null +++ b/venv/lib/python3.10/site-packages/packaging/_structures.py @@ -0,0 +1,61 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
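# --- Editor's sketch (illustration only, not part of the vendored diff). ---
# A quick demonstration of the recursive descent parser from _parser.py above;
# parse_requirement() is a private helper that returns a ParsedRequirement
# named tuple, shown here purely to illustrate its shape.
from packaging._parser import parse_requirement

parsed = parse_requirement("torch >=2.0, <3 ; sys_platform != 'win32'")
print(parsed.name)       # "torch"
print(parsed.extras)     # []
print(parsed.specifier)  # ">=2.0,<3"
print(parsed.marker)     # [(<Variable('sys_platform')>, <Op('!=')>, <Value('win32')>)]
# --- End editor's sketch. ---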
+ + +class InfinityType: + def __repr__(self) -> str: + return "Infinity" + + def __hash__(self) -> int: + return hash(repr(self)) + + def __lt__(self, other: object) -> bool: + return False + + def __le__(self, other: object) -> bool: + return False + + def __eq__(self, other: object) -> bool: + return isinstance(other, self.__class__) + + def __gt__(self, other: object) -> bool: + return True + + def __ge__(self, other: object) -> bool: + return True + + def __neg__(self: object) -> "NegativeInfinityType": + return NegativeInfinity + + +Infinity = InfinityType() + + +class NegativeInfinityType: + def __repr__(self) -> str: + return "-Infinity" + + def __hash__(self) -> int: + return hash(repr(self)) + + def __lt__(self, other: object) -> bool: + return True + + def __le__(self, other: object) -> bool: + return True + + def __eq__(self, other: object) -> bool: + return isinstance(other, self.__class__) + + def __gt__(self, other: object) -> bool: + return False + + def __ge__(self, other: object) -> bool: + return False + + def __neg__(self: object) -> InfinityType: + return Infinity + + +NegativeInfinity = NegativeInfinityType() diff --git a/venv/lib/python3.10/site-packages/packaging/_tokenizer.py b/venv/lib/python3.10/site-packages/packaging/_tokenizer.py new file mode 100644 index 0000000..dd0d648 --- /dev/null +++ b/venv/lib/python3.10/site-packages/packaging/_tokenizer.py @@ -0,0 +1,192 @@ +import contextlib +import re +from dataclasses import dataclass +from typing import Dict, Iterator, NoReturn, Optional, Tuple, Union + +from .specifiers import Specifier + + +@dataclass +class Token: + name: str + text: str + position: int + + +class ParserSyntaxError(Exception): + """The provided source text could not be parsed correctly.""" + + def __init__( + self, + message: str, + *, + source: str, + span: Tuple[int, int], + ) -> None: + self.span = span + self.message = message + self.source = source + + super().__init__() + + def __str__(self) -> str: + marker = " " * self.span[0] + "~" * (self.span[1] - self.span[0]) + "^" + return "\n ".join([self.message, self.source, marker]) + + +DEFAULT_RULES: "Dict[str, Union[str, re.Pattern[str]]]" = { + "LEFT_PARENTHESIS": r"\(", + "RIGHT_PARENTHESIS": r"\)", + "LEFT_BRACKET": r"\[", + "RIGHT_BRACKET": r"\]", + "SEMICOLON": r";", + "COMMA": r",", + "QUOTED_STRING": re.compile( + r""" + ( + ('[^']*') + | + ("[^"]*") + ) + """, + re.VERBOSE, + ), + "OP": r"(===|==|~=|!=|<=|>=|<|>)", + "BOOLOP": r"\b(or|and)\b", + "IN": r"\bin\b", + "NOT": r"\bnot\b", + "VARIABLE": re.compile( + r""" + \b( + python_version + |python_full_version + |os[._]name + |sys[._]platform + |platform_(release|system) + |platform[._](version|machine|python_implementation) + |python_implementation + |implementation_(name|version) + |extra + )\b + """, + re.VERBOSE, + ), + "SPECIFIER": re.compile( + Specifier._operator_regex_str + Specifier._version_regex_str, + re.VERBOSE | re.IGNORECASE, + ), + "AT": r"\@", + "URL": r"[^ \t]+", + "IDENTIFIER": r"\b[a-zA-Z0-9][a-zA-Z0-9._-]*\b", + "VERSION_PREFIX_TRAIL": r"\.\*", + "VERSION_LOCAL_LABEL_TRAIL": r"\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*", + "WS": r"[ \t]+", + "END": r"$", +} + + +class Tokenizer: + """Context-sensitive token parsing. + + Provides methods to examine the input stream to check whether the next token + matches. 
+ """ + + def __init__( + self, + source: str, + *, + rules: "Dict[str, Union[str, re.Pattern[str]]]", + ) -> None: + self.source = source + self.rules: Dict[str, re.Pattern[str]] = { + name: re.compile(pattern) for name, pattern in rules.items() + } + self.next_token: Optional[Token] = None + self.position = 0 + + def consume(self, name: str) -> None: + """Move beyond provided token name, if at current position.""" + if self.check(name): + self.read() + + def check(self, name: str, *, peek: bool = False) -> bool: + """Check whether the next token has the provided name. + + By default, if the check succeeds, the token *must* be read before + another check. If `peek` is set to `True`, the token is not loaded and + would need to be checked again. + """ + assert ( + self.next_token is None + ), f"Cannot check for {name!r}, already have {self.next_token!r}" + assert name in self.rules, f"Unknown token name: {name!r}" + + expression = self.rules[name] + + match = expression.match(self.source, self.position) + if match is None: + return False + if not peek: + self.next_token = Token(name, match[0], self.position) + return True + + def expect(self, name: str, *, expected: str) -> Token: + """Expect a certain token name next, failing with a syntax error otherwise. + + The token is *not* read. + """ + if not self.check(name): + raise self.raise_syntax_error(f"Expected {expected}") + return self.read() + + def read(self) -> Token: + """Consume the next token and return it.""" + token = self.next_token + assert token is not None + + self.position += len(token.text) + self.next_token = None + + return token + + def raise_syntax_error( + self, + message: str, + *, + span_start: Optional[int] = None, + span_end: Optional[int] = None, + ) -> NoReturn: + """Raise ParserSyntaxError at the given position.""" + span = ( + self.position if span_start is None else span_start, + self.position if span_end is None else span_end, + ) + raise ParserSyntaxError( + message, + source=self.source, + span=span, + ) + + @contextlib.contextmanager + def enclosing_tokens( + self, open_token: str, close_token: str, *, around: str + ) -> Iterator[None]: + if self.check(open_token): + open_position = self.position + self.read() + else: + open_position = None + + yield + + if open_position is None: + return + + if not self.check(close_token): + self.raise_syntax_error( + f"Expected matching {close_token} for {open_token}, after {around}", + span_start=open_position, + ) + + self.read() diff --git a/venv/lib/python3.10/site-packages/packaging/markers.py b/venv/lib/python3.10/site-packages/packaging/markers.py new file mode 100644 index 0000000..8b98fca --- /dev/null +++ b/venv/lib/python3.10/site-packages/packaging/markers.py @@ -0,0 +1,252 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
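# --- Editor's sketch (illustration only, not part of the vendored diff). ---
# The Tokenizer above is context-sensitive: check()/read()/expect() consume one
# token at a time against DEFAULT_RULES. Private API, shown for demonstration.
from packaging._tokenizer import DEFAULT_RULES, Tokenizer

tokenizer = Tokenizer("numpy>=1.21", rules=DEFAULT_RULES)
name = tokenizer.expect("IDENTIFIER", expected="package name")
print(name.text)  # "numpy"
if tokenizer.check("SPECIFIER"):
    print(tokenizer.read().text)  # ">=1.21"
# --- End editor's sketch. ---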
+
+import operator
+import os
+import platform
+import sys
+from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+
+from ._parser import (
+    MarkerAtom,
+    MarkerList,
+    Op,
+    Value,
+    Variable,
+    parse_marker as _parse_marker,
+)
+from ._tokenizer import ParserSyntaxError
+from .specifiers import InvalidSpecifier, Specifier
+from .utils import canonicalize_name
+
+__all__ = [
+    "InvalidMarker",
+    "UndefinedComparison",
+    "UndefinedEnvironmentName",
+    "Marker",
+    "default_environment",
+]
+
+Operator = Callable[[str, str], bool]
+
+
+class InvalidMarker(ValueError):
+    """
+    An invalid marker was found; users should refer to PEP 508.
+    """
+
+
+class UndefinedComparison(ValueError):
+    """
+    An invalid operation was attempted on a value that doesn't support it.
+    """
+
+
+class UndefinedEnvironmentName(ValueError):
+    """
+    A name was attempted to be used that does not exist inside of the
+    environment.
+    """
+
+
+def _normalize_extra_values(results: Any) -> Any:
+    """
+    Normalize extra values.
+    """
+    if isinstance(results[0], tuple):
+        lhs, op, rhs = results[0]
+        if isinstance(lhs, Variable) and lhs.value == "extra":
+            normalized_extra = canonicalize_name(rhs.value)
+            rhs = Value(normalized_extra)
+        elif isinstance(rhs, Variable) and rhs.value == "extra":
+            normalized_extra = canonicalize_name(lhs.value)
+            lhs = Value(normalized_extra)
+        results[0] = lhs, op, rhs
+    return results
+
+
+def _format_marker(
+    marker: Union[List[str], MarkerAtom, str], first: Optional[bool] = True
+) -> str:
+
+    assert isinstance(marker, (list, tuple, str))
+
+    # Sometimes we have a structure like [[...]] which is a single item list
+    # where the single item is itself its own list. In that case we want to
+    # skip the rest of this function so that we don't get extraneous () on
+    # the outside.
+ if ( + isinstance(marker, list) + and len(marker) == 1 + and isinstance(marker[0], (list, tuple)) + ): + return _format_marker(marker[0]) + + if isinstance(marker, list): + inner = (_format_marker(m, first=False) for m in marker) + if first: + return " ".join(inner) + else: + return "(" + " ".join(inner) + ")" + elif isinstance(marker, tuple): + return " ".join([m.serialize() for m in marker]) + else: + return marker + + +_operators: Dict[str, Operator] = { + "in": lambda lhs, rhs: lhs in rhs, + "not in": lambda lhs, rhs: lhs not in rhs, + "<": operator.lt, + "<=": operator.le, + "==": operator.eq, + "!=": operator.ne, + ">=": operator.ge, + ">": operator.gt, +} + + +def _eval_op(lhs: str, op: Op, rhs: str) -> bool: + try: + spec = Specifier("".join([op.serialize(), rhs])) + except InvalidSpecifier: + pass + else: + return spec.contains(lhs, prereleases=True) + + oper: Optional[Operator] = _operators.get(op.serialize()) + if oper is None: + raise UndefinedComparison(f"Undefined {op!r} on {lhs!r} and {rhs!r}.") + + return oper(lhs, rhs) + + +def _normalize(*values: str, key: str) -> Tuple[str, ...]: + # PEP 685 – Comparison of extra names for optional distribution dependencies + # https://peps.python.org/pep-0685/ + # > When comparing extra names, tools MUST normalize the names being + # > compared using the semantics outlined in PEP 503 for names + if key == "extra": + return tuple(canonicalize_name(v) for v in values) + + # other environment markers don't have such standards + return values + + +def _evaluate_markers(markers: MarkerList, environment: Dict[str, str]) -> bool: + groups: List[List[bool]] = [[]] + + for marker in markers: + assert isinstance(marker, (list, tuple, str)) + + if isinstance(marker, list): + groups[-1].append(_evaluate_markers(marker, environment)) + elif isinstance(marker, tuple): + lhs, op, rhs = marker + + if isinstance(lhs, Variable): + environment_key = lhs.value + lhs_value = environment[environment_key] + rhs_value = rhs.value + else: + lhs_value = lhs.value + environment_key = rhs.value + rhs_value = environment[environment_key] + + lhs_value, rhs_value = _normalize(lhs_value, rhs_value, key=environment_key) + groups[-1].append(_eval_op(lhs_value, op, rhs_value)) + else: + assert marker in ["and", "or"] + if marker == "or": + groups.append([]) + + return any(all(item) for item in groups) + + +def format_full_version(info: "sys._version_info") -> str: + version = "{0.major}.{0.minor}.{0.micro}".format(info) + kind = info.releaselevel + if kind != "final": + version += kind[0] + str(info.serial) + return version + + +def default_environment() -> Dict[str, str]: + iver = format_full_version(sys.implementation.version) + implementation_name = sys.implementation.name + return { + "implementation_name": implementation_name, + "implementation_version": iver, + "os_name": os.name, + "platform_machine": platform.machine(), + "platform_release": platform.release(), + "platform_system": platform.system(), + "platform_version": platform.version(), + "python_full_version": platform.python_version(), + "platform_python_implementation": platform.python_implementation(), + "python_version": ".".join(platform.python_version_tuple()[:2]), + "sys_platform": sys.platform, + } + + +class Marker: + def __init__(self, marker: str) -> None: + # Note: We create a Marker object without calling this constructor in + # packaging.requirements.Requirement. If any additional logic is + # added here, make sure to mirror/adapt Requirement. 
+        try:
+            self._markers = _normalize_extra_values(_parse_marker(marker))
+            # The attribute `_markers` can be described in terms of a recursive type:
+            # MarkerList = List[Union[Tuple[Node, ...], str, MarkerList]]
+            #
+            # For example, the following expression:
+            # python_version > "3.6" or (python_version == "3.6" and os_name == "unix")
+            #
+            # is parsed into:
+            # [
+            #     (<Variable('python_version')>, <Op('>')>, <Value('3.6')>),
+            #     'and',
+            #     [
+            #         (<Variable('python_version')>, <Op('==')>, <Value('3.6')>),
+            #         'or',
+            #         (<Variable('os_name')>, <Op('==')>, <Value('unix')>)
+            #     ]
+            # ]
+        except ParserSyntaxError as e:
+            raise InvalidMarker(str(e)) from e
+
+    def __str__(self) -> str:
+        return _format_marker(self._markers)
+
+    def __repr__(self) -> str:
+        return f"<Marker('{self}')>"
+
+    def __hash__(self) -> int:
+        return hash((self.__class__.__name__, str(self)))
+
+    def __eq__(self, other: Any) -> bool:
+        if not isinstance(other, Marker):
+            return NotImplemented
+
+        return str(self) == str(other)
+
+    def evaluate(self, environment: Optional[Dict[str, str]] = None) -> bool:
+        """Evaluate a marker.
+
+        Return the boolean from evaluating the given marker against the
+        environment. environment is an optional argument to override all or
+        part of the determined environment.
+
+        The environment is determined from the current Python process.
+        """
+        current_environment = default_environment()
+        current_environment["extra"] = ""
+        if environment is not None:
+            current_environment.update(environment)
+            # The API used to allow setting extra to None. We need to handle this
+            # case for backwards compatibility.
+            if current_environment["extra"] is None:
+                current_environment["extra"] = ""
+
+        return _evaluate_markers(self._markers, current_environment)
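# --- Editor's sketch (illustration only, not part of the vendored diff). ---
# Putting the marker machinery above together through the public API:
# Marker.evaluate() merges default_environment() with any overrides and feeds
# the parsed structure to _evaluate_markers().
from packaging.markers import Marker

m = Marker('python_version >= "3.8" and os_name == "posix"')
print(m.evaluate())                   # depends on the running interpreter
print(m.evaluate({"os_name": "nt"}))  # False: the override fails the second clause
# --- End editor's sketch. ---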
diff --git a/venv/lib/python3.10/site-packages/packaging/metadata.py b/venv/lib/python3.10/site-packages/packaging/metadata.py
new file mode 100644
index 0000000..e76a60c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/packaging/metadata.py
@@ -0,0 +1,408 @@
+import email.feedparser
+import email.header
+import email.message
+import email.parser
+import email.policy
+import sys
+import typing
+from typing import Dict, List, Optional, Tuple, Union, cast
+
+if sys.version_info >= (3, 8):  # pragma: no cover
+    from typing import TypedDict
+else:  # pragma: no cover
+    if typing.TYPE_CHECKING:
+        from typing_extensions import TypedDict
+    else:
+        try:
+            from typing_extensions import TypedDict
+        except ImportError:
+
+            class TypedDict:
+                def __init_subclass__(*_args, **_kwargs):
+                    pass
+
+
+# The RawMetadata class attempts to make as few assumptions about the underlying
+# serialization formats as possible. The idea is that as long as a serialization
+# format offers some very basic primitives in *some* way, then we can support
+# serializing to and from that format.
+class RawMetadata(TypedDict, total=False):
+    """A dictionary of raw core metadata.
+
+    Each field in core metadata maps to a key of this dictionary (when data is
+    provided). The key is lower-case and underscores are used instead of dashes
+    compared to the equivalent core metadata field. Any core metadata field that
+    can be specified multiple times or can hold multiple values in a single
+    field has a key with a plural name.
+
+    Core metadata fields that can be specified multiple times are stored as a
+    list or dict depending on which is appropriate for the field. Any fields
+    which hold multiple values in a single field are stored as a list.
+
+    """
+
+    # Metadata 1.0 - PEP 241
+    metadata_version: str
+    name: str
+    version: str
+    platforms: List[str]
+    summary: str
+    description: str
+    keywords: List[str]
+    home_page: str
+    author: str
+    author_email: str
+    license: str
+
+    # Metadata 1.1 - PEP 314
+    supported_platforms: List[str]
+    download_url: str
+    classifiers: List[str]
+    requires: List[str]
+    provides: List[str]
+    obsoletes: List[str]
+
+    # Metadata 1.2 - PEP 345
+    maintainer: str
+    maintainer_email: str
+    requires_dist: List[str]
+    provides_dist: List[str]
+    obsoletes_dist: List[str]
+    requires_python: str
+    requires_external: List[str]
+    project_urls: Dict[str, str]
+
+    # Metadata 2.0
+    # PEP 426 attempted to completely revamp the metadata format
+    # but got stuck without ever being able to build consensus on
+    # it and ultimately ended up withdrawn.
+    #
+    # However, a number of tools had started emitting METADATA with
+    # `2.0` Metadata-Version, so for historical reasons, this version
+    # was skipped.
+
+    # Metadata 2.1 - PEP 566
+    description_content_type: str
+    provides_extra: List[str]
+
+    # Metadata 2.2 - PEP 643
+    dynamic: List[str]
+
+    # Metadata 2.3 - PEP 685
+    # No new fields were added in PEP 685, just some edge cases were
+    # tightened up to provide better interoperability.
+
+
+_STRING_FIELDS = {
+    "author",
+    "author_email",
+    "description",
+    "description_content_type",
+    "download_url",
+    "home_page",
+    "license",
+    "maintainer",
+    "maintainer_email",
+    "metadata_version",
+    "name",
+    "requires_python",
+    "summary",
+    "version",
+}
+
+_LIST_STRING_FIELDS = {
+    "classifiers",
+    "dynamic",
+    "obsoletes",
+    "obsoletes_dist",
+    "platforms",
+    "provides",
+    "provides_dist",
+    "provides_extra",
+    "requires",
+    "requires_dist",
+    "requires_external",
+    "supported_platforms",
+}
+
+
+def _parse_keywords(data: str) -> List[str]:
+    """Split a string of comma-separated keywords into a list of keywords."""
+    return [k.strip() for k in data.split(",")]
+
+
+def _parse_project_urls(data: List[str]) -> Dict[str, str]:
+    """Parse a list of label/URL string pairings separated by a comma."""
+    urls = {}
+    for pair in data:
+        # Our logic is slightly tricky here as we want to try and do
+        # *something* reasonable with malformed data.
+        #
+        # The main thing that we have to worry about is data that does
+        # not have a ',' at all to split the label from the value. There
+        # isn't a singular right answer here, and we will fail validation
+        # later on (if the caller is validating) so it doesn't *really*
+        # matter, but since the missing value has to be an empty str
+        # and our return value is dict[str, str], if we let the key
+        # be the missing value, then they'd have multiple '' values that
+        # overwrite each other in an accumulating dict.
+        #
+        # The other potential issue is that it's possible to have the
+        # same label multiple times in the metadata, with no solid "right"
+        # answer with what to do in that case. As such, we'll do the only
+        # thing we can, which is treat the field as unparseable and add it
+        # to our list of unparsed fields.
+        parts = [p.strip() for p in pair.split(",", 1)]
+        parts.extend([""] * (max(0, 2 - len(parts))))  # Ensure 2 items
+
+        # TODO: The spec doesn't say anything about if the keys should be
+        #       considered case sensitive or not... logically they should
+        #       be case-preserving and case-insensitive, but doing that
+        #       would open up more cases where we might have duplicate
+        #       entries.
+ label, url = parts + if label in urls: + # The label already exists in our set of urls, so this field + # is unparseable, and we can just add the whole thing to our + # unparseable data and stop processing it. + raise KeyError("duplicate labels in project urls") + urls[label] = url + + return urls + + +def _get_payload(msg: email.message.Message, source: Union[bytes, str]) -> str: + """Get the body of the message.""" + # If our source is a str, then our caller has managed encodings for us, + # and we don't need to deal with it. + if isinstance(source, str): + payload: str = msg.get_payload() + return payload + # If our source is a bytes, then we're managing the encoding and we need + # to deal with it. + else: + bpayload: bytes = msg.get_payload(decode=True) + try: + return bpayload.decode("utf8", "strict") + except UnicodeDecodeError: + raise ValueError("payload in an invalid encoding") + + +# The various parse_FORMAT functions here are intended to be as lenient as +# possible in their parsing, while still returning a correctly typed +# RawMetadata. +# +# To aid in this, we also generally want to do as little touching of the +# data as possible, except where there are possibly some historic holdovers +# that make valid data awkward to work with. +# +# While this is a lower level, intermediate format than our ``Metadata`` +# class, some light touch ups can make a massive difference in usability. + +# Map METADATA fields to RawMetadata. +_EMAIL_TO_RAW_MAPPING = { + "author": "author", + "author-email": "author_email", + "classifier": "classifiers", + "description": "description", + "description-content-type": "description_content_type", + "download-url": "download_url", + "dynamic": "dynamic", + "home-page": "home_page", + "keywords": "keywords", + "license": "license", + "maintainer": "maintainer", + "maintainer-email": "maintainer_email", + "metadata-version": "metadata_version", + "name": "name", + "obsoletes": "obsoletes", + "obsoletes-dist": "obsoletes_dist", + "platform": "platforms", + "project-url": "project_urls", + "provides": "provides", + "provides-dist": "provides_dist", + "provides-extra": "provides_extra", + "requires": "requires", + "requires-dist": "requires_dist", + "requires-external": "requires_external", + "requires-python": "requires_python", + "summary": "summary", + "supported-platform": "supported_platforms", + "version": "version", +} + + +def parse_email(data: Union[bytes, str]) -> Tuple[RawMetadata, Dict[str, List[str]]]: + """Parse a distribution's metadata. + + This function returns a two-item tuple of dicts. The first dict is of + recognized fields from the core metadata specification. Fields that can be + parsed and translated into Python's built-in types are converted + appropriately. All other fields are left as-is. Fields that are allowed to + appear multiple times are stored as lists. + + The second dict contains all other fields from the metadata. This includes + any unrecognized fields. It also includes any fields which are expected to + be parsed into a built-in type but were not formatted appropriately. Finally, + any fields that are expected to appear only once but are repeated are + included in this dict. 
+ + """ + raw: Dict[str, Union[str, List[str], Dict[str, str]]] = {} + unparsed: Dict[str, List[str]] = {} + + if isinstance(data, str): + parsed = email.parser.Parser(policy=email.policy.compat32).parsestr(data) + else: + parsed = email.parser.BytesParser(policy=email.policy.compat32).parsebytes(data) + + # We have to wrap parsed.keys() in a set, because in the case of multiple + # values for a key (a list), the key will appear multiple times in the + # list of keys, but we're avoiding that by using get_all(). + for name in frozenset(parsed.keys()): + # Header names in RFC are case insensitive, so we'll normalize to all + # lower case to make comparisons easier. + name = name.lower() + + # We use get_all() here, even for fields that aren't multiple use, + # because otherwise someone could have e.g. two Name fields, and we + # would just silently ignore it rather than doing something about it. + headers = parsed.get_all(name) + + # The way the email module works when parsing bytes is that it + # unconditionally decodes the bytes as ascii using the surrogateescape + # handler. When you pull that data back out (such as with get_all() ), + # it looks to see if the str has any surrogate escapes, and if it does + # it wraps it in a Header object instead of returning the string. + # + # As such, we'll look for those Header objects, and fix up the encoding. + value = [] + # Flag if we have run into any issues processing the headers, thus + # signalling that the data belongs in 'unparsed'. + valid_encoding = True + for h in headers: + # It's unclear if this can return more types than just a Header or + # a str, so we'll just assert here to make sure. + assert isinstance(h, (email.header.Header, str)) + + # If it's a header object, we need to do our little dance to get + # the real data out of it. In cases where there is invalid data + # we're going to end up with mojibake, but there's no obvious, good + # way around that without reimplementing parts of the Header object + # ourselves. + # + # That should be fine since, if mojibacked happens, this key is + # going into the unparsed dict anyways. + if isinstance(h, email.header.Header): + # The Header object stores it's data as chunks, and each chunk + # can be independently encoded, so we'll need to check each + # of them. + chunks: List[Tuple[bytes, Optional[str]]] = [] + for bin, encoding in email.header.decode_header(h): + try: + bin.decode("utf8", "strict") + except UnicodeDecodeError: + # Enable mojibake. + encoding = "latin1" + valid_encoding = False + else: + encoding = "utf8" + chunks.append((bin, encoding)) + + # Turn our chunks back into a Header object, then let that + # Header object do the right thing to turn them into a + # string for us. + value.append(str(email.header.make_header(chunks))) + # This is already a string, so just add it. + else: + value.append(h) + + # We've processed all of our values to get them into a list of str, + # but we may have mojibake data, in which case this is an unparsed + # field. + if not valid_encoding: + unparsed[name] = value + continue + + raw_name = _EMAIL_TO_RAW_MAPPING.get(name) + if raw_name is None: + # This is a bit of a weird situation, we've encountered a key that + # we don't know what it means, so we don't know whether it's meant + # to be a list or not. + # + # Since we can't really tell one way or another, we'll just leave it + # as a list, even though it may be a single item list, because that's + # what makes the most sense for email headers. 
+            unparsed[name] = value
+            continue
+
+        # If this is one of our string fields, then we'll check to see if our
+        # value is a list of a single item. If it is then we'll assume that
+        # it was emitted as a single string, and unwrap the str from inside
+        # the list.
+        #
+        # If it's any other kind of data, then we haven't the faintest clue
+        # what we should parse it as, and we have to just add it to our list
+        # of unparsed stuff.
+        if raw_name in _STRING_FIELDS and len(value) == 1:
+            raw[raw_name] = value[0]
+        # If this is one of our list of string fields, then we can just assign
+        # the value, since email *only* has strings, and our get_all() call
+        # above ensures that this is a list.
+        elif raw_name in _LIST_STRING_FIELDS:
+            raw[raw_name] = value
+        # Special Case: Keywords
+        # The keywords field is implemented in the metadata spec as a str,
+        # but it conceptually is a list of strings, and is serialized using
+        # ", ".join(keywords), so we'll do some light data massaging to turn
+        # this into what it logically is.
+        elif raw_name == "keywords" and len(value) == 1:
+            raw[raw_name] = _parse_keywords(value[0])
+        # Special Case: Project-URL
+        # The project urls is implemented in the metadata spec as a list of
+        # specially-formatted strings that represent a key and a value, which
+        # is fundamentally a mapping, however the email format doesn't support
+        # mappings in a sane way, so it was crammed into a list of strings
+        # instead.
+        #
+        # We will do a little light data massaging to turn this into a map as
+        # it logically should be.
+        elif raw_name == "project_urls":
+            try:
+                raw[raw_name] = _parse_project_urls(value)
+            except KeyError:
+                unparsed[name] = value
+        # Nothing that we've done has managed to parse this, so we'll just
+        # throw it in our unparsed data and move on.
+        else:
+            unparsed[name] = value
+
+    # We need to support getting the Description from the message payload in
+    # addition to getting it from the headers. This does mean, though, there
+    # is the possibility of it being set both ways, in which case we put both
+    # in 'unparsed' since we don't know which is right.
+    try:
+        payload = _get_payload(parsed, data)
+    except ValueError:
+        unparsed.setdefault("description", []).append(
+            parsed.get_payload(decode=isinstance(data, bytes))
+        )
+    else:
+        if payload:
+            # Check to see if we've already got a description; if so, then
+            # both it and this body move to unparseable.
+            if "description" in raw:
+                description_header = cast(str, raw.pop("description"))
+                unparsed.setdefault("description", []).extend(
+                    [description_header, payload]
+                )
+            elif "description" in unparsed:
+                unparsed["description"].append(payload)
+            else:
+                raw["description"] = payload
+
+    # We need to cast `raw` to RawMetadata because a TypedDict only supports
+    # literal key names, while this function computes its key names; the way
+    # it is implemented, though, `raw` can only end up with valid key names.
+    return cast(RawMetadata, raw), unparsed
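# --- Editor's sketch (illustration only, not part of the vendored diff). ---
# A small example of the lenient parse_email() API above: recognized fields
# land in the RawMetadata dict; anything malformed or unknown lands in
# `unparsed`.
from packaging.metadata import parse_email

raw, unparsed = parse_email(
    "Metadata-Version: 2.1\nName: example\nVersion: 1.0\nKeywords: parsing, metadata\n"
)
print(raw["name"], raw["version"])  # example 1.0
print(raw["keywords"])              # ['parsing', 'metadata']
print(unparsed)                     # {}
# --- End editor's sketch. ---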
diff --git a/venv/lib/python3.10/site-packages/wheel/vendored/__init__.py b/venv/lib/python3.10/site-packages/packaging/py.typed
similarity index 100%
rename from venv/lib/python3.10/site-packages/wheel/vendored/__init__.py
rename to venv/lib/python3.10/site-packages/packaging/py.typed
diff --git a/venv/lib/python3.10/site-packages/packaging/requirements.py b/venv/lib/python3.10/site-packages/packaging/requirements.py
new file mode 100644
index 0000000..f34bfa8
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/packaging/requirements.py
@@ -0,0 +1,95 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+import urllib.parse
+from typing import Any, List, Optional, Set
+
+from ._parser import parse_requirement as _parse_requirement
+from ._tokenizer import ParserSyntaxError
+from .markers import Marker, _normalize_extra_values
+from .specifiers import SpecifierSet
+
+
+class InvalidRequirement(ValueError):
+    """
+    An invalid requirement was found; users should refer to PEP 508.
+    """
+
+
+class Requirement:
+    """Parse a requirement.
+
+    Parse a given requirement string into its parts, such as name, specifier,
+    URL, and extras. Raises InvalidRequirement on a badly-formed requirement
+    string.
+    """
+
+    # TODO: Can we test whether something is contained within a requirement?
+    #       If so how do we do that? Do we need to test against the _name_ of
+    #       the thing as well as the version? What about the markers?
+    # TODO: Can we normalize the name and extra name?
+
+    def __init__(self, requirement_string: str) -> None:
+        try:
+            parsed = _parse_requirement(requirement_string)
+        except ParserSyntaxError as e:
+            raise InvalidRequirement(str(e)) from e
+
+        self.name: str = parsed.name
+        if parsed.url:
+            parsed_url = urllib.parse.urlparse(parsed.url)
+            if parsed_url.scheme == "file":
+                if urllib.parse.urlunparse(parsed_url) != parsed.url:
+                    raise InvalidRequirement("Invalid URL given")
+            elif not (parsed_url.scheme and parsed_url.netloc) or (
+                not parsed_url.scheme and not parsed_url.netloc
+            ):
+                raise InvalidRequirement(f"Invalid URL: {parsed.url}")
+            self.url: Optional[str] = parsed.url
+        else:
+            self.url = None
+        self.extras: Set[str] = set(parsed.extras if parsed.extras else [])
+        self.specifier: SpecifierSet = SpecifierSet(parsed.specifier)
+        self.marker: Optional[Marker] = None
+        if parsed.marker is not None:
+            self.marker = Marker.__new__(Marker)
+            self.marker._markers = _normalize_extra_values(parsed.marker)
+
+    def __str__(self) -> str:
+        parts: List[str] = [self.name]
+
+        if self.extras:
+            formatted_extras = ",".join(sorted(self.extras))
+            parts.append(f"[{formatted_extras}]")
+
+        if self.specifier:
+            parts.append(str(self.specifier))
+
+        if self.url:
+            parts.append(f"@ {self.url}")
+            if self.marker:
+                parts.append(" ")
+
+        if self.marker:
+            parts.append(f"; {self.marker}")
+
+        return "".join(parts)
+
+    def __repr__(self) -> str:
+        return f"<Requirement('{self}')>"
+
+    def __hash__(self) -> int:
+        return hash((self.__class__.__name__, str(self)))
+
+    def __eq__(self, other: Any) -> bool:
+        if not isinstance(other, Requirement):
+            return NotImplemented
+
+        return (
+            self.name == other.name
+            and self.extras == other.extras
+            and self.specifier == other.specifier
+            and self.url == other.url
+            and self.marker == other.marker
+        )
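# --- Editor's sketch (illustration only, not part of the vendored diff). ---
# Requirement ties the parser, markers, and specifiers together; a small
# example through the public API:
from packaging.requirements import Requirement

req = Requirement('requests[security] >=2.8.1 ; python_version < "3.11"')
print(req.name)       # requests
print(req.extras)     # {'security'}
print(req.specifier)  # >=2.8.1  (a SpecifierSet)
print(req.marker)     # python_version < "3.11"
# --- End editor's sketch. ---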
b/venv/lib/python3.10/site-packages/packaging/specifiers.py new file mode 100644 index 0000000..ba8fe37 --- /dev/null +++ b/venv/lib/python3.10/site-packages/packaging/specifiers.py @@ -0,0 +1,1008 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. +""" +.. testsetup:: + + from packaging.specifiers import Specifier, SpecifierSet, InvalidSpecifier + from packaging.version import Version +""" + +import abc +import itertools +import re +from typing import ( + Callable, + Iterable, + Iterator, + List, + Optional, + Set, + Tuple, + TypeVar, + Union, +) + +from .utils import canonicalize_version +from .version import Version + +UnparsedVersion = Union[Version, str] +UnparsedVersionVar = TypeVar("UnparsedVersionVar", bound=UnparsedVersion) +CallableOperator = Callable[[Version, str], bool] + + +def _coerce_version(version: UnparsedVersion) -> Version: + if not isinstance(version, Version): + version = Version(version) + return version + + +class InvalidSpecifier(ValueError): + """ + Raised when attempting to create a :class:`Specifier` with a specifier + string that is invalid. + + >>> Specifier("lolwat") + Traceback (most recent call last): + ... + packaging.specifiers.InvalidSpecifier: Invalid specifier: 'lolwat' + """ + + +class BaseSpecifier(metaclass=abc.ABCMeta): + @abc.abstractmethod + def __str__(self) -> str: + """ + Returns the str representation of this Specifier-like object. This + should be representative of the Specifier itself. + """ + + @abc.abstractmethod + def __hash__(self) -> int: + """ + Returns a hash value for this Specifier-like object. + """ + + @abc.abstractmethod + def __eq__(self, other: object) -> bool: + """ + Returns a boolean representing whether or not the two Specifier-like + objects are equal. + + :param other: The other object to check against. + """ + + @property + @abc.abstractmethod + def prereleases(self) -> Optional[bool]: + """Whether or not pre-releases as a whole are allowed. + + This can be set to either ``True`` or ``False`` to explicitly enable or disable + prereleases or it can be set to ``None`` (the default) to use default semantics. + """ + + @prereleases.setter + def prereleases(self, value: bool) -> None: + """Setter for :attr:`prereleases`. + + :param value: The value to set. + """ + + @abc.abstractmethod + def contains(self, item: str, prereleases: Optional[bool] = None) -> bool: + """ + Determines if the given item is contained within this specifier. + """ + + @abc.abstractmethod + def filter( + self, iterable: Iterable[UnparsedVersionVar], prereleases: Optional[bool] = None + ) -> Iterator[UnparsedVersionVar]: + """ + Takes an iterable of items and filters them so that only items which + are contained within this specifier are allowed in it. + """ + + +class Specifier(BaseSpecifier): + """This class abstracts handling of version specifiers. + + .. tip:: + + It is generally not required to instantiate this manually. You should instead + prefer to work with :class:`SpecifierSet` instead, which can parse + comma-separated version specifiers (which is what package metadata contains). + """ + + _operator_regex_str = r""" + (?P(~=|==|!=|<=|>=|<|>|===)) + """ + _version_regex_str = r""" + (?P + (?: + # The identity operators allow for an escape hatch that will + # do an exact string match of the version you wish to install. 
+                # This will not be parsed by PEP 440 and we cannot determine
+                # any semantic meaning from it. This operator is discouraged
+                # but included entirely as an escape hatch.
+                (?<====)  # Only match for the identity operator
+                \s*
+                [^\s;)]*  # The arbitrary version can be just about anything,
+                          # we match everything except for whitespace, a
+                          # semi-colon for marker support, and a closing paren
+                          # since versions can be enclosed in them.
+            )
+            |
+            (?:
+                # The (non)equality operators allow for wild card and local
+                # versions to be specified so we have to define these two
+                # operators separately to enable that.
+                (?<===|!=)            # Only match for equals and not equals
+
+                \s*
+                v?
+                (?:[0-9]+!)?          # epoch
+                [0-9]+(?:\.[0-9]+)*   # release
+
+                # You cannot use a wild card and a pre-release, post-release, a dev or
+                # local version together so group them with a | and make them optional.
+                (?:
+                    \.\*  # Wild card syntax of .*
+                    |
+                    (?:                                  # pre release
+                        [-_\.]?
+                        (alpha|beta|preview|pre|a|b|c|rc)
+                        [-_\.]?
+                        [0-9]*
+                    )?
+                    (?:                                  # post release
+                        (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
+                    )?
+                    (?:[-_\.]?dev[-_\.]?[0-9]*)?         # dev release
+                    (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
+                )?
+            )
+            |
+            (?:
+                # The compatible operator requires at least two digits in the
+                # release segment.
+                (?<=~=)               # Only match for the compatible operator
+
+                \s*
+                v?
+                (?:[0-9]+!)?          # epoch
+                [0-9]+(?:\.[0-9]+)+   # release  (We have a + instead of a *)
+                (?:                   # pre release
+                    [-_\.]?
+                    (alpha|beta|preview|pre|a|b|c|rc)
+                    [-_\.]?
+                    [0-9]*
+                )?
+                (?:                                   # post release
+                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
+                )?
+                (?:[-_\.]?dev[-_\.]?[0-9]*)?          # dev release
+            )
+            |
+            (?:
+                # All other operators only allow a sub set of what the
+                # (non)equality operators do. Specifically they do not allow
+                # local versions to be specified nor do they allow the prefix
+                # matching wild cards.
+                (?<!==|!=|~=)         # We have special cases for these
+                                      # operators so we want to make sure they
+                                      # don't match here.
+
+                \s*
+                v?
+                (?:[0-9]+!)?          # epoch
+                [0-9]+(?:\.[0-9]+)*   # release
+                (?:                   # pre release
+                    [-_\.]?
+                    (alpha|beta|preview|pre|a|b|c|rc)
+                    [-_\.]?
+                    [0-9]*
+                )?
+                (?:                                   # post release
+                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
+                )?
+                (?:[-_\.]?dev[-_\.]?[0-9]*)?          # dev release
+            )
+        )
+        """
+
+    _regex = re.compile(
+        r"^\s*" + _operator_regex_str + _version_regex_str + r"\s*$",
+        re.VERBOSE | re.IGNORECASE,
+    )
+
+    _operators = {
+        "~=": "compatible",
+        "==": "equal",
+        "!=": "not_equal",
+        "<=": "less_than_equal",
+        ">=": "greater_than_equal",
+        "<": "less_than",
+        ">": "greater_than",
+        "===": "arbitrary",
+    }
+
+    def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None:
+        """Initialize a Specifier instance.
+
+        :param spec:
+            The string representation of a specifier which will be parsed and
+            normalized before use.
+        :param prereleases:
+            This tells the specifier if it should accept prerelease versions if
+            applicable or not. The default of ``None`` will autodetect it from the
+            given specifiers.
+        :raises InvalidSpecifier:
+            If the given specifier is invalid (i.e. bad syntax).
+        """
+        match = self._regex.search(spec)
+        if not match:
+            raise InvalidSpecifier(f"Invalid specifier: '{spec}'")
+
+        self._spec: Tuple[str, str] = (
+            match.group("operator").strip(),
+            match.group("version").strip(),
+        )
+
+        # Store whether or not this Specifier should accept prereleases
+        self._prereleases = prereleases
+
+    # https://github.com/python/mypy/pull/13475#pullrequestreview-1079784515
+    @property  # type: ignore[override]
+    def prereleases(self) -> bool:
+        # If there is an explicit prereleases set for this, then we'll just
+        # blindly use that.
+        if self._prereleases is not None:
+            return self._prereleases
+
+        # Look at all of our specifiers and determine if they are inclusive
+        # operators, and, if they are, whether they include an explicit
+        # prerelease.
+        operator, version = self._spec
+        if operator in ["==", ">=", "<=", "~=", "==="]:
+            # The == specifier can include a trailing .*; if it does, we want
+            # to remove it before parsing.
+ if operator == "==" and version.endswith(".*"): + version = version[:-2] + + # Parse the version, and if it is a pre-release than this + # specifier allows pre-releases. + if Version(version).is_prerelease: + return True + + return False + + @prereleases.setter + def prereleases(self, value: bool) -> None: + self._prereleases = value + + @property + def operator(self) -> str: + """The operator of this specifier. + + >>> Specifier("==1.2.3").operator + '==' + """ + return self._spec[0] + + @property + def version(self) -> str: + """The version of this specifier. + + >>> Specifier("==1.2.3").version + '1.2.3' + """ + return self._spec[1] + + def __repr__(self) -> str: + """A representation of the Specifier that shows all internal state. + + >>> Specifier('>=1.0.0') + =1.0.0')> + >>> Specifier('>=1.0.0', prereleases=False) + =1.0.0', prereleases=False)> + >>> Specifier('>=1.0.0', prereleases=True) + =1.0.0', prereleases=True)> + """ + pre = ( + f", prereleases={self.prereleases!r}" + if self._prereleases is not None + else "" + ) + + return f"<{self.__class__.__name__}({str(self)!r}{pre})>" + + def __str__(self) -> str: + """A string representation of the Specifier that can be round-tripped. + + >>> str(Specifier('>=1.0.0')) + '>=1.0.0' + >>> str(Specifier('>=1.0.0', prereleases=False)) + '>=1.0.0' + """ + return "{}{}".format(*self._spec) + + @property + def _canonical_spec(self) -> Tuple[str, str]: + canonical_version = canonicalize_version( + self._spec[1], + strip_trailing_zero=(self._spec[0] != "~="), + ) + return self._spec[0], canonical_version + + def __hash__(self) -> int: + return hash(self._canonical_spec) + + def __eq__(self, other: object) -> bool: + """Whether or not the two Specifier-like objects are equal. + + :param other: The other object to check against. + + The value of :attr:`prereleases` is ignored. + + >>> Specifier("==1.2.3") == Specifier("== 1.2.3.0") + True + >>> (Specifier("==1.2.3", prereleases=False) == + ... Specifier("==1.2.3", prereleases=True)) + True + >>> Specifier("==1.2.3") == "==1.2.3" + True + >>> Specifier("==1.2.3") == Specifier("==1.2.4") + False + >>> Specifier("==1.2.3") == Specifier("~=1.2.3") + False + """ + if isinstance(other, str): + try: + other = self.__class__(str(other)) + except InvalidSpecifier: + return NotImplemented + elif not isinstance(other, self.__class__): + return NotImplemented + + return self._canonical_spec == other._canonical_spec + + def _get_operator(self, op: str) -> CallableOperator: + operator_callable: CallableOperator = getattr( + self, f"_compare_{self._operators[op]}" + ) + return operator_callable + + def _compare_compatible(self, prospective: Version, spec: str) -> bool: + + # Compatible releases have an equivalent combination of >= and ==. That + # is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to + # implement this in terms of the other specifiers instead of + # implementing it ourselves. The only thing we need to do is construct + # the other specifiers. + + # We want everything but the last item in the version, but we want to + # ignore suffix segments. 
+ prefix = ".".join( + list(itertools.takewhile(_is_not_suffix, _version_split(spec)))[:-1] + ) + + # Add the prefix notation to the end of our string + prefix += ".*" + + return self._get_operator(">=")(prospective, spec) and self._get_operator("==")( + prospective, prefix + ) + + def _compare_equal(self, prospective: Version, spec: str) -> bool: + + # We need special logic to handle prefix matching + if spec.endswith(".*"): + # In the case of prefix matching we want to ignore local segment. + normalized_prospective = canonicalize_version( + prospective.public, strip_trailing_zero=False + ) + # Get the normalized version string ignoring the trailing .* + normalized_spec = canonicalize_version(spec[:-2], strip_trailing_zero=False) + # Split the spec out by dots, and pretend that there is an implicit + # dot in between a release segment and a pre-release segment. + split_spec = _version_split(normalized_spec) + + # Split the prospective version out by dots, and pretend that there + # is an implicit dot in between a release segment and a pre-release + # segment. + split_prospective = _version_split(normalized_prospective) + + # 0-pad the prospective version before shortening it to get the correct + # shortened version. + padded_prospective, _ = _pad_version(split_prospective, split_spec) + + # Shorten the prospective version to be the same length as the spec + # so that we can determine if the specifier is a prefix of the + # prospective version or not. + shortened_prospective = padded_prospective[: len(split_spec)] + + return shortened_prospective == split_spec + else: + # Convert our spec string into a Version + spec_version = Version(spec) + + # If the specifier does not have a local segment, then we want to + # act as if the prospective version also does not have a local + # segment. + if not spec_version.local: + prospective = Version(prospective.public) + + return prospective == spec_version + + def _compare_not_equal(self, prospective: Version, spec: str) -> bool: + return not self._compare_equal(prospective, spec) + + def _compare_less_than_equal(self, prospective: Version, spec: str) -> bool: + + # NB: Local version identifiers are NOT permitted in the version + # specifier, so local version labels can be universally removed from + # the prospective version. + return Version(prospective.public) <= Version(spec) + + def _compare_greater_than_equal(self, prospective: Version, spec: str) -> bool: + + # NB: Local version identifiers are NOT permitted in the version + # specifier, so local version labels can be universally removed from + # the prospective version. + return Version(prospective.public) >= Version(spec) + + def _compare_less_than(self, prospective: Version, spec_str: str) -> bool: + + # Convert our spec to a Version instance, since we'll want to work with + # it as a version. + spec = Version(spec_str) + + # Check to see if the prospective version is less than the spec + # version. If it's not we can short circuit and just return False now + # instead of doing extra unneeded work. + if not prospective < spec: + return False + + # This special case is here so that, unless the specifier itself + # includes is a pre-release version, that we do not accept pre-release + # versions for the version mentioned in the specifier (e.g. <3.1 should + # not match 3.1.dev0, but should match 3.0.dev0). 
+ if not spec.is_prerelease and prospective.is_prerelease: + if Version(prospective.base_version) == Version(spec.base_version): + return False + + # If we've gotten to here, it means that prospective version is both + # less than the spec version *and* it's not a pre-release of the same + # version in the spec. + return True + + def _compare_greater_than(self, prospective: Version, spec_str: str) -> bool: + + # Convert our spec to a Version instance, since we'll want to work with + # it as a version. + spec = Version(spec_str) + + # Check to see if the prospective version is greater than the spec + # version. If it's not we can short circuit and just return False now + # instead of doing extra unneeded work. + if not prospective > spec: + return False + + # This special case is here so that, unless the specifier itself + # includes is a post-release version, that we do not accept + # post-release versions for the version mentioned in the specifier + # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0). + if not spec.is_postrelease and prospective.is_postrelease: + if Version(prospective.base_version) == Version(spec.base_version): + return False + + # Ensure that we do not allow a local version of the version mentioned + # in the specifier, which is technically greater than, to match. + if prospective.local is not None: + if Version(prospective.base_version) == Version(spec.base_version): + return False + + # If we've gotten to here, it means that prospective version is both + # greater than the spec version *and* it's not a pre-release of the + # same version in the spec. + return True + + def _compare_arbitrary(self, prospective: Version, spec: str) -> bool: + return str(prospective).lower() == str(spec).lower() + + def __contains__(self, item: Union[str, Version]) -> bool: + """Return whether or not the item is contained in this specifier. + + :param item: The item to check for. + + This is used for the ``in`` operator and behaves the same as + :meth:`contains` with no ``prereleases`` argument passed. + + >>> "1.2.3" in Specifier(">=1.2.3") + True + >>> Version("1.2.3") in Specifier(">=1.2.3") + True + >>> "1.0.0" in Specifier(">=1.2.3") + False + >>> "1.3.0a1" in Specifier(">=1.2.3") + False + >>> "1.3.0a1" in Specifier(">=1.2.3", prereleases=True) + True + """ + return self.contains(item) + + def contains( + self, item: UnparsedVersion, prereleases: Optional[bool] = None + ) -> bool: + """Return whether or not the item is contained in this specifier. + + :param item: + The item to check for, which can be a version string or a + :class:`Version` instance. + :param prereleases: + Whether or not to match prereleases with this Specifier. If set to + ``None`` (the default), it uses :attr:`prereleases` to determine + whether or not prereleases are allowed. + + >>> Specifier(">=1.2.3").contains("1.2.3") + True + >>> Specifier(">=1.2.3").contains(Version("1.2.3")) + True + >>> Specifier(">=1.2.3").contains("1.0.0") + False + >>> Specifier(">=1.2.3").contains("1.3.0a1") + False + >>> Specifier(">=1.2.3", prereleases=True).contains("1.3.0a1") + True + >>> Specifier(">=1.2.3").contains("1.3.0a1", prereleases=True) + True + """ + + # Determine if prereleases are to be allowed or not. 
+ if prereleases is None: + prereleases = self.prereleases + + # Normalize item to a Version, this allows us to have a shortcut for + # "2.0" in Specifier(">=2") + normalized_item = _coerce_version(item) + + # Determine if we should be supporting prereleases in this specifier + # or not, if we do not support prereleases than we can short circuit + # logic if this version is a prereleases. + if normalized_item.is_prerelease and not prereleases: + return False + + # Actually do the comparison to determine if this item is contained + # within this Specifier or not. + operator_callable: CallableOperator = self._get_operator(self.operator) + return operator_callable(normalized_item, self.version) + + def filter( + self, iterable: Iterable[UnparsedVersionVar], prereleases: Optional[bool] = None + ) -> Iterator[UnparsedVersionVar]: + """Filter items in the given iterable, that match the specifier. + + :param iterable: + An iterable that can contain version strings and :class:`Version` instances. + The items in the iterable will be filtered according to the specifier. + :param prereleases: + Whether or not to allow prereleases in the returned iterator. If set to + ``None`` (the default), it will be intelligently decide whether to allow + prereleases or not (based on the :attr:`prereleases` attribute, and + whether the only versions matching are prereleases). + + This method is smarter than just ``filter(Specifier().contains, [...])`` + because it implements the rule from :pep:`440` that a prerelease item + SHOULD be accepted if no other versions match the given specifier. + + >>> list(Specifier(">=1.2.3").filter(["1.2", "1.3", "1.5a1"])) + ['1.3'] + >>> list(Specifier(">=1.2.3").filter(["1.2", "1.2.3", "1.3", Version("1.4")])) + ['1.2.3', '1.3', ] + >>> list(Specifier(">=1.2.3").filter(["1.2", "1.5a1"])) + ['1.5a1'] + >>> list(Specifier(">=1.2.3").filter(["1.3", "1.5a1"], prereleases=True)) + ['1.3', '1.5a1'] + >>> list(Specifier(">=1.2.3", prereleases=True).filter(["1.3", "1.5a1"])) + ['1.3', '1.5a1'] + """ + + yielded = False + found_prereleases = [] + + kw = {"prereleases": prereleases if prereleases is not None else True} + + # Attempt to iterate over all the values in the iterable and if any of + # them match, yield them. + for version in iterable: + parsed_version = _coerce_version(version) + + if self.contains(parsed_version, **kw): + # If our version is a prerelease, and we were not set to allow + # prereleases, then we'll store it for later in case nothing + # else matches this specifier. + if parsed_version.is_prerelease and not ( + prereleases or self.prereleases + ): + found_prereleases.append(version) + # Either this is not a prerelease, or we should have been + # accepting prereleases from the beginning. + else: + yielded = True + yield version + + # Now that we've iterated over everything, determine if we've yielded + # any values, and if we have not and we have any prereleases stored up + # then we will go ahead and yield the prereleases. 
+ if not yielded and found_prereleases: + for version in found_prereleases: + yield version + + +_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$") + + +def _version_split(version: str) -> List[str]: + result: List[str] = [] + for item in version.split("."): + match = _prefix_regex.search(item) + if match: + result.extend(match.groups()) + else: + result.append(item) + return result + + +def _is_not_suffix(segment: str) -> bool: + return not any( + segment.startswith(prefix) for prefix in ("dev", "a", "b", "rc", "post") + ) + + +def _pad_version(left: List[str], right: List[str]) -> Tuple[List[str], List[str]]: + left_split, right_split = [], [] + + # Get the release segment of our versions + left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left))) + right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right))) + + # Get the rest of our versions + left_split.append(left[len(left_split[0]) :]) + right_split.append(right[len(right_split[0]) :]) + + # Insert our padding + left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0]))) + right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0]))) + + return (list(itertools.chain(*left_split)), list(itertools.chain(*right_split))) + + +class SpecifierSet(BaseSpecifier): + """This class abstracts handling of a set of version specifiers. + + It can be passed a single specifier (``>=3.0``), a comma-separated list of + specifiers (``>=3.0,!=3.1``), or no specifier at all. + """ + + def __init__( + self, specifiers: str = "", prereleases: Optional[bool] = None + ) -> None: + """Initialize a SpecifierSet instance. + + :param specifiers: + The string representation of a specifier or a comma-separated list of + specifiers which will be parsed and normalized before use. + :param prereleases: + This tells the SpecifierSet if it should accept prerelease versions if + applicable or not. The default of ``None`` will autodetect it from the + given specifiers. + + :raises InvalidSpecifier: + If the given ``specifiers`` are not parseable than this exception will be + raised. + """ + + # Split on `,` to break each individual specifier into it's own item, and + # strip each item to remove leading/trailing whitespace. + split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()] + + # Parsed each individual specifier, attempting first to make it a + # Specifier. + parsed: Set[Specifier] = set() + for specifier in split_specifiers: + parsed.add(Specifier(specifier)) + + # Turn our parsed specifiers into a frozen set and save them for later. + self._specs = frozenset(parsed) + + # Store our prereleases value so we can use it later to determine if + # we accept prereleases or not. + self._prereleases = prereleases + + @property + def prereleases(self) -> Optional[bool]: + # If we have been given an explicit prerelease modifier, then we'll + # pass that through here. + if self._prereleases is not None: + return self._prereleases + + # If we don't have any specifiers, and we don't have a forced value, + # then we'll just return None since we don't know if this should have + # pre-releases or not. + if not self._specs: + return None + + # Otherwise we'll see if any of the given specifiers accept + # prereleases, if any of them do we'll return True, otherwise False. 
+ return any(s.prereleases for s in self._specs) + + @prereleases.setter + def prereleases(self, value: bool) -> None: + self._prereleases = value + + def __repr__(self) -> str: + """A representation of the specifier set that shows all internal state. + + Note that the ordering of the individual specifiers within the set may not + match the input string. + + >>> SpecifierSet('>=1.0.0,!=2.0.0') + =1.0.0')> + >>> SpecifierSet('>=1.0.0,!=2.0.0', prereleases=False) + =1.0.0', prereleases=False)> + >>> SpecifierSet('>=1.0.0,!=2.0.0', prereleases=True) + =1.0.0', prereleases=True)> + """ + pre = ( + f", prereleases={self.prereleases!r}" + if self._prereleases is not None + else "" + ) + + return f"" + + def __str__(self) -> str: + """A string representation of the specifier set that can be round-tripped. + + Note that the ordering of the individual specifiers within the set may not + match the input string. + + >>> str(SpecifierSet(">=1.0.0,!=1.0.1")) + '!=1.0.1,>=1.0.0' + >>> str(SpecifierSet(">=1.0.0,!=1.0.1", prereleases=False)) + '!=1.0.1,>=1.0.0' + """ + return ",".join(sorted(str(s) for s in self._specs)) + + def __hash__(self) -> int: + return hash(self._specs) + + def __and__(self, other: Union["SpecifierSet", str]) -> "SpecifierSet": + """Return a SpecifierSet which is a combination of the two sets. + + :param other: The other object to combine with. + + >>> SpecifierSet(">=1.0.0,!=1.0.1") & '<=2.0.0,!=2.0.1' + =1.0.0')> + >>> SpecifierSet(">=1.0.0,!=1.0.1") & SpecifierSet('<=2.0.0,!=2.0.1') + =1.0.0')> + """ + if isinstance(other, str): + other = SpecifierSet(other) + elif not isinstance(other, SpecifierSet): + return NotImplemented + + specifier = SpecifierSet() + specifier._specs = frozenset(self._specs | other._specs) + + if self._prereleases is None and other._prereleases is not None: + specifier._prereleases = other._prereleases + elif self._prereleases is not None and other._prereleases is None: + specifier._prereleases = self._prereleases + elif self._prereleases == other._prereleases: + specifier._prereleases = self._prereleases + else: + raise ValueError( + "Cannot combine SpecifierSets with True and False prerelease " + "overrides." + ) + + return specifier + + def __eq__(self, other: object) -> bool: + """Whether or not the two SpecifierSet-like objects are equal. + + :param other: The other object to check against. + + The value of :attr:`prereleases` is ignored. + + >>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0,!=1.0.1") + True + >>> (SpecifierSet(">=1.0.0,!=1.0.1", prereleases=False) == + ... SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True)) + True + >>> SpecifierSet(">=1.0.0,!=1.0.1") == ">=1.0.0,!=1.0.1" + True + >>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0") + False + >>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0,!=1.0.2") + False + """ + if isinstance(other, (str, Specifier)): + other = SpecifierSet(str(other)) + elif not isinstance(other, SpecifierSet): + return NotImplemented + + return self._specs == other._specs + + def __len__(self) -> int: + """Returns the number of specifiers in this specifier set.""" + return len(self._specs) + + def __iter__(self) -> Iterator[Specifier]: + """ + Returns an iterator over all the underlying :class:`Specifier` instances + in this specifier set. + + >>> sorted(SpecifierSet(">=1.0.0,!=1.0.1"), key=str) + [, =1.0.0')>] + """ + return iter(self._specs) + + def __contains__(self, item: UnparsedVersion) -> bool: + """Return whether or not the item is contained in this specifier. 
+ + :param item: The item to check for. + + This is used for the ``in`` operator and behaves the same as + :meth:`contains` with no ``prereleases`` argument passed. + + >>> "1.2.3" in SpecifierSet(">=1.0.0,!=1.0.1") + True + >>> Version("1.2.3") in SpecifierSet(">=1.0.0,!=1.0.1") + True + >>> "1.0.1" in SpecifierSet(">=1.0.0,!=1.0.1") + False + >>> "1.3.0a1" in SpecifierSet(">=1.0.0,!=1.0.1") + False + >>> "1.3.0a1" in SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True) + True + """ + return self.contains(item) + + def contains( + self, + item: UnparsedVersion, + prereleases: Optional[bool] = None, + installed: Optional[bool] = None, + ) -> bool: + """Return whether or not the item is contained in this SpecifierSet. + + :param item: + The item to check for, which can be a version string or a + :class:`Version` instance. + :param prereleases: + Whether or not to match prereleases with this SpecifierSet. If set to + ``None`` (the default), it uses :attr:`prereleases` to determine + whether or not prereleases are allowed. + + >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.2.3") + True + >>> SpecifierSet(">=1.0.0,!=1.0.1").contains(Version("1.2.3")) + True + >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.0.1") + False + >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.3.0a1") + False + >>> SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True).contains("1.3.0a1") + True + >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.3.0a1", prereleases=True) + True + """ + # Ensure that our item is a Version instance. + if not isinstance(item, Version): + item = Version(item) + + # Determine if we're forcing a prerelease or not, if we're not forcing + # one for this particular filter call, then we'll use whatever the + # SpecifierSet thinks for whether or not we should support prereleases. + if prereleases is None: + prereleases = self.prereleases + + # We can determine if we're going to allow pre-releases by looking to + # see if any of the underlying items supports them. If none of them do + # and this item is a pre-release then we do not allow it and we can + # short circuit that here. + # Note: This means that 1.0.dev1 would not be contained in something + # like >=1.0.devabc however it would be in >=1.0.debabc,>0.0.dev0 + if not prereleases and item.is_prerelease: + return False + + if installed and item.is_prerelease: + item = Version(item.base_version) + + # We simply dispatch to the underlying specs here to make sure that the + # given version is contained within all of them. + # Note: This use of all() here means that an empty set of specifiers + # will always return True, this is an explicit design decision. + return all(s.contains(item, prereleases=prereleases) for s in self._specs) + + def filter( + self, iterable: Iterable[UnparsedVersionVar], prereleases: Optional[bool] = None + ) -> Iterator[UnparsedVersionVar]: + """Filter items in the given iterable, that match the specifiers in this set. + + :param iterable: + An iterable that can contain version strings and :class:`Version` instances. + The items in the iterable will be filtered according to the specifier. + :param prereleases: + Whether or not to allow prereleases in the returned iterator. If set to + ``None`` (the default), it will be intelligently decide whether to allow + prereleases or not (based on the :attr:`prereleases` attribute, and + whether the only versions matching are prereleases). 
+ + This method is smarter than just ``filter(SpecifierSet(...).contains, [...])`` + because it implements the rule from :pep:`440` that a prerelease item + SHOULD be accepted if no other versions match the given specifier. + + >>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.3", "1.5a1"])) + ['1.3'] + >>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.3", Version("1.4")])) + ['1.3', ] + >>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.5a1"])) + [] + >>> list(SpecifierSet(">=1.2.3").filter(["1.3", "1.5a1"], prereleases=True)) + ['1.3', '1.5a1'] + >>> list(SpecifierSet(">=1.2.3", prereleases=True).filter(["1.3", "1.5a1"])) + ['1.3', '1.5a1'] + + An "empty" SpecifierSet will filter items based on the presence of prerelease + versions in the set. + + >>> list(SpecifierSet("").filter(["1.3", "1.5a1"])) + ['1.3'] + >>> list(SpecifierSet("").filter(["1.5a1"])) + ['1.5a1'] + >>> list(SpecifierSet("", prereleases=True).filter(["1.3", "1.5a1"])) + ['1.3', '1.5a1'] + >>> list(SpecifierSet("").filter(["1.3", "1.5a1"], prereleases=True)) + ['1.3', '1.5a1'] + """ + # Determine if we're forcing a prerelease or not, if we're not forcing + # one for this particular filter call, then we'll use whatever the + # SpecifierSet thinks for whether or not we should support prereleases. + if prereleases is None: + prereleases = self.prereleases + + # If we have any specifiers, then we want to wrap our iterable in the + # filter method for each one, this will act as a logical AND amongst + # each specifier. + if self._specs: + for spec in self._specs: + iterable = spec.filter(iterable, prereleases=bool(prereleases)) + return iter(iterable) + # If we do not have any specifiers, then we need to have a rough filter + # which will filter out any pre-releases, unless there are no final + # releases. + else: + filtered: List[UnparsedVersionVar] = [] + found_prereleases: List[UnparsedVersionVar] = [] + + for item in iterable: + parsed_version = _coerce_version(item) + + # Store any item which is a pre-release for later unless we've + # already found a final version or we are accepting prereleases + if parsed_version.is_prerelease and not prereleases: + if not filtered: + found_prereleases.append(item) + else: + filtered.append(item) + + # If we've found no items except for pre-releases, then we'll go + # ahead and use the pre-releases + if not filtered and found_prereleases and prereleases is None: + return iter(found_prereleases) + + return iter(filtered) diff --git a/venv/lib/python3.10/site-packages/wheel/vendored/packaging/tags.py b/venv/lib/python3.10/site-packages/packaging/tags.py similarity index 79% rename from venv/lib/python3.10/site-packages/wheel/vendored/packaging/tags.py rename to venv/lib/python3.10/site-packages/packaging/tags.py index 4e003a9..76d2434 100644 --- a/venv/lib/python3.10/site-packages/wheel/vendored/packaging/tags.py +++ b/venv/lib/python3.10/site-packages/packaging/tags.py @@ -2,14 +2,24 @@ # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. -from __future__ import annotations - import logging import platform +import subprocess import sys import sysconfig from importlib.machinery import EXTENSION_SUFFIXES -from typing import Iterable, Iterator, Sequence, Tuple, cast +from typing import ( + Dict, + FrozenSet, + Iterable, + Iterator, + List, + Optional, + Sequence, + Tuple, + Union, + cast, +) from . 
import _manylinux, _musllinux @@ -18,7 +28,7 @@ PythonVersion = Sequence[int] MacVersion = Tuple[int, int] -INTERPRETER_SHORT_NAMES: dict[str, str] = { +INTERPRETER_SHORT_NAMES: Dict[str, str] = { "python": "py", # Generic. "cpython": "cp", "pypy": "pp", @@ -27,7 +37,7 @@ } -_32_BIT_INTERPRETER = sys.maxsize <= 2 ** 32 +_32_BIT_INTERPRETER = sys.maxsize <= 2**32 class Tag: @@ -84,7 +94,7 @@ def __repr__(self) -> str: return f"<{self} @ {id(self)}>" -def parse_tag(tag: str) -> frozenset[Tag]: +def parse_tag(tag: str) -> FrozenSet[Tag]: """ Parses the provided tag (e.g. `py3-none-any`) into a frozenset of Tag instances. @@ -100,8 +110,8 @@ def parse_tag(tag: str) -> frozenset[Tag]: return frozenset(tags) -def _get_config_var(name: str, warn: bool = False) -> int | str | None: - value = sysconfig.get_config_var(name) +def _get_config_var(name: str, warn: bool = False) -> Union[int, str, None]: + value: Union[int, str, None] = sysconfig.get_config_var(name) if value is None and warn: logger.debug( "Config variable '%s' is unset, Python ABI tag may be incorrect", name @@ -110,7 +120,7 @@ def _get_config_var(name: str, warn: bool = False) -> int | str | None: def _normalize_string(string: str) -> str: - return string.replace(".", "_").replace("-", "_") + return string.replace(".", "_").replace("-", "_").replace(" ", "_") def _abi3_applies(python_version: PythonVersion) -> bool: @@ -122,7 +132,7 @@ def _abi3_applies(python_version: PythonVersion) -> bool: return len(python_version) > 1 and tuple(python_version) >= (3, 2) -def _cpython_abis(py_version: PythonVersion, warn: bool = False) -> list[str]: +def _cpython_abis(py_version: PythonVersion, warn: bool = False) -> List[str]: py_version = tuple(py_version) # To allow for version comparison. abis = [] version = _version_nodot(py_version[:2]) @@ -159,9 +169,9 @@ def _cpython_abis(py_version: PythonVersion, warn: bool = False) -> list[str]: def cpython_tags( - python_version: PythonVersion | None = None, - abis: Iterable[str] | None = None, - platforms: Iterable[str] | None = None, + python_version: Optional[PythonVersion] = None, + abis: Optional[Iterable[str]] = None, + platforms: Optional[Iterable[str]] = None, *, warn: bool = False, ) -> Iterator[Tag]: @@ -215,16 +225,51 @@ def cpython_tags( yield Tag(interpreter, "abi3", platform_) -def _generic_abi() -> Iterator[str]: - abi = sysconfig.get_config_var("SOABI") - if abi: - yield _normalize_string(abi) +def _generic_abi() -> List[str]: + """ + Return the ABI tag based on EXT_SUFFIX. + """ + # The following are examples of `EXT_SUFFIX`. + # We want to keep the parts which are related to the ABI and remove the + # parts which are related to the platform: + # - linux: '.cpython-310-x86_64-linux-gnu.so' => cp310 + # - mac: '.cpython-310-darwin.so' => cp310 + # - win: '.cp310-win_amd64.pyd' => cp310 + # - win: '.pyd' => cp37 (uses _cpython_abis()) + # - pypy: '.pypy38-pp73-x86_64-linux-gnu.so' => pypy38_pp73 + # - graalpy: '.graalpy-38-native-x86_64-darwin.dylib' + # => graalpy_38_native + + ext_suffix = _get_config_var("EXT_SUFFIX", warn=True) + if not isinstance(ext_suffix, str) or ext_suffix[0] != ".": + raise SystemError("invalid sysconfig.get_config_var('EXT_SUFFIX')") + parts = ext_suffix.split(".") + if len(parts) < 3: + # CPython3.7 and earlier uses ".pyd" on Windows. 
+ return _cpython_abis(sys.version_info[:2]) + soabi = parts[1] + if soabi.startswith("cpython"): + # non-windows + abi = "cp" + soabi.split("-")[1] + elif soabi.startswith("cp"): + # windows + abi = soabi.split("-")[0] + elif soabi.startswith("pypy"): + abi = "-".join(soabi.split("-")[:2]) + elif soabi.startswith("graalpy"): + abi = "-".join(soabi.split("-")[:3]) + elif soabi: + # pyston, ironpython, others? + abi = soabi + else: + return [] + return [_normalize_string(abi)] def generic_tags( - interpreter: str | None = None, - abis: Iterable[str] | None = None, - platforms: Iterable[str] | None = None, + interpreter: Optional[str] = None, + abis: Optional[Iterable[str]] = None, + platforms: Optional[Iterable[str]] = None, *, warn: bool = False, ) -> Iterator[Tag]: @@ -242,8 +287,9 @@ def generic_tags( interpreter = "".join([interp_name, interp_version]) if abis is None: abis = _generic_abi() + else: + abis = list(abis) platforms = list(platforms or platform_tags()) - abis = list(abis) if "none" not in abis: abis.append("none") for abi in abis: @@ -267,9 +313,9 @@ def _py_interpreter_range(py_version: PythonVersion) -> Iterator[str]: def compatible_tags( - python_version: PythonVersion | None = None, - interpreter: str | None = None, - platforms: Iterable[str] | None = None, + python_version: Optional[PythonVersion] = None, + interpreter: Optional[str] = None, + platforms: Optional[Iterable[str]] = None, ) -> Iterator[Tag]: """ Yields the sequence of tags that are compatible with a specific version of Python. @@ -301,7 +347,7 @@ def _mac_arch(arch: str, is_32bit: bool = _32_BIT_INTERPRETER) -> str: return "i386" -def _mac_binary_formats(version: MacVersion, cpu_arch: str) -> list[str]: +def _mac_binary_formats(version: MacVersion, cpu_arch: str) -> List[str]: formats = [cpu_arch] if cpu_arch == "x86_64": if version < (10, 4): @@ -334,7 +380,7 @@ def _mac_binary_formats(version: MacVersion, cpu_arch: str) -> list[str]: def mac_platforms( - version: MacVersion | None = None, arch: str | None = None + version: Optional[MacVersion] = None, arch: Optional[str] = None ) -> Iterator[str]: """ Yields the platform tags for a macOS system. @@ -347,6 +393,22 @@ def mac_platforms( version_str, _, cpu_arch = platform.mac_ver() if version is None: version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2]))) + if version == (10, 16): + # When built against an older macOS SDK, Python will report macOS 10.16 + # instead of the real version. + version_str = subprocess.run( + [ + sys.executable, + "-sS", + "-c", + "import platform; print(platform.mac_ver()[0])", + ], + check=True, + env={"SYSTEM_VERSION_COMPAT": "0"}, + stdout=subprocess.PIPE, + universal_newlines=True, + ).stdout + version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2]))) else: version = version if arch is None: @@ -437,6 +499,9 @@ def platform_tags() -> Iterator[str]: def interpreter_name() -> str: """ Returns the name of the running interpreter. + + Some implementations have a reserved, two-letter abbreviation which will + be returned when appropriate. 
""" name = sys.implementation.name return INTERPRETER_SHORT_NAMES.get(name) or name @@ -473,6 +538,9 @@ def sys_tags(*, warn: bool = False) -> Iterator[Tag]: yield from generic_tags() if interp_name == "pp": - yield from compatible_tags(interpreter="pp3") + interp = "pp3" + elif interp_name == "cp": + interp = "cp" + interpreter_version(warn=warn) else: - yield from compatible_tags() + interp = None + yield from compatible_tags(interpreter=interp) diff --git a/venv/lib/python3.10/site-packages/packaging/utils.py b/venv/lib/python3.10/site-packages/packaging/utils.py new file mode 100644 index 0000000..33c613b --- /dev/null +++ b/venv/lib/python3.10/site-packages/packaging/utils.py @@ -0,0 +1,141 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +import re +from typing import FrozenSet, NewType, Tuple, Union, cast + +from .tags import Tag, parse_tag +from .version import InvalidVersion, Version + +BuildTag = Union[Tuple[()], Tuple[int, str]] +NormalizedName = NewType("NormalizedName", str) + + +class InvalidWheelFilename(ValueError): + """ + An invalid wheel filename was found, users should refer to PEP 427. + """ + + +class InvalidSdistFilename(ValueError): + """ + An invalid sdist filename was found, users should refer to the packaging user guide. + """ + + +_canonicalize_regex = re.compile(r"[-_.]+") +# PEP 427: The build number must start with a digit. +_build_tag_regex = re.compile(r"(\d+)(.*)") + + +def canonicalize_name(name: str) -> NormalizedName: + # This is taken from PEP 503. + value = _canonicalize_regex.sub("-", name).lower() + return cast(NormalizedName, value) + + +def canonicalize_version( + version: Union[Version, str], *, strip_trailing_zero: bool = True +) -> str: + """ + This is very similar to Version.__str__, but has one subtle difference + with the way it handles the release segment. 
+ """ + if isinstance(version, str): + try: + parsed = Version(version) + except InvalidVersion: + # Legacy versions cannot be normalized + return version + else: + parsed = version + + parts = [] + + # Epoch + if parsed.epoch != 0: + parts.append(f"{parsed.epoch}!") + + # Release segment + release_segment = ".".join(str(x) for x in parsed.release) + if strip_trailing_zero: + # NB: This strips trailing '.0's to normalize + release_segment = re.sub(r"(\.0)+$", "", release_segment) + parts.append(release_segment) + + # Pre-release + if parsed.pre is not None: + parts.append("".join(str(x) for x in parsed.pre)) + + # Post-release + if parsed.post is not None: + parts.append(f".post{parsed.post}") + + # Development release + if parsed.dev is not None: + parts.append(f".dev{parsed.dev}") + + # Local version segment + if parsed.local is not None: + parts.append(f"+{parsed.local}") + + return "".join(parts) + + +def parse_wheel_filename( + filename: str, +) -> Tuple[NormalizedName, Version, BuildTag, FrozenSet[Tag]]: + if not filename.endswith(".whl"): + raise InvalidWheelFilename( + f"Invalid wheel filename (extension must be '.whl'): {filename}" + ) + + filename = filename[:-4] + dashes = filename.count("-") + if dashes not in (4, 5): + raise InvalidWheelFilename( + f"Invalid wheel filename (wrong number of parts): {filename}" + ) + + parts = filename.split("-", dashes - 2) + name_part = parts[0] + # See PEP 427 for the rules on escaping the project name + if "__" in name_part or re.match(r"^[\w\d._]*$", name_part, re.UNICODE) is None: + raise InvalidWheelFilename(f"Invalid project name: {filename}") + name = canonicalize_name(name_part) + version = Version(parts[1]) + if dashes == 5: + build_part = parts[2] + build_match = _build_tag_regex.match(build_part) + if build_match is None: + raise InvalidWheelFilename( + f"Invalid build number: {build_part} in '{filename}'" + ) + build = cast(BuildTag, (int(build_match.group(1)), build_match.group(2))) + else: + build = () + tags = parse_tag(parts[-1]) + return (name, version, build, tags) + + +def parse_sdist_filename(filename: str) -> Tuple[NormalizedName, Version]: + if filename.endswith(".tar.gz"): + file_stem = filename[: -len(".tar.gz")] + elif filename.endswith(".zip"): + file_stem = filename[: -len(".zip")] + else: + raise InvalidSdistFilename( + f"Invalid sdist filename (extension must be '.tar.gz' or '.zip'):" + f" {filename}" + ) + + # We are requiring a PEP 440 version, which cannot contain dashes, + # so we split on the last dash. + name_part, sep, version_part = file_stem.rpartition("-") + if not sep: + raise InvalidSdistFilename(f"Invalid sdist filename: {filename}") + + name = canonicalize_name(name_part) + version = Version(version_part) + return (name, version) diff --git a/venv/lib/python3.10/site-packages/packaging/version.py b/venv/lib/python3.10/site-packages/packaging/version.py new file mode 100644 index 0000000..b30e8cb --- /dev/null +++ b/venv/lib/python3.10/site-packages/packaging/version.py @@ -0,0 +1,564 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. +""" +.. 
testsetup:: + + from packaging.version import parse, Version +""" + +import collections +import itertools +import re +from typing import Any, Callable, Optional, SupportsInt, Tuple, Union + +from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType + +__all__ = ["VERSION_PATTERN", "parse", "Version", "InvalidVersion"] + +InfiniteTypes = Union[InfinityType, NegativeInfinityType] +PrePostDevType = Union[InfiniteTypes, Tuple[str, int]] +SubLocalType = Union[InfiniteTypes, int, str] +LocalType = Union[ + NegativeInfinityType, + Tuple[ + Union[ + SubLocalType, + Tuple[SubLocalType, str], + Tuple[NegativeInfinityType, SubLocalType], + ], + ..., + ], +] +CmpKey = Tuple[ + int, Tuple[int, ...], PrePostDevType, PrePostDevType, PrePostDevType, LocalType +] +VersionComparisonMethod = Callable[[CmpKey, CmpKey], bool] + +_Version = collections.namedtuple( + "_Version", ["epoch", "release", "dev", "pre", "post", "local"] +) + + +def parse(version: str) -> "Version": + """Parse the given version string. + + >>> parse('1.0.dev1') + + + :param version: The version string to parse. + :raises InvalidVersion: When the version string is not a valid version. + """ + return Version(version) + + +class InvalidVersion(ValueError): + """Raised when a version string is not a valid version. + + >>> Version("invalid") + Traceback (most recent call last): + ... + packaging.version.InvalidVersion: Invalid version: 'invalid' + """ + + +class _BaseVersion: + _key: Tuple[Any, ...] + + def __hash__(self) -> int: + return hash(self._key) + + # Please keep the duplicated `isinstance` check + # in the six comparisons hereunder + # unless you find a way to avoid adding overhead function calls. + def __lt__(self, other: "_BaseVersion") -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key < other._key + + def __le__(self, other: "_BaseVersion") -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key <= other._key + + def __eq__(self, other: object) -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key == other._key + + def __ge__(self, other: "_BaseVersion") -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key >= other._key + + def __gt__(self, other: "_BaseVersion") -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key > other._key + + def __ne__(self, other: object) -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key != other._key + + +# Deliberately not anchored to the start and end of the string, to make it +# easier for 3rd party code to reuse +_VERSION_PATTERN = r""" + v? + (?: + (?:(?P[0-9]+)!)? # epoch + (?P[0-9]+(?:\.[0-9]+)*) # release segment + (?P
<pre>                                          # pre-release
+            [-_\.]?
+            (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
+            [-_\.]?
+            (?P<pre_n>[0-9]+)?
+        )?
+        (?P<post>                                         # post release
+            (?:-(?P<post_n1>[0-9]+))
+            |
+            (?:
+                [-_\.]?
+                (?P<post_l>post|rev|r)
+                [-_\.]?
+                (?P<post_n2>[0-9]+)?
+            )
+        )?
+        (?P<dev>                                          # dev release
+            [-_\.]?
+            (?P<dev_l>dev)
+            [-_\.]?
+            (?P<dev_n>[0-9]+)?
+        )?
+    )
+    (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
+"""
+
+VERSION_PATTERN = _VERSION_PATTERN
+"""
+A string containing the regular expression used to match a valid version.
+
+The pattern is not anchored at either end, and is intended for embedding in larger
+expressions (for example, matching a version number as part of a file name). The
+regular expression should be compiled with the ``re.VERBOSE`` and ``re.IGNORECASE``
+flags set.
+
+:meta hide-value:
+"""
+
+
+class Version(_BaseVersion):
+    """This class abstracts handling of a project's versions.
+
+    A :class:`Version` instance is comparison aware and can be compared and
+    sorted using the standard Python interfaces.
+
+    >>> v1 = Version("1.0a5")
+    >>> v2 = Version("1.0")
+    >>> v1
+    <Version('1.0a5')>
+    >>> v2
+    <Version('1.0')>
+    >>> v1 < v2
+    True
+    >>> v1 == v2
+    False
+    >>> v1 > v2
+    False
+    >>> v1 >= v2
+    False
+    >>> v1 <= v2
+    True
+    """
+
+    _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
+    _key: CmpKey
+
+    def __init__(self, version: str) -> None:
+        """Initialize a Version object.
+
+        :param version:
+            The string representation of a version which will be parsed and normalized
+            before use.
+        :raises InvalidVersion:
+            If the ``version`` does not conform to PEP 440 in any way then this
+            exception will be raised.
+        """
+
+        # Validate the version and parse it into pieces
+        match = self._regex.search(version)
+        if not match:
+            raise InvalidVersion(f"Invalid version: '{version}'")
+
+        # Store the parsed out pieces of the version
+        self._version = _Version(
+            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
+            release=tuple(int(i) for i in match.group("release").split(".")),
+            pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
+            post=_parse_letter_version(
+                match.group("post_l"), match.group("post_n1") or match.group("post_n2")
+            ),
+            dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
+            local=_parse_local_version(match.group("local")),
+        )
+
+        # Generate a key which will be used for sorting
+        self._key = _cmpkey(
+            self._version.epoch,
+            self._version.release,
+            self._version.pre,
+            self._version.post,
+            self._version.dev,
+            self._version.local,
+        )
+
+    def __repr__(self) -> str:
+        """A representation of the Version that shows all internal state.
+
+        >>> Version('1.0.0')
+        <Version('1.0.0')>
+        """
+        return f"<Version('{self}')>"
+
+    def __str__(self) -> str:
+        """A string representation of the version that can be rounded-tripped.
+
+        >>> str(Version("1.0a5"))
+        '1.0a5'
+        """
+        parts = []
+
+        # Epoch
+        if self.epoch != 0:
+            parts.append(f"{self.epoch}!")
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self.release))
+
+        # Pre-release
+        if self.pre is not None:
+            parts.append("".join(str(x) for x in self.pre))
+
+        # Post-release
+        if self.post is not None:
+            parts.append(f".post{self.post}")
+
+        # Development release
+        if self.dev is not None:
+            parts.append(f".dev{self.dev}")
+
+        # Local version segment
+        if self.local is not None:
+            parts.append(f"+{self.local}")
+
+        return "".join(parts)
+
+    @property
+    def epoch(self) -> int:
+        """The epoch of the version.
+
+        >>> Version("2.0.0").epoch
+        0
+        >>> Version("1!2.0.0").epoch
+        1
+        """
+        _epoch: int = self._version.epoch
+        return _epoch
+
+    @property
+    def release(self) -> Tuple[int, ...]:
+        """The components of the "release" segment of the version.
+
+        >>> Version("1.2.3").release
+        (1, 2, 3)
+        >>> Version("2.0.0").release
+        (2, 0, 0)
+        >>> Version("1!2.0.0.post0").release
+        (2, 0, 0)
+
+        Includes trailing zeroes but not the epoch or any pre-release / development /
+        post-release suffixes.
+        """
+        _release: Tuple[int, ...] = self._version.release
+        return _release
+
+    @property
+    def pre(self) -> Optional[Tuple[str, int]]:
+        """The pre-release segment of the version.
+
+        >>> print(Version("1.2.3").pre)
+        None
+        >>> Version("1.2.3a1").pre
+        ('a', 1)
+        >>> Version("1.2.3b1").pre
+        ('b', 1)
+        >>> Version("1.2.3rc1").pre
+        ('rc', 1)
+        """
+        _pre: Optional[Tuple[str, int]] = self._version.pre
+        return _pre
+
+    @property
+    def post(self) -> Optional[int]:
+        """The post-release number of the version.
+
+        >>> print(Version("1.2.3").post)
+        None
+        >>> Version("1.2.3.post1").post
+        1
+        """
+        return self._version.post[1] if self._version.post else None
+
+    @property
+    def dev(self) -> Optional[int]:
+        """The development number of the version.
+
+        >>> print(Version("1.2.3").dev)
+        None
+        >>> Version("1.2.3.dev1").dev
+        1
+        """
+        return self._version.dev[1] if self._version.dev else None
+
+    @property
+    def local(self) -> Optional[str]:
+        """The local version segment of the version.
+
+        >>> print(Version("1.2.3").local)
+        None
+        >>> Version("1.2.3+abc").local
+        'abc'
+        """
+        if self._version.local:
+            return ".".join(str(x) for x in self._version.local)
+        else:
+            return None
+
+    @property
+    def public(self) -> str:
+        """The public portion of the version.
+
+        >>> Version("1.2.3").public
+        '1.2.3'
+        >>> Version("1.2.3+abc").public
+        '1.2.3'
+        >>> Version("1.2.3+abc.dev1").public
+        '1.2.3'
+        """
+        return str(self).split("+", 1)[0]
+
+    @property
+    def base_version(self) -> str:
+        """The "base version" of the version.
+
+        >>> Version("1.2.3").base_version
+        '1.2.3'
+        >>> Version("1.2.3+abc").base_version
+        '1.2.3'
+        >>> Version("1!1.2.3+abc.dev1").base_version
+        '1!1.2.3'
+
+        The "base version" is the public version of the project without any pre or post
+        release markers.
+        """
+        parts = []
+
+        # Epoch
+        if self.epoch != 0:
+            parts.append(f"{self.epoch}!")
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self.release))
+
+        return "".join(parts)
+
+    @property
+    def is_prerelease(self) -> bool:
+        """Whether this version is a pre-release.
+
+        >>> Version("1.2.3").is_prerelease
+        False
+        >>> Version("1.2.3a1").is_prerelease
+        True
+        >>> Version("1.2.3b1").is_prerelease
+        True
+        >>> Version("1.2.3rc1").is_prerelease
+        True
+        >>> Version("1.2.3dev1").is_prerelease
+        True
+        """
+        return self.dev is not None or self.pre is not None
+
+    @property
+    def is_postrelease(self) -> bool:
+        """Whether this version is a post-release.
+
+        >>> Version("1.2.3").is_postrelease
+        False
+        >>> Version("1.2.3.post1").is_postrelease
+        True
+        """
+        return self.post is not None
+
+    @property
+    def is_devrelease(self) -> bool:
+        """Whether this version is a development release.
+
+        >>> Version("1.2.3").is_devrelease
+        False
+        >>> Version("1.2.3.dev1").is_devrelease
+        True
+        """
+        return self.dev is not None
+
+    @property
+    def major(self) -> int:
+        """The first item of :attr:`release` or ``0`` if unavailable.
+
+        >>> Version("1.2.3").major
+        1
+        """
+        return self.release[0] if len(self.release) >= 1 else 0
+
+    @property
+    def minor(self) -> int:
+        """The second item of :attr:`release` or ``0`` if unavailable.
+
+        >>> Version("1.2.3").minor
+        2
+        >>> Version("1").minor
+        0
+        """
+        return self.release[1] if len(self.release) >= 2 else 0
+
+    @property
+    def micro(self) -> int:
+        """The third item of :attr:`release` or ``0`` if unavailable.
+
+        >>> Version("1.2.3").micro
+        3
+        >>> Version("1").micro
+        0
+        """
+        return self.release[2] if len(self.release) >= 3 else 0
+
+
+def _parse_letter_version(
+    letter: str, number: Union[str, bytes, SupportsInt]
+) -> Optional[Tuple[str, int]]:
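+    """Normalize a pre/post/dev "letter" and its number into a canonical
+    ``(letter, number)`` tuple, e.g. ``("alpha", None) -> ("a", 0)`` and
+    ``("rev", "2") -> ("post", 2)``. Returns ``None`` when neither a letter
+    nor a number is given."""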
+
+    if letter:
+        # We consider there to be an implicit 0 in a pre-release if there is
+        # not a numeral associated with it.
+        if number is None:
+            number = 0
+
+        # We normalize any letters to their lower case form
+        letter = letter.lower()
+
+        # We consider some words to be alternate spellings of other words and
+        # in those cases we want to normalize the spellings to our preferred
+        # spelling.
+        if letter == "alpha":
+            letter = "a"
+        elif letter == "beta":
+            letter = "b"
+        elif letter in ["c", "pre", "preview"]:
+            letter = "rc"
+        elif letter in ["rev", "r"]:
+            letter = "post"
+
+        return letter, int(number)
+    if not letter and number:
+        # We assume if we are given a number, but we are not given a letter
+        # then this is using the implicit post release syntax (e.g. 1.0-1)
+        letter = "post"
+
+        return letter, int(number)
+
+    return None
+
+
+_local_version_separators = re.compile(r"[\._-]")
+
+
+def _parse_local_version(local: str) -> Optional[LocalType]:
+    """
+    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
+    """
+    if local is not None:
+        return tuple(
+            part.lower() if not part.isdigit() else int(part)
+            for part in _local_version_separators.split(local)
+        )
+    return None
+
+
+def _cmpkey(
+    epoch: int,
+    release: Tuple[int, ...],
+    pre: Optional[Tuple[str, int]],
+    post: Optional[Tuple[str, int]],
+    dev: Optional[Tuple[str, int]],
+    local: Optional[Tuple[SubLocalType]],
+) -> CmpKey:
+
+    # When we compare a release version, we want to compare it with all of the
+    # trailing zeros removed. So we'll reverse the list, drop all of the now
+    # leading zeros until we come to something non-zero, re-reverse the rest
+    # back into the correct order, and use the resulting tuple as our sorting
+    # key. For example, (1, 0, 0) and (1,) both reduce to (1,), which is how
+    # "1.0.0" and "1" end up comparing as equal.
+    _release = tuple(
+        reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
+    )
+
+    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
+    # We'll do this by abusing the pre segment, but we _only_ want to do this
+    # if there is not a pre or a post segment. If we have one of those then
+    # the normal sorting rules will handle this case correctly.
+    if pre is None and post is None and dev is not None:
+        _pre: PrePostDevType = NegativeInfinity
+    # Versions without a pre-release (except as noted above) should sort after
+    # those with one.
+    elif pre is None:
+        _pre = Infinity
+    else:
+        _pre = pre
+
+    # Versions without a post segment should sort before those with one.
+    if post is None:
+        _post: PrePostDevType = NegativeInfinity
+
+    else:
+        _post = post
+
+    # Versions without a development segment should sort after those with one.
+    if dev is None:
+        _dev: PrePostDevType = Infinity
+
+    else:
+        _dev = dev
+
+    if local is None:
+        # Versions without a local segment should sort before those with one.
+        _local: LocalType = NegativeInfinity
+    else:
+        # Versions with a local segment need that segment parsed to implement
+        # the sorting rules in PEP440.
+        # - Alpha numeric segments sort before numeric segments
+        # - Alpha numeric segments sort lexicographically
+        # - Numeric segments sort numerically
+        # - Shorter versions sort before longer versions when the prefixes
+        #   match exactly
+        _local = tuple(
+            (i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
+        )
+
+    return epoch, _release, _pre, _post, _dev, _local
diff --git a/venv/lib/python3.10/site-packages/parameterized-0.8.1.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/parameterized-0.8.1.dist-info/INSTALLER
new file mode 100644
index 0000000..a1b589e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/parameterized-0.8.1.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/venv/lib/python3.10/site-packages/parameterized-0.8.1.dist-info/LICENSE.txt b/venv/lib/python3.10/site-packages/parameterized-0.8.1.dist-info/LICENSE.txt
new file mode 100644
index 0000000..e58a36e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/parameterized-0.8.1.dist-info/LICENSE.txt
@@ -0,0 +1,27 @@
+Unless stated otherwise in the source files, all code is copyright 2010 David
+Wolever <david@wolever.net>. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+   1. Redistributions of source code must retain the above copyright notice,
+   this list of conditions and the following disclaimer.
+
+   2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY DAVID WOLEVER ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+EVENT SHALL DAVID WOLEVER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+The views and conclusions contained in the software and documentation are those
+of the authors and should not be interpreted as representing official policies,
+either expressed or implied, of David Wolever.
diff --git a/venv/lib/python3.10/site-packages/parameterized-0.8.1.dist-info/METADATA b/venv/lib/python3.10/site-packages/parameterized-0.8.1.dist-info/METADATA
new file mode 100644
index 0000000..e07835d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/parameterized-0.8.1.dist-info/METADATA
@@ -0,0 +1,683 @@
+Metadata-Version: 2.1
+Name: parameterized
+Version: 0.8.1
+Summary: Parameterized testing with any Python test framework
+Home-page: https://github.com/wolever/parameterized
+Author: David Wolever
+Author-email: david@wolever.net
+License: FreeBSD
+Platform: UNKNOWN
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: License :: OSI Approved :: BSD License
+Provides-Extra: dev
+Requires-Dist: jinja2 ; extra == 'dev'
+
+Parameterized testing with any Python test framework
+====================================================
+
+.. image:: https://img.shields.io/pypi/v/parameterized.svg
+    :alt: PyPI
+    :target: https://pypi.org/project/parameterized/
+
+.. image:: https://circleci.com/gh/wolever/parameterized.svg?style=svg
+    :alt: Circle CI
+    :target: https://circleci.com/gh/wolever/parameterized
+
+
+Parameterized testing in Python sucks.
+
+``parameterized`` fixes that. For everything. Parameterized testing for nose,
+parameterized testing for py.test, parameterized testing for unittest.
+
+.. code:: python
+
+   # test_math.py
+   from nose.tools import assert_equal
+   from parameterized import parameterized, parameterized_class
+
+   import unittest
+   import math
+
+   @parameterized([
+       (2, 2, 4),
+       (2, 3, 8),
+       (1, 9, 1),
+       (0, 9, 0),
+   ])
+   def test_pow(base, exponent, expected):
+      assert_equal(math.pow(base, exponent), expected)
+
+   class TestMathUnitTest(unittest.TestCase):
+      @parameterized.expand([
+          ("negative", -1.5, -2.0),
+          ("integer", 1, 1.0),
+          ("large fraction", 1.6, 1),
+      ])
+      def test_floor(self, name, input, expected):
+          assert_equal(math.floor(input), expected)
+
+   @parameterized_class(('a', 'b', 'expected_sum', 'expected_product'), [
+      (1, 2, 3, 2),
+      (5, 5, 10, 25),
+   ])
+   class TestMathClass(unittest.TestCase):
+      def test_add(self):
+         assert_equal(self.a + self.b, self.expected_sum)
+
+      def test_multiply(self):
+         assert_equal(self.a * self.b, self.expected_product)
+
+   @parameterized_class([
+      { "a": 3, "expected": 2 },
+      { "b": 5, "expected": -4 },
+   ])
+   class TestMathClassDict(unittest.TestCase):
+      a = 1
+      b = 1
+
+      def test_subtract(self):
+         assert_equal(self.a - self.b, self.expected)
+
+
+With nose (and nose2)::
+
+    $ nosetests -v test_math.py
+    test_floor_0_negative (test_math.TestMathUnitTest) ... ok
+    test_floor_1_integer (test_math.TestMathUnitTest) ... ok
+    test_floor_2_large_fraction (test_math.TestMathUnitTest) ... ok
+    test_math.test_pow(2, 2, 4, {}) ... ok
+    test_math.test_pow(2, 3, 8, {}) ... ok
+    test_math.test_pow(1, 9, 1, {}) ... ok
+    test_math.test_pow(0, 9, 0, {}) ... ok
+    test_add (test_math.TestMathClass_0) ... ok
+    test_multiply (test_math.TestMathClass_0) ... ok
+    test_add (test_math.TestMathClass_1) ... ok
+    test_multiply (test_math.TestMathClass_1) ... ok
+    test_subtract (test_math.TestMathClassDict_0) ... ok
+
+    ----------------------------------------------------------------------
+    Ran 12 tests in 0.015s
+
+    OK
+
+Historically, nose was the best-supported runner (the package began life as
+``nose-parameterized``), so it is used for the remaining examples.
+
+
+With py.test (version 2.0 and above)::
+
+    $ py.test -v test_math.py
+    ============================= test session starts ==============================
+    platform darwin -- Python 3.6.1, pytest-3.1.3, py-1.4.34, pluggy-0.4.0
+    collecting ... collected 13 items
+
+    test_math.py::test_pow::[0] PASSED
+    test_math.py::test_pow::[1] PASSED
+    test_math.py::test_pow::[2] PASSED
+    test_math.py::test_pow::[3] PASSED
+    test_math.py::TestMathUnitTest::test_floor_0_negative PASSED
+    test_math.py::TestMathUnitTest::test_floor_1_integer PASSED
+    test_math.py::TestMathUnitTest::test_floor_2_large_fraction PASSED
+    test_math.py::TestMathClass_0::test_add PASSED
+    test_math.py::TestMathClass_0::test_multiply PASSED
+    test_math.py::TestMathClass_1::test_add PASSED
+    test_math.py::TestMathClass_1::test_multiply PASSED
+    test_math.py::TestMathClassDict_0::test_subtract PASSED
+    ==================== 12 passed, 4 warnings in 0.16 seconds =====================
+
+With unittest (and unittest2)::
+
+    $ python -m unittest -v test_math
+    test_floor_0_negative (test_math.TestMathUnitTest) ... ok
+    test_floor_1_integer (test_math.TestMathUnitTest) ... ok
+    test_floor_2_large_fraction (test_math.TestMathUnitTest) ... ok
+    test_add (test_math.TestMathClass_0) ... ok
+    test_multiply (test_math.TestMathClass_0) ... ok
+    test_add (test_math.TestMathClass_1) ... ok
+    test_multiply (test_math.TestMathClass_1) ... ok
+    test_subtract (test_math.TestMathClassDict_0) ... ok
+
+    ----------------------------------------------------------------------
+    Ran 8 tests in 0.001s
+
+    OK
+
+(note: because unittest does not support test decorators, only tests created
+with ``@parameterized.expand`` will be executed)
+
+With green::
+
+    $ green test_math.py -vvv
+    test_math
+      TestMathClass_1
+    .   test_method_a
+    .   test_method_b
+      TestMathClass_2
+    .   test_method_a
+    .   test_method_b
+      TestMathClass_3
+    .   test_method_a
+    .   test_method_b
+      TestMathUnitTest
+    .   test_floor_0_negative
+    .   test_floor_1_integer
+    .   test_floor_2_large_fraction
+      TestMathClass_0
+    .   test_add
+    .   test_multiply
+      TestMathClass_1
+    .   test_add
+    .   test_multiply
+      TestMathClassDict_0
+    .   test_subtract
+
+    Ran 12 tests in 0.121s
+
+    OK (passes=9)
+
+
+Installation
+------------
+
+::
+
+    $ pip install parameterized
+
+
+Compatibility
+-------------
+
+`Yes`__ (mostly).
+
+__ https://travis-ci.org/wolever/parameterized
+
+.. list-table::
+   :header-rows: 1
+   :stub-columns: 1
+
+   * -
+     - Py2.6
+     - Py2.7
+     - Py3.4
+     - Py3.5
+     - Py3.6
+     - Py3.7
+     - Py3.8
+     - Py3.9
+     - PyPy
+     - ``@mock.patch``
+   * - nose
+     - yes
+     - yes
+     - yes
+     - yes
+     - yes
+     - yes
+     - yes
+     - yes
+     - yes
+     - yes
+   * - nose2
+     - yes
+     - yes
+     - yes
+     - yes
+     - yes
+     - yes
+     - yes
+     - yes
+     - yes
+     - yes
+   * - py.test 2
+     - yes
+     - yes
+     - no*
+     - no*
+     - no*
+     - no*
+     - no*
+     - no*
+     - yes
+     - yes
+   * - py.test 3
+     - yes
+     - yes
+     - yes
+     - yes
+     - yes
+     - yes
+     - yes
+     - yes
+     - yes
+     - yes
+   * - py.test 4
+     - no**
+     - no**
+     - no**
+     - no**
+     - no**
+     - no**
+     - no**
+     - no**
+     - no**
+     - no**
+   * - py.test fixtures
+     - no†
+     - no†
+     - no†
+     - no†
+     - no†
+     - no†
+     - no†
+     - no†
+     - no†
+     - no†
+   * - | unittest
+       | (``@parameterized.expand``)
+     - yes
+     - yes
+     - yes
+     - yes
+     - yes
+     - yes
+     - yes
+     - yes
+     - yes
+     - yes
+   * - | unittest2
+       | (``@parameterized.expand``)
+     - yes
+     - yes
+     - yes
+     - yes
+     - yes
+     - yes
+     - yes
+     - yes
+     - yes
+     - yes
+
+\*: py.test 2 `does not appear to work (#71)`__ under Python 3. Please comment on the related issues if you are affected.
+
+\*\*: py.test 4 is not yet supported (but support is coming!); see `issue #34`__
+
+†: py.test fixture support is documented in `issue #81`__
+
+__ https://github.com/wolever/parameterized/issues/71
+__ https://github.com/wolever/parameterized/issues/34
+__ https://github.com/wolever/parameterized/issues/81
+
+Dependencies
+------------
+
+(this section left intentionally blank)
+
+
+Exhaustive Usage Examples
+--------------------------
+
+The ``@parameterized`` and ``@parameterized.expand`` decorators accept a list
+or iterable of tuples or ``param(...)``, or a callable which returns a list or
+iterable:
+
+.. code:: python
+
+    from parameterized import parameterized, param
+
+    # A list of tuples
+    @parameterized([
+        (2, 3, 5),
+        (3, 5, 8),
+    ])
+    def test_add(a, b, expected):
+        assert_equal(a + b, expected)
+
+    # A list of params
+    @parameterized([
+        param("10", 10),
+        param("10", 16, base=16),
+    ])
+    def test_int(str_val, expected, base=10):
+        assert_equal(int(str_val, base=base), expected)
+
+    # An iterable of params
+    @parameterized(
+        param.explicit(*json.loads(line))
+        for line in open("testcases.jsons")
+    )
+    def test_from_json_file(...):
+        ...
+
+    # A callable which returns a list of tuples
+    def load_test_cases():
+        return [
+            ("test1", ),
+            ("test2", ),
+        ]
+    @parameterized(load_test_cases)
+    def test_from_function(name):
+        ...
+
+.. **
+
+Note that, when using an iterator or a generator, all the items will be loaded
+into memory before the start of the test run (we do this explicitly to ensure
+that generators are exhausted exactly once in multi-process or multi-threaded
+testing environments).
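+
+For example (a minimal sketch; the generator expression and names below are
+illustrative), the generator here is consumed once, when the decorator is
+applied, not once per process:
+
+.. code:: python
+
+    @parameterized(
+        (i, i * i) for i in range(3)
+    )
+    def test_square(n, expected):
+        assert n * n == expected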
+
+The ``@parameterized`` decorator can be used on test class methods, and on
+standalone functions:
+
+.. code:: python
+
+    from parameterized import parameterized
+
+    class AddTest(object):
+        @parameterized([
+            (2, 3, 5),
+        ])
+        def test_add(self, a, b, expected):
+            assert_equal(a + b, expected)
+
+    @parameterized([
+        (2, 3, 5),
+    ])
+    def test_add(a, b, expected):
+        assert_equal(a + b, expected)
+
+
+And ``@parameterized.expand`` can be used to generate test methods in
+situations where test generators cannot be used (for example, when the test
+class is a subclass of ``unittest.TestCase``):
+
+.. code:: python
+
+    import unittest
+    from parameterized import parameterized
+
+    class AddTestCase(unittest.TestCase):
+        @parameterized.expand([
+            ("2 and 3", 2, 3, 5),
+            ("3 and 5", 2, 3, 5),
+        ])
+        def test_add(self, _, a, b, expected):
+            assert_equal(a + b, expected)
+
+Will create the test cases::
+
+    $ nosetests example.py
+    test_add_0_2_and_3 (example.AddTestCase) ... ok
+    test_add_1_3_and_5 (example.AddTestCase) ... ok
+
+    ----------------------------------------------------------------------
+    Ran 2 tests in 0.001s
+
+    OK
+
+Note that ``@parameterized.expand`` works by creating new methods on the test
+class. If the first parameter is a string, that string will be added to the end
+of the method name. For example, the test case above will generate the methods
+``test_add_0_2_and_3`` and ``test_add_1_3_and_5``.
+
+The names of the test cases generated by ``@parameterized.expand`` can be
+customized using the ``name_func`` keyword argument. The value should
+be a function which accepts three arguments: ``testcase_func``, ``param_num``,
+and ``param``, and it should return the name of the test case.
+``testcase_func`` will be the function to be tested, ``param_num`` will be the
+index of the test case parameters in the list of parameters, and ``param``
+(an instance of ``param``) will be the parameters which will be used.
+
+.. code:: python
+
+    import unittest
+    from parameterized import parameterized
+
+    def custom_name_func(testcase_func, param_num, param):
+        return "%s_%s" %(
+            testcase_func.__name__,
+            parameterized.to_safe_name("_".join(str(x) for x in param.args)),
+        )
+
+    class AddTestCase(unittest.TestCase):
+        @parameterized.expand([
+            (1, 2, 3),
+            (2, 3, 5),
+        ], name_func=custom_name_func)
+        def test_add(self, a, b, expected):
+            assert_equal(a + b, expected)
+
+Will create the test cases::
+
+    $ nosetests example.py
+    test_add_1_2_3 (example.AddTestCase) ... ok
+    test_add_2_3_5 (example.AddTestCase) ... ok
+
+    ----------------------------------------------------------------------
+    Ran 2 tests in 0.001s
+
+    OK
+
+
+The ``param(...)`` helper class stores the parameters for one specific test
+case.  It can be used to pass keyword arguments to test cases:
+
+.. code:: python
+
+    from parameterized import parameterized, param
+
+    @parameterized([
+        param("10", 10),
+        param("10", 16, base=16),
+    ])
+    def test_int(str_val, expected, base=10):
+        assert_equal(int(str_val, base=base), expected)
+
+
+If test cases have a docstring, the parameters for that test case will be
+appended to the first line of the docstring. This behavior can be controlled
+with the ``doc_func`` argument:
+
+.. code:: python
+
+    from parameterized import parameterized
+
+    @parameterized([
+        (1, 2, 3),
+        (4, 5, 9),
+    ])
+    def test_add(a, b, expected):
+        """ Test addition. """
+        assert_equal(a + b, expected)
+
+    def my_doc_func(func, num, param):
+        return "%s: %s with %s" %(num, func.__name__, param)
+
+    @parameterized([
+        (5, 4, 1),
+        (9, 6, 3),
+    ], doc_func=my_doc_func)
+    def test_subtraction(a, b, expected):
+        assert_equal(a - b, expected)
+
+::
+
+    $ nosetests example.py
+    Test addition. [with a=1, b=2, expected=3] ... ok
+    Test addition. [with a=4, b=5, expected=9] ... ok
+    0: test_subtraction with param(*(5, 4, 1)) ... ok
+    1: test_subtraction with param(*(9, 6, 3)) ... ok
+
+    ----------------------------------------------------------------------
+    Ran 4 tests in 0.001s
+
+    OK
+
+Finally, ``@parameterized_class`` parameterizes an entire class, using
+either a list of attributes, or a list of dicts that will be applied to the
+class:
+
+.. code:: python
+
+    from yourapp.models import User
+    from parameterized import parameterized_class
+
+    @parameterized_class([
+       { "username": "user_1", "access_level": 1 },
+       { "username": "user_2", "access_level": 2, "expected_status_code": 404 },
+    ])
+    class TestUserAccessLevel(TestCase):
+       expected_status_code = 200
+
+       def setUp(self):
+          self.client.force_login(User.objects.get(username=self.username))
+
+       def test_url_a(self):
+          response = self.client.get('/url')
+          self.assertEqual(response.status_code, self.expected_status_code)
+
+       def tearDown(self):
+          self.client.logout()
+
+
+    @parameterized_class(("username", "access_level", "expected_status_code"), [
+       ("user_1", 1, 200),
+       ("user_2", 2, 404)
+    ])
+    class TestUserAccessLevel(TestCase):
+       def setUp(self):
+          self.client.force_login(User.objects.get(username=self.username))
+
+       def test_url_a(self):
+          response = self.client.get("/url")
+          self.assertEqual(response.status_code, self.expected_status_code)
+
+       def tearDown(self):
+          self.client.logout()
+
+
+The ``@parameterized_class`` decorator accepts a ``class_name_func`` argument,
+which controls the name of the parameterized classes generated by
+``@parameterized_class``:
+
+.. code:: python
+
+    from parameterized import parameterized, parameterized_class
+
+    def get_class_name(cls, num, params_dict):
+        # By default the generated class name includes either the "name"
+        # parameter (if present), or the first string value. This example shows
+        # multiple parameters being included in the generated class name:
+        return "%s_%s_%s%s" %(
+            cls.__name__,
+            num,
+            parameterized.to_safe_name(params_dict['a']),
+            parameterized.to_safe_name(params_dict['b']),
+        )
+
+    @parameterized_class([
+       { "a": "hello", "b": " world!", "expected": "hello world!" },
+       { "a": "say ", "b": " cheese :)", "expected": "say cheese :)" },
+    ], class_name_func=get_class_name)
+    class TestConcatenation(TestCase):
+      def test_concat(self):
+          self.assertEqual(self.a + self.b, self.expected)
+
+::
+
+    $ nosetests -v test_concat.py
+    test_concat (test_concat.TestConcatenation_0_hello_world_) ... ok
+    test_concat (test_concat.TestConcatenation_1_say__cheese_) ... ok
+
+
+
+Using with Single Parameters
+............................
+
+If a test function only accepts one parameter and the value is not iterable,
+then it is possible to supply a list of values without wrapping each one in a
+tuple:
+
+.. code:: python
+
+   @parameterized([1, 2, 3])
+   def test_greater_than_zero(value):
+      assert value > 0
+
+Note, however, that if the single parameter *is* iterable (such as a list or
+tuple), then it *must* be wrapped in a tuple, list, or the ``param(...)``
+helper:
+
+.. code:: python
+
+   @parameterized([
+      ([1, 2, 3], ),
+      ([3, 3], ),
+      ([6], ),
+   ])
+   def test_sums_to_6(numbers):
+      assert sum(numbers) == 6
+
+(note, also, that Python requires single element tuples to be defined with a
+trailing comma: ``(foo, )``)
+
+
+Using with ``@mock.patch``
+..........................
+
+``parameterized`` can be used with ``mock.patch``, but the argument ordering
+can be confusing. The ``@mock.patch(...)`` decorator must come *below* the
+``@parameterized(...)``, and the mocked parameters must come *last*:
+
+.. code:: python
+
+   @mock.patch("os.getpid")
+   class TestOS(object):
+      @parameterized(...)
+      @mock.patch("os.fdopen")
+      @mock.patch("os.umask")
+      def test_method(self, param1, param2, ..., mock_umask, mock_fdopen, mock_getpid):
+         ...
+
+Note: the same holds true when using ``@parameterized.expand``.
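+
+For example, here is a minimal sketch of the same ordering with
+``@parameterized.expand`` inside a ``unittest.TestCase`` (the class and
+parameter names are illustrative):
+
+.. code:: python
+
+   import unittest
+   import mock
+
+   from parameterized import parameterized
+
+   class TestWithPatches(unittest.TestCase):
+      @parameterized.expand([(1, 2), (3, 4)])
+      @mock.patch("os.umask")
+      def test_method(self, a, b, mock_umask):
+         # parameters first, mocked arguments last
+         self.assertIn((a, b), [(1, 2), (3, 4)])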
+
+
+Migrating from ``nose-parameterized`` to ``parameterized``
+----------------------------------------------------------
+
+To migrate a codebase from ``nose-parameterized`` to ``parameterized``:
+
+1. Update your requirements file, replacing ``nose-parameterized`` with
+   ``parameterized``.
+
+2. Replace all references to ``nose_parameterized`` with ``parameterized``::
+
+    $ perl -pi -e 's/nose_parameterized/parameterized/g' your-codebase/
+
+3. You're done!
+
+
+FAQ
+---
+
+What happened to ``nose-parameterized``?
+    Originally only nose was supported. But now everything is supported, and it
+    only made sense to change the name!
+
+What do you mean when you say "nose is best supported"?
+    There are small caveats with ``py.test`` and ``unittest``: ``py.test``
+    does not show the parameter values (e.g., it will show ``test_add[0]``
+    instead of ``test_add[1, 2, 3]``), and ``unittest``/``unittest2`` do not
+    support test generators so ``@parameterized.expand`` must be used.
+
+Why not use ``@pytest.mark.parametrize``?
+    Because spelling is difficult. Also, ``parameterized`` doesn't require you
+    to repeat argument names, and (using ``param``) it supports optional
+    keyword arguments.
+
+Why do I get an ``AttributeError: 'function' object has no attribute 'expand'`` with ``@parameterized.expand``?
+    You've likely installed the ``parametrized`` (note the missing *e*)
+    package. Use ``parameterized`` (with the *e*) instead and you'll be all
+    set.
+
+
diff --git a/venv/lib/python3.10/site-packages/parameterized-0.8.1.dist-info/RECORD b/venv/lib/python3.10/site-packages/parameterized-0.8.1.dist-info/RECORD
new file mode 100644
index 0000000..e4bc5c5
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/parameterized-0.8.1.dist-info/RECORD
@@ -0,0 +1,22 @@
+nose_parameterized/__init__.py,sha256=qY2GTkECum_2Gbb8QkcF855mogE04R5sJEV1LXfX9tk,461
+nose_parameterized/__pycache__/__init__.cpython-310.pyc,,
+nose_parameterized/__pycache__/compat.cpython-310.pyc,,
+nose_parameterized/__pycache__/parameterized.cpython-310.pyc,,
+nose_parameterized/__pycache__/test.cpython-310.pyc,,
+nose_parameterized/compat.py,sha256=HFuWOZzMieH15s0VkwxmSpf_3JjkmGXB1u2prnAir9M,276
+nose_parameterized/parameterized.py,sha256=EkDi07uCoyZmlUWVbmtW64Vs4FltAJRL_-_09YA_ons,14463
+nose_parameterized/test.py,sha256=WUte53WYa6wo-HcAJyrw5vMYX2pENTS9HRJg9IDRq_g,9251
+parameterized-0.8.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+parameterized-0.8.1.dist-info/LICENSE.txt,sha256=Aeb_aptOwAmK50RKx02snQgwzCm2T3GkGh650rsS4i4,1558
+parameterized-0.8.1.dist-info/METADATA,sha256=CDYj0whUOwUwXf6gCEaxPN9NrIKbbjlkZC-z0ryW0x0,18549
+parameterized-0.8.1.dist-info/RECORD,,
+parameterized-0.8.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+parameterized-0.8.1.dist-info/WHEEL,sha256=8zNYZbwQSXoB9IfXOjPfeNwvAsALAjffgk27FqvCWbo,110
+parameterized-0.8.1.dist-info/pbr.json,sha256=asElWiwNVlzBWybtV1vF52lD9jY2kC7avrFkDc0lmPI,47
+parameterized-0.8.1.dist-info/top_level.txt,sha256=FmnwidrcOm0vumnYZSgKXqAcpDIeC3Oz1mjMW006-3s,14
+parameterized/__init__.py,sha256=LvrSVjOTBOeGsf97IzQm1VZS6APxCMeM0NnD24LLQ6w,92
+parameterized/__pycache__/__init__.cpython-310.pyc,,
+parameterized/__pycache__/parameterized.cpython-310.pyc,,
+parameterized/__pycache__/test.cpython-310.pyc,,
+parameterized/parameterized.py,sha256=xLqP5bSl0FtWy2BXn2Vhl1Ir1Jm4MatNFtyxes_lQGw,22461
+parameterized/test.py,sha256=xrfGMkkrZq7QjRPjBxDNxvlX5_5-55D8Ly6p8FmRi2Y,18671
diff --git a/venv/lib/python3.10/site-packages/wheel/vendored/packaging/__init__.py b/venv/lib/python3.10/site-packages/parameterized-0.8.1.dist-info/REQUESTED
similarity index 100%
rename from venv/lib/python3.10/site-packages/wheel/vendored/packaging/__init__.py
rename to venv/lib/python3.10/site-packages/parameterized-0.8.1.dist-info/REQUESTED
diff --git a/venv/lib/python3.10/site-packages/parameterized-0.8.1.dist-info/WHEEL b/venv/lib/python3.10/site-packages/parameterized-0.8.1.dist-info/WHEEL
new file mode 100644
index 0000000..8b701e9
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/parameterized-0.8.1.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.33.6)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/venv/lib/python3.10/site-packages/parameterized-0.8.1.dist-info/pbr.json b/venv/lib/python3.10/site-packages/parameterized-0.8.1.dist-info/pbr.json
new file mode 100644
index 0000000..36d4a60
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/parameterized-0.8.1.dist-info/pbr.json
@@ -0,0 +1 @@
+{"is_release": false, "git_version": "80cbc49"}
\ No newline at end of file
diff --git a/venv/lib/python3.10/site-packages/parameterized-0.8.1.dist-info/top_level.txt b/venv/lib/python3.10/site-packages/parameterized-0.8.1.dist-info/top_level.txt
new file mode 100644
index 0000000..f543eed
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/parameterized-0.8.1.dist-info/top_level.txt
@@ -0,0 +1 @@
+parameterized
diff --git a/venv/lib/python3.10/site-packages/parameterized/__init__.py b/venv/lib/python3.10/site-packages/parameterized/__init__.py
new file mode 100644
index 0000000..25ff0f1
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/parameterized/__init__.py
@@ -0,0 +1,3 @@
+from .parameterized import parameterized, param, parameterized_class
+
+__version__ = "0.8.1"
diff --git a/venv/lib/python3.10/site-packages/parameterized/parameterized.py b/venv/lib/python3.10/site-packages/parameterized/parameterized.py
new file mode 100644
index 0000000..969a157
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/parameterized/parameterized.py
@@ -0,0 +1,642 @@
+import re
+import sys
+import inspect
+import warnings
+from functools import wraps
+from types import MethodType as MethodType
+from collections import namedtuple
+
+try:
+    from collections import OrderedDict as MaybeOrderedDict
+except ImportError:
+    MaybeOrderedDict = dict
+
+from unittest import TestCase
+
+try:
+    from unittest import SkipTest
+except ImportError:
+    class SkipTest(Exception):
+        pass
+
+PY3 = sys.version_info[0] == 3
+PY2 = sys.version_info[0] == 2
+
+
+if PY3:
+    # Python 3 doesn't have an InstanceType, so just use a dummy type.
+    class InstanceType():
+        pass
+    lzip = lambda *a: list(zip(*a))
+    text_type = str
+    string_types = str,
+    bytes_type = bytes
+    def make_method(func, instance, type):
+        if instance is None:
+            return func
+        return MethodType(func, instance)
+else:
+    from types import InstanceType
+    lzip = zip
+    text_type = unicode
+    bytes_type = str
+    string_types = basestring,
+    def make_method(func, instance, type):
+        return MethodType(func, instance, type)
+
+def to_text(x):
+    if isinstance(x, text_type):
+        return x
+    try:
+        return text_type(x, "utf-8")
+    except UnicodeDecodeError:
+        return text_type(x, "latin1")
+
+CompatArgSpec = namedtuple("CompatArgSpec", "args varargs keywords defaults")
+
+
+def getargspec(func):
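+    """Return a ``CompatArgSpec`` for ``func`` on both Python 2 and Python 3.
+    Keyword-only arguments are rejected because positional parameters cannot
+    be mapped onto them."""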
+    if PY2:
+        return CompatArgSpec(*inspect.getargspec(func))
+    args = inspect.getfullargspec(func)
+    if args.kwonlyargs:
+        raise TypeError((
+            "parameterized does not (yet) support functions with keyword "
+            "only arguments, but %r has keyword only arguments. "
+            "Please open an issue with your usecase if this affects you: "
+            "https://github.com/wolever/parameterized/issues/new"
+        ) %(func, ))
+    return CompatArgSpec(*args[:4])
+
+
+def skip_on_empty_helper(*a, **kw):
+    raise SkipTest("parameterized input is empty")
+
+
+def reapply_patches_if_need(func):
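+    """If ``func`` has patches applied by ``mock.patch``, wrap it and
+    re-apply the patches to the wrapper so that each generated test gets its
+    own patch objects instead of sharing them."""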
+
+    def dummy_wrapper(orgfunc):
+        @wraps(orgfunc)
+        def dummy_func(*args, **kwargs):
+            return orgfunc(*args, **kwargs)
+        return dummy_func
+
+    if hasattr(func, 'patchings'):
+        func = dummy_wrapper(func)
+        tmp_patchings = func.patchings
+        delattr(func, 'patchings')
+        for patch_obj in tmp_patchings:
+            func = patch_obj.decorate_callable(func)
+    return func
+
+
+def delete_patches_if_need(func):
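+    """Remove any ``mock.patch`` patchings recorded on ``func`` (used once
+    the patches have been re-applied to the generated test functions)."""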
+    if hasattr(func, 'patchings'):
+        func.patchings[:] = []
+
+
+_param = namedtuple("param", "args kwargs")
+
+class param(_param):
+    """ Represents a single parameter to a test case.
+
+        For example::
+
+            >>> p = param("foo", bar=16)
+            >>> p
+            param("foo", bar=16)
+            >>> p.args
+            ('foo', )
+            >>> p.kwargs
+            {'bar': 16}
+
+        Intended to be used as an argument to ``@parameterized``::
+
+            @parameterized([
+                param("foo", bar=16),
+            ])
+            def test_stuff(foo, bar=16):
+                pass
+        """
+
+    def __new__(cls, *args, **kwargs):
+        return _param.__new__(cls, args, kwargs)
+
+    @classmethod
+    def explicit(cls, args=None, kwargs=None):
+        """ Creates a ``param`` by explicitly specifying ``args`` and
+            ``kwargs``::
+
+                >>> param.explicit([1,2,3])
+                param(*(1, 2, 3), **{})
+                >>> param.explicit(kwargs={"foo": 42})
+                param(*(), **{'foo': 42})
+            """
+        args = args or ()
+        kwargs = kwargs or {}
+        return cls(*args, **kwargs)
+
+    @classmethod
+    def from_decorator(cls, args):
+        """ Returns an instance of ``param()`` for ``@parameterized`` argument
+            ``args``::
+
+                >>> param.from_decorator((42, ))
+                param(*(42,), **{})
+                >>> param.from_decorator("foo")
+                param(*('foo',), **{})
+            """
+        if isinstance(args, param):
+            return args
+        elif isinstance(args, string_types):
+            args = (args, )
+        try:
+            return cls(*args)
+        except TypeError as e:
+            if "after * must be" not in str(e):
+                raise
+            raise TypeError(
+                "Parameters must be tuples, but %r is not (hint: use '(%r, )')"
+                %(args, args),
+            )
+
+    def __repr__(self):
+        return "param(*%r, **%r)" %self
+
+
+class QuietOrderedDict(MaybeOrderedDict):
+    """ When OrderedDict is available, use it to make sure that the kwargs in
+        doc strings are consistently ordered. """
+    __str__ = dict.__str__
+    __repr__ = dict.__repr__
+
+
+def parameterized_argument_value_pairs(func, p):
+    """Return tuples of parameterized arguments and their values.
+
+        This is useful if you are writing your own doc_func
+        function and need to know the values for each parameter name::
+
+            >>> def func(a, foo=None, bar=42, **kwargs): pass
+            >>> p = param(1, foo=7, extra=99)
+            >>> parameterized_argument_value_pairs(func, p)
+            [("a", 1), ("foo", 7), ("bar", 42), ("**kwargs", {"extra": 99})]
+
+        If the function's first argument is named ``self`` then it will be
+        ignored::
+
+            >>> def func(self, a): pass
+            >>> p = param(1)
+            >>> parameterized_argument_value_pairs(func, p)
+            [("a", 1)]
+
+        Additionally, empty ``*args`` or ``**kwargs`` will be ignored::
+
+            >>> def func(foo, *args): pass
+            >>> p = param(1)
+            >>> parameterized_argument_value_pairs(func, p)
+            [("foo", 1)]
+            >>> p = param(1, 16)
+            >>> parameterized_argument_value_pairs(func, p)
+            [("foo", 1), ("*args", (16, ))]
+    """
+    argspec = getargspec(func)
+    arg_offset = 1 if argspec.args[:1] == ["self"] else 0
+
+    named_args = argspec.args[arg_offset:]
+
+    result = lzip(named_args, p.args)
+    named_args = argspec.args[len(result) + arg_offset:]
+    varargs = p.args[len(result):]
+
+    result.extend([
+        (name, p.kwargs.get(name, default))
+        for (name, default)
+        in zip(named_args, argspec.defaults or [])
+    ])
+
+    seen_arg_names = set([ n for (n, _) in result ])
+    keywords = QuietOrderedDict(sorted([
+        (name, p.kwargs[name])
+        for name in p.kwargs
+        if name not in seen_arg_names
+    ]))
+
+    if varargs:
+        result.append(("*%s" %(argspec.varargs, ), tuple(varargs)))
+
+    if keywords:
+        result.append(("**%s" %(argspec.keywords, ), keywords))
+
+    return result
+
+
+def short_repr(x, n=64):
+    """ A shortened repr of ``x`` which is guaranteed to be ``unicode``::
+
+            >>> short_repr("foo")
+            u"foo"
+            >>> short_repr("123456789", n=4)
+            u"12...89"
+    """
+
+    x_repr = to_text(repr(x))
+    if len(x_repr) > n:
+        x_repr = x_repr[:n//2] + "..." + x_repr[len(x_repr) - n//2:]
+    return x_repr
+
+
+def default_doc_func(func, num, p):
+    if func.__doc__ is None:
+        return None
+
+    all_args_with_values = parameterized_argument_value_pairs(func, p)
+
+    # Assumes that the function passed is a bound method.
+    descs = ["%s=%s" %(n, short_repr(v)) for n, v in all_args_with_values]
+
+    # The documentation might be a multiline string, so split it
+    # and just work with the first string, ignoring the period
+    # at the end if there is one.
+    first, nl, rest = func.__doc__.lstrip().partition("\n")
+    suffix = ""
+    if first.endswith("."):
+        suffix = "."
+        first = first[:-1]
+    args = "%s[with %s]" %(len(first) and " " or "", ", ".join(descs))
+    return "".join(
+        to_text(x)
+        for x in [first.rstrip(), args, suffix, nl, rest]
+    )
+
+
+def default_name_func(func, num, p):
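+    """Build the default name for a generated test case: the original
+    function's name, the parameter number, and (when the first parameter is
+    a string) a "safe" version of that string."""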
+    base_name = func.__name__
+    name_suffix = "_%s" %(num, )
+
+    if len(p.args) > 0 and isinstance(p.args[0], string_types):
+        name_suffix += "_" + parameterized.to_safe_name(p.args[0])
+    return base_name + name_suffix
+
+
+_test_runner_override = None
+_test_runner_guess = False
+_test_runners = set(["unittest", "unittest2", "nose", "nose2", "pytest"])
+_test_runner_aliases = {
+    "_pytest": "pytest",
+}
+
+
+def set_test_runner(name):
+    global _test_runner_override
+    if name not in _test_runners:
+        raise TypeError(
+            "Invalid test runner: %r (must be one of: %s)"
+            %(name, ", ".join(_test_runners)),
+        )
+    _test_runner_override = name
+
+
+def detect_runner():
+    """ Guess which test runner we're using by traversing the stack and looking
+        for the first matching module. This *should* be reasonably safe, as
+        it's done during test discovery where the test runner should be the
+        stack frame immediately outside. """
+    if _test_runner_override is not None:
+        return _test_runner_override
+    global _test_runner_guess
+    if _test_runner_guess is False:
+        stack = inspect.stack()
+        for record in reversed(stack):
+            frame = record[0]
+            module = frame.f_globals.get("__name__").partition(".")[0]
+            if module in _test_runner_aliases:
+                module = _test_runner_aliases[module]
+            if module in _test_runners:
+                _test_runner_guess = module
+                break
+            if record[1].endswith("python2.6/unittest.py"):
+                _test_runner_guess = "unittest"
+                break
+        else:
+            _test_runner_guess = None
+    return _test_runner_guess
+
+
+class parameterized(object):
+    """ Parameterize a test case::
+
+            class TestInt(object):
+                @parameterized([
+                    ("A", 10),
+                    ("F", 15),
+                    param("10", 42, base=42)
+                ])
+                def test_int(self, input, expected, base=16):
+                    actual = int(input, base=base)
+                    assert_equal(actual, expected)
+
+            @parameterized([
+                (2, 3, 5),
+                (3, 5, 8),
+            ])
+            def test_add(a, b, expected):
+                assert_equal(a + b, expected)
+        """
+
+    def __init__(self, input, doc_func=None, skip_on_empty=False):
+        self.get_input = self.input_as_callable(input)
+        self.doc_func = doc_func or default_doc_func
+        self.skip_on_empty = skip_on_empty
+
+    def __call__(self, test_func):
+        self.assert_not_in_testcase_subclass()
+
+        @wraps(test_func)
+        def wrapper(test_self=None):
+            test_cls = test_self and type(test_self)
+            if test_self is not None:
+                if issubclass(test_cls, InstanceType):
+                    raise TypeError((
+                        "@parameterized can't be used with old-style classes, but "
+                        "%r has an old-style class. Consider using a new-style "
+                        "class, or '@parameterized.expand' "
+                        "(see http://stackoverflow.com/q/54867/71522 for more "
+                        "information on old-style classes)."
+                    ) %(test_self, ))
+
+            original_doc = wrapper.__doc__
+            for num, args in enumerate(wrapper.parameterized_input):
+                p = param.from_decorator(args)
+                unbound_func, nose_tuple = self.param_as_nose_tuple(test_self, test_func, num, p)
+                try:
+                    wrapper.__doc__ = nose_tuple[0].__doc__
+                    # Nose uses `getattr(instance, test_func.__name__)` to get
+                    # a method bound to the test instance (as opposed to a
+                    # method bound to the instance of the class created when
+                    # tests were being enumerated). Set a value here to make
+                    # sure nose can get the correct test method.
+                    if test_self is not None:
+                        setattr(test_cls, test_func.__name__, unbound_func)
+                    yield nose_tuple
+                finally:
+                    if test_self is not None:
+                        delattr(test_cls, test_func.__name__)
+                    wrapper.__doc__ = original_doc
+
+        input = self.get_input()
+        if not input:
+            if not self.skip_on_empty:
+                raise ValueError(
+                    "Parameters iterable is empty (hint: use "
+                    "`parameterized([], skip_on_empty=True)` to skip "
+                    "this test when the input is empty)"
+                )
+            wrapper = wraps(test_func)(skip_on_empty_helper)
+
+        wrapper.parameterized_input = input
+        wrapper.parameterized_func = test_func
+        test_func.__name__ = "_parameterized_original_%s" %(test_func.__name__, )
+
+        return wrapper
+
+    def param_as_nose_tuple(self, test_self, func, num, p):
+        nose_func = wraps(func)(lambda *args: func(*args[:-1], **args[-1]))
+        nose_func.__doc__ = self.doc_func(func, num, p)
+        # Track the unbound function because we need to setattr the unbound
+        # function onto the class for nose to work (see comments above), and
+        # Python 3 doesn't let us pull the function out of a bound method.
+        unbound_func = nose_func
+        if test_self is not None:
+            # Under nose on Py2 we need to return an unbound method to make
+            # sure that the `self` in the method is properly shared with the
+            # `self` used in `setUp` and `tearDown`. But only there. Everyone
+            # else needs a bound method.
+            func_self = (
+                None if PY2 and detect_runner() == "nose" else
+                test_self
+            )
+            nose_func = make_method(nose_func, func_self, type(test_self))
+        return unbound_func, (nose_func, ) + p.args + (p.kwargs or {}, )
+
+    def assert_not_in_testcase_subclass(self):
+        parent_classes = self._terrible_magic_get_defining_classes()
+        if any(issubclass(cls, TestCase) for cls in parent_classes):
+            raise Exception("Warning: '@parameterized' tests won't work "
+                            "inside subclasses of 'TestCase' - use "
+                            "'@parameterized.expand' instead.")
+
+    def _terrible_magic_get_defining_classes(self):
+        """ Returns the set of parent classes of the class currently being defined.
+            Will likely only work if called from the ``parameterized`` decorator.
+            This function is entirely @brandon_rhodes's fault, as he suggested
+            the implementation: http://stackoverflow.com/a/8793684/71522
+            """
+        stack = inspect.stack()
+        if len(stack) <= 4:
+            return []
+        frame = stack[4]
+        code_context = frame[4] and frame[4][0].strip()
+        if not (code_context and code_context.startswith("class ")):
+            return []
+        _, _, parents = code_context.partition("(")
+        parents, _, _ = parents.partition(")")
+        return eval("[" + parents + "]", frame[0].f_globals, frame[0].f_locals)
+
+    @classmethod
+    def input_as_callable(cls, input):
+        if callable(input):
+            return lambda: cls.check_input_values(input())
+        input_values = cls.check_input_values(input)
+        return lambda: input_values
+
+    @classmethod
+    def check_input_values(cls, input_values):
+        # Explicitly convert non-list inputs to a list so that:
+        # 1. A helpful exception will be raised if they aren't iterable, and
+        # 2. Generators are unwrapped exactly once (otherwise `nosetests
+        #    --processes=n` has issues; see:
+        #    https://github.com/wolever/nose-parameterized/pull/31)
+        if not isinstance(input_values, list):
+            input_values = list(input_values)
+        return [ param.from_decorator(p) for p in input_values ]
+
+    @classmethod
+    def expand(cls, input, name_func=None, doc_func=None, skip_on_empty=False,
+               **legacy):
+        """ A "brute force" method of parameterizing test cases. Creates new
+            test cases and injects them into the namespace that the wrapped
+            function is being defined in. Useful for parameterizing tests in
+            subclasses of ``unittest.TestCase``, where nose test generators don't work.
+
+            >>> @parameterized.expand([("foo", 1, 2)])
+            ... def test_add1(name, input, expected):
+            ...     actual = add1(input)
+            ...     assert_equal(actual, expected)
+            ...
+            >>> locals()
+            ... 'test_add1_0_foo':  ...
+            >>>
+            """
+
+        if "testcase_func_name" in legacy:
+            warnings.warn("testcase_func_name= is deprecated; use name_func=",
+                          DeprecationWarning, stacklevel=2)
+            if not name_func:
+                name_func = legacy["testcase_func_name"]
+
+        if "testcase_func_doc" in legacy:
+            warnings.warn("testcase_func_doc= is deprecated; use doc_func=",
+                          DeprecationWarning, stacklevel=2)
+            if not doc_func:
+                doc_func = legacy["testcase_func_doc"]
+
+        doc_func = doc_func or default_doc_func
+        name_func = name_func or default_name_func
+
+        def parameterized_expand_wrapper(f, instance=None):
+            frame_locals = inspect.currentframe().f_back.f_locals
+
+            parameters = cls.input_as_callable(input)()
+
+            if not parameters:
+                if not skip_on_empty:
+                    raise ValueError(
+                        "Parameters iterable is empty (hint: use "
+                        "`parameterized.expand([], skip_on_empty=True)` to skip "
+                        "this test when the input is empty)"
+                    )
+                return wraps(f)(skip_on_empty_helper)
+
+            digits = len(str(len(parameters) - 1))
+            for num, p in enumerate(parameters):
+                name = name_func(f, "{num:0>{digits}}".format(digits=digits, num=num), p)
+                # If the original function has patches applied by 'mock.patch',
+                # re-construct those patches on the standalone function that
+                # param_as_standalone_func will wrap, so that patch objects
+                # are not shared between the generated functions.
+                nf = reapply_patches_if_need(f)
+                frame_locals[name] = cls.param_as_standalone_func(p, nf, name)
+                frame_locals[name].__doc__ = doc_func(f, num, p)
+
+            # Delete the original patches so the new functions don't evaluate
+            # the original patch objects in addition to the re-constructed ones.
+            delete_patches_if_need(f)
+
+            f.__test__ = False
+        return parameterized_expand_wrapper
+
+    @classmethod
+    def param_as_standalone_func(cls, p, func, name):
+        @wraps(func)
+        def standalone_func(*a):
+            return func(*(a + p.args), **p.kwargs)
+        standalone_func.__name__ = name
+
+        # place_as is used by py.test to determine what source file should be
+        # used for this test.
+        standalone_func.place_as = func
+
+        # Remove __wrapped__ because py.test will try to look at __wrapped__
+        # to determine which parameters should be used with this test case,
+        # and obviously we don't need it to do any parameterization.
+        try:
+            del standalone_func.__wrapped__
+        except AttributeError:
+            pass
+        return standalone_func
+
+    @classmethod
+    def to_safe_name(cls, s):
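+        """Collapse every run of characters outside ``[a-zA-Z0-9_]`` in
+        ``s`` into a single underscore, e.g. ``"2 and 3" -> "2_and_3"``."""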
+        return str(re.sub("[^a-zA-Z0-9_]+", "_", s))
+
+
+def parameterized_class(attrs, input_values=None, class_name_func=None, classname_func=None):
+    """ Parameterizes a test class by setting attributes on the class.
+
+        Can be used in two ways:
+
+        1) With a list of dictionaries containing attributes to override::
+
+            @parameterized_class([
+                { "username": "foo" },
+                { "username": "bar", "access_level": 2 },
+            ])
+            class TestUserAccessLevel(TestCase):
+                ...
+
+        2) With a tuple of attributes, then a list of tuples of values::
+
+            @parameterized_class(("username", "access_level"), [
+                ("foo", 1),
+                ("bar", 2)
+            ])
+            class TestUserAccessLevel(TestCase):
+                ...
+
+    """
+
+    if isinstance(attrs, string_types):
+        attrs = [attrs]
+
+    input_dicts = (
+        attrs if input_values is None else
+        [dict(zip(attrs, vals)) for vals in input_values]
+    )
+
+    class_name_func = class_name_func or default_class_name_func
+    
+    if classname_func:
+        warnings.warn(
+            "classname_func= is deprecated; use class_name_func= instead. "
+            "See: https://github.com/wolever/parameterized/pull/74#issuecomment-613577057",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        class_name_func = lambda cls, idx, input: classname_func(cls, idx, input_dicts)
+
+    def decorator(base_class):
+        test_class_module = sys.modules[base_class.__module__].__dict__
+        for idx, input_dict in enumerate(input_dicts):
+            test_class_dict = dict(base_class.__dict__)
+            test_class_dict.update(input_dict)
+
+            name = class_name_func(base_class, idx, input_dict)
+
+            test_class_module[name] = type(name, (base_class, ), test_class_dict)
+
+        # We need to leave the base class in place (see issue #73), but if we
+        # leave the test_ methods in place, the test runner will try to pick
+        # them up and run them... which doesn't make sense, since no parameters
+        # will have been applied.
+        # Address this by iterating over the base class and removing all test
+        # methods.
+        for method_name in list(base_class.__dict__):
+            if method_name.startswith("test"):
+                delattr(base_class, method_name)
+        return base_class
+
+    return decorator
+
+
+def get_class_name_suffix(params_dict):
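+    """Return the suffix for a generated class name: the "name" parameter if
+    present, otherwise the first string value in ``params_dict`` (or "")."""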
+    if "name" in params_dict:
+        return parameterized.to_safe_name(params_dict["name"])
+
+    params_vals = (
+        params_dict.values() if PY3 else
+        (v for (_, v) in sorted(params_dict.items()))
+    )
+    return parameterized.to_safe_name(next((
+        v for v in params_vals
+        if isinstance(v, string_types)
+    ), ""))
+
+
+def default_class_name_func(cls, num, params_dict):
+    suffix = get_class_name_suffix(params_dict)
+    return "%s_%s%s" %(
+        cls.__name__,
+        num,
+        suffix and "_" + suffix,
+    )
diff --git a/venv/lib/python3.10/site-packages/parameterized/test.py b/venv/lib/python3.10/site-packages/parameterized/test.py
new file mode 100644
index 0000000..f98d865
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/parameterized/test.py
@@ -0,0 +1,554 @@
+# coding=utf-8
+
+import inspect
+import mock
+from unittest import TestCase
+from nose.tools import assert_equal, assert_raises
+
+from .parameterized import (
+    PY3, PY2, parameterized, param, parameterized_argument_value_pairs,
+    short_repr, detect_runner, parameterized_class, SkipTest,
+)
+
+def assert_contains(haystack, needle):
+    if needle not in haystack:
+        raise AssertionError("%r not in %r" %(needle, haystack))
+
+runner = detect_runner()
+UNITTEST = runner.startswith("unittest")
+NOSE2 = (runner == "nose2")
+PYTEST = (runner == "pytest")
+
+SKIP_FLAGS = {
+    "generator": UNITTEST,
+    "standalone": UNITTEST,
+    # nose2 doesn't run tests on old-style classes under Py2, so don't expect
+    # these tests to run under nose2.
+    "py2nose2": (PY2 and NOSE2),
+    "pytest": PYTEST,
+}
+
+missing_tests = set()
+
+def expect(skip, tests=None):
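+    """Register test names that are expected to run; each test removes its
+    own name from ``missing_tests`` when it executes, so leftover names
+    indicate tests that never ran. Names guarded by a skip flag that is set
+    for the current runner are not registered."""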
+    if tests is None:
+        tests = skip
+        skip = None
+    if any(SKIP_FLAGS.get(f) for f in (skip or "").split()):
+        return
+    missing_tests.update(tests)
+
+test_params = [
+    (42, ),
+    "foo0",
+    param("foo1"),
+    param("foo2", bar=42),
+]
+
+expect("standalone", [
+    "test_naked_function('foo0', bar=None)",
+    "test_naked_function('foo1', bar=None)",
+    "test_naked_function('foo2', bar=42)",
+    "test_naked_function(42, bar=None)",
+])
+
+@parameterized(test_params)
+def test_naked_function(foo, bar=None):
+    missing_tests.remove("test_naked_function(%r, bar=%r)" %(foo, bar))
+
+
+class TestParameterized(object):
+    expect("generator", [
+        "test_instance_method('foo0', bar=None)",
+        "test_instance_method('foo1', bar=None)",
+        "test_instance_method('foo2', bar=42)",
+        "test_instance_method(42, bar=None)",
+    ])
+
+    @parameterized(test_params)
+    def test_instance_method(self, foo, bar=None):
+        missing_tests.remove("test_instance_method(%r, bar=%r)" %(foo, bar))
+
+
+if not PYTEST:
+    # py.test doesn't use xunit-style setup/teardown, so these tests don't apply
+    class TestSetupTeardown(object):
+        expect("generator", [
+            "test_setup(setup 1)",
+            "teardown_called(teardown 1)",
+            "test_setup(setup 2)",
+            "teardown_called(teardown 2)",
+        ])
+
+        stack = ["setup 1", "teardown 1", "setup 2", "teardown 2"]
+        actual_order = "error: setup not called"
+
+        def setUp(self):
+            self.actual_order = self.stack.pop(0)
+
+        def tearDown(self):
+            missing_tests.remove("teardown_called(%s)" %(self.stack.pop(0), ))
+
+        @parameterized([(1, ), (2, )])
+        def test_setup(self, count, *a):
+            assert_equal(self.actual_order, "setup %s" %(count, ))
+            missing_tests.remove("test_setup(%s)" %(self.actual_order, ))
+
+
+def custom_naming_func(custom_tag):
+    def custom_naming_func(testcase_func, param_num, param):
+        return testcase_func.__name__ + ('_%s_name_' % custom_tag) + str(param.args[0])
+
+    return custom_naming_func
+
+
+@mock.patch("os.getpid")
+class TestParameterizedExpandWithMockPatchForClass(TestCase):
+    expect([
+        "test_one_function_patch_decorator('foo1', 'umask', 'getpid')",
+        "test_one_function_patch_decorator('foo0', 'umask', 'getpid')",
+        "test_one_function_patch_decorator(42, 'umask', 'getpid')",
+    ])
+
+    @parameterized.expand([(42, ), "foo0", param("foo1")])
+    @mock.patch("os.umask")
+    def test_one_function_patch_decorator(self, foo, mock_umask, mock_getpid):
+        missing_tests.remove("test_one_function_patch_decorator(%r, %r, %r)" %
+                             (foo, mock_umask._mock_name,
+                              mock_getpid._mock_name))
+
+    expect([
+        "test_multiple_function_patch_decorator"
+        "(42, 51, 'umask', 'fdopen', 'getpid')",
+        "test_multiple_function_patch_decorator"
+        "('foo0', 'bar0', 'umask', 'fdopen', 'getpid')",
+        "test_multiple_function_patch_decorator"
+        "('foo1', 'bar1', 'umask', 'fdopen', 'getpid')",
+    ])
+
+    @parameterized.expand([(42, 51), ("foo0", "bar0"), param("foo1", "bar1")])
+    @mock.patch("os.fdopen")
+    @mock.patch("os.umask")
+    def test_multiple_function_patch_decorator(self, foo, bar, mock_umask,
+                                               mock_fdopen, mock_getpid):
+        missing_tests.remove("test_multiple_function_patch_decorator"
+                             "(%r, %r, %r, %r, %r)" %
+                             (foo, bar, mock_umask._mock_name,
+                              mock_fdopen._mock_name, mock_getpid._mock_name))
+
+
+@mock.patch("os.getpid")
+class TestParameterizedExpandWithNoExpand(object):
+    expect("generator", [
+        "test_patch_class_no_expand(42, 51, 'umask', 'getpid')",
+    ])
+
+    @parameterized([(42, 51)])
+    @mock.patch("os.umask")
+    def test_patch_class_no_expand(self, foo, bar, mock_umask, mock_getpid):
+        missing_tests.remove("test_patch_class_no_expand"
+                             "(%r, %r, %r, %r)" %
+                             (foo, bar, mock_umask._mock_name,
+                              mock_getpid._mock_name))
+
+
+class TestParameterizedExpandWithNoMockPatchForClass(TestCase):
+    expect([
+        "test_one_function_patch_decorator('foo1', 'umask')",
+        "test_one_function_patch_decorator('foo0', 'umask')",
+        "test_one_function_patch_decorator(42, 'umask')",
+    ])
+
+    @parameterized.expand([(42, ), "foo0", param("foo1")])
+    @mock.patch("os.umask")
+    def test_one_function_patch_decorator(self, foo, mock_umask):
+        missing_tests.remove("test_one_function_patch_decorator(%r, %r)" %
+                             (foo, mock_umask._mock_name))
+
+    expect([
+        "test_multiple_function_patch_decorator(42, 51, 'umask', 'fdopen')",
+        "test_multiple_function_patch_decorator('foo0', 'bar0', 'umask', 'fdopen')",
+        "test_multiple_function_patch_decorator('foo1', 'bar1', 'umask', 'fdopen')",
+    ])
+
+    @parameterized.expand([(42, 51), ("foo0", "bar0"), param("foo1", "bar1")])
+    @mock.patch("os.fdopen")
+    @mock.patch("os.umask")
+    def test_multiple_function_patch_decorator(self, foo, bar, mock_umask,
+                                               mock_fdopen):
+        missing_tests.remove("test_multiple_function_patch_decorator"
+                             "(%r, %r, %r, %r)" %
+                             (foo, bar, mock_umask._mock_name,
+                              mock_fdopen._mock_name))
+
+
+class TestParameterizedExpandWithNoMockPatchForClassNoExpand(object):
+    expect("generator", [
+        "test_patch_no_expand(42, 51, 'umask')",
+    ])
+
+    @parameterized([(42, 51)])
+    @mock.patch("os.umask")
+    def test_patch_no_expand(self, foo, bar, mock_umask):
+        missing_tests.remove("test_patch_no_expand(%r, %r, %r)" %
+                             (foo, bar, mock_umask._mock_name))
+
+
+expect("standalone", [
+    "test_mock_patch_standalone_function(42, 'umask')",
+])
+
+@parameterized([(42, )])
+@mock.patch("os.umask")
+def test_mock_patch_standalone_function(foo, mock_umask):
+    missing_tests.remove(
+        "test_mock_patch_standalone_function(%r, %r)" %(
+            foo, mock_umask._mock_name
+        )
+    )
+
+
+class TestParameterizedOnTestCase(TestCase):
+    expect([
+        "test_on_TestCase('foo0', bar=None)",
+        "test_on_TestCase('foo1', bar=None)",
+        "test_on_TestCase('foo2', bar=42)",
+        "test_on_TestCase(42, bar=None)",
+    ])
+
+    @parameterized.expand(test_params)
+    def test_on_TestCase(self, foo, bar=None):
+        missing_tests.remove("test_on_TestCase(%r, bar=%r)" %(foo, bar))
+
+    expect([
+        "test_on_TestCase2_custom_name_42(42, bar=None)",
+        "test_on_TestCase2_custom_name_foo0('foo0', bar=None)",
+        "test_on_TestCase2_custom_name_foo1('foo1', bar=None)",
+        "test_on_TestCase2_custom_name_foo2('foo2', bar=42)",
+    ])
+
+    @parameterized.expand(test_params,
+                          name_func=custom_naming_func("custom"))
+    def test_on_TestCase2(self, foo, bar=None):
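+        # Peek one frame up at the caller's locals to recover the name this
+        # generated method is running under (under nose the caller's local 'a'
+        # holds the TestCase instance), then verify name_func customized it.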
+        stack = inspect.stack()
+        frame = stack[1]
+        frame_locals = frame[0].f_locals
+        nose_test_method_name = frame_locals['a'][0]._testMethodName
+        expected_name = "test_on_TestCase2_custom_name_" + str(foo)
+        assert_equal(nose_test_method_name, expected_name,
+                     "Test Method name '%s' did not get customized to expected: '%s'" %
+                     (nose_test_method_name, expected_name))
+        missing_tests.remove("%s(%r, bar=%r)" %(expected_name, foo, bar))
+
+
+class TestParameterizedExpandDocstring(TestCase):
+    def _assert_docstring(self, expected_docstring, rstrip=False):
+        """ Checks the current test method's docstring. Must be called directly
+            from the test method. """
+        stack = inspect.stack()
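+        # Frame 3 is expected to be the runner that invoked the test method:
+        # 0 = this helper, 1 = the test method, 2 = the parameterized wrapper,
+        # 3 = the frame whose locals hold the test callable (the local's name
+        # varies across Python versions; see below).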
+        f_locals = stack[3][0].f_locals
+        test_method = (
+            f_locals.get("testMethod") or # Py27
+            f_locals.get("function") or # Py33
+            f_locals.get("method") or # Py38
+            f_locals.get("testfunction") or # Py382
+            None
+        )
+        if test_method is None:
+            raise AssertionError("uh oh, unittest changed a local variable name")
+        actual_docstring = test_method.__doc__
+        if rstrip:
+            actual_docstring = actual_docstring.rstrip()
+        assert_equal(actual_docstring, expected_docstring)
+
+    @parameterized.expand([param("foo")],
+                          doc_func=lambda f, n, p: "stuff")
+    def test_custom_doc_func(self, foo, bar=None):
+        """Documentation"""
+        self._assert_docstring("stuff")
+
+    @parameterized.expand([param("foo")])
+    def test_single_line_docstring(self, foo):
+        """Documentation."""
+        self._assert_docstring("Documentation [with foo=%r]." %(foo, ))
+
+    @parameterized.expand([param("foo")])
+    def test_empty_docstring(self, foo):
+        ""
+        self._assert_docstring("[with foo=%r]" %(foo, ))
+
+    @parameterized.expand([param("foo")])
+    def test_multiline_documentation(self, foo):
+        """Documentation.
+
+        More"""
+        self._assert_docstring(
+            "Documentation [with foo=%r].\n\n"
+            "        More" %(foo, )
+        )
+
+    @parameterized.expand([param("foo")])
+    def test_unicode_docstring(self, foo):
+        u"""Döcumentation."""
+        self._assert_docstring(u"Döcumentation [with foo=%r]." %(foo, ))
+
+    @parameterized.expand([param("foo", )])
+    def test_default_values_get_correct_value(self, foo, bar=12):
+        """Documentation"""
+        self._assert_docstring("Documentation [with foo=%r, bar=%r]" %(foo, bar))
+
+    @parameterized.expand([param("foo", )])
+    def test_with_leading_newline(self, foo, bar=12):
+        """
+        Documentation
+        """
+        self._assert_docstring("Documentation [with foo=%r, bar=%r]" %(foo, bar), rstrip=True)
+
+
+def test_warns_when_using_parameterized_with_TestCase():
+    try:
+        class TestTestCaseWarnsOnBadUseOfParameterized(TestCase):
+            @parameterized([(42, )])
+            def test_in_subclass_of_TestCase(self, foo):
+                pass
+    except Exception as e:
+        assert_contains(str(e), "parameterized.expand")
+    else:
+        raise AssertionError("Expected exception not raised")
+
+def test_helpful_error_on_invalid_parameters():
+    try:
+        parameterized([1432141234243])(lambda: None)
+    except Exception as e:
+        assert_contains(str(e), "Parameters must be tuples")
+    else:
+        raise AssertionError("Expected exception not raised")
+
+
+def test_helpful_error_on_empty_iterable_input():
+    try:
+        parameterized([])(lambda: None)
+    except ValueError as e:
+        assert_contains(str(e), "iterable is empty")
+    else:
+        raise AssertionError("Expected exception not raised")
+
+def test_skip_test_on_empty_iterable():
+    func = parameterized([], skip_on_empty=True)(lambda: None)
+    assert_raises(SkipTest, func)
+
+
+def test_helpful_error_on_empty_iterable_input_expand():
+    try:
+        class ExpectErrorOnEmptyInput(TestCase):
+            @parameterized.expand([])
+            def test_expect_error(self):
+                pass
+    except ValueError as e:
+        assert_contains(str(e), "iterable is empty")
+    else:
+        raise AssertionError("Expected exception not raised")
+
+
+expect("stadalone generator", [
+    "test_wrapped_iterable_input('foo')",
+])
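+# parameterized also accepts a zero-argument callable, which is invoked
+# lazily to produce the parameter iterable.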
+@parameterized(lambda: iter(["foo"]))
+def test_wrapped_iterable_input(foo):
+    missing_tests.remove("test_wrapped_iterable_input(%r)" %(foo, ))
+
+def test_helpful_error_on_non_iterable_input():
+    try:
+        parameterized(lambda: 42)(lambda: None)
+    except Exception as e:
+        assert_contains(str(e), "is not iterable")
+    else:
+        raise AssertionError("Expected exception not raised")
+
+
+def tearDownModule():
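+    # Runs once after the whole module: anything still in missing_tests is an
+    # expected generated test that never executed.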
+    missing = sorted(list(missing_tests))
+    assert_equal(missing, [])
+
+def test_old_style_classes():
+    if PY3:
+        raise SkipTest("Py3 doesn't have old-style classes")
+    class OldStyleClass:
+        @parameterized(["foo"])
+        def parameterized_method(self, param):
+            pass
+    try:
+        list(OldStyleClass().parameterized_method())
+    except TypeError as e:
+        assert_contains(str(e), "new-style")
+        assert_contains(str(e), "parameterized.expand")
+        assert_contains(str(e), "OldStyleClass")
+    else:
+        raise AssertionError("expected TypeError not raised by old-style class")
+
+
+class TestOldStyleClass:
+    expect("py2nose2 generator", [
+        "test_on_old_style_class('foo')",
+        "test_on_old_style_class('bar')",
+    ])
+
+    @parameterized.expand(["foo", "bar"])
+    def test_on_old_style_class(self, param):
+        missing_tests.remove("test_on_old_style_class(%r)" %(param, ))
+
+
+@parameterized([
+    ("", param(), []),
+    ("*a, **kw", param(), []),
+    ("*a, **kw", param(1, foo=42), [("*a", (1, )), ("**kw", {"foo": 42})]),
+    ("foo", param(1), [("foo", 1)]),
+    ("foo, *a", param(1), [("foo", 1)]),
+    ("foo, *a", param(1, 9), [("foo", 1), ("*a", (9, ))]),
+    ("foo, *a, **kw", param(1, bar=9), [("foo", 1), ("**kw", {"bar": 9})]),
+    ("x=9", param(), [("x", 9)]),
+    ("x=9", param(1), [("x", 1)]),
+    ("x, y=9, *a, **kw", param(1), [("x", 1), ("y", 9)]),
+    ("x, y=9, *a, **kw", param(1, 2), [("x", 1), ("y", 2)]),
+    ("x, y=9, *a, **kw", param(1, 2, 3), [("x", 1), ("y", 2), ("*a", (3, ))]),
+    ("x, y=9, *a, **kw", param(1, y=2), [("x", 1), ("y", 2)]),
+    ("x, y=9, *a, **kw", param(1, z=2), [("x", 1), ("y", 9), ("**kw", {"z": 2})]),
+    ("x, y=9, *a, **kw", param(1, 2, 3, z=3), [("x", 1), ("y", 2), ("*a", (3, )), ("**kw", {"z": 3})]),
+])
+def test_parameterized_argument_value_pairs(func_params, p, expected):
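+    # Build a throwaway function with exactly the signature under test, then
+    # check that parameterized_argument_value_pairs pairs each formal
+    # parameter (including the *a and **kw catch-alls) with the value it
+    # would receive from the param.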
+    helper = eval("lambda %s: None" %(func_params, ))
+    actual = parameterized_argument_value_pairs(helper, p)
+    assert_equal(actual, expected)
+
+
+@parameterized([
+    ("abcd", "'abcd'"),
+    ("123456789", "'12...89'"),
+    (123456789, "123...789"),
+    (123456789, "12...89", 4),
+])
+def test_short_repr(input, expected, n=6):
+    assert_equal(short_repr(input, n=n), expected)
+
+@parameterized([
+    ("foo", ),
+])
+def test_with_docstring(input):
+    """ Docstring! """
+    pass
+
+
+cases_over_10 = [(i, i+1) for i in range(11)]
+
+@parameterized(cases_over_10)
+def test_cases_over_10(input, expected):
+    assert_equal(input, expected-1)
+
+
+@parameterized_class(("a", "b", "c"), [
+    ("foo", 1, 2),
+    (0, 1, 2),
+])
+class TestParameterizedClass(TestCase):
+    expect([
+        "TestParameterizedClass_0_foo:test_method_a('foo', 1, 2)",
+        "TestParameterizedClass_0_foo:test_method_b('foo', 1, 2)",
+        "TestParameterizedClass_0_foo:testCamelCaseMethodC('foo', 1, 2)",
+        "TestParameterizedClass_1:test_method_a(0, 1, 2)",
+        "TestParameterizedClass_1:test_method_b(0, 1, 2)",
+        "TestParameterizedClass_1:testCamelCaseMethodC(0, 1, 2)",
+    ])
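+    # As the expected names above show, generated class names carry the
+    # parameter index plus, when the first attribute value is a string, a
+    # sanitized copy of it ("_0_foo"); a non-string leading value keeps the
+    # bare index ("_1").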
+
+    def _assertions(self, test_name):
+        assert hasattr(self, "a")
+        assert_equal(self.b + self.c, 3)
+        missing_tests.remove("%s:%s(%r, %r, %r)" %(
+            self.__class__.__name__,
+            test_name,
+            self.a,
+            self.b,
+            self.c,
+        ))
+
+    def test_method_a(self):
+        self._assertions("test_method_a")
+
+    def test_method_b(self):
+        self._assertions("test_method_b")
+
+    def testCamelCaseMethodC(self):
+        self._assertions("testCamelCaseMethodC")
+
+
+@parameterized_class(("a", ), [
+    (1, ),
+    (2, ),
+], class_name_func=lambda cls, idx, attrs: "%s_custom_func_%s" %(cls.__name__, attrs["a"]))
+class TestNamedParameterizedClass(TestCase):
+    expect([
+        "TestNamedParameterizedClass_custom_func_1:test_method(1)",
+        "TestNamedParameterizedClass_custom_func_2:test_method(2)",
+    ])
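+    # class_name_func receives the original class, the parameter index, and
+    # the attribute dict for the case, and returns the generated class name,
+    # as the lambda above demonstrates.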
+
+    def test_method(self):
+        missing_tests.remove("%s:test_method(%r)" %(
+            self.__class__.__name__,
+            self.a,
+        ))
+
+
+@parameterized_class([
+    {"foo": 42},
+    {"bar": "some stuff"},
+    {"bar": "other stuff", "name": "some name", "foo": 12},
+])
+class TestParameterizedClassDict(TestCase):
+    expect([
+        "TestParameterizedClassDict_0:setUp(42, 'empty')",
+        "TestParameterizedClassDict_0:test_method(42, 'empty')",
+        "TestParameterizedClassDict_0:tearDown(42, 'empty')",
+        "TestParameterizedClassDict_1_some_stuff:setUp(0, 'some stuff')",
+        "TestParameterizedClassDict_1_some_stuff:test_method(0, 'some stuff')",
+        "TestParameterizedClassDict_1_some_stuff:tearDown(0, 'some stuff')",
+        "TestParameterizedClassDict_2_some_name:setUp(12, 'other stuff')",
+        "TestParameterizedClassDict_2_some_name:test_method(12, 'other stuff')",
+        "TestParameterizedClassDict_2_some_name:tearDown(12, 'other stuff')",
+    ])
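+    # In the dict form every key becomes a class attribute; keys omitted from
+    # a case fall back to the class defaults below, and a "name" entry
+    # additionally replaces the auto-generated suffix in the class name.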
+
+    foo = 0
+    bar = 'empty'
+
+    def setUp(self):
+        # Ensure that super() works (issue #73)
+        super(TestParameterizedClassDict, self).setUp()
+        missing_tests.remove("%s:setUp(%r, %r)" %(
+            self.__class__.__name__,
+            self.foo,
+            self.bar,
+        ))
+
+    def tearDown(self):
+        # Ensure that super() works (issue #73)
+        super(TestParameterizedClassDict, self).tearDown()
+        missing_tests.remove("%s:tearDown(%r, %r)" %(
+            self.__class__.__name__,
+            self.foo,
+            self.bar,
+        ))
+
+    def test_method(self):
+        missing_tests.remove("%s:test_method(%r, %r)" %(
+            self.__class__.__name__,
+            self.foo,
+            self.bar,
+        ))
+
+
+class TestUnicodeDocstring(object):
+    @parameterized.expand([
+        'value1',
+        'vålüé¡'
+    ])
+    def test_with_docstring(self, param):
+        """ Это док-стринг, содержащий не-ascii символы """
+        pass
diff --git a/venv/lib/python3.10/site-packages/pip-22.3.1.dist-info/METADATA b/venv/lib/python3.10/site-packages/pip-22.3.1.dist-info/METADATA
deleted file mode 100644
index e935e1a..0000000
--- a/venv/lib/python3.10/site-packages/pip-22.3.1.dist-info/METADATA
+++ /dev/null
@@ -1,88 +0,0 @@
-Metadata-Version: 2.1
-Name: pip
-Version: 22.3.1
-Summary: The PyPA recommended tool for installing Python packages.
-Home-page: https://pip.pypa.io/
-Author: The pip developers
-Author-email: distutils-sig@python.org
-License: MIT
-Project-URL: Documentation, https://pip.pypa.io
-Project-URL: Source, https://github.com/pypa/pip
-Project-URL: Changelog, https://pip.pypa.io/en/stable/news/
-Classifier: Development Status :: 5 - Production/Stable
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: MIT License
-Classifier: Topic :: Software Development :: Build Tools
-Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3 :: Only
-Classifier: Programming Language :: Python :: 3.7
-Classifier: Programming Language :: Python :: 3.8
-Classifier: Programming Language :: Python :: 3.9
-Classifier: Programming Language :: Python :: 3.10
-Classifier: Programming Language :: Python :: 3.11
-Classifier: Programming Language :: Python :: Implementation :: CPython
-Classifier: Programming Language :: Python :: Implementation :: PyPy
-Requires-Python: >=3.7
-License-File: LICENSE.txt
-
-pip - The Python Package Installer
-==================================
-
-.. image:: https://img.shields.io/pypi/v/pip.svg
-   :target: https://pypi.org/project/pip/
-
-.. image:: https://readthedocs.org/projects/pip/badge/?version=latest
-   :target: https://pip.pypa.io/en/latest
-
-pip is the `package installer`_ for Python. You can use pip to install packages from the `Python Package Index`_ and other indexes.
-
-Please take a look at our documentation for how to install and use pip:
-
-* `Installation`_
-* `Usage`_
-
-We release updates regularly, with a new version every 3 months. Find more details in our documentation:
-
-* `Release notes`_
-* `Release process`_
-
-In pip 20.3, we've `made a big improvement to the heart of pip`_; `learn more`_. We want your input, so `sign up for our user experience research studies`_ to help us do it right.
-
-**Note**: pip 21.0, in January 2021, removed Python 2 support, per pip's `Python 2 support policy`_. Please migrate to Python 3.
-
-If you find bugs, need help, or want to talk to the developers, please use our mailing lists or chat rooms:
-
-* `Issue tracking`_
-* `Discourse channel`_
-* `User IRC`_
-
-If you want to get involved head over to GitHub to get the source code, look at our development documentation and feel free to jump on the developer mailing lists and chat rooms:
-
-* `GitHub page`_
-* `Development documentation`_
-* `Development IRC`_
-
-Code of Conduct
----------------
-
-Everyone interacting in the pip project's codebases, issue trackers, chat
-rooms, and mailing lists is expected to follow the `PSF Code of Conduct`_.
-
-.. _package installer: https://packaging.python.org/guides/tool-recommendations/
-.. _Python Package Index: https://pypi.org
-.. _Installation: https://pip.pypa.io/en/stable/installation/
-.. _Usage: https://pip.pypa.io/en/stable/
-.. _Release notes: https://pip.pypa.io/en/stable/news.html
-.. _Release process: https://pip.pypa.io/en/latest/development/release-process/
-.. _GitHub page: https://github.com/pypa/pip
-.. _Development documentation: https://pip.pypa.io/en/latest/development
-.. _made a big improvement to the heart of pip: https://pyfound.blogspot.com/2020/11/pip-20-3-new-resolver.html
-.. _learn more: https://pip.pypa.io/en/latest/user_guide/#changes-to-the-pip-dependency-resolver-in-20-3-2020
-.. _sign up for our user experience research studies: https://pyfound.blogspot.com/2020/03/new-pip-resolver-to-roll-out-this-year.html
-.. _Python 2 support policy: https://pip.pypa.io/en/latest/development/release-process/#python-2-support
-.. _Issue tracking: https://github.com/pypa/pip/issues
-.. _Discourse channel: https://discuss.python.org/c/packaging
-.. _User IRC: https://kiwiirc.com/nextclient/#ircs://irc.libera.chat:+6697/pypa
-.. _Development IRC: https://kiwiirc.com/nextclient/#ircs://irc.libera.chat:+6697/pypa-dev
-.. _PSF Code of Conduct: https://github.com/pypa/.github/blob/main/CODE_OF_CONDUCT.md
diff --git a/venv/lib/python3.10/site-packages/pip-22.3.1.dist-info/RECORD b/venv/lib/python3.10/site-packages/pip-22.3.1.dist-info/RECORD
deleted file mode 100644
index 13ffde3..0000000
--- a/venv/lib/python3.10/site-packages/pip-22.3.1.dist-info/RECORD
+++ /dev/null
@@ -1,1051 +0,0 @@
-pip/__init__.py,sha256=Z2hXGRMvmdhpmmqr0OW1fA2Jje8tnmU0uzibRoUF-w8,357
-pip/__main__.py,sha256=mXwWDftNLMKfwVqKFWGE_uuBZvGSIiUELhLkeysIuZc,1198
-pip/__pip-runner__.py,sha256=EnrfKmKMzWAdqg_JicLCOP9Y95Ux7zHh4ObvqLtQcjo,1444
-pip/py.typed,sha256=EBVvvPRTn_eIpz5e5QztSCdrMX7Qwd7VP93RSoIlZ2I,286
-pip/_internal/__init__.py,sha256=nnFCuxrPMgALrIDxSoy-H6Zj4W4UY60D-uL1aJyq0pc,573
-pip/_internal/build_env.py,sha256=gEAT8R6SuWbg2mcrsmOTKWMw_x5pedMzvSTxQS57JZs,10234
-pip/_internal/cache.py,sha256=C3n78VnBga9rjPXZqht_4A4d-T25poC7K0qBM7FHDhU,10734
-pip/_internal/configuration.py,sha256=uBKTus43pDIO6IzT2mLWQeROmHhtnoabhniKNjPYvD0,13529
-pip/_internal/exceptions.py,sha256=BfvcyN2iEv3Sf00SVmSk59lEeZEBHELqkuoN2KeIWKc,20942
-pip/_internal/main.py,sha256=r-UnUe8HLo5XFJz8inTcOOTiu_sxNhgHb6VwlGUllOI,340
-pip/_internal/pyproject.py,sha256=ob0Gb0l12YLZNxjdpZGRfWHgjqhZTnSVv96RuJyNOfs,7074
-pip/_internal/self_outdated_check.py,sha256=R3MmjCyUt_lkUNMc6p3xVSx7vX28XiDh3VDs5OrYn6Q,8020
-pip/_internal/wheel_builder.py,sha256=8cObBCu4mIsMJqZM7xXI9DO3vldiAnRNa1Gt6izPPTs,13079
-pip/_internal/cli/__init__.py,sha256=FkHBgpxxb-_gd6r1FjnNhfMOzAUYyXoXKJ6abijfcFU,132
-pip/_internal/cli/autocompletion.py,sha256=wY2JPZY2Eji1vhR7bVo-yCBPJ9LCy6P80iOAhZD1Vi8,6676
-pip/_internal/cli/base_command.py,sha256=t1D5x40Hfn9HnPnMt-iSxvqL14nht2olBCacW74pc-k,7842
-pip/_internal/cli/cmdoptions.py,sha256=Jlarlzz9qv9tC_tCaEbcc_jVvrPreFLBBUnDgoyWflw,29381
-pip/_internal/cli/command_context.py,sha256=RHgIPwtObh5KhMrd3YZTkl8zbVG-6Okml7YbFX4Ehg0,774
-pip/_internal/cli/main.py,sha256=ioJ8IVlb2K1qLOxR-tXkee9lURhYV89CDM71MKag7YY,2472
-pip/_internal/cli/main_parser.py,sha256=laDpsuBDl6kyfywp9eMMA9s84jfH2TJJn-vmL0GG90w,4338
-pip/_internal/cli/parser.py,sha256=tWP-K1uSxnJyXu3WE0kkH3niAYRBeuUaxeydhzOdhL4,10817
-pip/_internal/cli/progress_bars.py,sha256=So4mPoSjXkXiSHiTzzquH3VVyVD_njXlHJSExYPXAow,1968
-pip/_internal/cli/req_command.py,sha256=ypTutLv4j_efxC2f6C6aCQufxre-zaJdi5m_tWlLeBk,18172
-pip/_internal/cli/spinners.py,sha256=hIJ83GerdFgFCdobIA23Jggetegl_uC4Sp586nzFbPE,5118
-pip/_internal/cli/status_codes.py,sha256=sEFHUaUJbqv8iArL3HAtcztWZmGOFX01hTesSytDEh0,116
-pip/_internal/commands/__init__.py,sha256=5oRO9O3dM2vGuh0bFw4HOVletryrz5HHMmmPWwJrH9U,3882
-pip/_internal/commands/cache.py,sha256=muaT0mbL-ZUpn6AaushVAipzTiMwE4nV2BLbJBwt_KQ,7582
-pip/_internal/commands/check.py,sha256=0gjXR7j36xJT5cs2heYU_dfOfpnFfzX8OoPNNoKhqdM,1685
-pip/_internal/commands/completion.py,sha256=H0TJvGrdsoleuIyQKzJbicLFppYx2OZA0BLNpQDeFjI,4129
-pip/_internal/commands/configuration.py,sha256=NB5uf8HIX8-li95YLoZO09nALIWlLCHDF5aifSKcBn8,9815
-pip/_internal/commands/debug.py,sha256=kVjn-O1ixLk0webD0w9vfFFq_GCTUTd2hmLOnYtDCig,6573
-pip/_internal/commands/download.py,sha256=LwKEyYMG2L67nQRyGo8hQdNEeMU2bmGWqJfcB8JDXas,5289
-pip/_internal/commands/freeze.py,sha256=gCjoD6foBZPBAAYx5t8zZLkJhsF_ZRtnb3dPuD7beO8,2951
-pip/_internal/commands/hash.py,sha256=EVVOuvGtoPEdFi8SNnmdqlCQrhCxV-kJsdwtdcCnXGQ,1703
-pip/_internal/commands/help.py,sha256=gcc6QDkcgHMOuAn5UxaZwAStsRBrnGSn_yxjS57JIoM,1132
-pip/_internal/commands/index.py,sha256=1VVXXj5MsI2qH-N7uniQQyVkg-KCn_RdjiyiUmkUS5U,4762
-pip/_internal/commands/inspect.py,sha256=mRJ9aIkBQN0IJ7Um8pzaxAzVPIgL8KfWHx1fWKJgUAQ,3374
-pip/_internal/commands/install.py,sha256=_XbW0PyxtZCMMNqo8mDaOq3TBRiJNFM-94CR27mburc,31726
-pip/_internal/commands/list.py,sha256=Fk1TSxB33NlRS4qlLQ0xwnytnF9-zkQJbKQYv2xc4Q4,12343
-pip/_internal/commands/search.py,sha256=sbBZiARRc050QquOKcCvOr2K3XLsoYebLKZGRi__iUI,5697
-pip/_internal/commands/show.py,sha256=CJI8q4SSY0X346K1hi4Th8Nbyhl4nxPTBJUuzOlTaYE,6129
-pip/_internal/commands/uninstall.py,sha256=0JQhifYxecNrJAwoILFwjm9V1V3liXzNT-y4bgRXXPw,3680
-pip/_internal/commands/wheel.py,sha256=mbFJd4dmUfrVFJkQbK8n2zHyRcD3AI91f7EUo9l3KYg,7396
-pip/_internal/distributions/__init__.py,sha256=Hq6kt6gXBgjNit5hTTWLAzeCNOKoB-N0pGYSqehrli8,858
-pip/_internal/distributions/base.py,sha256=jrF1Vi7eGyqFqMHrieh1PIOrGU7KeCxhYPZnbvtmvGY,1221
-pip/_internal/distributions/installed.py,sha256=NI2OgsgH9iBq9l5vB-56vOg5YsybOy-AU4VE5CSCO2I,729
-pip/_internal/distributions/sdist.py,sha256=SQBdkatXSigKGG_SaD0U0p1Jwdfrg26UCNcHgkXZfdA,6494
-pip/_internal/distributions/wheel.py,sha256=m-J4XO-gvFerlYsFzzSXYDvrx8tLZlJFTCgDxctn8ig,1164
-pip/_internal/index/__init__.py,sha256=vpt-JeTZefh8a-FC22ZeBSXFVbuBcXSGiILhQZJaNpQ,30
-pip/_internal/index/collector.py,sha256=Pb9FW9STH2lwaApCIdMCivsbPP5pSYQp5bh3nLQBkDU,16503
-pip/_internal/index/package_finder.py,sha256=kmcMu5_i-BP6v3NQGY0_am1ezxM2Gk4t00arZMmm4sc,37596
-pip/_internal/index/sources.py,sha256=SVyPitv08-Qalh2_Bk5diAJ9GAA_d-a93koouQodAG0,6557
-pip/_internal/locations/__init__.py,sha256=QhB-Y6TNyaU010cimm2T4wM5loe8oRdjLwJ6xmsGc-k,17552
-pip/_internal/locations/_distutils.py,sha256=wgHDvHGNZHtlcHkQjYovHzkEUBzisR0iOh7OqCIkB5g,6302
-pip/_internal/locations/_sysconfig.py,sha256=nM-DiVHXWTxippdmN0MGVl5r7OIfIMy3vgDMlo8c_oo,7867
-pip/_internal/locations/base.py,sha256=ufyDqPwZ4jLbScD44u8AwTVI-3ft8O78UGrroQI5f68,2573
-pip/_internal/metadata/__init__.py,sha256=84j1dPJaIoz5Q2ZTPi0uB1iaDAHiUNfKtYSGQCfFKpo,4280
-pip/_internal/metadata/_json.py,sha256=BTkWfFDrWFwuSodImjtbAh8wCL3isecbnjTb5E6UUDI,2595
-pip/_internal/metadata/base.py,sha256=vIwIo1BtoqegehWMAXhNrpLGYBq245rcaCNkBMPnTU8,25277
-pip/_internal/metadata/pkg_resources.py,sha256=WjwiNdRsvxqxL4MA5Tb5a_q3Q3sUhdpbZF8wGLtPMI0,9773
-pip/_internal/metadata/importlib/__init__.py,sha256=9ZVO8BoE7NEZPmoHp5Ap_NJo0HgNIezXXg-TFTtt3Z4,107
-pip/_internal/metadata/importlib/_compat.py,sha256=GAe_prIfCE4iUylrnr_2dJRlkkBVRUbOidEoID7LPoE,1882
-pip/_internal/metadata/importlib/_dists.py,sha256=BUV8y6D0PePZrEN3vfJL-m1FDqZ6YPRgAiBeBinHhNg,8181
-pip/_internal/metadata/importlib/_envs.py,sha256=7BxanCh3T7arusys__O2ZHJdnmDhQXFmfU7x1-jB5xI,7457
-pip/_internal/models/__init__.py,sha256=3DHUd_qxpPozfzouoqa9g9ts1Czr5qaHfFxbnxriepM,63
-pip/_internal/models/candidate.py,sha256=6pcABsaR7CfIHlbJbr2_kMkVJFL_yrYjTx6SVWUnCPQ,990
-pip/_internal/models/direct_url.py,sha256=HLO0sL2aYB6n45bwmd72TDN05sLHJlOQI8M01l2SH3I,5877
-pip/_internal/models/format_control.py,sha256=DJpMYjxeYKKQdwNcML2_F0vtAh-qnKTYe-CpTxQe-4g,2520
-pip/_internal/models/index.py,sha256=tYnL8oxGi4aSNWur0mG8DAP7rC6yuha_MwJO8xw0crI,1030
-pip/_internal/models/installation_report.py,sha256=ad1arqtxrSFBvWnm6mRqmG12HLV3pZZcZcHrlTFIiqU,2617
-pip/_internal/models/link.py,sha256=9HWL14UQTMxRCnY6dmAz09rGElJrMAcHn2OJZCBx0tk,18083
-pip/_internal/models/scheme.py,sha256=3EFQp_ICu_shH1-TBqhl0QAusKCPDFOlgHFeN4XowWs,738
-pip/_internal/models/search_scope.py,sha256=iGPQQ6a4Lau8oGQ_FWj8aRLik8A21o03SMO5KnSt-Cg,4644
-pip/_internal/models/selection_prefs.py,sha256=KZdi66gsR-_RUXUr9uejssk3rmTHrQVJWeNA2sV-VSY,1907
-pip/_internal/models/target_python.py,sha256=qKpZox7J8NAaPmDs5C_aniwfPDxzvpkrCKqfwndG87k,3858
-pip/_internal/models/wheel.py,sha256=YqazoIZyma_Q1ejFa1C7NHKQRRWlvWkdK96VRKmDBeI,3600
-pip/_internal/network/__init__.py,sha256=jf6Tt5nV_7zkARBrKojIXItgejvoegVJVKUbhAa5Ioc,50
-pip/_internal/network/auth.py,sha256=a3C7Xaa8kTJjXkdi_wrUjqaySc8Z9Yz7U6QIbXfzMyc,12190
-pip/_internal/network/cache.py,sha256=hgXftU-eau4MWxHSLquTMzepYq5BPC2zhCkhN3glBy8,2145
-pip/_internal/network/download.py,sha256=HvDDq9bVqaN3jcS3DyVJHP7uTqFzbShdkf7NFSoHfkw,6096
-pip/_internal/network/lazy_wheel.py,sha256=PbPyuleNhtEq6b2S7rufoGXZWMD15FAGL4XeiAQ8FxA,7638
-pip/_internal/network/session.py,sha256=BpDOJ7_Xw5VkgPYWsePzcaqOfcyRZcB2AW7W0HGBST0,18443
-pip/_internal/network/utils.py,sha256=6A5SrUJEEUHxbGtbscwU2NpCyz-3ztiDlGWHpRRhsJ8,4073
-pip/_internal/network/xmlrpc.py,sha256=AzQgG4GgS152_cqmGr_Oz2MIXsCal-xfsis7fA7nmU0,1791
-pip/_internal/operations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-pip/_internal/operations/check.py,sha256=ca4O9CkPt9Em9sLCf3H0iVt1GIcW7M8C0U5XooaBuT4,5109
-pip/_internal/operations/freeze.py,sha256=mwTZ2uML8aQgo3k8MR79a7SZmmmvdAJqdyaknKbavmg,9784
-pip/_internal/operations/prepare.py,sha256=BeYXrLFpRoV5XBnRXQHxRA2plyC36kK9Pms5D9wjCo4,25091
-pip/_internal/operations/build/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-pip/_internal/operations/build/build_tracker.py,sha256=vf81EwomN3xe9G8qRJED0VGqNikmRQRQoobNsxi5Xrs,4133
-pip/_internal/operations/build/metadata.py,sha256=ES_uRmAvhrNm_nDTpZxshBfUsvnXtkj-g_4rZrH9Rww,1404
-pip/_internal/operations/build/metadata_editable.py,sha256=_Rai0VZjxoeJUkjkuICrq45LtjwFoDOveosMYH43rKc,1456
-pip/_internal/operations/build/metadata_legacy.py,sha256=o-eU21As175hDC7dluM1fJJ_FqokTIShyWpjKaIpHZw,2198
-pip/_internal/operations/build/wheel.py,sha256=AO9XnTGhTgHtZmU8Dkbfo1OGr41rBuSDjIgAa4zUKgE,1063
-pip/_internal/operations/build/wheel_editable.py,sha256=TVETY-L_M_dSEKBhTIcQOP75zKVXw8tuq1U354Mm30A,1405
-pip/_internal/operations/build/wheel_legacy.py,sha256=C9j6rukgQI1n_JeQLoZGuDdfUwzCXShyIdPTp6edbMQ,3064
-pip/_internal/operations/install/__init__.py,sha256=mX7hyD2GNBO2mFGokDQ30r_GXv7Y_PLdtxcUv144e-s,51
-pip/_internal/operations/install/editable_legacy.py,sha256=ee4kfJHNuzTdKItbfAsNOSEwq_vD7DRPGkBdK48yBhU,1354
-pip/_internal/operations/install/legacy.py,sha256=cHdcHebyzf8w7OaOLwcsTNSMSSV8WBoAPFLay_9CjE8,4105
-pip/_internal/operations/install/wheel.py,sha256=CxzEg2wTPX4SxNTPIx0ozTqF1X7LhpCyP3iM2FjcKUE,27407
-pip/_internal/req/__init__.py,sha256=rUQ9d_Sh3E5kNYqX9pkN0D06YL-LrtcbJQ-LiIonq08,2807
-pip/_internal/req/constructors.py,sha256=ypjtq1mOQ3d2mFkFPMf_6Mr8SLKeHQk3tUKHA1ddG0U,16611
-pip/_internal/req/req_file.py,sha256=N6lPO3c0to_G73YyGAnk7VUYmed5jV4Qxgmt1xtlXVg,17646
-pip/_internal/req/req_install.py,sha256=4tzyVGPHJ1-GXowm6PBT52BGIlbc4w7fhVqf-55bmRg,35600
-pip/_internal/req/req_set.py,sha256=j3esG0s6SzoVReX9rWn4rpYNtyET_fwxbwJPRimvRxo,2858
-pip/_internal/req/req_uninstall.py,sha256=ZFQfgSNz6H1BMsgl87nQNr2iaQCcbFcmXpW8rKVQcic,24045
-pip/_internal/resolution/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-pip/_internal/resolution/base.py,sha256=qlmh325SBVfvG6Me9gc5Nsh5sdwHBwzHBq6aEXtKsLA,583
-pip/_internal/resolution/legacy/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-pip/_internal/resolution/legacy/resolver.py,sha256=9em8D5TcSsEN4xZM1WreaRShOnyM4LlvhMSHpUPsocE,24129
-pip/_internal/resolution/resolvelib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-pip/_internal/resolution/resolvelib/base.py,sha256=u1O4fkvCO4mhmu5i32xrDv9AX5NgUci_eYVyBDQhTIM,5220
-pip/_internal/resolution/resolvelib/candidates.py,sha256=6kQZeMzwibnL4lO6bW0hUQQjNEvXfADdFphRRkRvOtc,18963
-pip/_internal/resolution/resolvelib/factory.py,sha256=OnjkLIgyk5Tol7uOOqapA1D4qiRHWmPU18DF1yN5N8o,27878
-pip/_internal/resolution/resolvelib/found_candidates.py,sha256=hvL3Hoa9VaYo-qEOZkBi2Iqw251UDxPz-uMHVaWmLpE,5705
-pip/_internal/resolution/resolvelib/provider.py,sha256=Vd4jW_NnyifB-HMkPYtZIO70M3_RM0MbL5YV6XyBM-w,9914
-pip/_internal/resolution/resolvelib/reporter.py,sha256=3ZVVYrs5PqvLFJkGLcuXoMK5mTInFzl31xjUpDBpZZk,2526
-pip/_internal/resolution/resolvelib/requirements.py,sha256=B1ndvKPSuyyyTEXt9sKhbwminViSWnBrJa7qO2ln4Z0,5455
-pip/_internal/resolution/resolvelib/resolver.py,sha256=nYZ9bTFXj5c1ILKnkSgU7tUCTYyo5V5J-J0sKoA7Wzg,11533
-pip/_internal/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-pip/_internal/utils/_log.py,sha256=-jHLOE_THaZz5BFcCnoSL9EYAtJ0nXem49s9of4jvKw,1015
-pip/_internal/utils/appdirs.py,sha256=swgcTKOm3daLeXTW6v5BUS2Ti2RvEnGRQYH_yDXklAo,1665
-pip/_internal/utils/compat.py,sha256=ACyBfLgj3_XG-iA5omEDrXqDM0cQKzi8h8HRBInzG6Q,1884
-pip/_internal/utils/compatibility_tags.py,sha256=ydin8QG8BHqYRsPY4OL6cmb44CbqXl1T0xxS97VhHkk,5377
-pip/_internal/utils/datetime.py,sha256=m21Y3wAtQc-ji6Veb6k_M5g6A0ZyFI4egchTdnwh-pQ,242
-pip/_internal/utils/deprecation.py,sha256=OLc7GzDwPob9y8jscDYCKUNBV-9CWwqFplBOJPLOpBM,5764
-pip/_internal/utils/direct_url_helpers.py,sha256=6F1tc2rcKaCZmgfVwsE6ObIe_Pux23mUVYA-2D9wCFc,3206
-pip/_internal/utils/distutils_args.py,sha256=bYUt4wfFJRaeGO4VHia6FNaA8HlYXMcKuEq1zYijY5g,1115
-pip/_internal/utils/egg_link.py,sha256=5MVlpz5LirT4iLQq86OYzjXaYF0D4Qk1dprEI7ThST4,2203
-pip/_internal/utils/encoding.py,sha256=qqsXDtiwMIjXMEiIVSaOjwH5YmirCaK-dIzb6-XJsL0,1169
-pip/_internal/utils/entrypoints.py,sha256=YlhLTRl2oHBAuqhc-zmL7USS67TPWVHImjeAQHreZTQ,3064
-pip/_internal/utils/filesystem.py,sha256=RhMIXUaNVMGjc3rhsDahWQ4MavvEQDdqXqgq-F6fpw8,5122
-pip/_internal/utils/filetypes.py,sha256=i8XAQ0eFCog26Fw9yV0Yb1ygAqKYB1w9Cz9n0fj8gZU,716
-pip/_internal/utils/glibc.py,sha256=tDfwVYnJCOC0BNVpItpy8CGLP9BjkxFHdl0mTS0J7fc,3110
-pip/_internal/utils/hashes.py,sha256=1WhkVNIHNfuYLafBHThIjVKGplxFJXSlQtuG2mXNlJI,4831
-pip/_internal/utils/inject_securetransport.py,sha256=o-QRVMGiENrTJxw3fAhA7uxpdEdw6M41TjHYtSVRrcg,795
-pip/_internal/utils/logging.py,sha256=U2q0i1n8hPS2gQh8qcocAg5dovGAa_bR24akmXMzrk4,11632
-pip/_internal/utils/misc.py,sha256=49Rs2NgrD4JGTKFt0farCm7FIAi-rjyoxgioArhCW_0,21617
-pip/_internal/utils/models.py,sha256=5GoYU586SrxURMvDn_jBMJInitviJg4O5-iOU-6I0WY,1193
-pip/_internal/utils/packaging.py,sha256=5Wm6_x7lKrlqVjPI5MBN_RurcRHwVYoQ7Ksrs84de7s,2108
-pip/_internal/utils/setuptools_build.py,sha256=4i3CuS34yNrkePnZ73rR47pyDzpZBo-SX9V5PNDSSHY,5662
-pip/_internal/utils/subprocess.py,sha256=MYySbvY7qBevRxq_RFfOsDqG4vMqrB4vDoL_eyPE6Bo,9197
-pip/_internal/utils/temp_dir.py,sha256=aCX489gRa4Nu0dMKRFyGhV6maJr60uEynu5uCbKR4Qg,7702
-pip/_internal/utils/unpacking.py,sha256=SBb2iV1crb89MDRTEKY86R4A_UOWApTQn9VQVcMDOlE,8821
-pip/_internal/utils/urls.py,sha256=AhaesUGl-9it6uvG6fsFPOr9ynFpGaTMk4t5XTX7Z_Q,1759
-pip/_internal/utils/virtualenv.py,sha256=4_48qMzCwB_F5jIK5BC_ua7uiAMVifmQWU9NdaGUoVA,3459
-pip/_internal/utils/wheel.py,sha256=lXOgZyTlOm5HmK8tw5iw0A3_5A6wRzsXHOaQkIvvloU,4549
-pip/_internal/vcs/__init__.py,sha256=UAqvzpbi0VbZo3Ub6skEeZAw-ooIZR-zX_WpCbxyCoU,596
-pip/_internal/vcs/bazaar.py,sha256=zq-Eu2NtJffc6kOsyv2kmRTnKg9qeIXE-KH5JeKck70,3518
-pip/_internal/vcs/git.py,sha256=mjhwudCx9WlLNkxZ6_kOKmueF0rLoU2i1xeASKF6yiQ,18116
-pip/_internal/vcs/mercurial.py,sha256=Bzbd518Jsx-EJI0IhIobiQqiRsUv5TWYnrmRIFWE0Gw,5238
-pip/_internal/vcs/subversion.py,sha256=AeUVE9d9qp-0QSOMiUvuFHy1TK950E3QglN7ipP13sI,11728
-pip/_internal/vcs/versioncontrol.py,sha256=KUOc-hN51em9jrqxKwUR3JnkgSE-xSOqMiiJcSaL6B8,22811
-pip/_vendor/__init__.py,sha256=fNxOSVD0auElsD8fN9tuq5psfgMQ-RFBtD4X5gjlRkg,4966
-pip/_vendor/six.py,sha256=TOOfQi7nFGfMrIvtdr6wX4wyHH8M7aknmuLfo2cBBrM,34549
-pip/_vendor/typing_extensions.py,sha256=VKZ_nHsuzDbKOVUY2CTdavwBgfZ2EXRyluZHRzUYAbg,80114
-pip/_vendor/vendor.txt,sha256=07gLL_CcEHdl1XM0g4PH2L4gsTTMlJr8WWIC11yEyMo,469
-pip/_vendor/cachecontrol/__init__.py,sha256=hrxlv3q7upsfyMw8k3gQ9vagBax1pYHSGGqYlZ0Zk0M,465
-pip/_vendor/cachecontrol/_cmd.py,sha256=lxUXqfNTVx84zf6tcWbkLZHA6WVBRtJRpfeA9ZqhaAY,1379
-pip/_vendor/cachecontrol/adapter.py,sha256=ew9OYEQHEOjvGl06ZsuX8W3DAvHWsQKHwWAxISyGug8,5033
-pip/_vendor/cachecontrol/cache.py,sha256=Tty45fOjH40fColTGkqKQvQQmbYsMpk-nCyfLcv2vG4,1535
-pip/_vendor/cachecontrol/compat.py,sha256=LNx7vqBndYdHU8YuJt53ab_8rzMGTXVrvMb7CZJkxG0,778
-pip/_vendor/cachecontrol/controller.py,sha256=bAYrt7x_VH4toNpI066LQxbHpYGpY1MxxmZAhspplvw,16416
-pip/_vendor/cachecontrol/filewrapper.py,sha256=X4BAQOO26GNOR7nH_fhTzAfeuct2rBQcx_15MyFBpcs,3946
-pip/_vendor/cachecontrol/heuristics.py,sha256=8kAyuZLSCyEIgQr6vbUwfhpqg9ows4mM0IV6DWazevI,4154
-pip/_vendor/cachecontrol/serialize.py,sha256=_U1NU_C-SDgFzkbAxAsPDgMTHeTWZZaHCQnZN_jh0U8,7105
-pip/_vendor/cachecontrol/wrapper.py,sha256=X3-KMZ20Ho3VtqyVaXclpeQpFzokR5NE8tZSfvKVaB8,774
-pip/_vendor/cachecontrol/caches/__init__.py,sha256=h-1cUmOz6mhLsjTjOrJ8iPejpGdLCyG4lzTftfGZvLg,242
-pip/_vendor/cachecontrol/caches/file_cache.py,sha256=GpexcE29LoY4MaZwPUTcUBZaDdcsjqyLxZFznk8Hbr4,5271
-pip/_vendor/cachecontrol/caches/redis_cache.py,sha256=mp-QWonP40I3xJGK3XVO-Gs9a3UjzlqqEmp9iLJH9F4,1033
-pip/_vendor/certifi/__init__.py,sha256=luDjIGxDSrQ9O0zthdz5Lnt069Z_7eR1GIEefEaf-Ys,94
-pip/_vendor/certifi/__main__.py,sha256=1k3Cr95vCxxGRGDljrW3wMdpZdL3Nhf0u1n-k2qdsCY,255
-pip/_vendor/certifi/cacert.pem,sha256=3l8CcWt_qL42030rGieD3SLufICFX0bYtGhDl_EXVPI,286370
-pip/_vendor/certifi/core.py,sha256=ZwiOsv-sD_ouU1ft8wy_xZ3LQ7UbcVzyqj2XNyrsZis,4279
-pip/_vendor/chardet/__init__.py,sha256=9-r0i294avRciob2HKVcKf6GJmXPHpgMqIijVrqHBDU,3705
-pip/_vendor/chardet/big5freq.py,sha256=ltcfP-3PjlNHCoo5e4a7C4z-2DhBTXRfY6jbMbB7P30,31274
-pip/_vendor/chardet/big5prober.py,sha256=neUXIlq35507yibstiznZWFzyNcMn6EXrqJaUJVPWKg,1741
-pip/_vendor/chardet/chardistribution.py,sha256=M9NTKdM72KieFKy4TT5eml4PP0WaVcXuY5PpWSFD0FA,9608
-pip/_vendor/chardet/charsetgroupprober.py,sha256=CaIBAmNitEsYuSgMvgAsMREN4cLxMj5OYwMhVo6MAxk,3817
-pip/_vendor/chardet/charsetprober.py,sha256=Eo3w8sCmbvnVKOGNW1iy50KATVs8xV-gF7cQ0VG85dQ,4801
-pip/_vendor/chardet/codingstatemachine.py,sha256=BiGR9kgTYbS4gJI5qBmE52HMOBOR_roDvXf7aIehdEk,3559
-pip/_vendor/chardet/cp949prober.py,sha256=kCQEaOCzMntqv7pAyXEobWTRgIUxYfoiUr0btXO1nI8,1838
-pip/_vendor/chardet/enums.py,sha256=Rodw4p61Vg9U-oCo6eUuT7uDzKwIbCaA15HwbvCoCNk,1619
-pip/_vendor/chardet/escprober.py,sha256=girD61r3NsQLnMQXsWWBU4hHuRJzTH3V7-VfTUr-nQY,3864
-pip/_vendor/chardet/escsm.py,sha256=0Vs4iPPovberMoSxxnK5pI161Xf-mtKgOl14g5Xc7zg,12021
-pip/_vendor/chardet/eucjpprober.py,sha256=pGgs4lINwCEDV2bxqIZ6hXpaj2j4l2oLsMx6kuOK_zQ,3676
-pip/_vendor/chardet/euckrfreq.py,sha256=3mHuRvXfsq_QcQysDQFb8qSudvTiol71C6Ic2w57tKM,13566
-pip/_vendor/chardet/euckrprober.py,sha256=qBuSS2zXWaoUmGdzz3owAnD1GNhuKR_8bYzDC3yxe6I,1731
-pip/_vendor/chardet/euctwfreq.py,sha256=2alILE1Lh5eqiFJZjzRkMQXolNJRHY5oBQd-vmZYFFM,36913
-pip/_vendor/chardet/euctwprober.py,sha256=SLnCoJC94jZL8PJio60Q8PZACJA1rVPtUdWMa1W8Pwk,1731
-pip/_vendor/chardet/gb2312freq.py,sha256=49OrdXzD-HXqwavkqjo8Z7gvs58hONNzDhAyMENNkvY,20735
-pip/_vendor/chardet/gb2312prober.py,sha256=NS_i52jZE0TnWGkKqFduvu9fzW0nMcS2XbYJ8qSX8hY,1737
-pip/_vendor/chardet/hebrewprober.py,sha256=1l1hXF8-2IWDrPkf85UvAO1GVtMfY1r11kDgOqa-gU4,13919
-pip/_vendor/chardet/jisfreq.py,sha256=mm8tfrwqhpOd3wzZKS4NJqkYBQVcDfTM2JiQ5aW932E,25796
-pip/_vendor/chardet/johabfreq.py,sha256=dBpOYG34GRX6SL8k_LbS9rxZPMjLjoMlgZ03Pz5Hmqc,42498
-pip/_vendor/chardet/johabprober.py,sha256=C18osd4vMPfy9facw-Y1Lor_9UrW0PeV-zxM2fu441c,1730
-pip/_vendor/chardet/jpcntx.py,sha256=m1gDpPkRca4EDwym8XSL5YdoILFnFsDbNBYMQV7_-NE,26797
-pip/_vendor/chardet/langbulgarianmodel.py,sha256=vmbvYFP8SZkSxoBvLkFqKiH1sjma5ihk3PTpdy71Rr4,104562
-pip/_vendor/chardet/langgreekmodel.py,sha256=JfB7bupjjJH2w3X_mYnQr9cJA_7EuITC2cRW13fUjeI,98484
-pip/_vendor/chardet/langhebrewmodel.py,sha256=3HXHaLQPNAGcXnJjkIJfozNZLTvTJmf4W5Awi6zRRKc,98196
-pip/_vendor/chardet/langhungarianmodel.py,sha256=WxbeQIxkv8YtApiNqxQcvj-tMycsoI4Xy-fwkDHpP_Y,101363
-pip/_vendor/chardet/langrussianmodel.py,sha256=s395bTZ87ESTrZCOdgXbEjZ9P1iGPwCl_8xSsac_DLY,128035
-pip/_vendor/chardet/langthaimodel.py,sha256=7bJlQitRpTnVGABmbSznHnJwOHDy3InkTvtFUx13WQI,102774
-pip/_vendor/chardet/langturkishmodel.py,sha256=XY0eGdTIy4eQ9Xg1LVPZacb-UBhHBR-cq0IpPVHowKc,95372
-pip/_vendor/chardet/latin1prober.py,sha256=u_iGcQMUcZLXvj4B_WXx4caA0C5oaE2Qj1KTpz_RQ1I,5260
-pip/_vendor/chardet/mbcharsetprober.py,sha256=iKKuB6o_FF80NynRLBDT0UtwOnpLqmL_OspRPMib7CM,3367
-pip/_vendor/chardet/mbcsgroupprober.py,sha256=1D_kp9nv2_NQRddq9I2WDvB35OJh7Tfpo-OYTnL3B5o,2056
-pip/_vendor/chardet/mbcssm.py,sha256=EfORNu1WXgnFvpFarU8uJHS8KFif63xmgrHOB4DdDdY,30068
-pip/_vendor/chardet/sbcharsetprober.py,sha256=VvtWiNRLbHDZ5xgnofsmP1u8VQIkkaAuw3Ir9m1zDzQ,6199
-pip/_vendor/chardet/sbcsgroupprober.py,sha256=mekr4E3hgT4onmwi8oi1iEGW1CN-Z-BArG6kOtCunJw,4129
-pip/_vendor/chardet/sjisprober.py,sha256=sLfWS25PVFr5cDGhEf6h_s-RJsyeSteA-4ynsTl_UvA,3749
-pip/_vendor/chardet/universaldetector.py,sha256=BHeNWt1kn0yQgnR6xNtLAjiNmEQpSHYlKEvuZ9QyR1k,13288
-pip/_vendor/chardet/utf1632prober.py,sha256=N42YJEOkVDB67c38t5aJhXMG1QvnyWWDMNY5ERzniU0,8289
-pip/_vendor/chardet/utf8prober.py,sha256=mnLaSBV4gg-amt2WmxKFKWy4vVBedMNgjdbvgzBo0Dc,2709
-pip/_vendor/chardet/version.py,sha256=u_QYi-DXU1s7fyC_Rwa0I0-UcxMVmH7Co6c7QGKbe3g,242
-pip/_vendor/chardet/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-pip/_vendor/chardet/cli/chardetect.py,sha256=1qMxT3wrp5vP6ugSf1-Zz3BWwlbCWJ0jzeCuhgX85vw,2406
-pip/_vendor/chardet/metadata/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-pip/_vendor/chardet/metadata/languages.py,sha256=HcaBygWtZq3gR8prIkJp_etvkhm2V4pUIToqjPZhgrc,13280
-pip/_vendor/colorama/__init__.py,sha256=ihDoWQOkapwF7sqQ99AoDoEF3vGYm40OtmgW211cLZw,239
-pip/_vendor/colorama/ansi.py,sha256=Top4EeEuaQdBWdteKMEcGOTeKeF19Q-Wo_6_Cj5kOzQ,2522
-pip/_vendor/colorama/ansitowin32.py,sha256=gGrO7MVtwc-j1Sq3jKfZpERT1JWmYSOsTVDiTnFbZU4,10830
-pip/_vendor/colorama/initialise.py,sha256=PprovDNxMTrvoNHFcL2NZjpH2XzDc8BLxLxiErfUl4k,1915
-pip/_vendor/colorama/win32.py,sha256=bJ8Il9jwaBN5BJ8bmN6FoYZ1QYuMKv2j8fGrXh7TJjw,5404
-pip/_vendor/colorama/winterm.py,sha256=2y_2b7Zsv34feAsP67mLOVc-Bgq51mdYGo571VprlrM,6438
-pip/_vendor/distlib/__init__.py,sha256=acgfseOC55dNrVAzaBKpUiH3Z6V7Q1CaxsiQ3K7pC-E,581
-pip/_vendor/distlib/compat.py,sha256=tfoMrj6tujk7G4UC2owL6ArgDuCKabgBxuJRGZSmpko,41259
-pip/_vendor/distlib/database.py,sha256=o_mw0fAr93NDAHHHfqG54Y1Hi9Rkfrp2BX15XWZYK50,51697
-pip/_vendor/distlib/index.py,sha256=HFiDG7LMoaBs829WuotrfIwcErOOExUOR_AeBtw_TCU,20834
-pip/_vendor/distlib/locators.py,sha256=wNzG-zERzS_XGls-nBPVVyLRHa2skUlkn0-5n0trMWA,51991
-pip/_vendor/distlib/manifest.py,sha256=nQEhYmgoreaBZzyFzwYsXxJARu3fo4EkunU163U16iE,14811
-pip/_vendor/distlib/markers.py,sha256=TpHHHLgkzyT7YHbwj-2i6weRaq-Ivy2-MUnrDkjau-U,5058
-pip/_vendor/distlib/metadata.py,sha256=g_DIiu8nBXRzA-mWPRpatHGbmFZqaFoss7z9TG7QSUU,39801
-pip/_vendor/distlib/resources.py,sha256=LwbPksc0A1JMbi6XnuPdMBUn83X7BPuFNWqPGEKI698,10820
-pip/_vendor/distlib/scripts.py,sha256=BmkTKmiTk4m2cj-iueliatwz3ut_9SsABBW51vnQnZU,18102
-pip/_vendor/distlib/t32.exe,sha256=a0GV5kCoWsMutvliiCKmIgV98eRZ33wXoS-XrqvJQVs,97792
-pip/_vendor/distlib/t64-arm.exe,sha256=68TAa32V504xVBnufojh0PcenpR3U4wAqTqf-MZqbPw,182784
-pip/_vendor/distlib/t64.exe,sha256=gaYY8hy4fbkHYTTnA4i26ct8IQZzkBG2pRdy0iyuBrc,108032
-pip/_vendor/distlib/util.py,sha256=31dPXn3Rfat0xZLeVoFpuniyhe6vsbl9_QN-qd9Lhlk,66262
-pip/_vendor/distlib/version.py,sha256=WG__LyAa2GwmA6qSoEJtvJE8REA1LZpbSizy8WvhJLk,23513
-pip/_vendor/distlib/w32.exe,sha256=R4csx3-OGM9kL4aPIzQKRo5TfmRSHZo6QWyLhDhNBks,91648
-pip/_vendor/distlib/w64-arm.exe,sha256=xdyYhKj0WDcVUOCb05blQYvzdYIKMbmJn2SZvzkcey4,168448
-pip/_vendor/distlib/w64.exe,sha256=ejGf-rojoBfXseGLpya6bFTFPWRG21X5KvU8J5iU-K0,101888
-pip/_vendor/distlib/wheel.py,sha256=Rgqs658VsJ3R2845qwnZD8XQryV2CzWw2mghwLvxxsI,43898
-pip/_vendor/distro/__init__.py,sha256=2fHjF-SfgPvjyNZ1iHh_wjqWdR_Yo5ODHwZC0jLBPhc,981
-pip/_vendor/distro/__main__.py,sha256=bu9d3TifoKciZFcqRBuygV3GSuThnVD_m2IK4cz96Vs,64
-pip/_vendor/distro/distro.py,sha256=UYQG_9H_iSOt422uasA92HlY7aXeTnWKdV-IhsSAdwQ,48841
-pip/_vendor/idna/__init__.py,sha256=KJQN1eQBr8iIK5SKrJ47lXvxG0BJ7Lm38W4zT0v_8lk,849
-pip/_vendor/idna/codec.py,sha256=6ly5odKfqrytKT9_7UrlGklHnf1DSK2r9C6cSM4sa28,3374
-pip/_vendor/idna/compat.py,sha256=0_sOEUMT4CVw9doD3vyRhX80X19PwqFoUBs7gWsFME4,321
-pip/_vendor/idna/core.py,sha256=1JxchwKzkxBSn7R_oCE12oBu3eVux0VzdxolmIad24M,12950
-pip/_vendor/idna/idnadata.py,sha256=xUjqKqiJV8Ho_XzBpAtv5JFoVPSupK-SUXvtjygUHqw,44375
-pip/_vendor/idna/intranges.py,sha256=YBr4fRYuWH7kTKS2tXlFjM24ZF1Pdvcir-aywniInqg,1881
-pip/_vendor/idna/package_data.py,sha256=C_jHJzmX8PI4xq0jpzmcTMxpb5lDsq4o5VyxQzlVrZE,21
-pip/_vendor/idna/uts46data.py,sha256=zvjZU24s58_uAS850Mcd0NnD0X7_gCMAMjzWNIeUJdc,206539
-pip/_vendor/msgpack/__init__.py,sha256=NryGaKLDk_Egd58ZxXpnuI7OWO27AXz7S6CBFRM3sAY,1132
-pip/_vendor/msgpack/exceptions.py,sha256=dCTWei8dpkrMsQDcjQk74ATl9HsIBH0ybt8zOPNqMYc,1081
-pip/_vendor/msgpack/ext.py,sha256=TuldJPkYu8Wo_Xh0tFGL2l06-gY88NSR8tOje9fo2Wg,6080
-pip/_vendor/msgpack/fallback.py,sha256=OORDn86-fHBPlu-rPlMdM10KzkH6S_Rx9CHN1b7o4cg,34557
-pip/_vendor/packaging/__about__.py,sha256=ugASIO2w1oUyH8_COqQ2X_s0rDhjbhQC3yJocD03h2c,661
-pip/_vendor/packaging/__init__.py,sha256=b9Kk5MF7KxhhLgcDmiUWukN-LatWFxPdNug0joPhHSk,497
-pip/_vendor/packaging/_manylinux.py,sha256=XcbiXB-qcjv3bcohp6N98TMpOP4_j3m-iOA8ptK2GWY,11488
-pip/_vendor/packaging/_musllinux.py,sha256=_KGgY_qc7vhMGpoqss25n2hiLCNKRtvz9mCrS7gkqyc,4378
-pip/_vendor/packaging/_structures.py,sha256=q3eVNmbWJGG_S0Dit_S3Ao8qQqz_5PYTXFAKBZe5yr4,1431
-pip/_vendor/packaging/markers.py,sha256=AJBOcY8Oq0kYc570KuuPTkvuqjAlhufaE2c9sCUbm64,8487
-pip/_vendor/packaging/requirements.py,sha256=NtDlPBtojpn1IUC85iMjPNsUmufjpSlwnNA-Xb4m5NA,4676
-pip/_vendor/packaging/specifiers.py,sha256=LRQ0kFsHrl5qfcFNEEJrIFYsnIHQUJXY9fIsakTrrqE,30110
-pip/_vendor/packaging/tags.py,sha256=lmsnGNiJ8C4D_Pf9PbM0qgbZvD9kmB9lpZBQUZa3R_Y,15699
-pip/_vendor/packaging/utils.py,sha256=dJjeat3BS-TYn1RrUFVwufUMasbtzLfYRoy_HXENeFQ,4200
-pip/_vendor/packaging/version.py,sha256=_fLRNrFrxYcHVfyo8vk9j8s6JM8N_xsSxVFr6RJyco8,14665
-pip/_vendor/pep517/__init__.py,sha256=QJpRfzTpk6YSPgjcxp9-MCAiS5dEdzf9Bh0UXophG6c,130
-pip/_vendor/pep517/_compat.py,sha256=by6evrYnqkisiM-MQcvOKs5bgDMzlOSgZqRHNqf04zE,138
-pip/_vendor/pep517/build.py,sha256=VLtq0hOvNWCfX0FkdvTKEr-TmyrbaX0UqghpU7bHO1w,3443
-pip/_vendor/pep517/check.py,sha256=o0Mp_PX1yOM2WNq1ZdDph3YA7RObj2UGQUCUF-46RaU,6083
-pip/_vendor/pep517/colorlog.py,sha256=eCV1W52xzBjA-sOlKzUcvabRiFa11Y7hA791u-85_c8,3994
-pip/_vendor/pep517/dirtools.py,sha256=JiZ1Hlt2LNaLZEhNa_pm1YyG3MUoRh7KxY6hJ8ac-w0,607
-pip/_vendor/pep517/envbuild.py,sha256=nkTt1ZY7MXVgYOhPTyTr-VOxQ-q_Qc1touXfQgM56Bs,6081
-pip/_vendor/pep517/meta.py,sha256=budDWsV3I2OnnpSvXQ_ycuTqxh8G7DABoazAq-j8OlQ,2520
-pip/_vendor/pep517/wrappers.py,sha256=jcxIy-1Kl8I2xAZgbr6qNjF5b_6Q5gTndf9cxF0p5gM,12721
-pip/_vendor/pep517/in_process/__init__.py,sha256=4yDanGyKTXQtLhqRo9eEZ1CsLFezEAEZMfqEd88xrvY,872
-pip/_vendor/pep517/in_process/_in_process.py,sha256=JDpTxlKMDN1QfN_ey4IDtE6ZVSWtzP0_WLSqt1TyGaA,10801
-pip/_vendor/pkg_resources/__init__.py,sha256=NnpQ3g6BCHzpMgOR_OLBmYtniY4oOzdKpwqghfq_6ug,108287
-pip/_vendor/pkg_resources/py31compat.py,sha256=CRk8fkiPRDLsbi5pZcKsHI__Pbmh_94L8mr9Qy9Ab2U,562
-pip/_vendor/platformdirs/__init__.py,sha256=x0aUmmovXXuRFVrVQBtwIiovX12B7rUkdV4F9UlLz0Y,12831
-pip/_vendor/platformdirs/__main__.py,sha256=ZmsnTxEOxtTvwa-Y_Vfab_JN3X4XCVeN8X0yyy9-qnc,1176
-pip/_vendor/platformdirs/android.py,sha256=GKizhyS7ESRiU67u8UnBJLm46goau9937EchXWbPBlk,4068
-pip/_vendor/platformdirs/api.py,sha256=MXKHXOL3eh_-trSok-JUTjAR_zjmmKF3rjREVABjP8s,4910
-pip/_vendor/platformdirs/macos.py,sha256=-3UXQewbT0yMhMdkzRXfXGAntmLIH7Qt4a9Hlf8I5_Y,2655
-pip/_vendor/platformdirs/unix.py,sha256=b4aVYTz0qZ50HntwOXo8r6tp82jAa3qTjxw-WlnC2yc,6910
-pip/_vendor/platformdirs/version.py,sha256=tsBKKPDX3LLh39yHXeTYauGRbRd-AmOJr9SwKldlFIU,78
-pip/_vendor/platformdirs/windows.py,sha256=ISruopR5UGBePC0BxCxXevkZYfjJsIZc49YWU5iYfQ4,6439
-pip/_vendor/pygments/__init__.py,sha256=5oLcMLXD0cTG8YcHBPITtK1fS0JBASILEvEnWkTezgE,2999
-pip/_vendor/pygments/__main__.py,sha256=p0_rz3JZmNZMNZBOqDojaEx1cr9wmA9FQZX_TYl74lQ,353
-pip/_vendor/pygments/cmdline.py,sha256=rc0fah4eknRqFgn1wKNEwkq0yWnSqYOGaA4PaIeOxVY,23685
-pip/_vendor/pygments/console.py,sha256=hQfqCFuOlGk7DW2lPQYepsw-wkOH1iNt9ylNA1eRymM,1697
-pip/_vendor/pygments/filter.py,sha256=NglMmMPTRRv-zuRSE_QbWid7JXd2J4AvwjCW2yWALXU,1938
-pip/_vendor/pygments/formatter.py,sha256=6-TS2Y8pUMeWIUolWwr1O8ruC-U6HydWDwOdbAiJgJQ,2917
-pip/_vendor/pygments/lexer.py,sha256=ZPB_TGn_qzrXodRFwEdPzzJk6LZBo9BlfSy3lacc6zg,32005
-pip/_vendor/pygments/modeline.py,sha256=gIbMSYrjSWPk0oATz7W9vMBYkUyTK2OcdVyKjioDRvA,986
-pip/_vendor/pygments/plugin.py,sha256=5rPxEoB_89qQMpOs0nI4KyLOzAHNlbQiwEMOKxqNmv8,2591
-pip/_vendor/pygments/regexopt.py,sha256=c6xcXGpGgvCET_3VWawJJqAnOp0QttFpQEdOPNY2Py0,3072
-pip/_vendor/pygments/scanner.py,sha256=F2T2G6cpkj-yZtzGQr-sOBw5w5-96UrJWveZN6va2aM,3092
-pip/_vendor/pygments/sphinxext.py,sha256=F8L0211sPnXaiWutN0lkSUajWBwlgDMIEFFAbMWOvZY,4630
-pip/_vendor/pygments/style.py,sha256=RRnussX1YiK9Z7HipIvKorImxu3-HnkdpPCO4u925T0,6257
-pip/_vendor/pygments/token.py,sha256=vA2yNHGJBHfq4jNQSah7C9DmIOp34MmYHPA8P-cYAHI,6184
-pip/_vendor/pygments/unistring.py,sha256=gP3gK-6C4oAFjjo9HvoahsqzuV4Qz0jl0E0OxfDerHI,63187
-pip/_vendor/pygments/util.py,sha256=KgwpWWC3By5AiNwxGTI7oI9aXupH2TyZWukafBJe0Mg,9110
-pip/_vendor/pygments/filters/__init__.py,sha256=b5YuXB9rampSy2-cMtKxGQoMDfrG4_DcvVwZrzTlB6w,40386
-pip/_vendor/pygments/formatters/__init__.py,sha256=YTqGeHS17fNXCLMZpf7oCxBCKLB9YLsZ8IAsjGhawyg,4810
-pip/_vendor/pygments/formatters/_mapping.py,sha256=fCZgvsM6UEuZUG7J6lr47eVss5owKd_JyaNbDfxeqmQ,4104
-pip/_vendor/pygments/formatters/bbcode.py,sha256=JrL4ITjN-KzPcuQpPMBf1pm33eW2sDUNr8WzSoAJsJA,3314
-pip/_vendor/pygments/formatters/groff.py,sha256=xrOFoLbafSA9uHsSLRogy79_Zc4GWJ8tMK2hCdTJRsw,5086
-pip/_vendor/pygments/formatters/html.py,sha256=QNt9prPgxmbKx2M-nfDwoR1bIg06-sNouQuWnE434Wc,35441
-pip/_vendor/pygments/formatters/img.py,sha256=h75Y7IRZLZxDEIwyoOsdRLTwm7kLVPbODKkgEiJ0iKI,21938
-pip/_vendor/pygments/formatters/irc.py,sha256=iwk5tDJOxbCV64SCmOFyvk__x6RD60ay0nUn7ko9n7U,5871
-pip/_vendor/pygments/formatters/latex.py,sha256=thPbytJCIs2AUXsO3NZwqKtXJ-upOlcXP4CXsx94G4w,19351
-pip/_vendor/pygments/formatters/other.py,sha256=PczqK1Rms43lz6iucOLPeBMxIncPKOGBt-195w1ynII,5073
-pip/_vendor/pygments/formatters/pangomarkup.py,sha256=ZZzMsKJKXrsDniFeMTkIpe7aQ4VZYRHu0idWmSiUJ2U,2212
-pip/_vendor/pygments/formatters/rtf.py,sha256=abrKlWjipBkQvhIICxtjYTUNv6WME0iJJObFvqVuudE,5014
-pip/_vendor/pygments/formatters/svg.py,sha256=6MM9YyO8NhU42RTQfTWBiagWMnsf9iG5gwhqSriHORE,7335
-pip/_vendor/pygments/formatters/terminal.py,sha256=NpEGvwkC6LgMLQTjVzGrJXji3XcET1sb5JCunSCzoRo,4674
-pip/_vendor/pygments/formatters/terminal256.py,sha256=4v4OVizvsxtwWBpIy_Po30zeOzE5oJg_mOc1-rCjMDk,11753
-pip/_vendor/pygments/lexers/__init__.py,sha256=8d80-XfL5UKDCC1wRD1a_ZBZDkZ2HOe7Zul8SsnNYFE,11174
-pip/_vendor/pygments/lexers/_mapping.py,sha256=zEiCV5FPiBioMJQJjw9kk7IJ5Y9GwknS4VJPYlcNchs,70232
-pip/_vendor/pygments/lexers/python.py,sha256=gZROs9iNSOA18YyVghP1cUCD0OwYZ04a6PCwgSOCeSA,53376
-pip/_vendor/pygments/styles/__init__.py,sha256=iZDZ7PBKb55SpGlE1--cx9cbmWx5lVTH4bXO87t2Vok,3419
-pip/_vendor/pyparsing/__init__.py,sha256=ZPdI7pPo4IYXcABw-51AcqOzsxVvDtqnQbyn_qYWZvo,9171
-pip/_vendor/pyparsing/actions.py,sha256=wU9i32e0y1ymxKE3OUwSHO-SFIrt1h_wv6Ws0GQjpNU,6426
-pip/_vendor/pyparsing/common.py,sha256=lFL97ooIeR75CmW5hjURZqwDCTgruqltcTCZ-ulLO2Q,12936
-pip/_vendor/pyparsing/core.py,sha256=AzTm1KFT1FIhiw2zvXZJmrpQoAwB0wOmeDCiR6SYytw,213344
-pip/_vendor/pyparsing/exceptions.py,sha256=3LbSafD32NYb1Tzt85GHNkhEAU1eZkTtNSk24cPMemo,9023
-pip/_vendor/pyparsing/helpers.py,sha256=QpUOjW0-psvueMwWb9bQpU2noqKCv98_wnw1VSzSdVo,39129
-pip/_vendor/pyparsing/results.py,sha256=HgNvWVXBdQP-Q6PtJfoCEeOJk2nwEvG-2KVKC5sGA30,25341
-pip/_vendor/pyparsing/testing.py,sha256=7tu4Abp4uSeJV0N_yEPRmmNUhpd18ZQP3CrX41DM814,13402
-pip/_vendor/pyparsing/unicode.py,sha256=fwuhMj30SQ165Cv7HJpu-rSxGbRm93kN9L4Ei7VGc1Y,10787
-pip/_vendor/pyparsing/util.py,sha256=kq772O5YSeXOSdP-M31EWpbH_ayj7BMHImBYo9xPD5M,6805
-pip/_vendor/pyparsing/diagram/__init__.py,sha256=KW0PV_TvWKnL7jysz0pQbZ24nzWWu2ZfNaeyUIIywIg,23685
-pip/_vendor/requests/__init__.py,sha256=3XN75ZS4slWy3TQsEGF7-Q6l2R146teU-s2_rXNhxhU,5178
-pip/_vendor/requests/__version__.py,sha256=nJVa3ef2yRyeYMhy7yHnRyjjpnNTDykZsE4Sp9irBC4,440
-pip/_vendor/requests/_internal_utils.py,sha256=aSPlF4uDhtfKxEayZJJ7KkAxtormeTfpwKSBSwtmAUw,1397
-pip/_vendor/requests/adapters.py,sha256=GFEz5koZaMZD86v0SHXKVB5SE9MgslEjkCQzldkNwVM,21443
-pip/_vendor/requests/api.py,sha256=dyvkDd5itC9z2g0wHl_YfD1yf6YwpGWLO7__8e21nks,6377
-pip/_vendor/requests/auth.py,sha256=h-HLlVx9j8rKV5hfSAycP2ApOSglTz77R0tz7qCbbEE,10187
-pip/_vendor/requests/certs.py,sha256=PVPooB0jP5hkZEULSCwC074532UFbR2Ptgu0I5zwmCs,575
-pip/_vendor/requests/compat.py,sha256=IhK9quyX0RRuWTNcg6d2JGSAOUbM6mym2p_2XjLTwf4,1286
-pip/_vendor/requests/cookies.py,sha256=kD3kNEcCj-mxbtf5fJsSaT86eGoEYpD3X0CSgpzl7BM,18560
-pip/_vendor/requests/exceptions.py,sha256=FA-_kVwBZ2jhXauRctN_ewHVK25b-fj0Azyz1THQ0Kk,3823
-pip/_vendor/requests/help.py,sha256=FnAAklv8MGm_qb2UilDQgS6l0cUttiCFKUjx0zn2XNA,3879
-pip/_vendor/requests/hooks.py,sha256=CiuysiHA39V5UfcCBXFIx83IrDpuwfN9RcTUgv28ftQ,733
-pip/_vendor/requests/models.py,sha256=GZRMMrGwDOLVvVfFHLUq0qTfIWDla3NcFHa1f5xs9Q8,35287
-pip/_vendor/requests/packages.py,sha256=njJmVifY4aSctuW3PP5EFRCxjEwMRDO6J_feG2dKWsI,695
-pip/_vendor/requests/sessions.py,sha256=KUqJcRRLovNefUs7ScOXSUVCcfSayTFWtbiJ7gOSlTI,30180
-pip/_vendor/requests/status_codes.py,sha256=FvHmT5uH-_uimtRz5hH9VCbt7VV-Nei2J9upbej6j8g,4235
-pip/_vendor/requests/structures.py,sha256=-IbmhVz06S-5aPSZuUthZ6-6D9XOjRuTXHOabY041XM,2912
-pip/_vendor/requests/utils.py,sha256=0gzSOcx9Ya4liAbHnHuwt4jM78lzCZZoDFgkmsInNUg,33240
-pip/_vendor/resolvelib/__init__.py,sha256=UL-B2BDI0_TRIqkfGwLHKLxY-LjBlomz7941wDqzB1I,537
-pip/_vendor/resolvelib/providers.py,sha256=roVmFBItQJ0TkhNua65h8LdNny7rmeqVEXZu90QiP4o,5872
-pip/_vendor/resolvelib/reporters.py,sha256=fW91NKf-lK8XN7i6Yd_rczL5QeOT3sc6AKhpaTEnP3E,1583
-pip/_vendor/resolvelib/resolvers.py,sha256=2wYzVGBGerbmcIpH8cFmgSKgLSETz8jmwBMGjCBMHG4,17592
-pip/_vendor/resolvelib/structs.py,sha256=IVIYof6sA_N4ZEiE1C1UhzTX495brCNnyCdgq6CYq28,4794
-pip/_vendor/resolvelib/compat/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-pip/_vendor/resolvelib/compat/collections_abc.py,sha256=uy8xUZ-NDEw916tugUXm8HgwCGiMO0f-RcdnpkfXfOs,156
-pip/_vendor/rich/__init__.py,sha256=zREyQ22R3zKg8gMdhiikczdVQYtZNeayHNrbBg5scm0,5944
-pip/_vendor/rich/__main__.py,sha256=BmTmBWI93ytq75IEPi1uAAdeRYzFfDbgaAXjsX1ogig,8808
-pip/_vendor/rich/_cell_widths.py,sha256=2n4EiJi3X9sqIq0O16kUZ_zy6UYMd3xFfChlKfnW1Hc,10096
-pip/_vendor/rich/_emoji_codes.py,sha256=hu1VL9nbVdppJrVoijVshRlcRRe_v3dju3Mmd2sKZdY,140235
-pip/_vendor/rich/_emoji_replace.py,sha256=n-kcetsEUx2ZUmhQrfeMNc-teeGhpuSQ5F8VPBsyvDo,1064
-pip/_vendor/rich/_export_format.py,sha256=nHArqOljIlYn6NruhWsAsh-fHo7oJC3y9BDJyAa-QYQ,2114
-pip/_vendor/rich/_extension.py,sha256=Xt47QacCKwYruzjDi-gOBq724JReDj9Cm9xUi5fr-34,265
-pip/_vendor/rich/_inspect.py,sha256=oZJGw31e64dwXSCmrDnvZbwVb1ZKhWfU8wI3VWohjJk,9695
-pip/_vendor/rich/_log_render.py,sha256=1ByI0PA1ZpxZY3CGJOK54hjlq4X-Bz_boIjIqCd8Kns,3225
-pip/_vendor/rich/_loop.py,sha256=hV_6CLdoPm0va22Wpw4zKqM0RYsz3TZxXj0PoS-9eDQ,1236
-pip/_vendor/rich/_palettes.py,sha256=cdev1JQKZ0JvlguV9ipHgznTdnvlIzUFDBb0It2PzjI,7063
-pip/_vendor/rich/_pick.py,sha256=evDt8QN4lF5CiwrUIXlOJCntitBCOsI3ZLPEIAVRLJU,423
-pip/_vendor/rich/_ratio.py,sha256=2lLSliL025Y-YMfdfGbutkQDevhcyDqc-DtUYW9mU70,5472
-pip/_vendor/rich/_spinners.py,sha256=U2r1_g_1zSjsjiUdAESc2iAMc3i4ri_S8PYP6kQ5z1I,19919
-pip/_vendor/rich/_stack.py,sha256=-C8OK7rxn3sIUdVwxZBBpeHhIzX0eI-VM3MemYfaXm0,351
-pip/_vendor/rich/_timer.py,sha256=zelxbT6oPFZnNrwWPpc1ktUeAT-Vc4fuFcRZLQGLtMI,417
-pip/_vendor/rich/_win32_console.py,sha256=P0vxI2fcndym1UU1S37XAzQzQnkyY7YqAKmxm24_gug,22820
-pip/_vendor/rich/_windows.py,sha256=dvNl9TmfPzNVxiKk5WDFihErZ5796g2UC9-KGGyfXmk,1926
-pip/_vendor/rich/_windows_renderer.py,sha256=t74ZL3xuDCP3nmTp9pH1L5LiI2cakJuQRQleHCJerlk,2783
-pip/_vendor/rich/_wrap.py,sha256=xfV_9t0Sg6rzimmrDru8fCVmUlalYAcHLDfrJZnbbwQ,1840
-pip/_vendor/rich/abc.py,sha256=ON-E-ZqSSheZ88VrKX2M3PXpFbGEUUZPMa_Af0l-4f0,890
-pip/_vendor/rich/align.py,sha256=FV6_GS-8uhIyViMng3hkIWSFaTgMohK1Oqyjl8I8mGE,10368
-pip/_vendor/rich/ansi.py,sha256=HtaPG7dvgL6_yo0sQmx5CM05DJ4_1goY5SWXXOYNaKs,6820
-pip/_vendor/rich/bar.py,sha256=a7UD303BccRCrEhGjfMElpv5RFYIinaAhAuqYqhUvmw,3264
-pip/_vendor/rich/box.py,sha256=1Iv1sUWqjtp5XwLwGH-AJ8HgyXZ7dRFUkO0z3M_bRl8,9864
-pip/_vendor/rich/cells.py,sha256=zMjFI15wCpgjLR14lHdfFMVC6qMDi5OsKIB0PYZBBMk,4503
-pip/_vendor/rich/color.py,sha256=kp87L8V4-3qayE6CUxtW_nP8Ujfew_-DAhNwYMXBMOY,17957
-pip/_vendor/rich/color_triplet.py,sha256=3lhQkdJbvWPoLDO-AnYImAWmJvV5dlgYNCVZ97ORaN4,1054
-pip/_vendor/rich/columns.py,sha256=HUX0KcMm9dsKNi11fTbiM_h2iDtl8ySCaVcxlalEzq8,7131
-pip/_vendor/rich/console.py,sha256=bTT9DNX03V4cQXefg22d-gLSs_e_ZY2zdCvLIlEyU2Q,95885
-pip/_vendor/rich/constrain.py,sha256=1VIPuC8AgtKWrcncQrjBdYqA3JVWysu6jZo1rrh7c7Q,1288
-pip/_vendor/rich/containers.py,sha256=aKgm5UDHn5Nmui6IJaKdsZhbHClh_X7D-_Wg8Ehrr7s,5497
-pip/_vendor/rich/control.py,sha256=DSkHTUQLorfSERAKE_oTAEUFefZnZp4bQb4q8rHbKws,6630
-pip/_vendor/rich/default_styles.py,sha256=WqVh-RPNEsx0Wxf3fhS_fCn-wVqgJ6Qfo-Zg7CoCsLE,7954
-pip/_vendor/rich/diagnose.py,sha256=an6uouwhKPAlvQhYpNNpGq9EJysfMIOvvCbO3oSoR24,972
-pip/_vendor/rich/emoji.py,sha256=omTF9asaAnsM4yLY94eR_9dgRRSm1lHUszX20D1yYCQ,2501
-pip/_vendor/rich/errors.py,sha256=5pP3Kc5d4QJ_c0KFsxrfyhjiPVe7J1zOqSFbFAzcV-Y,642
-pip/_vendor/rich/file_proxy.py,sha256=4gCbGRXg0rW35Plaf0UVvj3dfENHuzc_n8I_dBqxI7o,1616
-pip/_vendor/rich/filesize.py,sha256=yShoVpARafJBreyZFaAhC4OhnJ6ydC1WXR-Ez4wU_YQ,2507
-pip/_vendor/rich/highlighter.py,sha256=3WW6PACGlq0e3YDjfqiMBQ0dYZwu7pcoFYUgJy01nb0,9585
-pip/_vendor/rich/json.py,sha256=RCm4lXBXrjvXHpqrWPH8wdGP0jEo4IohLmkddlhRY18,5051
-pip/_vendor/rich/jupyter.py,sha256=QyoKoE_8IdCbrtiSHp9TsTSNyTHY0FO5whE7jOTd9UE,3252
-pip/_vendor/rich/layout.py,sha256=E3xJ4fomizUADwime3VA0lBXoMSPl9blEokIzVBjO0Q,14074
-pip/_vendor/rich/live.py,sha256=emVaLUua-FKSYqZXmtJJjBIstO99CqMOuA6vMAKVkO0,14172
-pip/_vendor/rich/live_render.py,sha256=zElm3PrfSIvjOce28zETHMIUf9pFYSUA5o0AflgUP64,3667
-pip/_vendor/rich/logging.py,sha256=10j13lPr-QuYqEEBz_2aRJp8gNYvSN2wmCUlUqJcPLM,11471
-pip/_vendor/rich/markup.py,sha256=xzF4uAafiEeEYDJYt_vUnJOGoTU8RrH-PH7WcWYXjCg,8198
-pip/_vendor/rich/measure.py,sha256=HmrIJX8sWRTHbgh8MxEay_83VkqNW_70s8aKP5ZcYI8,5305
-pip/_vendor/rich/padding.py,sha256=kTFGsdGe0os7tXLnHKpwTI90CXEvrceeZGCshmJy5zw,4970
-pip/_vendor/rich/pager.py,sha256=SO_ETBFKbg3n_AgOzXm41Sv36YxXAyI3_R-KOY2_uSc,828
-pip/_vendor/rich/palette.py,sha256=lInvR1ODDT2f3UZMfL1grq7dY_pDdKHw4bdUgOGaM4Y,3396
-pip/_vendor/rich/panel.py,sha256=CzdojkDAjxAKgvDxis47nWzUh1V2NniOqkJJQajosG8,8744
-pip/_vendor/rich/pretty.py,sha256=CalVLVW3mvTn1hvI9Pgi2v-y4S-5zUWBK-PH7SlVs-U,36576
-pip/_vendor/rich/progress.py,sha256=zjQRwd3TmDnAvSjTPsNPHFjmqE9GOEX3bf0Lj56hIL8,59746
-pip/_vendor/rich/progress_bar.py,sha256=zHHaFPEfIhW2fq6Fnl5vBY7AUpP1N0HVGElISUHsnqw,8161
-pip/_vendor/rich/prompt.py,sha256=x0mW-pIPodJM4ry6grgmmLrl8VZp99kqcmdnBe70YYA,11303
-pip/_vendor/rich/protocol.py,sha256=5hHHDDNHckdk8iWH5zEbi-zuIVSF5hbU2jIo47R7lTE,1391
-pip/_vendor/rich/region.py,sha256=rNT9xZrVZTYIXZC0NYn41CJQwYNbR-KecPOxTgQvB8Y,166
-pip/_vendor/rich/repr.py,sha256=Je91CIrZN_av9L3FRCKCs5yoX2LvczrCNKqUbVsjUvQ,4449
-pip/_vendor/rich/rule.py,sha256=V6AWI0wCb6DB0rvN967FRMlQrdlG7HoZdfEAHyeG8CM,4773
-pip/_vendor/rich/scope.py,sha256=HX13XsJfqzQHpPfw4Jn9JmJjCsRj9uhHxXQEqjkwyLA,2842
-pip/_vendor/rich/screen.py,sha256=YoeReESUhx74grqb0mSSb9lghhysWmFHYhsbMVQjXO8,1591
-pip/_vendor/rich/segment.py,sha256=6XdX0MfL18tUCaUWDWncIqx0wpq3GiaqzhYP779JvRA,24224
-pip/_vendor/rich/spinner.py,sha256=7b8MCleS4fa46HX0AzF98zfu6ZM6fAL0UgYzPOoakF4,4374
-pip/_vendor/rich/status.py,sha256=gJsIXIZeSo3urOyxRUjs6VrhX5CZrA0NxIQ-dxhCnwo,4425
-pip/_vendor/rich/style.py,sha256=4WnUEkHNMp9Tfmd8cmbxWGby7QeTk2LUTQzFSs46EQc,26240
-pip/_vendor/rich/styled.py,sha256=eZNnzGrI4ki_54pgY3Oj0T-x3lxdXTYh4_ryDB24wBU,1258
-pip/_vendor/rich/syntax.py,sha256=_M08KbE11nNWNBPooFLKAA7lWkThPzlGUsuesxQYsuA,34697
-pip/_vendor/rich/table.py,sha256=r_lahmj45cINCWLYaIjq9yEv3gve8E6bkYTP8NDqApE,39515
-pip/_vendor/rich/terminal_theme.py,sha256=1j5-ufJfnvlAo5Qsi_ACZiXDmwMXzqgmFByObT9-yJY,3370
-pip/_vendor/rich/text.py,sha256=oajdGIeHcLcSdOwbC48_20ylDsHAS5fsPZD_Ih0clyA,44666
-pip/_vendor/rich/theme.py,sha256=GKNtQhDBZKAzDaY0vQVQQFzbc0uWfFe6CJXA-syT7zQ,3627
-pip/_vendor/rich/themes.py,sha256=0xgTLozfabebYtcJtDdC5QkX5IVUEaviqDUJJh4YVFk,102
-pip/_vendor/rich/traceback.py,sha256=MORQpXH7AvhAAThW8oIbtwffXb8M6XRkSkcJ52JuA3g,26060
-pip/_vendor/rich/tree.py,sha256=BMbUYNjS9uodNPfvtY_odmU09GA5QzcMbQ5cJZhllQI,9169
-pip/_vendor/tenacity/__init__.py,sha256=rjcWJVq5PcNJNC42rt-TAGGskM-RUEkZbDKu1ra7IPo,18364
-pip/_vendor/tenacity/_asyncio.py,sha256=HEb0BVJEeBJE9P-m9XBxh1KcaF96BwoeqkJCL5sbVcQ,3314
-pip/_vendor/tenacity/_utils.py,sha256=-y68scDcyoqvTJuJJ0GTfjdSCljEYlbCYvgk7nM4NdM,1944
-pip/_vendor/tenacity/after.py,sha256=dlmyxxFy2uqpLXDr838DiEd7jgv2AGthsWHGYcGYsaI,1496
-pip/_vendor/tenacity/before.py,sha256=7XtvRmO0dRWUp8SVn24OvIiGFj8-4OP5muQRUiWgLh0,1376
-pip/_vendor/tenacity/before_sleep.py,sha256=ThyDvqKU5yle_IvYQz_b6Tp6UjUS0PhVp6zgqYl9U6Y,1908
-pip/_vendor/tenacity/nap.py,sha256=fRWvnz1aIzbIq9Ap3gAkAZgDH6oo5zxMrU6ZOVByq0I,1383
-pip/_vendor/tenacity/retry.py,sha256=Cy504Ss3UrRV7lnYgvymF66WD1wJ2dbM869kDcjuDes,7550
-pip/_vendor/tenacity/stop.py,sha256=sKHmHaoSaW6sKu3dTxUVKr1-stVkY7lw4Y9yjZU30zQ,2790
-pip/_vendor/tenacity/tornadoweb.py,sha256=E8lWO2nwe6dJgoB-N2HhQprYLDLB_UdSgFnv-EN6wKE,2145
-pip/_vendor/tenacity/wait.py,sha256=tdLTESRm5E237VHG0SxCDXRa0DHKPKVq285kslHVURc,8011
-pip/_vendor/tomli/__init__.py,sha256=JhUwV66DB1g4Hvt1UQCVMdfCu-IgAV8FXmvDU9onxd4,396
-pip/_vendor/tomli/_parser.py,sha256=g9-ENaALS-B8dokYpCuzUFalWlog7T-SIYMjLZSWrtM,22633
-pip/_vendor/tomli/_re.py,sha256=dbjg5ChZT23Ka9z9DHOXfdtSpPwUfdgMXnj8NOoly-w,2943
-pip/_vendor/tomli/_types.py,sha256=-GTG2VUqkpxwMqzmVO4F7ybKddIbAnuAHXfmWQcTi3Q,254
-pip/_vendor/urllib3/__init__.py,sha256=iXLcYiJySn0GNbWOOZDDApgBL1JgP44EZ8i1760S8Mc,3333
-pip/_vendor/urllib3/_collections.py,sha256=Rp1mVyBgc_UlAcp6M3at1skJBXR5J43NawRTvW2g_XY,10811
-pip/_vendor/urllib3/_version.py,sha256=GhuGBUT_MtRxHEHDb-LYs5yLPeYWlCwFBPjGZmVJbVg,64
-pip/_vendor/urllib3/connection.py,sha256=8976wL6sGeVMW0JnXvx5mD00yXu87uQjxtB9_VL8dx8,20070
-pip/_vendor/urllib3/connectionpool.py,sha256=vEzk1iJEw1qR2vHBo7m3Y98iDfna6rKkUz3AyK5lJKQ,39093
-pip/_vendor/urllib3/exceptions.py,sha256=0Mnno3KHTNfXRfY7638NufOPkUb6mXOm-Lqj-4x2w8A,8217
-pip/_vendor/urllib3/fields.py,sha256=kvLDCg_JmH1lLjUUEY_FLS8UhY7hBvDPuVETbY8mdrM,8579
-pip/_vendor/urllib3/filepost.py,sha256=5b_qqgRHVlL7uLtdAYBzBh-GHmU5AfJVt_2N0XS3PeY,2440
-pip/_vendor/urllib3/poolmanager.py,sha256=0KOOJECoeLYVjUHvv-0h4Oq3FFQQ2yb-Fnjkbj8gJO0,19786
-pip/_vendor/urllib3/request.py,sha256=ZFSIqX0C6WizixecChZ3_okyu7BEv0lZu1VT0s6h4SM,5985
-pip/_vendor/urllib3/response.py,sha256=p3VBYPhwBca77wCZfmoXvEDVVC3SdF7yxQ6TXuxy1BI,30109
-pip/_vendor/urllib3/contrib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-pip/_vendor/urllib3/contrib/_appengine_environ.py,sha256=bDbyOEhW2CKLJcQqAKAyrEHN-aklsyHFKq6vF8ZFsmk,957
-pip/_vendor/urllib3/contrib/appengine.py,sha256=lfzpHFmJiO82shClLEm3QB62SYgHWnjpZOH_2JhU5Tc,11034
-pip/_vendor/urllib3/contrib/ntlmpool.py,sha256=ej9gGvfAb2Gt00lafFp45SIoRz-QwrQ4WChm6gQmAlM,4538
-pip/_vendor/urllib3/contrib/pyopenssl.py,sha256=rt9NEIP8iMBLxxRhH0jLnmshW-OFP83jEayxMSqu2MU,17182
-pip/_vendor/urllib3/contrib/securetransport.py,sha256=yhZdmVjY6PI6EeFbp7qYOp6-vp1Rkv2NMuOGaEj7pmc,34448
-pip/_vendor/urllib3/contrib/socks.py,sha256=aRi9eWXo9ZEb95XUxef4Z21CFlnnjbEiAo9HOseoMt4,7097
-pip/_vendor/urllib3/contrib/_securetransport/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-pip/_vendor/urllib3/contrib/_securetransport/bindings.py,sha256=4Xk64qIkPBt09A5q-RIFUuDhNc9mXilVapm7WnYnzRw,17632
-pip/_vendor/urllib3/contrib/_securetransport/low_level.py,sha256=B2JBB2_NRP02xK6DCa1Pa9IuxrPwxzDzZbixQkb7U9M,13922
-pip/_vendor/urllib3/packages/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-pip/_vendor/urllib3/packages/six.py,sha256=b9LM0wBXv7E7SrbCjAm4wwN-hrH-iNxv18LgWNMMKPo,34665
-pip/_vendor/urllib3/packages/backports/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-pip/_vendor/urllib3/packages/backports/makefile.py,sha256=nbzt3i0agPVP07jqqgjhaYjMmuAi_W5E0EywZivVO8E,1417
-pip/_vendor/urllib3/util/__init__.py,sha256=JEmSmmqqLyaw8P51gUImZh8Gwg9i1zSe-DoqAitn2nc,1155
-pip/_vendor/urllib3/util/connection.py,sha256=5Lx2B1PW29KxBn2T0xkN1CBgRBa3gGVJBKoQoRogEVk,4901
-pip/_vendor/urllib3/util/proxy.py,sha256=zUvPPCJrp6dOF0N4GAVbOcl6o-4uXKSrGiTkkr5vUS4,1605
-pip/_vendor/urllib3/util/queue.py,sha256=nRgX8_eX-_VkvxoX096QWoz8Ps0QHUAExILCY_7PncM,498
-pip/_vendor/urllib3/util/request.py,sha256=C0OUt2tcU6LRiQJ7YYNP9GvPrSvl7ziIBekQ-5nlBZk,3997
-pip/_vendor/urllib3/util/response.py,sha256=GJpg3Egi9qaJXRwBh5wv-MNuRWan5BIu40oReoxWP28,3510
-pip/_vendor/urllib3/util/retry.py,sha256=iESg2PvViNdXBRY4MpL4h0kqwOOkHkxmLn1kkhFHPU8,22001
-pip/_vendor/urllib3/util/ssl_.py,sha256=X4-AqW91aYPhPx6-xbf66yHFQKbqqfC_5Zt4WkLX1Hc,17177
-pip/_vendor/urllib3/util/ssl_match_hostname.py,sha256=Ir4cZVEjmAk8gUAIHWSi7wtOO83UCYABY2xFD1Ql_WA,5758
-pip/_vendor/urllib3/util/ssltransport.py,sha256=NA-u5rMTrDFDFC8QzRKUEKMG0561hOD4qBTr3Z4pv6E,6895
-pip/_vendor/urllib3/util/timeout.py,sha256=QSbBUNOB9yh6AnDn61SrLQ0hg5oz0I9-uXEG91AJuIg,10003
-pip/_vendor/urllib3/util/url.py,sha256=49HwObaTUUjqVe4qvSUvIjZyf3ghgNA6-OLm3kmkFKM,14287
-pip/_vendor/urllib3/util/wait.py,sha256=fOX0_faozG2P7iVojQoE1mbydweNyTcm-hXEfFrTtLI,5403
-pip/_vendor/webencodings/__init__.py,sha256=qOBJIuPy_4ByYH6W_bNgJF-qYQ2DoU-dKsDu5yRWCXg,10579
-pip/_vendor/webencodings/labels.py,sha256=4AO_KxTddqGtrL9ns7kAPjb0CcN6xsCIxbK37HY9r3E,8979
-pip/_vendor/webencodings/mklabels.py,sha256=GYIeywnpaLnP0GSic8LFWgd0UVvO_l1Nc6YoF-87R_4,1305
-pip/_vendor/webencodings/tests.py,sha256=OtGLyjhNY1fvkW1GvLJ_FV9ZoqC9Anyjr7q3kxTbzNs,6563
-pip/_vendor/webencodings/x_user_defined.py,sha256=yOqWSdmpytGfUgh_Z6JYgDNhoc-BAHyyeeT15Fr42tM,4307
-pip-22.3.1.dist-info/LICENSE.txt,sha256=Y0MApmnUmurmWxLGxIySTFGkzfPR_whtw0VtyLyqIQQ,1093
-pip-22.3.1.dist-info/METADATA,sha256=a9COYc5qzklDgbGlrKYkypMXon4A6IDgpeUTWLr7zzY,4072
-pip-22.3.1.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92
-pip-22.3.1.dist-info/entry_points.txt,sha256=ynZN1_707_L23Oa8_O5LOxEoccj1nDa4xHT5galfN7o,125
-pip-22.3.1.dist-info/top_level.txt,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
-pip-22.3.1.dist-info/RECORD,,
-pip/_vendor/six.cpython-310.pyc,,
-pip/_vendor/rich/_stack.cpython-310.pyc,,
-pip/_vendor/rich/terminal_theme.cpython-310.pyc,,
-pip/_vendor/urllib3/util/ssltransport.cpython-310.pyc,,
-pip/_vendor/pkg_resources/py31compat.cpython-310.pyc,,
-pip/_vendor/cachecontrol/filewrapper.cpython-310.pyc,,
-pip/_vendor/urllib3/__pycache__,,
-pip/_vendor/rich/color_triplet.cpython-310.pyc,,
-pip/_vendor/cachecontrol/wrapper.cpython-310.pyc,,
-pip/_vendor/distlib/scripts.cpython-310.pyc,,
-pip/_vendor/chardet/euctwprober.cpython-310.pyc,,
-pip/_internal/req/__pycache__,,
-pip/_vendor/resolvelib/__pycache__,,
-pip/_vendor/rich/bar.cpython-310.pyc,,
-pip/_vendor/pep517/__pycache__,,
-pip/_vendor/chardet/latin1prober.cpython-310.pyc,,
-pip/_internal/utils/_log.cpython-310.pyc,,
-pip/_internal/operations/install/__pycache__,,
-pip/_vendor/urllib3/util/proxy.cpython-310.pyc,,
-pip/_vendor/urllib3/contrib/_appengine_environ.cpython-310.pyc,,
-pip/_vendor/tenacity/__init__.cpython-310.pyc,,
-pip/_internal/utils/setuptools_build.cpython-310.pyc,,
-pip/_internal/cache.cpython-310.pyc,,
-pip/_internal/__pycache__,,
-pip/_vendor/rich/segment.cpython-310.pyc,,
-pip/_vendor/rich/region.cpython-310.pyc,,
-pip/_vendor/rich/json.cpython-310.pyc,,
-pip/_internal/distributions/__pycache__,,
-pip/_vendor/urllib3/packages/six.cpython-310.pyc,,
-pip/_vendor/rich/screen.cpython-310.pyc,,
-pip/_vendor/cachecontrol/cache.cpython-310.pyc,,
-pip/_vendor/distlib/resources.cpython-310.pyc,,
-pip/_vendor/cachecontrol/__pycache__,,
-pip/_internal/locations/base.cpython-310.pyc,,
-pip/_internal/locations/_distutils.cpython-310.pyc,,
-pip/_internal/operations/check.cpython-310.pyc,,
-pip/_vendor/rich/prompt.cpython-310.pyc,,
-pip/_vendor/idna/uts46data.cpython-310.pyc,,
-pip/_vendor/cachecontrol/compat.cpython-310.pyc,,
-pip/_vendor/idna/__init__.cpython-310.pyc,,
-pip/_vendor/__pycache__,,
-pip/_internal/operations/install/wheel.cpython-310.pyc,,
-pip/_vendor/rich/markup.cpython-310.pyc,,
-pip/_vendor/pygments/sphinxext.cpython-310.pyc,,
-pip/_vendor/pep517/in_process/__pycache__,,
-pip/_vendor/cachecontrol/controller.cpython-310.pyc,,
-pip/_internal/utils/virtualenv.cpython-310.pyc,,
-pip/_internal/network/session.cpython-310.pyc,,
-pip/_vendor/pyparsing/common.cpython-310.pyc,,
-pip/_internal/resolution/resolvelib/__pycache__,,
-pip/_vendor/requests/hooks.cpython-310.pyc,,
-pip/_internal/distributions/wheel.cpython-310.pyc,,
-pip/_internal/utils/misc.cpython-310.pyc,,
-pip/_internal/utils/subprocess.cpython-310.pyc,,
-pip/_internal/operations/build/wheel_legacy.cpython-310.pyc,,
-pip/_vendor/urllib3/connectionpool.cpython-310.pyc,,
-pip/_internal/resolution/legacy/resolver.cpython-310.pyc,,
-pip/_vendor/rich/errors.cpython-310.pyc,,
-pip/_vendor/cachecontrol/caches/__pycache__,,
-pip/_internal/models/scheme.cpython-310.pyc,,
-pip/_internal/commands/help.cpython-310.pyc,,
-pip/_internal/metadata/importlib/__init__.cpython-310.pyc,,
-pip/_internal/req/req_uninstall.cpython-310.pyc,,
-pip/_vendor/rich/_win32_console.cpython-310.pyc,,
-../../../bin/pip3,,
-pip/_internal/cli/main.cpython-310.pyc,,
-pip/_internal/cli/__init__.cpython-310.pyc,,
-pip/_vendor/rich/cells.cpython-310.pyc,,
-pip/_vendor/rich/__init__.cpython-310.pyc,,
-pip/_vendor/urllib3/packages/__pycache__,,
-pip/_vendor/pygments/formatters/pangomarkup.cpython-310.pyc,,
-pip/_vendor/rich/rule.cpython-310.pyc,,
-pip/_vendor/rich/_windows.cpython-310.pyc,,
-pip/_vendor/pep517/envbuild.cpython-310.pyc,,
-pip/_internal/models/target_python.cpython-310.pyc,,
-pip/_vendor/cachecontrol/serialize.cpython-310.pyc,,
-pip/_internal/resolution/legacy/__init__.cpython-310.pyc,,
-pip/_vendor/platformdirs/api.cpython-310.pyc,,
-pip/_vendor/pygments/__init__.cpython-310.pyc,,
-pip/_internal/models/__pycache__,,
-pip/_internal/req/req_set.cpython-310.pyc,,
-pip/_vendor/packaging/markers.cpython-310.pyc,,
-pip/_vendor/chardet/cli/__init__.cpython-310.pyc,,
-pip/_internal/models/format_control.cpython-310.pyc,,
-pip/_internal/operations/freeze.cpython-310.pyc,,
-pip/_vendor/chardet/charsetprober.cpython-310.pyc,,
-pip/_vendor/msgpack/exceptions.cpython-310.pyc,,
-pip/_vendor/urllib3/util/ssl_.cpython-310.pyc,,
-pip/_vendor/urllib3/contrib/pyopenssl.cpython-310.pyc,,
-pip/_internal/commands/completion.cpython-310.pyc,,
-pip/_vendor/rich/console.cpython-310.pyc,,
-pip/_vendor/requests/auth.cpython-310.pyc,,
-pip/_vendor/rich/pretty.cpython-310.pyc,,
-pip/_vendor/rich/_log_render.cpython-310.pyc,,
-pip/_vendor/requests/__init__.cpython-310.pyc,,
-pip/_vendor/pygments/console.cpython-310.pyc,,
-pip/_internal/utils/deprecation.cpython-310.pyc,,
-pip/_vendor/chardet/metadata/__init__.cpython-310.pyc,,
-pip/_vendor/colorama/initialise.cpython-310.pyc,,
-pip/_vendor/tenacity/__pycache__,,
-pip/_vendor/colorama/ansitowin32.cpython-310.pyc,,
-pip/_internal/metadata/importlib/_compat.cpython-310.pyc,,
-pip/_internal/metadata/importlib/_dists.cpython-310.pyc,,
-pip/_vendor/rich/default_styles.cpython-310.pyc,,
-pip/_internal/utils/models.cpython-310.pyc,,
-pip/_internal/cli/main_parser.cpython-310.pyc,,
-pip/_vendor/rich/layout.cpython-310.pyc,,
-pip/_vendor/requests/api.cpython-310.pyc,,
-pip/_internal/metadata/base.cpython-310.pyc,,
-pip/_vendor/tenacity/_utils.cpython-310.pyc,,
-pip/_vendor/rich/text.cpython-310.pyc,,
-pip/_internal/utils/filesystem.cpython-310.pyc,,
-pip/_vendor/idna/__pycache__,,
-pip/_vendor/chardet/codingstatemachine.cpython-310.pyc,,
-../../../bin/pip-3.10,,
-pip/_internal/commands/search.cpython-310.pyc,,
-pip/_vendor/idna/compat.cpython-310.pyc,,
-pip/_vendor/distlib/util.cpython-310.pyc,,
-pip/_internal/resolution/resolvelib/resolver.cpython-310.pyc,,
-pip/_vendor/platformdirs/unix.cpython-310.pyc,,
-pip/_internal/utils/urls.cpython-310.pyc,,
-pip/_vendor/chardet/sjisprober.cpython-310.pyc,,
-pip/_internal/commands/list.cpython-310.pyc,,
-pip/_internal/resolution/__init__.cpython-310.pyc,,
-pip/_vendor/rich/control.cpython-310.pyc,,
-pip/_vendor/packaging/version.cpython-310.pyc,,
-pip/_vendor/distro/distro.cpython-310.pyc,,
-pip/_vendor/platformdirs/__pycache__,,
-pip/_vendor/rich/file_proxy.cpython-310.pyc,,
-pip/_internal/metadata/importlib/__pycache__,,
-pip/_vendor/urllib3/util/connection.cpython-310.pyc,,
-pip/_vendor/rich/_export_format.cpython-310.pyc,,
-pip/_vendor/pyparsing/results.cpython-310.pyc,,
-pip/_internal/utils/egg_link.cpython-310.pyc,,
-pip/_vendor/chardet/escprober.cpython-310.pyc,,
-pip/_vendor/rich/__pycache__,,
-pip/_vendor/rich/padding.cpython-310.pyc,,
-pip/_vendor/platformdirs/android.cpython-310.pyc,,
-pip/_internal/req/constructors.cpython-310.pyc,,
-pip/_vendor/pyparsing/exceptions.cpython-310.pyc,,
-pip/_internal/resolution/legacy/__pycache__,,
-pip/_vendor/pygments/__pycache__,,
-pip/_vendor/platformdirs/__main__.cpython-310.pyc,,
-pip/_internal/commands/freeze.cpython-310.pyc,,
-pip/_vendor/cachecontrol/caches/file_cache.cpython-310.pyc,,
-pip/_vendor/packaging/specifiers.cpython-310.pyc,,
-pip/_internal/resolution/base.cpython-310.pyc,,
-pip/_vendor/rich/themes.cpython-310.pyc,,
-pip/_vendor/chardet/cli/__pycache__,,
-pip/_vendor/pep517/wrappers.cpython-310.pyc,,
-pip/_vendor/pyparsing/testing.cpython-310.pyc,,
-pip/_vendor/urllib3/fields.cpython-310.pyc,,
-pip/_vendor/platformdirs/version.cpython-310.pyc,,
-pip/_vendor/pygments/__main__.cpython-310.pyc,,
-pip/_vendor/platformdirs/windows.cpython-310.pyc,,
-pip/_internal/req/req_install.cpython-310.pyc,,
-pip/_vendor/rich/_ratio.cpython-310.pyc,,
-pip/_vendor/urllib3/contrib/securetransport.cpython-310.pyc,,
-pip/_vendor/rich/box.cpython-310.pyc,,
-pip/_internal/cli/progress_bars.cpython-310.pyc,,
-pip/_vendor/tenacity/nap.cpython-310.pyc,,
-pip/_vendor/rich/table.cpython-310.pyc,,
-pip/_vendor/requests/structures.cpython-310.pyc,,
-pip/_vendor/pygments/formatters/rtf.cpython-310.pyc,,
-pip/_vendor/platformdirs/macos.cpython-310.pyc,,
-pip/_vendor/packaging/_structures.cpython-310.pyc,,
-pip/_vendor/distlib/metadata.cpython-310.pyc,,
-pip/_vendor/requests/__pycache__,,
-pip/_internal/models/search_scope.cpython-310.pyc,,
-pip/_vendor/certifi/core.cpython-310.pyc,,
-pip/_internal/locations/__init__.cpython-310.pyc,,
-pip/_vendor/requests/compat.cpython-310.pyc,,
-pip/_vendor/cachecontrol/caches/redis_cache.cpython-310.pyc,,
-pip/_vendor/pygments/lexer.cpython-310.pyc,,
-pip/_internal/models/index.cpython-310.pyc,,
-pip/_internal/index/package_finder.cpython-310.pyc,,
-pip/_internal/build_env.cpython-310.pyc,,
-pip/_vendor/urllib3/contrib/_securetransport/low_level.cpython-310.pyc,,
-pip/_internal/vcs/versioncontrol.cpython-310.pyc,,
-pip/_vendor/tenacity/retry.cpython-310.pyc,,
-pip/_vendor/tomli/_re.cpython-310.pyc,,
-pip/_vendor/chardet/johabfreq.cpython-310.pyc,,
-pip/_internal/network/utils.cpython-310.pyc,,
-pip/_internal/utils/compatibility_tags.cpython-310.pyc,,
-pip/_vendor/urllib3/util/response.cpython-310.pyc,,
-pip-22.3.1.dist-info/__pycache__,,
-pip/_internal/commands/__init__.cpython-310.pyc,,
-pip/_vendor/pygments/formatter.cpython-310.pyc,,
-pip/_vendor/pygments/formatters/html.cpython-310.pyc,,
-pip/_internal/operations/build/__init__.cpython-310.pyc,,
-pip/_internal/cli/cmdoptions.cpython-310.pyc,,
-pip/_vendor/pep517/colorlog.cpython-310.pyc,,
-pip/_vendor/chardet/escsm.cpython-310.pyc,,
-pip/_internal/utils/__init__.cpython-310.pyc,,
-pip/_internal/operations/build/metadata_legacy.cpython-310.pyc,,
-pip/_vendor/rich/logging.cpython-310.pyc,,
-pip/_vendor/rich/_wrap.cpython-310.pyc,,
-pip/_vendor/urllib3/filepost.cpython-310.pyc,,
-pip/_vendor/requests/certs.cpython-310.pyc,,
-pip/_vendor/pygments/styles/__init__.cpython-310.pyc,,
-pip/_vendor/pyparsing/unicode.cpython-310.pyc,,
-pip/_vendor/urllib3/_version.cpython-310.pyc,,
-pip/_vendor/urllib3/packages/backports/__pycache__,,
-pip/_vendor/rich/ansi.cpython-310.pyc,,
-pip/_vendor/tenacity/tornadoweb.cpython-310.pyc,,
-pip/_vendor/chardet/__init__.cpython-310.pyc,,
-pip/_vendor/chardet/euckrprober.cpython-310.pyc,,
-pip/_vendor/pygments/plugin.cpython-310.pyc,,
-pip/_internal/index/__init__.cpython-310.pyc,,
-pip/_internal/vcs/bazaar.cpython-310.pyc,,
-pip/_vendor/distlib/__init__.cpython-310.pyc,,
-pip/_internal/vcs/__init__.cpython-310.pyc,,
-pip/_vendor/chardet/langrussianmodel.cpython-310.pyc,,
-pip/_vendor/urllib3/util/request.cpython-310.pyc,,
-pip/_vendor/chardet/mbcsgroupprober.cpython-310.pyc,,
-pip/_internal/resolution/resolvelib/candidates.cpython-310.pyc,,
-pip/_vendor/chardet/johabprober.cpython-310.pyc,,
-pip/_internal/utils/encoding.cpython-310.pyc,,
-pip/_internal/network/download.cpython-310.pyc,,
-pip/_vendor/tenacity/wait.cpython-310.pyc,,
-pip/_vendor/rich/constrain.cpython-310.pyc,,
-pip/_internal/metadata/_json.cpython-310.pyc,,
-pip/_internal/cli/base_command.cpython-310.pyc,,
-pip/_vendor/webencodings/tests.cpython-310.pyc,,
-pip/_vendor/rich/_pick.cpython-310.pyc,,
-pip/_internal/network/lazy_wheel.cpython-310.pyc,,
-pip/_vendor/tomli/__init__.cpython-310.pyc,,
-pip/_vendor/urllib3/contrib/ntlmpool.cpython-310.pyc,,
-pip/_vendor/chardet/charsetgroupprober.cpython-310.pyc,,
-pip/_vendor/colorama/win32.cpython-310.pyc,,
-pip/_vendor/distlib/manifest.cpython-310.pyc,,
-pip/_internal/cli/autocompletion.cpython-310.pyc,,
-pip/_vendor/pep517/check.cpython-310.pyc,,
-pip/_vendor/rich/scope.cpython-310.pyc,,
-pip/_vendor/urllib3/util/retry.cpython-310.pyc,,
-pip/_vendor/rich/_palettes.cpython-310.pyc,,
-pip/_internal/utils/entrypoints.cpython-310.pyc,,
-pip/_vendor/chardet/euctwfreq.cpython-310.pyc,,
-pip/_vendor/pyparsing/core.cpython-310.pyc,,
-pip/_vendor/rich/protocol.cpython-310.pyc,,
-pip/_internal/resolution/resolvelib/provider.cpython-310.pyc,,
-pip/_vendor/pygments/scanner.cpython-310.pyc,,
-pip/_vendor/idna/codec.cpython-310.pyc,,
-pip/_vendor/chardet/big5freq.cpython-310.pyc,,
-pip/_internal/commands/download.cpython-310.pyc,,
-pip/_vendor/urllib3/util/__init__.cpython-310.pyc,,
-pip/_vendor/rich/_extension.cpython-310.pyc,,
-pip/_vendor/chardet/langhebrewmodel.cpython-310.pyc,,
-pip/_vendor/webencodings/labels.cpython-310.pyc,,
-pip/_internal/req/req_file.cpython-310.pyc,,
-pip/_vendor/rich/tree.cpython-310.pyc,,
-pip/_vendor/distlib/markers.cpython-310.pyc,,
-pip/_vendor/rich/syntax.cpython-310.pyc,,
-pip/_internal/commands/uninstall.cpython-310.pyc,,
-pip/_internal/metadata/__init__.cpython-310.pyc,,
-pip/_vendor/urllib3/util/ssl_match_hostname.cpython-310.pyc,,
-pip/_vendor/pyparsing/util.cpython-310.pyc,,
-pip/_internal/locations/__pycache__,,
-pip/_internal/vcs/git.cpython-310.pyc,,
-pip/_internal/operations/build/metadata_editable.cpython-310.pyc,,
-pip/_vendor/cachecontrol/_cmd.cpython-310.pyc,,
-pip/_internal/vcs/subversion.cpython-310.pyc,,
-pip/_vendor/pep517/build.cpython-310.pyc,,
-pip/_internal/utils/filetypes.cpython-310.pyc,,
-pip/_vendor/rich/diagnose.cpython-310.pyc,,
-pip/_vendor/rich/_inspect.cpython-310.pyc,,
-pip/_vendor/colorama/__init__.cpython-310.pyc,,
-pip/_vendor/pygments/styles/__pycache__,,
-pip/_vendor/requests/sessions.cpython-310.pyc,,
-pip/_internal/operations/prepare.cpython-310.pyc,,
-pip/_vendor/chardet/langbulgarianmodel.cpython-310.pyc,,
-pip/_vendor/chardet/eucjpprober.cpython-310.pyc,,
-pip/_vendor/msgpack/__init__.cpython-310.pyc,,
-pip/_vendor/pygments/filters/__init__.cpython-310.pyc,,
-pip/_vendor/chardet/__pycache__,,
-pip/__init__.cpython-310.pyc,,
-pip/_vendor/certifi/__init__.cpython-310.pyc,,
-pip/_vendor/urllib3/exceptions.cpython-310.pyc,,
-pip/_internal/index/__pycache__,,
-pip/__pip-runner__.cpython-310.pyc,,
-pip/_internal/metadata/importlib/_envs.cpython-310.pyc,,
-pip/_internal/vcs/__pycache__,,
-pip/_internal/operations/__init__.cpython-310.pyc,,
-pip/_vendor/distlib/__pycache__,,
-pip/_vendor/cachecontrol/adapter.cpython-310.pyc,,
-pip/_internal/index/collector.cpython-310.pyc,,
-pip/_vendor/distlib/compat.cpython-310.pyc,,
-pip/_vendor/pygments/formatters/svg.cpython-310.pyc,,
-pip-22.3.1.dist-info/INSTALLER,,
-pip/_internal/resolution/resolvelib/reporter.cpython-310.pyc,,
-pip/_internal/operations/build/wheel.cpython-310.pyc,,
-pip/_internal/utils/appdirs.cpython-310.pyc,,
-pip/_internal/commands/install.cpython-310.pyc,,
-pip/_vendor/pygments/token.cpython-310.pyc,,
-pip/_internal/utils/wheel.cpython-310.pyc,,
-pip/_internal/commands/hash.cpython-310.pyc,,
-pip/_internal/exceptions.cpython-310.pyc,,
-pip/_vendor/urllib3/contrib/__init__.cpython-310.pyc,,
-pip/_vendor/tomli/__pycache__,,
-pip/_vendor/pep517/meta.cpython-310.pyc,,
-pip/_vendor/chardet/euckrfreq.cpython-310.pyc,,
-pip/_vendor/pygments/formatters/__init__.cpython-310.pyc,,
-pip/_vendor/distlib/wheel.cpython-310.pyc,,
-pip/_internal/req/__init__.cpython-310.pyc,,
-pip/_vendor/tenacity/after.cpython-310.pyc,,
-pip/_vendor/rich/_cell_widths.cpython-310.pyc,,
-pip-22.3.1.virtualenv,,
-pip/_internal/cli/parser.cpython-310.pyc,,
-pip/_internal/cli/__pycache__,,
-pip/_vendor/distlib/database.cpython-310.pyc,,
-pip/_internal/resolution/resolvelib/found_candidates.cpython-310.pyc,,
-pip/_internal/main.cpython-310.pyc,,
-pip/_internal/network/auth.cpython-310.pyc,,
-pip/_internal/models/link.cpython-310.pyc,,
-pip/_vendor/chardet/langturkishmodel.cpython-310.pyc,,
-pip/_internal/utils/logging.cpython-310.pyc,,
-pip/_internal/cli/req_command.cpython-310.pyc,,
-pip/_vendor/urllib3/packages/backports/makefile.cpython-310.pyc,,
-pip/_vendor/chardet/gb2312freq.cpython-310.pyc,,
-pip/_vendor/rich/progress_bar.cpython-310.pyc,,
-pip/_vendor/pygments/cmdline.cpython-310.pyc,,
-pip/_vendor/urllib3/util/__pycache__,,
-pip/_internal/network/__init__.cpython-310.pyc,,
-pip/_internal/utils/glibc.cpython-310.pyc,,
-pip/_internal/vcs/mercurial.cpython-310.pyc,,
-pip/_vendor/tenacity/_asyncio.cpython-310.pyc,,
-pip/_vendor/pygments/formatters/img.cpython-310.pyc,,
-pip/_vendor/rich/__main__.cpython-310.pyc,,
-pip/_vendor/requests/__version__.cpython-310.pyc,,
-pip/_internal/operations/build/metadata.cpython-310.pyc,,
-pip/_internal/pyproject.cpython-310.pyc,,
-pip/_vendor/urllib3/util/url.cpython-310.pyc,,
-pip/_vendor/rich/_windows_renderer.cpython-310.pyc,,
-pip/_internal/metadata/__pycache__,,
-pip/_internal/cli/status_codes.cpython-310.pyc,,
-pip/_vendor/requests/help.cpython-310.pyc,,
-pip/_internal/commands/configuration.cpython-310.pyc,,
-pip/_vendor/resolvelib/providers.cpython-310.pyc,,
-pip/_vendor/pyparsing/__init__.cpython-310.pyc,,
-pip/_vendor/distro/__init__.cpython-310.pyc,,
-pip/_vendor/packaging/tags.cpython-310.pyc,,
-pip/_vendor/chardet/mbcharsetprober.cpython-310.pyc,,
-pip/_vendor/cachecontrol/caches/__init__.cpython-310.pyc,,
-pip/_vendor/webencodings/__init__.cpython-310.pyc,,
-pip/_internal/distributions/base.cpython-310.pyc,,
-pip/_vendor/typing_extensions.cpython-310.pyc,,
-pip/_vendor/pygments/formatters/terminal.cpython-310.pyc,,
-pip/_vendor/chardet/metadata/__pycache__,,
-pip/_vendor/rich/live.cpython-310.pyc,,
-pip/_vendor/chardet/chardistribution.cpython-310.pyc,,
-pip/_internal/utils/packaging.cpython-310.pyc,,
-pip/_vendor/rich/_emoji_codes.cpython-310.pyc,,
-pip/_vendor/msgpack/__pycache__,,
-pip/_vendor/chardet/metadata/languages.cpython-310.pyc,,
-pip/_vendor/pygments/filters/__pycache__,,
-pip/_vendor/pygments/modeline.cpython-310.pyc,,
-pip/__pycache__,,
-pip/_vendor/certifi/__pycache__,,
-pip/_vendor/msgpack/fallback.cpython-310.pyc,,
-pip/_vendor/urllib3/contrib/_securetransport/bindings.cpython-310.pyc,,
-pip/_internal/operations/__pycache__,,
-pip/_vendor/urllib3/contrib/appengine.cpython-310.pyc,,
-pip/_internal/models/__init__.cpython-310.pyc,,
-pip/_vendor/pygments/lexers/_mapping.cpython-310.pyc,,
-pip/_internal/models/installation_report.cpython-310.pyc,,
-pip/_internal/resolution/resolvelib/base.cpython-310.pyc,,
-pip/_vendor/urllib3/connection.cpython-310.pyc,,
-pip/_vendor/pygments/lexers/python.cpython-310.pyc,,
-pip/_vendor/requests/status_codes.cpython-310.pyc,,
-pip/_internal/commands/debug.cpython-310.pyc,,
-pip/_vendor/chardet/universaldetector.cpython-310.pyc,,
-pip/_vendor/urllib3/contrib/__pycache__,,
-pip/_vendor/pygments/style.cpython-310.pyc,,
-pip/_vendor/urllib3/util/queue.cpython-310.pyc,,
-pip/_vendor/certifi/__main__.cpython-310.pyc,,
-pip/_internal/locations/_sysconfig.cpython-310.pyc,,
-pip/_internal/resolution/__pycache__,,
-pip/_vendor/rich/align.cpython-310.pyc,,
-pip/_vendor/rich/filesize.cpython-310.pyc,,
-pip/_vendor/pygments/formatters/bbcode.cpython-310.pyc,,
-pip/_vendor/pygments/formatters/__pycache__,,
-pip/_vendor/chardet/jisfreq.cpython-310.pyc,,
-pip/_vendor/requests/models.cpython-310.pyc,,
-pip/_vendor/pygments/lexers/__init__.cpython-310.pyc,,
-pip/_vendor/chardet/big5prober.cpython-310.pyc,,
-pip/_vendor/distlib/index.cpython-310.pyc,,
-pip/_internal/operations/install/legacy.cpython-310.pyc,,
-pip/_internal/models/direct_url.cpython-310.pyc,,
-pip/_vendor/packaging/__init__.cpython-310.pyc,,
-pip/_internal/metadata/pkg_resources.cpython-310.pyc,,
-pip/_internal/models/selection_prefs.cpython-310.pyc,,
-pip/_internal/models/candidate.cpython-310.pyc,,
-pip/_internal/index/sources.cpython-310.pyc,,
-pip/_vendor/packaging/__about__.cpython-310.pyc,,
-pip/_internal/network/cache.cpython-310.pyc,,
-pip/_vendor/colorama/ansi.cpython-310.pyc,,
-pip/_internal/network/__pycache__,,
-pip/_internal/utils/direct_url_helpers.cpython-310.pyc,,
-pip/_vendor/pyparsing/helpers.cpython-310.pyc,,
-pip/_vendor/pkg_resources/__init__.cpython-310.pyc,,
-pip/_vendor/rich/columns.cpython-310.pyc,,
-pip/_internal/utils/temp_dir.cpython-310.pyc,,
-pip/_vendor/rich/emoji.cpython-310.pyc,,
-pip/_internal/self_outdated_check.cpython-310.pyc,,
-pip/_vendor/urllib3/response.cpython-310.pyc,,
-pip/_vendor/requests/adapters.cpython-310.pyc,,
-pip/_vendor/resolvelib/reporters.cpython-310.pyc,,
-pip/_vendor/rich/palette.cpython-310.pyc,,
-pip/_internal/commands/inspect.cpython-310.pyc,,
-pip/_vendor/resolvelib/compat/__init__.cpython-310.pyc,,
-pip/_vendor/pyparsing/__pycache__,,
-pip/_vendor/colorama/winterm.cpython-310.pyc,,
-pip/_vendor/rich/status.cpython-310.pyc,,
-pip/_vendor/distro/__pycache__,,
-pip/_internal/utils/hashes.cpython-310.pyc,,
-pip/_vendor/pygments/filter.cpython-310.pyc,,
-pip/_vendor/msgpack/ext.cpython-310.pyc,,
-pip/_vendor/rich/styled.cpython-310.pyc,,
-pip/_vendor/resolvelib/compat/collections_abc.cpython-310.pyc,,
-pip/_vendor/webencodings/__pycache__,,
-pip/_internal/operations/build/build_tracker.cpython-310.pyc,,
-pip/_vendor/platformdirs/__init__.cpython-310.pyc,,
-pip/_vendor/packaging/utils.cpython-310.pyc,,
-pip/_internal/utils/datetime.cpython-310.pyc,,
-pip/_vendor/packaging/requirements.cpython-310.pyc,,
-pip/_vendor/urllib3/util/wait.cpython-310.pyc,,
-pip/_vendor/rich/repr.cpython-310.pyc,,
-pip/_internal/cli/command_context.cpython-310.pyc,,
-pip/_vendor/rich/containers.cpython-310.pyc,,
-pip/_vendor/chardet/hebrewprober.cpython-310.pyc,,
-pip/_vendor/idna/idnadata.cpython-310.pyc,,
-pip/_vendor/urllib3/request.cpython-310.pyc,,
-pip/_vendor/distro/__main__.cpython-310.pyc,,
-pip/_internal/commands/cache.cpython-310.pyc,,
-pip/_internal/utils/unpacking.cpython-310.pyc,,
-pip/_vendor/urllib3/contrib/socks.cpython-310.pyc,,
-pip/_internal/commands/__pycache__,,
-pip/_vendor/pygments/unistring.cpython-310.pyc,,
-pip/_internal/operations/build/__pycache__,,
-pip/_vendor/rich/spinner.cpython-310.pyc,,
-pip/_internal/utils/__pycache__,,
-pip/_vendor/pep517/dirtools.cpython-310.pyc,,
-pip/_vendor/urllib3/poolmanager.cpython-310.pyc,,
-pip/_internal/utils/compat.cpython-310.pyc,,
-pip/_vendor/rich/_emoji_replace.cpython-310.pyc,,
-pip/_vendor/requests/exceptions.cpython-310.pyc,,
-pip/_vendor/cachecontrol/heuristics.cpython-310.pyc,,
-pip/_vendor/tenacity/before_sleep.cpython-310.pyc,,
-pip/_vendor/pyparsing/diagram/__init__.cpython-310.pyc,,
-pip/_vendor/chardet/sbcharsetprober.cpython-310.pyc,,
-pip/_vendor/rich/panel.cpython-310.pyc,,
-pip/_vendor/requests/_internal_utils.cpython-310.pyc,,
-pip/_vendor/urllib3/contrib/_securetransport/__init__.cpython-310.pyc,,
-pip/_vendor/pygments/formatters/_mapping.cpython-310.pyc,,
-pip/_vendor/pygments/formatters/other.cpython-310.pyc,,
-pip/_vendor/chardet/langthaimodel.cpython-310.pyc,,
-pip/_internal/commands/wheel.cpython-310.pyc,,
-pip/_vendor/rich/theme.cpython-310.pyc,,
-pip/_internal/resolution/resolvelib/factory.cpython-310.pyc,,
-pip/_vendor/tenacity/before.cpython-310.pyc,,
-pip/_vendor/webencodings/x_user_defined.cpython-310.pyc,,
-pip/_vendor/urllib3/util/timeout.cpython-310.pyc,,
-pip/_internal/models/wheel.cpython-310.pyc,,
-pip/_internal/configuration.cpython-310.pyc,,
-pip/_vendor/pygments/formatters/irc.cpython-310.pyc,,
-pip/_vendor/pygments/lexers/__pycache__,,
-pip/_vendor/tenacity/stop.cpython-310.pyc,,
-pip/_vendor/packaging/_musllinux.cpython-310.pyc,,
-pip/_vendor/rich/traceback.cpython-310.pyc,,
-pip/_vendor/idna/package_data.cpython-310.pyc,,
-pip/_vendor/rich/live_render.cpython-310.pyc,,
-pip/_vendor/pygments/formatters/groff.cpython-310.pyc,,
-pip/_vendor/packaging/__pycache__,,
-pip/_vendor/rich/_loop.cpython-310.pyc,,
-pip/_vendor/urllib3/__init__.cpython-310.pyc,,
-pip/_vendor/chardet/jpcntx.cpython-310.pyc,,
-pip/_vendor/chardet/version.cpython-310.pyc,,
-pip/_vendor/resolvelib/__init__.cpython-310.pyc,,
-pip/_vendor/rich/jupyter.cpython-310.pyc,,
-pip/_vendor/idna/core.cpython-310.pyc,,
-pip/_vendor/chardet/mbcssm.cpython-310.pyc,,
-pip/_internal/commands/check.cpython-310.pyc,,
-pip/_vendor/pep517/__init__.cpython-310.pyc,,
-pip/_vendor/distlib/version.cpython-310.pyc,,
-pip/_internal/network/xmlrpc.cpython-310.pyc,,
-pip/_internal/operations/install/__init__.cpython-310.pyc,,
-pip/_vendor/pygments/regexopt.cpython-310.pyc,,
-pip/_vendor/urllib3/packages/backports/__init__.cpython-310.pyc,,
-pip/_internal/__init__.cpython-310.pyc,,
-../../../bin/pip,,
-pip/_vendor/packaging/_manylinux.cpython-310.pyc,,
-pip/_vendor/pygments/formatters/latex.cpython-310.pyc,,
-pip/_vendor/pkg_resources/__pycache__,,
-pip/_internal/distributions/__init__.cpython-310.pyc,,
-pip/_vendor/urllib3/_collections.cpython-310.pyc,,
-pip/_vendor/webencodings/mklabels.cpython-310.pyc,,
-pip/_vendor/chardet/cp949prober.cpython-310.pyc,,
-pip/_internal/commands/show.cpython-310.pyc,,
-pip/_vendor/cachecontrol/__init__.cpython-310.pyc,,
-pip/_vendor/chardet/gb2312prober.cpython-310.pyc,,
-pip/_vendor/resolvelib/compat/__pycache__,,
-pip/_vendor/pep517/in_process/_in_process.cpython-310.pyc,,
-pip/_vendor/rich/progress.cpython-310.pyc,,
-pip/_vendor/chardet/enums.cpython-310.pyc,,
-pip/_vendor/requests/utils.cpython-310.pyc,,
-pip/_vendor/__init__.cpython-310.pyc,,
-pip/_internal/cli/spinners.cpython-310.pyc,,
-pip/_vendor/rich/_timer.cpython-310.pyc,,
-pip/_internal/operations/install/editable_legacy.cpython-310.pyc,,
-pip/_vendor/resolvelib/resolvers.cpython-310.pyc,,
-pip/_vendor/chardet/langhungarianmodel.cpython-310.pyc,,
-pip/_vendor/pep517/in_process/__init__.cpython-310.pyc,,
-pip/_vendor/tomli/_types.cpython-310.pyc,,
-pip/_vendor/rich/_spinners.cpython-310.pyc,,
-pip/_vendor/pyparsing/actions.cpython-310.pyc,,
-pip/_vendor/rich/measure.cpython-310.pyc,,
-pip/_internal/resolution/resolvelib/__init__.cpython-310.pyc,,
-pip/_vendor/requests/cookies.cpython-310.pyc,,
-pip/_vendor/colorama/__pycache__,,
-pip/_vendor/tomli/_parser.cpython-310.pyc,,
-pip/_vendor/chardet/sbcsgroupprober.cpython-310.pyc,,
-pip/_vendor/pygments/formatters/terminal256.cpython-310.pyc,,
-pip/_vendor/chardet/utf8prober.cpython-310.pyc,,
-pip/_vendor/requests/packages.cpython-310.pyc,,
-pip/_vendor/urllib3/packages/__init__.cpython-310.pyc,,
-pip/_internal/distributions/sdist.cpython-310.pyc,,
-pip/_vendor/rich/highlighter.cpython-310.pyc,,
-pip/_vendor/pyparsing/diagram/__pycache__,,
-pip/_internal/operations/build/wheel_editable.cpython-310.pyc,,
-pip/_vendor/pep517/_compat.cpython-310.pyc,,
-pip/_vendor/idna/intranges.cpython-310.pyc,,
-pip/_vendor/distlib/locators.cpython-310.pyc,,
-pip/_vendor/pygments/util.cpython-310.pyc,,
-pip/_vendor/urllib3/contrib/_securetransport/__pycache__,,
-pip/_internal/distributions/installed.cpython-310.pyc,,
-pip/_internal/utils/distutils_args.cpython-310.pyc,,
-pip/_vendor/rich/style.cpython-310.pyc,,
-../../../bin/pip3.10,,
-pip/_internal/utils/inject_securetransport.cpython-310.pyc,,
-pip/_vendor/rich/abc.cpython-310.pyc,,
-pip/_vendor/chardet/langgreekmodel.cpython-310.pyc,,
-pip/_vendor/rich/color.cpython-310.pyc,,
-pip/_vendor/rich/pager.cpython-310.pyc,,
-pip/_vendor/chardet/cli/chardetect.cpython-310.pyc,,
-pip/_internal/wheel_builder.cpython-310.pyc,,
-pip/_internal/commands/index.cpython-310.pyc,,
-pip/_vendor/resolvelib/structs.cpython-310.pyc,,
-pip/_internal/resolution/resolvelib/requirements.cpython-310.pyc,,
-pip/__main__.cpython-310.pyc,,
-pip/_vendor/chardet/utf1632prober.cpython-310.pyc,,
\ No newline at end of file
diff --git a/venv/lib/python3.10/site-packages/pip-22.3.1.dist-info/entry_points.txt b/venv/lib/python3.10/site-packages/pip-22.3.1.dist-info/entry_points.txt
deleted file mode 100644
index 5367846..0000000
--- a/venv/lib/python3.10/site-packages/pip-22.3.1.dist-info/entry_points.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-[console_scripts]
-pip = pip._internal.cli.main:main
-pip3 = pip._internal.cli.main:main
-pip3.10 = pip._internal.cli.main:main
diff --git a/venv/lib/python3.10/site-packages/pip/__init__.py b/venv/lib/python3.10/site-packages/pip/__init__.py
index 5563b5d..8a50472 100644
--- a/venv/lib/python3.10/site-packages/pip/__init__.py
+++ b/venv/lib/python3.10/site-packages/pip/__init__.py
@@ -1,6 +1,6 @@
 from typing import List, Optional
 
-__version__ = "22.3.1"
+__version__ = "22.0.2"
 
 
 def main(args: Optional[List[str]] = None) -> int:
diff --git a/venv/lib/python3.10/site-packages/pip/__pip-runner__.py b/venv/lib/python3.10/site-packages/pip/__pip-runner__.py
deleted file mode 100644
index 49a148a..0000000
--- a/venv/lib/python3.10/site-packages/pip/__pip-runner__.py
+++ /dev/null
@@ -1,50 +0,0 @@
-"""Execute exactly this copy of pip, within a different environment.
-
-This file is named as it is, to ensure that this module can't be imported via
-an import statement.
-"""
-
-# /!\ This version compatibility check section must be Python 2 compatible. /!\
-
-import sys
-
-# Copied from setup.py
-PYTHON_REQUIRES = (3, 7)
-
-
-def version_str(version):  # type: ignore
-    return ".".join(str(v) for v in version)
-
-
-if sys.version_info[:2] < PYTHON_REQUIRES:
-    raise SystemExit(
-        "This version of pip does not support python {} (requires >={}).".format(
-            version_str(sys.version_info[:2]), version_str(PYTHON_REQUIRES)
-        )
-    )
-
-# From here on, we can use Python 3 features, but the syntax must remain
-# Python 2 compatible.
-
-import runpy  # noqa: E402
-from importlib.machinery import PathFinder  # noqa: E402
-from os.path import dirname  # noqa: E402
-
-PIP_SOURCES_ROOT = dirname(dirname(__file__))
-
-
-class PipImportRedirectingFinder:
-    @classmethod
-    def find_spec(self, fullname, path=None, target=None):  # type: ignore
-        if fullname != "pip":
-            return None
-
-        spec = PathFinder.find_spec(fullname, [PIP_SOURCES_ROOT], target)
-        assert spec, (PIP_SOURCES_ROOT, fullname)
-        return spec
-
-
-sys.meta_path.insert(0, PipImportRedirectingFinder())
-
-assert __name__ == "__main__", "Cannot run __pip-runner__.py as a non-main module"
-runpy.run_module("pip", run_name="__main__", alter_sys=True)
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/build_env.py b/venv/lib/python3.10/site-packages/pip/_internal/build_env.py
index cc2b38b..daeb7fb 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/build_env.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/build_env.py
@@ -1,16 +1,17 @@
 """Build Environment used for isolation during sdist building
 """
 
+import contextlib
 import logging
 import os
 import pathlib
-import site
 import sys
 import textwrap
+import zipfile
 from collections import OrderedDict
 from sysconfig import get_paths
 from types import TracebackType
-from typing import TYPE_CHECKING, Iterable, List, Optional, Set, Tuple, Type
+from typing import TYPE_CHECKING, Iterable, Iterator, List, Optional, Set, Tuple, Type
 
 from pip._vendor.certifi import where
 from pip._vendor.packaging.requirements import Requirement
@@ -19,7 +20,7 @@
 from pip import __file__ as pip_location
 from pip._internal.cli.spinners import open_spinner
 from pip._internal.locations import get_platlib, get_prefixed_libs, get_purelib
-from pip._internal.metadata import get_default_environment, get_environment
+from pip._internal.metadata import get_environment
 from pip._internal.utils.subprocess import call_subprocess
 from pip._internal.utils.temp_dir import TempDirectory, tempdir_kinds
 
@@ -40,40 +41,30 @@ def __init__(self, path: str) -> None:
         self.lib_dirs = get_prefixed_libs(path)
 
 
-def get_runnable_pip() -> str:
-    """Get a file to pass to a Python executable, to run the currently-running pip.
+@contextlib.contextmanager
+def _create_standalone_pip() -> Iterator[str]:
+    """Create a "standalone pip" zip file.
 
-    This is used to run a pip subprocess, for installing requirements into the build
-    environment.
+    The zip file's content is identical to the currently-running pip.
+    It will be used to install requirements into the build environment.
     """
     source = pathlib.Path(pip_location).resolve().parent
 
+    # Return the current instance if `source` is not a directory. We can't build
+    # a zip from this, and it likely means the instance is already standalone.
     if not source.is_dir():
-        # This would happen if someone is using pip from inside a zip file. In that
-        # case, we can use that directly.
-        return str(source)
+        yield str(source)
+        return
 
-    return os.fsdecode(source / "__pip-runner__.py")
-
-
-def _get_system_sitepackages() -> Set[str]:
-    """Get system site packages
-
-    Usually from site.getsitepackages,
-    but fallback on `get_purelib()/get_platlib()` if unavailable
-    (e.g. in a virtualenv created by virtualenv<20)
-
-    Returns normalized set of strings.
-    """
-    if hasattr(site, "getsitepackages"):
-        system_sites = site.getsitepackages()
-    else:
-        # virtualenv < 20 overwrites site.py without getsitepackages
-        # fallback on get_purelib/get_platlib.
-        # this is known to miss things, but shouldn't in the cases
-        # where getsitepackages() has been removed (inside a virtualenv)
-        system_sites = [get_purelib(), get_platlib()]
-    return {os.path.normcase(path) for path in system_sites}
+    with TempDirectory(kind="standalone-pip") as tmp_dir:
+        pip_zip = os.path.join(tmp_dir.path, "__env_pip__.zip")
+        kwargs = {}
+        if sys.version_info >= (3, 8):
+            kwargs["strict_timestamps"] = False
+        with zipfile.ZipFile(pip_zip, "w", **kwargs) as zf:
+            for child in source.rglob("*"):
+                zf.write(child, child.relative_to(source.parent).as_posix())
+        yield os.path.join(pip_zip, "pip")
 
 
 class BuildEnvironment:
@@ -96,8 +87,9 @@ def __init__(self) -> None:
         # Customize site to:
         # - ensure .pth files are honored
         # - prevent access to system site packages
-        system_sites = _get_system_sitepackages()
-
+        system_sites = {
+            os.path.normcase(site) for site in (get_purelib(), get_platlib())
+        }
         self._site_dir = os.path.join(temp_dir.path, "site")
         if not os.path.exists(self._site_dir):
             os.mkdir(self._site_dir)
@@ -176,17 +168,9 @@ def check_requirements(
         missing = set()
         conflicting = set()
         if reqs:
-            env = (
-                get_environment(self._lib_dirs)
-                if hasattr(self, "_lib_dirs")
-                else get_default_environment()
-            )
+            env = get_environment(self._lib_dirs)
             for req_str in reqs:
                 req = Requirement(req_str)
-                # We're explicitly evaluating with an empty extra value, since build
-                # environments are not provided any mechanism to select specific extras.
-                if req.marker is not None and not req.marker.evaluate({"extra": ""}):
-                    continue
                 dist = env.get_distribution(req.name)
                 if not dist:
                     missing.add(req_str)
@@ -195,7 +179,7 @@ def check_requirements(
                     installed_req_str = f"{req.name}=={dist.version}"
                 else:
                     installed_req_str = f"{req.name}==={dist.version}"
-                if not req.specifier.contains(dist.version, prereleases=True):
+                if dist.version not in req.specifier:
                     conflicting.add((installed_req_str, req_str))
                 # FIXME: Consider direct URL?
         return conflicting, missing
@@ -213,13 +197,15 @@ def install_requirements(
         prefix.setup = True
         if not requirements:
             return
-        self._install_requirements(
-            get_runnable_pip(),
-            finder,
-            requirements,
-            prefix,
-            kind=kind,
-        )
+        with contextlib.ExitStack() as ctx:
+            pip_runnable = ctx.enter_context(_create_standalone_pip())
+            self._install_requirements(
+                pip_runnable,
+                finder,
+                requirements,
+                prefix,
+                kind=kind,
+            )
 
     @staticmethod
     def _install_requirements(
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/cache.py b/venv/lib/python3.10/site-packages/pip/_internal/cache.py
index c53b7f0..1d6df22 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/cache.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/cache.py
@@ -5,14 +5,12 @@
 import json
 import logging
 import os
-from pathlib import Path
 from typing import Any, Dict, List, Optional, Set
 
 from pip._vendor.packaging.tags import Tag, interpreter_name, interpreter_version
 from pip._vendor.packaging.utils import canonicalize_name
 
 from pip._internal.exceptions import InvalidWheelFilename
-from pip._internal.models.direct_url import DirectUrl
 from pip._internal.models.format_control import FormatControl
 from pip._internal.models.link import Link
 from pip._internal.models.wheel import Wheel
@@ -21,8 +19,6 @@
 
 logger = logging.getLogger(__name__)
 
-ORIGIN_JSON_NAME = "origin.json"
-
 
 def _hash_dict(d: Dict[str, str]) -> str:
     """Return a stable sha224 of a dictionary."""
@@ -208,10 +204,6 @@ def __init__(
     ):
         self.link = link
         self.persistent = persistent
-        self.origin: Optional[DirectUrl] = None
-        origin_direct_url_path = Path(self.link.file_path).parent / ORIGIN_JSON_NAME
-        if origin_direct_url_path.exists():
-            self.origin = DirectUrl.from_json(origin_direct_url_path.read_text())
 
 
 class WheelCache(Cache):
@@ -221,11 +213,7 @@ class WheelCache(Cache):
     when a certain link is not found in the simple wheel cache first.
     """
 
-    def __init__(
-        self, cache_dir: str, format_control: Optional[FormatControl] = None
-    ) -> None:
-        if format_control is None:
-            format_control = FormatControl()
+    def __init__(self, cache_dir: str, format_control: FormatControl) -> None:
         super().__init__(cache_dir, format_control, {"binary"})
         self._wheel_cache = SimpleWheelCache(cache_dir, format_control)
         self._ephem_cache = EphemWheelCache(format_control)
@@ -274,20 +262,3 @@ def get_cache_entry(
             return CacheEntry(retval, persistent=False)
 
         return None
-
-    @staticmethod
-    def record_download_origin(cache_dir: str, download_info: DirectUrl) -> None:
-        origin_path = Path(cache_dir) / ORIGIN_JSON_NAME
-        if origin_path.is_file():
-            origin = DirectUrl.from_json(origin_path.read_text())
-            # TODO: use DirectUrl.equivalent when https://github.com/pypa/pip/pull/10564
-            # is merged.
-            if origin.url != download_info.url:
-                logger.warning(
-                    "Origin URL %s in cache entry %s does not match download URL %s. "
-                    "This is likely a pip bug or a cache corruption issue.",
-                    origin.url,
-                    cache_dir,
-                    download_info.url,
-                )
-        origin_path.write_text(download_info.to_json(), encoding="utf-8")
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/cli/base_command.py b/venv/lib/python3.10/site-packages/pip/_internal/cli/base_command.py
index 5bd7e67..f5dc0fe 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/cli/base_command.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/cli/base_command.py
@@ -10,8 +10,6 @@
 from optparse import Values
 from typing import Any, Callable, List, Optional, Tuple
 
-from pip._vendor.rich import traceback as rich_traceback
-
 from pip._internal.cli import cmdoptions
 from pip._internal.cli.command_context import CommandContextMixIn
 from pip._internal.cli.parser import ConfigOptionParser, UpdatingDefaultsHelpFormatter
@@ -151,6 +149,13 @@ def _main(self, args: List[str]) -> int:
                 )
                 options.cache_dir = None
 
+        if "2020-resolver" in options.features_enabled:
+            logger.warning(
+                "--use-feature=2020-resolver no longer has any effect, "
+                "since it is now the default dependency resolver in pip. "
+                "This will become an error in pip 21.0."
+            )
+
         def intercepts_unhandled_exc(
             run_func: Callable[..., int]
         ) -> Callable[..., int]:
@@ -161,7 +166,7 @@ def exc_logging_wrapper(*args: Any) -> int:
                     assert isinstance(status, int)
                     return status
                 except DiagnosticPipError as exc:
-                    logger.error("[present-rich] %s", exc)
+                    logger.error("[present-diagnostic] %s", exc)
                     logger.debug("Exception information:", exc_info=True)
 
                     return ERROR
@@ -210,7 +215,6 @@ def exc_logging_wrapper(*args: Any) -> int:
                 run = intercepts_unhandled_exc(self.run)
             else:
                 run = self.run
-                rich_traceback.install(show_locals=True)
             return run(options, args)
         finally:
             self.handle_pip_version_check(options)
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/cli/cmdoptions.py b/venv/lib/python3.10/site-packages/pip/_internal/cli/cmdoptions.py
index b4e2560..b7e54f7 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/cli/cmdoptions.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/cli/cmdoptions.py
@@ -10,7 +10,6 @@
 # The following comment should be removed at some point in the future.
 # mypy: strict-optional=False
 
-import importlib.util
 import logging
 import os
 import textwrap
@@ -22,6 +21,7 @@
 from pip._vendor.packaging.utils import canonicalize_name
 
 from pip._internal.cli.parser import ConfigOptionParser
+from pip._internal.cli.progress_bars import BAR_TYPES
 from pip._internal.exceptions import CommandError
 from pip._internal.locations import USER_CACHE_DIR, get_src_prefix
 from pip._internal.models.format_control import FormatControl
@@ -59,6 +59,31 @@ def make_option_group(group: Dict[str, Any], parser: ConfigOptionParser) -> Opti
     return option_group
 
 
+def check_install_build_global(
+    options: Values, check_options: Optional[Values] = None
+) -> None:
+    """Disable wheels if per-setup.py call options are set.
+
+    :param options: The OptionParser options to update.
+    :param check_options: The options to check, if not supplied defaults to
+        options.
+    """
+    if check_options is None:
+        check_options = options
+
+    def getname(n: str) -> Optional[Any]:
+        return getattr(check_options, n, None)
+
+    names = ["build_options", "global_options", "install_options"]
+    if any(map(getname, names)):
+        control = options.format_control
+        control.disallow_binaries()
+        logger.warning(
+            "Disabling all use of wheels due to the use of --build-option "
+            "/ --global-option / --install-option.",
+        )
+
+
 def check_dist_restriction(options: Values, check_target: bool = False) -> None:
     """Function for determining if custom platform options are allowed.
 
@@ -164,13 +189,6 @@ class PipOption(Option):
     ),
 )
 
-python: Callable[..., Option] = partial(
-    Option,
-    "--python",
-    dest="python",
-    help="Run pip with the specified Python interpreter.",
-)
-
 verbose: Callable[..., Option] = partial(
     Option,
     "-v",
@@ -218,9 +236,13 @@ class PipOption(Option):
     "--progress-bar",
     dest="progress_bar",
     type="choice",
-    choices=["on", "off"],
+    choices=list(BAR_TYPES.keys()),
     default="on",
-    help="Specify whether the progress bar should be used [on, off] (default: on)",
+    help=(
+        "Specify type of progress to be displayed ["
+        + "|".join(BAR_TYPES.keys())
+        + "] (default: %default)"
+    ),
 )
 
 log: Callable[..., Option] = partial(
@@ -250,7 +272,7 @@ class PipOption(Option):
     dest="proxy",
     type="str",
     default="",
-    help="Specify a proxy in the form scheme://[user:passwd@]proxy.server:port.",
+    help="Specify a proxy in the form [user:passwd@]proxy.server:port.",
 )
 
 retries: Callable[..., Option] = partial(
@@ -731,15 +753,6 @@ def _handle_no_cache_dir(
     "if this option is used.",
 )
 
-check_build_deps: Callable[..., Option] = partial(
-    Option,
-    "--check-build-dependencies",
-    dest="check_build_deps",
-    action="store_true",
-    default=False,
-    help="Check the build dependencies when PEP517 is used.",
-)
-
 
 def _handle_no_use_pep517(
     option: Option, opt: str, value: str, parser: OptionParser
@@ -762,12 +775,6 @@ def _handle_no_use_pep517(
         """
         raise_option_error(parser, option=option, msg=msg)
 
-    # If user doesn't wish to use pep517, we check if setuptools is installed
-    # and raise error if it is not.
-    if not importlib.util.find_spec("setuptools"):
-        msg = "It is not possible to use --no-use-pep517 without setuptools installed."
-        raise_option_error(parser, option=option, msg=msg)
-
     # Otherwise, --no-use-pep517 was passed via the command-line.
     parser.values.use_pep517 = False
 
@@ -792,33 +799,6 @@ def _handle_no_use_pep517(
     help=SUPPRESS_HELP,
 )
 
-
-def _handle_config_settings(
-    option: Option, opt_str: str, value: str, parser: OptionParser
-) -> None:
-    key, sep, val = value.partition("=")
-    if sep != "=":
-        parser.error(f"Arguments to {opt_str} must be of the form KEY=VAL")  # noqa
-    dest = getattr(parser.values, option.dest)
-    if dest is None:
-        dest = {}
-        setattr(parser.values, option.dest, dest)
-    dest[key] = val
-
-
-config_settings: Callable[..., Option] = partial(
-    Option,
-    "--config-settings",
-    dest="config_settings",
-    type=str,
-    action="callback",
-    callback=_handle_config_settings,
-    metavar="settings",
-    help="Configuration settings to be passed to the PEP 517 build backend. "
-    "Settings take the form KEY=VALUE. Use multiple --config-settings options "
-    "to pass multiple keys to the backend.",
-)
-
 install_options: Callable[..., Option] = partial(
     Option,
     "--install-option",
@@ -873,20 +853,11 @@ def _handle_config_settings(
     "--disable-pip-version-check",
     dest="disable_pip_version_check",
     action="store_true",
-    default=False,
+    default=True,
     help="Don't periodically check PyPI to determine whether a new version "
     "of pip is available for download. Implied with --no-index.",
 )
 
-root_user_action: Callable[..., Option] = partial(
-    Option,
-    "--root-user-action",
-    dest="root_user_action",
-    default="warn",
-    choices=["warn", "ignore"],
-    help="Action if pip is run as a root user. By default, a warning message is shown.",
-)
-
 
 def _handle_merge_hash(
     option: Option, opt_str: str, value: str, parser: OptionParser
@@ -982,11 +953,7 @@ def check_list_path_option(options: Values) -> None:
     metavar="feature",
     action="append",
     default=[],
-    choices=[
-        "fast-deps",
-        "truststore",
-        "no-binary-enable-wheel-cache",
-    ],
+    choices=["2020-resolver", "fast-deps", "in-tree-build"],
     help="Enable new functionality, that may be backward incompatible.",
 )
 
@@ -999,6 +966,9 @@ def check_list_path_option(options: Values) -> None:
     default=[],
     choices=[
         "legacy-resolver",
+        "out-of-tree-build",
+        "backtrack-on-build-failures",
+        "html5lib",
     ],
     help=("Enable deprecated functionality, that will be removed in the future."),
 )
@@ -1015,7 +985,6 @@ def check_list_path_option(options: Values) -> None:
         debug_mode,
         isolated_mode,
         require_virtualenv,
-        python,
         verbose,
         version,
         quiet,
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/cli/command_context.py b/venv/lib/python3.10/site-packages/pip/_internal/cli/command_context.py
index 139995a..ed68322 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/cli/command_context.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/cli/command_context.py
@@ -1,5 +1,5 @@
 from contextlib import ExitStack, contextmanager
-from typing import ContextManager, Generator, TypeVar
+from typing import ContextManager, Iterator, TypeVar
 
 _T = TypeVar("_T", covariant=True)
 
@@ -11,7 +11,7 @@ def __init__(self) -> None:
         self._main_context = ExitStack()
 
     @contextmanager
-    def main_context(self) -> Generator[None, None, None]:
+    def main_context(self) -> Iterator[None]:
         assert not self._in_main_context
 
         self._in_main_context = True
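
Both annotations seen in this hunk are valid for a generator-based context manager: `Generator[None, None, None]` is the precise generic, while `Iterator[None]` is an equivalent, shorter spelling when nothing is sent into or returned from the generator. A small sketch:

```python
from contextlib import contextmanager
from typing import Iterator

@contextmanager
def managed() -> Iterator[None]:
    print("enter")
    try:
        yield  # the with-body runs here
    finally:
        print("exit")

with managed():
    print("body")
```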
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/cli/main_parser.py b/venv/lib/python3.10/site-packages/pip/_internal/cli/main_parser.py
index 5ade356..3666ab0 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/cli/main_parser.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/cli/main_parser.py
@@ -2,11 +2,9 @@
 """
 
 import os
-import subprocess
 import sys
-from typing import List, Optional, Tuple
+from typing import List, Tuple
 
-from pip._internal.build_env import get_runnable_pip
 from pip._internal.cli import cmdoptions
 from pip._internal.cli.parser import ConfigOptionParser, UpdatingDefaultsHelpFormatter
 from pip._internal.commands import commands_dict, get_similar_commands
@@ -47,25 +45,6 @@ def create_main_parser() -> ConfigOptionParser:
     return parser
 
 
-def identify_python_interpreter(python: str) -> Optional[str]:
-    # If the named file exists, use it.
-    # If it's a directory, assume it's a virtual environment and
-    # look for the environment's Python executable.
-    if os.path.exists(python):
-        if os.path.isdir(python):
-            # bin/python for Unix, Scripts/python.exe for Windows
-            # Try both in case of odd cases like cygwin.
-            for exe in ("bin/python", "Scripts/python.exe"):
-                py = os.path.join(python, exe)
-                if os.path.exists(py):
-                    return py
-        else:
-            return python
-
-    # Could not find the interpreter specified
-    return None
-
-
 def parse_command(args: List[str]) -> Tuple[str, List[str]]:
     parser = create_main_parser()
 
@@ -78,32 +57,6 @@ def parse_command(args: List[str]) -> Tuple[str, List[str]]:
     #  args_else: ['install', '--user', 'INITools']
     general_options, args_else = parser.parse_args(args)
 
-    # --python
-    if general_options.python and "_PIP_RUNNING_IN_SUBPROCESS" not in os.environ:
-        # Re-invoke pip using the specified Python interpreter
-        interpreter = identify_python_interpreter(general_options.python)
-        if interpreter is None:
-            raise CommandError(
-                f"Could not locate Python interpreter {general_options.python}"
-            )
-
-        pip_cmd = [
-            interpreter,
-            get_runnable_pip(),
-        ]
-        pip_cmd.extend(args)
-
-        # Set a flag so the child doesn't re-invoke itself, causing
-        # an infinite loop.
-        os.environ["_PIP_RUNNING_IN_SUBPROCESS"] = "1"
-        returncode = 0
-        try:
-            proc = subprocess.run(pip_cmd)
-            returncode = proc.returncode
-        except (subprocess.SubprocessError, OSError) as exc:
-            raise CommandError(f"Failed to run pip under {interpreter}: {exc}")
-        sys.exit(returncode)
-
     # --version
     if general_options.version:
         sys.stdout.write(parser.version)
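
The block removed from `parse_command` re-invokes pip under a different interpreter and uses an environment variable as a re-entrancy guard so the child process does not recurse. The guard pattern in isolation (tool and variable names are illustrative, not pip's):

```python
import os
import subprocess
import sys
from typing import List

GUARD = "_MYTOOL_RUNNING_IN_SUBPROCESS"  # illustrative guard variable

def reexec_under(interpreter: str, args: List[str]) -> None:
    if GUARD in os.environ:
        return  # we are already the child; continue normal execution
    os.environ[GUARD] = "1"  # inherited by the child, breaking the loop
    proc = subprocess.run([interpreter, sys.argv[0], *args])
    sys.exit(proc.returncode)
```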
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/cli/parser.py b/venv/lib/python3.10/site-packages/pip/_internal/cli/parser.py
index c762cf2..a1c99a8 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/cli/parser.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/cli/parser.py
@@ -6,7 +6,7 @@
 import sys
 import textwrap
 from contextlib import suppress
-from typing import Any, Dict, Generator, List, Tuple
+from typing import Any, Dict, Iterator, List, Tuple
 
 from pip._internal.cli.status_codes import UNKNOWN_ERROR
 from pip._internal.configuration import Configuration, ConfigurationError
@@ -175,9 +175,7 @@ def check_default(self, option: optparse.Option, key: str, val: Any) -> Any:
             print(f"An error occurred during configuration: {exc}")
             sys.exit(3)
 
-    def _get_ordered_configuration_items(
-        self,
-    ) -> Generator[Tuple[str, Any], None, None]:
+    def _get_ordered_configuration_items(self) -> Iterator[Tuple[str, Any]]:
         # Configuration gives keys in an unordered manner. Order them.
         override_order = ["global", self.name, ":env:"]
 
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/cli/progress_bars.py b/venv/lib/python3.10/site-packages/pip/_internal/cli/progress_bars.py
index 0ad1403..ffa1964 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/cli/progress_bars.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/cli/progress_bars.py
@@ -1,6 +1,11 @@
 import functools
-from typing import Callable, Generator, Iterable, Iterator, Optional, Tuple
+import itertools
+import sys
+from signal import SIGINT, default_int_handler, signal
+from typing import Any, Callable, Iterator, Optional, Tuple
 
+from pip._vendor.progress.bar import Bar, FillingCirclesBar, IncrementalBar
+from pip._vendor.progress.spinner import Spinner
 from pip._vendor.rich.progress import (
     BarColumn,
     DownloadColumn,
@@ -14,17 +19,263 @@
     TransferSpeedColumn,
 )
 
+from pip._internal.utils.compat import WINDOWS
 from pip._internal.utils.logging import get_indentation
+from pip._internal.utils.misc import format_size
 
-DownloadProgressRenderer = Callable[[Iterable[bytes]], Iterator[bytes]]
+try:
+    from pip._vendor import colorama
+# Lots of different errors can come from this, including SystemError and
+# ImportError.
+except Exception:
+    colorama = None
 
+DownloadProgressRenderer = Callable[[Iterator[bytes]], Iterator[bytes]]
 
+
+def _select_progress_class(preferred: Bar, fallback: Bar) -> Bar:
+    encoding = getattr(preferred.file, "encoding", None)
+
+    # If we don't know what encoding this file is in, then we'll just assume
+    # that it doesn't support unicode and use the ASCII bar.
+    if not encoding:
+        return fallback
+
+    # Collect all of the possible characters we want to use with the preferred
+    # bar.
+    characters = [
+        getattr(preferred, "empty_fill", ""),
+        getattr(preferred, "fill", ""),
+    ]
+    characters += list(getattr(preferred, "phases", []))
+
+    # Try to decode the characters we're using for the bar using the encoding
+    # of the given file, if this works then we'll assume that we can use the
+    # fancier bar and if not we'll fall back to the plaintext bar.
+    try:
+        "".join(characters).encode(encoding)
+    except UnicodeEncodeError:
+        return fallback
+    else:
+        return preferred
+
+
+_BaseBar: Any = _select_progress_class(IncrementalBar, Bar)
+
+
+class InterruptibleMixin:
+    """
+    Helper to ensure that self.finish() gets called on keyboard interrupt.
+
+    This allows downloads to be interrupted without leaving temporary state
+    (like hidden cursors) behind.
+
+    This class is similar to the progress library's existing SigIntMixin
+    helper, but as of version 1.2, that helper has the following problems:
+
+    1. It calls sys.exit().
+    2. It discards the existing SIGINT handler completely.
+    3. It leaves its own handler in place even after an uninterrupted finish,
+       which will have unexpected delayed effects if the user triggers an
+       unrelated keyboard interrupt some time after a progress-displaying
+       download has already completed, for example.
+    """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        """
+        Save the original SIGINT handler for later.
+        """
+        # https://github.com/python/mypy/issues/5887
+        super().__init__(*args, **kwargs)  # type: ignore
+
+        self.original_handler = signal(SIGINT, self.handle_sigint)
+
+        # If signal() returns None, the previous handler was not installed from
+        # Python, and we cannot restore it. This probably should not happen,
+        # but if it does, we must restore something sensible instead, at least.
+        # The least bad option should be Python's default SIGINT handler, which
+        # just raises KeyboardInterrupt.
+        if self.original_handler is None:
+            self.original_handler = default_int_handler
+
+    def finish(self) -> None:
+        """
+        Restore the original SIGINT handler after finishing.
+
+        This should happen regardless of whether the progress display finishes
+        normally, or gets interrupted.
+        """
+        super().finish()  # type: ignore
+        signal(SIGINT, self.original_handler)
+
+    def handle_sigint(self, signum, frame):  # type: ignore
+        """
+        Call self.finish() before delegating to the original SIGINT handler.
+
+        This handler should only be in place while the progress display is
+        active.
+        """
+        self.finish()
+        self.original_handler(signum, frame)
+
+
+class SilentBar(Bar):
+    def update(self) -> None:
+        pass
+
+
+class BlueEmojiBar(IncrementalBar):
+
+    suffix = "%(percent)d%%"
+    bar_prefix = " "
+    bar_suffix = " "
+    phases = ("\U0001F539", "\U0001F537", "\U0001F535")
+
+
+class DownloadProgressMixin:
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        # https://github.com/python/mypy/issues/5887
+        super().__init__(*args, **kwargs)  # type: ignore
+        self.message: str = (" " * (get_indentation() + 2)) + self.message
+
+    @property
+    def downloaded(self) -> str:
+        return format_size(self.index)  # type: ignore
+
+    @property
+    def download_speed(self) -> str:
+        # Avoid zero division errors...
+        if self.avg == 0.0:  # type: ignore
+            return "..."
+        return format_size(1 / self.avg) + "/s"  # type: ignore
+
+    @property
+    def pretty_eta(self) -> str:
+        if self.eta:  # type: ignore
+            return f"eta {self.eta_td}"  # type: ignore
+        return ""
+
+    def iter(self, it):  # type: ignore
+        for x in it:
+            yield x
+            # B305 is incorrectly raised here
+            # https://github.com/PyCQA/flake8-bugbear/issues/59
+            self.next(len(x))  # noqa: B305
+        self.finish()
+
+
+class WindowsMixin:
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        # The Windows terminal does not support the hide/show cursor ANSI codes
+        # even with colorama. So we'll ensure that hide_cursor is False on
+        # Windows.
+        # This call needs to go before the super() call, so that hide_cursor
+        # is set in time. The base progress bar class writes the "hide cursor"
+        # code to the terminal in its init, so if we don't set this soon
+        # enough, we get a "hide" with no corresponding "show"...
+        if WINDOWS and self.hide_cursor:  # type: ignore
+            self.hide_cursor = False
+
+        # https://github.com/python/mypy/issues/5887
+        super().__init__(*args, **kwargs)  # type: ignore
+
+        # Check if we are running on Windows and we have the colorama module,
+        # if we do then wrap our file with it.
+        if WINDOWS and colorama:
+            self.file = colorama.AnsiToWin32(self.file)  # type: ignore
+            # The progress code expects to be able to call self.file.isatty()
+            # but the colorama.AnsiToWin32() object doesn't have that, so we'll
+            # add it.
+            self.file.isatty = lambda: self.file.wrapped.isatty()
+            # The progress code expects to be able to call self.file.flush()
+            # but the colorama.AnsiToWin32() object doesn't have that, so we'll
+            # add it.
+            self.file.flush = lambda: self.file.wrapped.flush()
+
+
+class BaseDownloadProgressBar(WindowsMixin, InterruptibleMixin, DownloadProgressMixin):
+
+    file = sys.stdout
+    message = "%(percent)d%%"
+    suffix = "%(downloaded)s %(download_speed)s %(pretty_eta)s"
+
+
+class DefaultDownloadProgressBar(BaseDownloadProgressBar, _BaseBar):
+    pass
+
+
+class DownloadSilentBar(BaseDownloadProgressBar, SilentBar):
+    pass
+
+
+class DownloadBar(BaseDownloadProgressBar, Bar):
+    pass
+
+
+class DownloadFillingCirclesBar(BaseDownloadProgressBar, FillingCirclesBar):
+    pass
+
+
+class DownloadBlueEmojiProgressBar(BaseDownloadProgressBar, BlueEmojiBar):
+    pass
+
+
+class DownloadProgressSpinner(
+    WindowsMixin, InterruptibleMixin, DownloadProgressMixin, Spinner
+):
+
+    file = sys.stdout
+    suffix = "%(downloaded)s %(download_speed)s"
+
+    def next_phase(self) -> str:
+        if not hasattr(self, "_phaser"):
+            self._phaser = itertools.cycle(self.phases)
+        return next(self._phaser)
+
+    def update(self) -> None:
+        message = self.message % self
+        phase = self.next_phase()
+        suffix = self.suffix % self
+        line = "".join(
+            [
+                message,
+                " " if message else "",
+                phase,
+                " " if suffix else "",
+                suffix,
+            ]
+        )
+
+        self.writeln(line)
+
+
+BAR_TYPES = {
+    "off": (DownloadSilentBar, DownloadSilentBar),
+    "on": (DefaultDownloadProgressBar, DownloadProgressSpinner),
+    "ascii": (DownloadBar, DownloadProgressSpinner),
+    "pretty": (DownloadFillingCirclesBar, DownloadProgressSpinner),
+    "emoji": (DownloadBlueEmojiProgressBar, DownloadProgressSpinner),
+}
+
+
+def _legacy_progress_bar(
+    progress_bar: str, max: Optional[int]
+) -> DownloadProgressRenderer:
+    if max is None or max == 0:
+        return BAR_TYPES[progress_bar][1]().iter  # type: ignore
+    else:
+        return BAR_TYPES[progress_bar][0](max=max).iter
+
+
+#
+# Modern replacement, for our legacy progress bars.
+#
 def _rich_progress_bar(
-    iterable: Iterable[bytes],
+    iterable: Iterator[bytes],
     *,
     bar_type: str,
     size: int,
-) -> Generator[bytes, None, None]:
+) -> Iterator[bytes]:
     assert bar_type == "on", "This should only be used in the default mode."
 
     if not size:
@@ -64,5 +315,7 @@ def get_download_progress_renderer(
     """
     if bar_type == "on":
         return functools.partial(_rich_progress_bar, bar_type=bar_type, size=size)
-    else:
+    elif bar_type == "off":
         return iter  # no-op, when passed an iterator
+    else:
+        return _legacy_progress_bar(bar_type, size)
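
The restored `_select_progress_class` decides between a Unicode bar and a plain ASCII bar by test-encoding the bar's characters with the output stream's declared encoding. That check in isolation:

```python
import sys

def supports_chars(stream, characters: str) -> bool:
    # No declared encoding: assume the stream cannot render non-ASCII.
    encoding = getattr(stream, "encoding", None)
    if not encoding:
        return False
    try:
        characters.encode(encoding)
    except UnicodeEncodeError:
        return False
    return True

# Block-drawing characters like the ones fancy bars use.
print(supports_chars(sys.stdout, "\u2588\u258a"))  # False on e.g. a cp1252 console
```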
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/cli/req_command.py b/venv/lib/python3.10/site-packages/pip/_internal/cli/req_command.py
index 1044809..5d4d1f0 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/cli/req_command.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/cli/req_command.py
@@ -10,7 +10,7 @@
 import sys
 from functools import partial
 from optparse import Values
-from typing import TYPE_CHECKING, Any, List, Optional, Tuple
+from typing import Any, List, Optional, Tuple
 
 from pip._internal.cache import WheelCache
 from pip._internal.cli import cmdoptions
@@ -22,7 +22,6 @@
 from pip._internal.models.selection_prefs import SelectionPreferences
 from pip._internal.models.target_python import TargetPython
 from pip._internal.network.session import PipSession
-from pip._internal.operations.build.build_tracker import BuildTracker
 from pip._internal.operations.prepare import RequirementPreparer
 from pip._internal.req.constructors import (
     install_req_from_editable,
@@ -32,8 +31,10 @@
 )
 from pip._internal.req.req_file import parse_requirements
 from pip._internal.req.req_install import InstallRequirement
+from pip._internal.req.req_tracker import RequirementTracker
 from pip._internal.resolution.base import BaseResolver
 from pip._internal.self_outdated_check import pip_self_version_check
+from pip._internal.utils.deprecation import deprecated
 from pip._internal.utils.temp_dir import (
     TempDirectory,
     TempDirectoryTypeRegistry,
@@ -41,33 +42,9 @@
 )
 from pip._internal.utils.virtualenv import running_under_virtualenv
 
-if TYPE_CHECKING:
-    from ssl import SSLContext
-
 logger = logging.getLogger(__name__)
 
 
-def _create_truststore_ssl_context() -> Optional["SSLContext"]:
-    if sys.version_info < (3, 10):
-        raise CommandError("The truststore feature is only available for Python 3.10+")
-
-    try:
-        import ssl
-    except ImportError:
-        logger.warning("Disabling truststore since ssl support is missing")
-        return None
-
-    try:
-        import truststore
-    except ImportError:
-        raise CommandError(
-            "To use the truststore feature, 'truststore' must be installed into "
-            "pip's current environment."
-        )
-
-    return truststore.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
-
-
 class SessionCommandMixin(CommandContextMixIn):
 
     """
@@ -107,27 +84,15 @@ def _build_session(
         options: Values,
         retries: Optional[int] = None,
         timeout: Optional[int] = None,
-        fallback_to_certifi: bool = False,
     ) -> PipSession:
-        cache_dir = options.cache_dir
-        assert not cache_dir or os.path.isabs(cache_dir)
-
-        if "truststore" in options.features_enabled:
-            try:
-                ssl_context = _create_truststore_ssl_context()
-            except Exception:
-                if not fallback_to_certifi:
-                    raise
-                ssl_context = None
-        else:
-            ssl_context = None
-
+        assert not options.cache_dir or os.path.isabs(options.cache_dir)
         session = PipSession(
-            cache=os.path.join(cache_dir, "http") if cache_dir else None,
+            cache=(
+                os.path.join(options.cache_dir, "http") if options.cache_dir else None
+            ),
             retries=retries if retries is not None else options.retries,
             trusted_hosts=options.trusted_hosts,
             index_urls=self._get_index_urls(options),
-            ssl_context=ssl_context,
         )
 
         # Handle custom ca-bundles from the user
@@ -177,14 +142,7 @@ def handle_pip_version_check(self, options: Values) -> None:
 
         # Otherwise, check if we're using the latest version of pip available.
         session = self._build_session(
-            options,
-            retries=0,
-            timeout=min(5, options.timeout),
-            # This is set to ensure the function does not fail when truststore is
-            # specified in use-feature but cannot be loaded. This usually raises a
-            # CommandError and shows a nice user-facing error, but this function is not
-            # called in that try-except block.
-            fallback_to_certifi=True,
+            options, retries=0, timeout=min(5, options.timeout)
         )
         with session:
             pip_self_version_check(session, options)
@@ -269,12 +227,37 @@ def determine_resolver_variant(options: Values) -> str:
 
         return "2020-resolver"
 
+    @staticmethod
+    def determine_build_failure_suppression(options: Values) -> bool:
+        """Determines whether build failures should be suppressed and backtracked on."""
+        if "backtrack-on-build-failures" not in options.deprecated_features_enabled:
+            return False
+
+        if "legacy-resolver" in options.deprecated_features_enabled:
+            raise CommandError("Cannot backtrack with legacy resolver.")
+
+        deprecated(
+            reason=(
+                "Backtracking on build failures can mask issues related to how "
+                "a package generates metadata or builds a wheel. This flag will "
+                "be removed in pip 22.2."
+            ),
+            gone_in=None,
+            replacement=(
+                "avoiding known-bad versions by explicitly telling pip to ignore them "
+                "(either directly as requirements, or via a constraints file)"
+            ),
+            feature_flag=None,
+            issue=10655,
+        )
+        return True
+
     @classmethod
     def make_requirement_preparer(
         cls,
         temp_build_dir: TempDirectory,
         options: Values,
-        build_tracker: BuildTracker,
+        req_tracker: RequirementTracker,
         session: PipSession,
         finder: PackageFinder,
         use_user_site: bool,
@@ -305,13 +288,33 @@ def make_requirement_preparer(
                     "fast-deps has no effect when used with the legacy resolver."
                 )
 
+        in_tree_build = "out-of-tree-build" not in options.deprecated_features_enabled
+        if "in-tree-build" in options.features_enabled:
+            deprecated(
+                reason="In-tree builds are now the default.",
+                replacement="to remove the --use-feature=in-tree-build flag",
+                gone_in="22.1",
+            )
+        if "out-of-tree-build" in options.deprecated_features_enabled:
+            deprecated(
+                reason="Out-of-tree builds are deprecated.",
+                replacement=None,
+                gone_in="22.1",
+            )
+
+        if options.progress_bar not in {"on", "off"}:
+            deprecated(
+                reason="Custom progress bar styles are deprecated",
+                replacement="to use the default progress bar style.",
+                gone_in="22.1",
+            )
+
         return RequirementPreparer(
             build_dir=temp_build_dir_path,
             src_dir=options.src_dir,
             download_dir=download_dir,
             build_isolation=options.build_isolation,
-            check_build_deps=options.check_build_deps,
-            build_tracker=build_tracker,
+            req_tracker=req_tracker,
             session=session,
             progress_bar=options.progress_bar,
             finder=finder,
@@ -319,6 +322,7 @@ def make_requirement_preparer(
             use_user_site=use_user_site,
             lazy_wheel=lazy_wheel,
             verbosity=verbosity,
+            in_tree_build=in_tree_build,
         )
 
     @classmethod
@@ -343,8 +347,8 @@ def make_resolver(
             install_req_from_req_string,
             isolated=options.isolated_mode,
             use_pep517=use_pep517,
-            config_settings=getattr(options, "config_settings", None),
         )
+        suppress_build_failures = cls.determine_build_failure_suppression(options)
         resolver_variant = cls.determine_resolver_variant(options)
         # The long import name and duplicated invocation is needed to convince
         # Mypy into correctly typechecking. Otherwise it would complain the
@@ -364,6 +368,7 @@ def make_resolver(
                 force_reinstall=force_reinstall,
                 upgrade_strategy=upgrade_strategy,
                 py_version_info=py_version_info,
+                suppress_build_failures=suppress_build_failures,
             )
         import pip._internal.resolution.legacy.resolver
 
@@ -414,7 +419,6 @@ def get_requirements(
                 isolated=options.isolated_mode,
                 use_pep517=options.use_pep517,
                 user_supplied=True,
-                config_settings=getattr(options, "config_settings", None),
             )
             requirements.append(req_to_add)
 
@@ -424,7 +428,6 @@ def get_requirements(
                 user_supplied=True,
                 isolated=options.isolated_mode,
                 use_pep517=options.use_pep517,
-                config_settings=getattr(options, "config_settings", None),
             )
             requirements.append(req_to_add)
 
@@ -499,4 +502,5 @@ def _build_package_finder(
             link_collector=link_collector,
             selection_prefs=selection_prefs,
             target_python=target_python,
+            use_deprecated_html5lib="html5lib" in options.deprecated_features_enabled,
         )
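
For context on the removed `_create_truststore_ssl_context`: the third-party `truststore` package builds an `ssl.SSLContext` backed by the operating system's certificate store instead of certifi's bundle. A minimal sketch of the same idea, assuming `truststore` is installed:

```python
import ssl
import sys

def make_os_trust_context() -> "ssl.SSLContext":
    if sys.version_info < (3, 10):
        raise RuntimeError("truststore requires Python 3.10+")
    import truststore  # third-party: pip install truststore
    return truststore.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
```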
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/cli/spinners.py b/venv/lib/python3.10/site-packages/pip/_internal/cli/spinners.py
index cf2b976..1e313e1 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/cli/spinners.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/cli/spinners.py
@@ -3,7 +3,9 @@
 import logging
 import sys
 import time
-from typing import IO, Generator, Optional
+from typing import IO, Iterator
+
+from pip._vendor.progress import HIDE_CURSOR, SHOW_CURSOR
 
 from pip._internal.utils.compat import WINDOWS
 from pip._internal.utils.logging import get_indentation
@@ -23,7 +25,7 @@ class InteractiveSpinner(SpinnerInterface):
     def __init__(
         self,
         message: str,
-        file: Optional[IO[str]] = None,
+        file: IO[str] = None,
         spin_chars: str = "-\\|/",
         # Empirically, 8 updates/second looks nice
         min_update_interval_seconds: float = 0.125,
@@ -113,7 +115,7 @@ def reset(self) -> None:
 
 
 @contextlib.contextmanager
-def open_spinner(message: str) -> Generator[SpinnerInterface, None, None]:
+def open_spinner(message: str) -> Iterator[SpinnerInterface]:
     # Interactive spinner goes directly to sys.stdout rather than being routed
     # through the logging system, but it acts like it has level INFO,
     # i.e. it's only displayed if we're at level INFO or better.
@@ -136,12 +138,8 @@ def open_spinner(message: str) -> Generator[SpinnerInterface, None, None]:
         spinner.finish("done")
 
 
-HIDE_CURSOR = "\x1b[?25l"
-SHOW_CURSOR = "\x1b[?25h"
-
-
 @contextlib.contextmanager
-def hidden_cursor(file: IO[str]) -> Generator[None, None, None]:
+def hidden_cursor(file: IO[str]) -> Iterator[None]:
     # The Windows terminal does not support the hide/show cursor ANSI codes,
     # even via colorama. So don't even try.
     if WINDOWS:
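
`HIDE_CURSOR` and `SHOW_CURSOR` (whether defined locally or imported from the vendored `progress` package) are the ANSI escape sequences `\x1b[?25l` and `\x1b[?25h`; `hidden_cursor` simply brackets a block of terminal output with them. Standalone:

```python
import contextlib
import sys
from typing import IO, Iterator

HIDE_CURSOR = "\x1b[?25l"
SHOW_CURSOR = "\x1b[?25h"

@contextlib.contextmanager
def hidden_cursor(file: IO[str]) -> Iterator[None]:
    file.write(HIDE_CURSOR)
    try:
        yield
    finally:
        file.write(SHOW_CURSOR)  # always restore, even on error

with hidden_cursor(sys.stdout):
    print("cursor is hidden while this runs")
```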
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/commands/__init__.py b/venv/lib/python3.10/site-packages/pip/_internal/commands/__init__.py
index 858a410..c72f24f 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/commands/__init__.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/commands/__init__.py
@@ -38,11 +38,6 @@
         "FreezeCommand",
         "Output installed packages in requirements format.",
     ),
-    "inspect": CommandInfo(
-        "pip._internal.commands.inspect",
-        "InspectCommand",
-        "Inspect the python environment.",
-    ),
     "list": CommandInfo(
         "pip._internal.commands.list",
         "ListCommand",
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/commands/cache.py b/venv/lib/python3.10/site-packages/pip/_internal/commands/cache.py
index c5f0330..f1a489d 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/commands/cache.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/commands/cache.py
@@ -105,9 +105,9 @@ def get_cache_info(self, options: Values, args: List[Any]) -> None:
                     Package index page cache location: {http_cache_location}
                     Package index page cache size: {http_cache_size}
                     Number of HTTP files: {num_http_files}
-                    Locally built wheels location: {wheels_cache_location}
-                    Locally built wheels size: {wheels_cache_size}
-                    Number of locally built wheels: {package_count}
+                    Wheels location: {wheels_cache_location}
+                    Wheels size: {wheels_cache_size}
+                    Number of wheels: {package_count}
                 """
             )
             .format(
@@ -140,7 +140,7 @@ def list_cache_items(self, options: Values, args: List[Any]) -> None:
 
     def format_for_human(self, files: List[str]) -> None:
         if not files:
-            logger.info("No locally built wheels cached.")
+            logger.info("Nothing cached.")
             return
 
         results = []
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/commands/completion.py b/venv/lib/python3.10/site-packages/pip/_internal/commands/completion.py
index deaa308..c0fb4ca 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/commands/completion.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/commands/completion.py
@@ -43,28 +43,6 @@
         end
         complete -fa "(__fish_complete_pip)" -c {prog}
     """,
-    "powershell": """
-        if ((Test-Path Function:\\TabExpansion) -and -not `
-            (Test-Path Function:\\_pip_completeBackup)) {{
-            Rename-Item Function:\\TabExpansion _pip_completeBackup
-        }}
-        function TabExpansion($line, $lastWord) {{
-            $lastBlock = [regex]::Split($line, '[|;]')[-1].TrimStart()
-            if ($lastBlock.StartsWith("{prog} ")) {{
-                $Env:COMP_WORDS=$lastBlock
-                $Env:COMP_CWORD=$lastBlock.Split().Length - 1
-                $Env:PIP_AUTO_COMPLETE=1
-                (& {prog}).Split()
-                Remove-Item Env:COMP_WORDS
-                Remove-Item Env:COMP_CWORD
-                Remove-Item Env:PIP_AUTO_COMPLETE
-            }}
-            elseif (Test-Path Function:\\_pip_completeBackup) {{
-                # Fall back on existing tab expansion
-                _pip_completeBackup $line $lastWord
-            }}
-        }}
-    """,
 }
 
 
@@ -98,14 +76,6 @@ def add_options(self) -> None:
             dest="shell",
             help="Emit completion code for fish",
         )
-        self.cmd_opts.add_option(
-            "--powershell",
-            "-p",
-            action="store_const",
-            const="powershell",
-            dest="shell",
-            help="Emit completion code for powershell",
-        )
 
         self.parser.insert_option_group(0, self.cmd_opts)
 
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/commands/configuration.py b/venv/lib/python3.10/site-packages/pip/_internal/commands/configuration.py
index 84b134e..c6c74ed 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/commands/configuration.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/commands/configuration.py
@@ -27,17 +27,11 @@ class ConfigurationCommand(Command):
 
     - list: List the active configuration (or from the file specified)
     - edit: Edit the configuration file in an editor
-    - get: Get the value associated with command.option
-    - set: Set the command.option=value
-    - unset: Unset the value associated with command.option
+    - get: Get the value associated with name
+    - set: Set the name=value
+    - unset: Unset the value associated with name
     - debug: List the configuration files and values defined under them
 
-    Configuration keys should be dot separated command and option name,
-    with the special prefix "global" affecting any command. For example,
-    "pip config set global.index-url https://example.org/" would configure
-    the index url for all commands, but "pip config set download.timeout 10"
-    would configure a 10 second timeout only for "pip download" commands.
-
     If none of --user, --global and --site are passed, a virtual
     environment configuration file is used if one is active and the file
     exists. Otherwise, all modifications happen to the user file by
@@ -49,9 +43,9 @@ class ConfigurationCommand(Command):
         %prog [] list
         %prog [] [--editor ] edit
 
-        %prog [] get command.option
-        %prog [] set command.option value
-        %prog [] unset command.option
+        %prog [] get name
+        %prog [] set name value
+        %prog [] unset name
         %prog [] debug
     """
 
@@ -228,19 +222,9 @@ def open_in_editor(self, options: Values, args: List[str]) -> None:
         fname = self.configuration.get_file_to_edit()
         if fname is None:
             raise PipError("Could not determine appropriate file.")
-        elif '"' in fname:
-            # This shouldn't happen, unless we see a username like that.
-            # If that happens, we'd appreciate a pull request fixing this.
-            raise PipError(
-                f'Can not open an editor for a file name containing "\n{fname}'
-            )
 
         try:
-            subprocess.check_call(f'{editor} "{fname}"', shell=True)
-        except FileNotFoundError as e:
-            if not e.filename:
-                e.filename = editor
-            raise
+            subprocess.check_call([editor, fname])
         except subprocess.CalledProcessError as e:
             raise PipError(
                 "Editor Subprocess exited with exit code {}".format(e.returncode)
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/commands/debug.py b/venv/lib/python3.10/site-packages/pip/_internal/commands/debug.py
index 6fad1fe..d3f1f28 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/commands/debug.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/commands/debug.py
@@ -1,4 +1,3 @@
-import importlib.resources
 import locale
 import logging
 import os
@@ -11,6 +10,7 @@
 from pip._vendor.certifi import where
 from pip._vendor.packaging.version import parse as parse_version
 
+from pip import __file__ as pip_location
 from pip._internal.cli import cmdoptions
 from pip._internal.cli.base_command import Command
 from pip._internal.cli.cmdoptions import make_target_python
@@ -35,7 +35,11 @@ def show_sys_implementation() -> None:
 
 
 def create_vendor_txt_map() -> Dict[str, str]:
-    with importlib.resources.open_text("pip._vendor", "vendor.txt") as f:
+    vendor_txt_path = os.path.join(
+        os.path.dirname(pip_location), "_vendor", "vendor.txt"
+    )
+
+    with open(vendor_txt_path) as f:
         # Purge non version specifying lines.
         # Also, remove any space prefix or suffixes (including comments).
         lines = [
@@ -43,7 +47,7 @@ def create_vendor_txt_map() -> Dict[str, str]:
         ]
 
     # Transform into "module" -> version dict.
-    return dict(line.split("==", 1) for line in lines)
+    return dict(line.split("==", 1) for line in lines)  # type: ignore
 
 
 def get_module_from_module_name(module_name: str) -> ModuleType:
@@ -63,7 +67,6 @@ def get_vendor_version_from_module(module_name: str) -> Optional[str]:
 
     if not version:
         # Try to find version in debundled module info.
-        assert module.__file__ is not None
         env = get_environment([os.path.dirname(module.__file__)])
         dist = env.get_distribution(module_name)
         if dist:
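
Both versions of `create_vendor_txt_map` read the same packaged data file; the `importlib.resources` form resolves it through the import system (and so works even when the package is not unpacked on disk), while the path-join form assumes a normal on-disk install. Roughly:

```python
import importlib.resources
import os

# Resource-based: no assumption that the package lives on the filesystem.
with importlib.resources.open_text("pip._vendor", "vendor.txt") as f:
    lines = f.read().splitlines()

# Path-based: assumes a conventional on-disk install of pip.
from pip import __file__ as pip_location
vendor_txt = os.path.join(os.path.dirname(pip_location), "_vendor", "vendor.txt")
with open(vendor_txt) as f:
    assert f.read().splitlines() == lines
```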
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/commands/download.py b/venv/lib/python3.10/site-packages/pip/_internal/commands/download.py
index 4132e08..233b7e9 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/commands/download.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/commands/download.py
@@ -7,11 +7,7 @@
 from pip._internal.cli.cmdoptions import make_target_python
 from pip._internal.cli.req_command import RequirementCommand, with_cleanup
 from pip._internal.cli.status_codes import SUCCESS
-from pip._internal.operations.build.build_tracker import get_build_tracker
-from pip._internal.req.req_install import (
-    LegacySetupPyOptionsCheckMode,
-    check_legacy_setup_py_options,
-)
+from pip._internal.req.req_tracker import get_requirement_tracker
 from pip._internal.utils.misc import ensure_dir, normalize_path, write_output
 from pip._internal.utils.temp_dir import TempDirectory
 
@@ -53,7 +49,6 @@ def add_options(self) -> None:
         self.cmd_opts.add_option(cmdoptions.no_build_isolation())
         self.cmd_opts.add_option(cmdoptions.use_pep517())
         self.cmd_opts.add_option(cmdoptions.no_use_pep517())
-        self.cmd_opts.add_option(cmdoptions.check_build_deps())
         self.cmd_opts.add_option(cmdoptions.ignore_requires_python())
 
         self.cmd_opts.add_option(
@@ -100,7 +95,7 @@ def run(self, options: Values, args: List[str]) -> int:
             ignore_requires_python=options.ignore_requires_python,
         )
 
-        build_tracker = self.enter_context(get_build_tracker())
+        req_tracker = self.enter_context(get_requirement_tracker())
 
         directory = TempDirectory(
             delete=not options.no_clean,
@@ -109,14 +104,11 @@ def run(self, options: Values, args: List[str]) -> int:
         )
 
         reqs = self.get_requirements(args, options, finder, session)
-        check_legacy_setup_py_options(
-            options, reqs, LegacySetupPyOptionsCheckMode.DOWNLOAD
-        )
 
         preparer = self.make_requirement_preparer(
             temp_build_dir=directory,
             options=options,
-            build_tracker=build_tracker,
+            req_tracker=req_tracker,
             session=session,
             finder=finder,
             download_dir=options.download_dir,
@@ -129,7 +121,6 @@ def run(self, options: Values, args: List[str]) -> int:
             finder=finder,
             options=options,
             ignore_requires_python=options.ignore_requires_python,
-            use_pep517=options.use_pep517,
             py_version_info=options.python_version,
         )
 
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/commands/freeze.py b/venv/lib/python3.10/site-packages/pip/_internal/commands/freeze.py
index 5fa6d39..6e9cc76 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/commands/freeze.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/commands/freeze.py
@@ -8,7 +8,7 @@
 from pip._internal.operations.freeze import freeze
 from pip._internal.utils.compat import stdlib_pkgs
 
-DEV_PKGS = {"pip", "setuptools", "distribute", "wheel"}
+DEV_PKGS = {"pip", "setuptools", "distribute", "wheel", "pkg-resources"}
 
 
 class FreezeCommand(Command):
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/commands/index.py b/venv/lib/python3.10/site-packages/pip/_internal/commands/index.py
index b4bf0ac..9d8aae3 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/commands/index.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/commands/index.py
@@ -97,6 +97,7 @@ def _build_package_finder(
             link_collector=link_collector,
             selection_prefs=selection_prefs,
             target_python=target_python,
+            use_deprecated_html5lib="html5lib" in options.deprecated_features_enabled,
         )
 
     def get_available_package_versions(self, options: Values, args: List[Any]) -> None:
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/commands/inspect.py b/venv/lib/python3.10/site-packages/pip/_internal/commands/inspect.py
deleted file mode 100644
index a4e3599..0000000
--- a/venv/lib/python3.10/site-packages/pip/_internal/commands/inspect.py
+++ /dev/null
@@ -1,97 +0,0 @@
-import logging
-from optparse import Values
-from typing import Any, Dict, List
-
-from pip._vendor.packaging.markers import default_environment
-from pip._vendor.rich import print_json
-
-from pip import __version__
-from pip._internal.cli import cmdoptions
-from pip._internal.cli.req_command import Command
-from pip._internal.cli.status_codes import SUCCESS
-from pip._internal.metadata import BaseDistribution, get_environment
-from pip._internal.utils.compat import stdlib_pkgs
-from pip._internal.utils.urls import path_to_url
-
-logger = logging.getLogger(__name__)
-
-
-class InspectCommand(Command):
-    """
-    Inspect the content of a Python environment and produce a report in JSON format.
-    """
-
-    ignore_require_venv = True
-    usage = """
-      %prog [options]"""
-
-    def add_options(self) -> None:
-        self.cmd_opts.add_option(
-            "--local",
-            action="store_true",
-            default=False,
-            help=(
-                "If in a virtualenv that has global access, do not list "
-                "globally-installed packages."
-            ),
-        )
-        self.cmd_opts.add_option(
-            "--user",
-            dest="user",
-            action="store_true",
-            default=False,
-            help="Only output packages installed in user-site.",
-        )
-        self.cmd_opts.add_option(cmdoptions.list_path())
-        self.parser.insert_option_group(0, self.cmd_opts)
-
-    def run(self, options: Values, args: List[str]) -> int:
-        logger.warning(
-            "pip inspect is currently an experimental command. "
-            "The output format may change in a future release without prior warning."
-        )
-
-        cmdoptions.check_list_path_option(options)
-        dists = get_environment(options.path).iter_installed_distributions(
-            local_only=options.local,
-            user_only=options.user,
-            skip=set(stdlib_pkgs),
-        )
-        output = {
-            "version": "0",
-            "pip_version": __version__,
-            "installed": [self._dist_to_dict(dist) for dist in dists],
-            "environment": default_environment(),
-            # TODO tags? scheme?
-        }
-        print_json(data=output)
-        return SUCCESS
-
-    def _dist_to_dict(self, dist: BaseDistribution) -> Dict[str, Any]:
-        res: Dict[str, Any] = {
-            "metadata": dist.metadata_dict,
-            "metadata_location": dist.info_location,
-        }
-        # direct_url. Note that we don't have download_info (as in the installation
-        # report) since it is not recorded in installed metadata.
-        direct_url = dist.direct_url
-        if direct_url is not None:
-            res["direct_url"] = direct_url.to_dict()
-        else:
-            # Emulate direct_url for legacy editable installs.
-            editable_project_location = dist.editable_project_location
-            if editable_project_location is not None:
-                res["direct_url"] = {
-                    "url": path_to_url(editable_project_location),
-                    "dir_info": {
-                        "editable": True,
-                    },
-                }
-        # installer
-        installer = dist.installer
-        if dist.installer:
-            res["installer"] = installer
-        # requested
-        if dist.installed_with_dist_info:
-            res["requested"] = dist.requested
-        return res
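
The deleted `InspectCommand` enumerates installed distributions through pip's internal metadata layer and prints a JSON report. A rough standard-library approximation of the same shape (not pip's exact report format):

```python
import json
from importlib.metadata import distributions

installed = [
    {"name": dist.metadata["Name"], "version": dist.version}
    for dist in distributions()
]
print(json.dumps({"version": "0", "installed": installed}, indent=2))
```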
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/commands/install.py b/venv/lib/python3.10/site-packages/pip/_internal/commands/install.py
index e081c27..34e4c2f 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/commands/install.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/commands/install.py
@@ -1,5 +1,4 @@
 import errno
-import json
 import operator
 import os
 import shutil
@@ -8,7 +7,6 @@
 from typing import Iterable, List, Optional
 
 from pip._vendor.packaging.utils import canonicalize_name
-from pip._vendor.rich import print_json
 
 from pip._internal.cache import WheelCache
 from pip._internal.cli import cmdoptions
@@ -23,20 +21,11 @@
 from pip._internal.locations import get_scheme
 from pip._internal.metadata import get_environment
 from pip._internal.models.format_control import FormatControl
-from pip._internal.models.installation_report import InstallationReport
-from pip._internal.operations.build.build_tracker import get_build_tracker
 from pip._internal.operations.check import ConflictDetails, check_install_conflicts
 from pip._internal.req import install_given_reqs
-from pip._internal.req.req_install import (
-    InstallRequirement,
-    LegacySetupPyOptionsCheckMode,
-    check_legacy_setup_py_options,
-)
+from pip._internal.req.req_install import InstallRequirement
+from pip._internal.req.req_tracker import get_requirement_tracker
 from pip._internal.utils.compat import WINDOWS
-from pip._internal.utils.deprecation import (
-    LegacyInstallReasonFailedBdistWheel,
-    deprecated,
-)
 from pip._internal.utils.distutils_args import parse_distutils_args
 from pip._internal.utils.filesystem import test_writable_dir
 from pip._internal.utils.logging import getLogger
@@ -52,7 +41,7 @@
     virtualenv_no_global,
 )
 from pip._internal.wheel_builder import (
-    BdistWheelAllowedPredicate,
+    BinaryAllowedPredicate,
     build,
     should_build_for_install_command,
 )
@@ -60,9 +49,7 @@
 logger = getLogger(__name__)
 
 
-def get_check_bdist_wheel_allowed(
-    format_control: FormatControl,
-) -> BdistWheelAllowedPredicate:
+def get_check_binary_allowed(format_control: FormatControl) -> BinaryAllowedPredicate:
     def check_binary_allowed(req: InstallRequirement) -> bool:
         canonical_name = canonicalize_name(req.name or "")
         allowed_formats = format_control.get_allowed_formats(canonical_name)
@@ -98,17 +85,6 @@ def add_options(self) -> None:
         self.cmd_opts.add_option(cmdoptions.pre())
 
         self.cmd_opts.add_option(cmdoptions.editable())
-        self.cmd_opts.add_option(
-            "--dry-run",
-            action="store_true",
-            dest="dry_run",
-            default=False,
-            help=(
-                "Don't actually install anything, just print what would be. "
-                "Can be used in combination with --ignore-installed "
-                "to 'resolve' the requirements."
-            ),
-        )
         self.cmd_opts.add_option(
             "-t",
             "--target",
@@ -213,9 +189,7 @@ def add_options(self) -> None:
         self.cmd_opts.add_option(cmdoptions.no_build_isolation())
         self.cmd_opts.add_option(cmdoptions.use_pep517())
         self.cmd_opts.add_option(cmdoptions.no_use_pep517())
-        self.cmd_opts.add_option(cmdoptions.check_build_deps())
 
-        self.cmd_opts.add_option(cmdoptions.config_settings())
         self.cmd_opts.add_option(cmdoptions.install_options())
         self.cmd_opts.add_option(cmdoptions.global_options())
 
@@ -248,12 +222,12 @@ def add_options(self) -> None:
             default=True,
             help="Do not warn about broken dependencies",
         )
+
         self.cmd_opts.add_option(cmdoptions.no_binary())
         self.cmd_opts.add_option(cmdoptions.only_binary())
         self.cmd_opts.add_option(cmdoptions.prefer_binary())
         self.cmd_opts.add_option(cmdoptions.require_hashes())
         self.cmd_opts.add_option(cmdoptions.progress_bar())
-        self.cmd_opts.add_option(cmdoptions.root_user_action())
 
         index_opts = cmdoptions.make_option_group(
             cmdoptions.index_group,
@@ -263,27 +237,12 @@ def add_options(self) -> None:
         self.parser.insert_option_group(0, index_opts)
         self.parser.insert_option_group(0, self.cmd_opts)
 
-        self.cmd_opts.add_option(
-            "--report",
-            dest="json_report_file",
-            metavar="file",
-            default=None,
-            help=(
-                "Generate a JSON file describing what pip did to install "
-                "the provided requirements. "
-                "Can be used in combination with --dry-run and --ignore-installed "
-                "to 'resolve' the requirements. "
-                "When - is used as file name it writes to stdout. "
-                "When writing to stdout, please combine with the --quiet option "
-                "to avoid mixing pip logging output with JSON output."
-            ),
-        )
-
     @with_cleanup
     def run(self, options: Values, args: List[str]) -> int:
         if options.use_user_site and options.target_dir is not None:
             raise CommandError("Can not combine '--user' and '--target'")
 
+        cmdoptions.check_install_build_global(options)
         upgrade_strategy = "to-satisfy-only"
         if options.upgrade:
             upgrade_strategy = options.upgrade_strategy
@@ -332,7 +291,9 @@ def run(self, options: Values, args: List[str]) -> int:
             target_python=target_python,
             ignore_requires_python=options.ignore_requires_python,
         )
-        build_tracker = self.enter_context(get_build_tracker())
+        wheel_cache = WheelCache(options.cache_dir, options.format_control)
+
+        req_tracker = self.enter_context(get_requirement_tracker())
 
         directory = TempDirectory(
             delete=not options.no_clean,
@@ -342,28 +303,6 @@ def run(self, options: Values, args: List[str]) -> int:
 
         try:
             reqs = self.get_requirements(args, options, finder, session)
-            check_legacy_setup_py_options(
-                options, reqs, LegacySetupPyOptionsCheckMode.INSTALL
-            )
-
-            if "no-binary-enable-wheel-cache" in options.features_enabled:
-                # TODO: remove format_control from WheelCache when the deprecation cycle
-                # is over
-                wheel_cache = WheelCache(options.cache_dir)
-            else:
-                if options.format_control.no_binary:
-                    deprecated(
-                        reason=(
-                            "--no-binary currently disables reading from "
-                            "the cache of locally built wheels. In the future "
-                            "--no-binary will not influence the wheel cache."
-                        ),
-                        replacement="to use the --no-cache-dir option",
-                        feature_flag="no-binary-enable-wheel-cache",
-                        issue=11453,
-                        gone_in="23.1",
-                    )
-                wheel_cache = WheelCache(options.cache_dir, options.format_control)
 
             # Only when installing is it permitted to use PEP 660.
             # In other circumstances (pip wheel, pip download) we generate
@@ -376,7 +315,7 @@ def run(self, options: Values, args: List[str]) -> int:
             preparer = self.make_requirement_preparer(
                 temp_build_dir=directory,
                 options=options,
-                build_tracker=build_tracker,
+                req_tracker=req_tracker,
                 session=session,
                 finder=finder,
                 use_user_site=options.use_user_site,
@@ -401,32 +340,6 @@ def run(self, options: Values, args: List[str]) -> int:
                 reqs, check_supported_wheels=not options.target_dir
             )
 
-            if options.json_report_file:
-                logger.warning(
-                    "--report is currently an experimental option. "
-                    "The output format may change in a future release "
-                    "without prior warning."
-                )
-
-                report = InstallationReport(requirement_set.requirements_to_install)
-                if options.json_report_file == "-":
-                    print_json(data=report.to_dict())
-                else:
-                    with open(options.json_report_file, "w", encoding="utf-8") as f:
-                        json.dump(report.to_dict(), f, indent=2, ensure_ascii=False)
-
-            if options.dry_run:
-                would_install_items = sorted(
-                    (r.metadata["name"], r.metadata["version"])
-                    for r in requirement_set.requirements_to_install
-                )
-                if would_install_items:
-                    write_output(
-                        "Would install %s",
-                        " ".join("-".join(item) for item in would_install_items),
-                    )
-                return SUCCESS
-
             try:
                 pip_req = requirement_set.get_requirement("pip")
             except KeyError:
@@ -437,14 +350,12 @@ def run(self, options: Values, args: List[str]) -> int:
                 modifying_pip = pip_req.satisfied_by is None
             protect_pip_from_modification_on_windows(modifying_pip=modifying_pip)
 
-            check_bdist_wheel_allowed = get_check_bdist_wheel_allowed(
-                finder.format_control
-            )
+            check_binary_allowed = get_check_binary_allowed(finder.format_control)
 
             reqs_to_build = [
                 r
                 for r in requirement_set.requirements.values()
-                if should_build_for_install_command(r, check_bdist_wheel_allowed)
+                if should_build_for_install_command(r, check_binary_allowed)
             ]
 
             _, build_failures = build(
@@ -452,7 +363,7 @@ def run(self, options: Values, args: List[str]) -> int:
                 wheel_cache=wheel_cache,
                 verify=True,
                 build_options=[],
-                global_options=global_options,
+                global_options=[],
             )
 
             # If we're using PEP 517, we cannot do a legacy setup.py install
@@ -473,7 +384,7 @@ def run(self, options: Values, args: List[str]) -> int:
             # those.
             for r in build_failures:
                 if not r.use_pep517:
-                    r.legacy_install_reason = LegacyInstallReasonFailedBdistWheel
+                    r.legacy_install_reason = 8368
 
             to_install = resolver.get_installation_order(requirement_set)
 
@@ -553,8 +464,8 @@ def run(self, options: Values, args: List[str]) -> int:
             self._handle_target_dir(
                 options.target_dir, target_temp_dir, options.upgrade
             )
-        if options.root_user_action == "warn":
-            warn_if_run_as_root()
+
+        warn_if_run_as_root()
         return SUCCESS
 
     def _handle_target_dir(
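
The removed `--report` handling follows the common CLI convention that `-` as a file name means standard output. Reduced to its core (with a hypothetical `report` payload):

```python
import json
import sys

def write_report(report: dict, target: str) -> None:
    if target == "-":
        # Writing to stdout: callers should silence other logging first.
        json.dump(report, sys.stdout, indent=2, ensure_ascii=False)
        sys.stdout.write("\n")
    else:
        with open(target, "w", encoding="utf-8") as f:
            json.dump(report, f, indent=2, ensure_ascii=False)

write_report({"install": []}, "-")
```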
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/commands/list.py b/venv/lib/python3.10/site-packages/pip/_internal/commands/list.py
index 8e1426d..3a545e9 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/commands/list.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/commands/list.py
@@ -1,7 +1,7 @@
 import json
 import logging
 from optparse import Values
-from typing import TYPE_CHECKING, Generator, List, Optional, Sequence, Tuple, cast
+from typing import TYPE_CHECKING, Iterator, List, Optional, Sequence, Tuple, cast
 
 from pip._vendor.packaging.utils import canonicalize_name
 
@@ -33,6 +33,8 @@ class _DistWithLatestInfo(BaseDistribution):
     _ProcessedDists = Sequence[_DistWithLatestInfo]
 
 
+from pip._vendor.packaging.version import parse
+
 logger = logging.getLogger(__name__)
 
 
@@ -149,17 +151,13 @@ def _build_package_finder(
         return PackageFinder.create(
             link_collector=link_collector,
             selection_prefs=selection_prefs,
+            use_deprecated_html5lib="html5lib" in options.deprecated_features_enabled,
         )
 
     def run(self, options: Values, args: List[str]) -> int:
         if options.outdated and options.uptodate:
             raise CommandError("Options --outdated and --uptodate cannot be combined.")
 
-        if options.outdated and options.list_format == "freeze":
-            raise CommandError(
-                "List format 'freeze' can not be used with the --outdated option."
-            )
-
         cmdoptions.check_list_path_option(options)
 
         skip = set(stdlib_pkgs)
@@ -198,7 +196,7 @@ def get_outdated(
         return [
             dist
             for dist in self.iter_packages_latest_infos(packages, options)
-            if dist.latest_version > dist.version
+            if parse(str(dist.latest_version)) > parse(str(dist.version))
         ]
 
     def get_uptodate(
@@ -207,7 +205,7 @@ def get_uptodate(
         return [
             dist
             for dist in self.iter_packages_latest_infos(packages, options)
-            if dist.latest_version == dist.version
+            if parse(str(dist.latest_version)) == parse(str(dist.version))
         ]
 
     def get_not_required(
@@ -226,7 +224,7 @@ def get_not_required(
 
     def iter_packages_latest_infos(
         self, packages: "_ProcessedDists", options: Values
-    ) -> Generator["_DistWithLatestInfo", None, None]:
+    ) -> Iterator["_DistWithLatestInfo"]:
         with self._build_session(options) as session:
             finder = self._build_package_finder(options, session)
 
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/commands/show.py b/venv/lib/python3.10/site-packages/pip/_internal/commands/show.py
index 212167c..d5540d6 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/commands/show.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/commands/show.py
@@ -1,6 +1,6 @@
 import logging
 from optparse import Values
-from typing import Generator, Iterable, Iterator, List, NamedTuple, Optional
+from typing import Iterator, List, NamedTuple, Optional
 
 from pip._vendor.packaging.utils import canonicalize_name
 
@@ -60,7 +60,6 @@ class _PackageInfo(NamedTuple):
     classifiers: List[str]
     summary: str
     homepage: str
-    project_urls: List[str]
     author: str
     author_email: str
     license: str
@@ -68,7 +67,7 @@ class _PackageInfo(NamedTuple):
     files: Optional[List[str]]
 
 
-def search_packages_info(query: List[str]) -> Generator[_PackageInfo, None, None]:
+def search_packages_info(query: List[str]) -> Iterator[_PackageInfo]:
     """
     Gather details from installed distributions. Print distribution name,
     version, location, and installed files. Installed files requires a
@@ -77,7 +76,7 @@ def search_packages_info(query: List[str]) -> Generator[_PackageInfo, None, None
     """
     env = get_default_environment()
 
-    installed = {dist.canonical_name: dist for dist in env.iter_all_distributions()}
+    installed = {dist.canonical_name: dist for dist in env.iter_distributions()}
     query_names = [canonicalize_name(name) for name in query]
     missing = sorted(
         [name for name, pkg in zip(query, query_names) if pkg not in installed]
@@ -127,7 +126,6 @@ def _get_requiring_packages(current_dist: BaseDistribution) -> Iterator[str]:
             classifiers=metadata.get_all("Classifier", []),
             summary=metadata.get("Summary", ""),
             homepage=metadata.get("Home-page", ""),
-            project_urls=metadata.get_all("Project-URL", []),
             author=metadata.get("Author", ""),
             author_email=metadata.get("Author-email", ""),
             license=metadata.get("License", ""),
@@ -137,7 +135,7 @@ def _get_requiring_packages(current_dist: BaseDistribution) -> Iterator[str]:
 
 
 def print_results(
-    distributions: Iterable[_PackageInfo],
+    distributions: Iterator[_PackageInfo],
     list_files: bool,
     verbose: bool,
 ) -> bool:
@@ -170,9 +168,6 @@ def print_results(
             write_output("Entry-points:")
             for entry in dist.entry_points:
                 write_output("  %s", entry.strip())
-            write_output("Project-URLs:")
-            for project_url in dist.project_urls:
-                write_output("  %s", project_url)
         if list_files:
             write_output("Files:")
             if dist.files is None:
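The typing churn in this file (and in list.py above) is annotation-only: for a generator function that never receives sent values and returns nothing, Generator[X, None, None] and Iterator[X] are interchangeable. A small illustration:

    from typing import Generator, Iterator

    def a() -> Generator[int, None, None]:  # explicit send/return types
        yield 1

    def b() -> Iterator[int]:  # equivalent, and shorter, for this shape
        yield 1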
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/commands/uninstall.py b/venv/lib/python3.10/site-packages/pip/_internal/commands/uninstall.py
index dea8077..bb9e8e6 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/commands/uninstall.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/commands/uninstall.py
@@ -4,7 +4,6 @@
 
 from pip._vendor.packaging.utils import canonicalize_name
 
-from pip._internal.cli import cmdoptions
 from pip._internal.cli.base_command import Command
 from pip._internal.cli.req_command import SessionCommandMixin, warn_if_run_as_root
 from pip._internal.cli.status_codes import SUCCESS
@@ -54,7 +53,7 @@ def add_options(self) -> None:
             action="store_true",
             help="Don't ask for confirmation of uninstall deletions.",
         )
-        self.cmd_opts.add_option(cmdoptions.root_user_action())
+
         self.parser.insert_option_group(0, self.cmd_opts)
 
     def run(self, options: Values, args: List[str]) -> int:
@@ -101,6 +100,6 @@ def run(self, options: Values, args: List[str]) -> int:
             )
             if uninstall_pathset:
                 uninstall_pathset.commit()
-        if options.root_user_action == "warn":
-            warn_if_run_as_root()
+
+        warn_if_run_as_root()
         return SUCCESS
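With the --root-user-action option gone on the "+" side, warn_if_run_as_root() runs unconditionally after a successful uninstall. Roughly what that helper checks (a simplified sketch, not pip's exact code, which also skips the warning inside a virtualenv):

    import os
    import sys

    def warn_if_run_as_root() -> None:
        # Sketch: warn only for a real root user on a POSIX platform.
        if not hasattr(os, "getuid") or sys.platform == "win32":
            return
        if os.getuid() == 0:
            print("WARNING: Running pip as the 'root' user ...")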
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/commands/wheel.py b/venv/lib/python3.10/site-packages/pip/_internal/commands/wheel.py
index 1afbd56..d5b20dc 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/commands/wheel.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/commands/wheel.py
@@ -9,13 +9,8 @@
 from pip._internal.cli.req_command import RequirementCommand, with_cleanup
 from pip._internal.cli.status_codes import SUCCESS
 from pip._internal.exceptions import CommandError
-from pip._internal.operations.build.build_tracker import get_build_tracker
-from pip._internal.req.req_install import (
-    InstallRequirement,
-    LegacySetupPyOptionsCheckMode,
-    check_legacy_setup_py_options,
-)
-from pip._internal.utils.deprecation import deprecated
+from pip._internal.req.req_install import InstallRequirement
+from pip._internal.req.req_tracker import get_requirement_tracker
 from pip._internal.utils.misc import ensure_dir, normalize_path
 from pip._internal.utils.temp_dir import TempDirectory
 from pip._internal.wheel_builder import build, should_build_for_wheel_command
@@ -31,8 +26,10 @@ class WheelCommand(RequirementCommand):
     recompiling your software during every install. For more details, see the
     wheel docs: https://wheel.readthedocs.io/en/latest/
 
-    'pip wheel' uses the build system interface as described here:
-    https://pip.pypa.io/en/stable/reference/build-system/
+    Requirements: setuptools>=0.8, and wheel.
+
+    'pip wheel' uses the bdist_wheel setuptools extension from the wheel
+    package to build individual wheels.
 
     """
 
@@ -62,7 +59,6 @@ def add_options(self) -> None:
         self.cmd_opts.add_option(cmdoptions.no_build_isolation())
         self.cmd_opts.add_option(cmdoptions.use_pep517())
         self.cmd_opts.add_option(cmdoptions.no_use_pep517())
-        self.cmd_opts.add_option(cmdoptions.check_build_deps())
         self.cmd_opts.add_option(cmdoptions.constraints())
         self.cmd_opts.add_option(cmdoptions.editable())
         self.cmd_opts.add_option(cmdoptions.requirements())
@@ -79,7 +75,6 @@ def add_options(self) -> None:
             help="Don't verify if built wheel is valid.",
         )
 
-        self.cmd_opts.add_option(cmdoptions.config_settings())
         self.cmd_opts.add_option(cmdoptions.build_options())
         self.cmd_opts.add_option(cmdoptions.global_options())
 
@@ -105,6 +100,8 @@ def add_options(self) -> None:
 
     @with_cleanup
     def run(self, options: Values, args: List[str]) -> int:
+        cmdoptions.check_install_build_global(options)
+
         session = self.get_default_session(options)
 
         finder = self._build_package_finder(options, session)
@@ -113,7 +110,7 @@ def run(self, options: Values, args: List[str]) -> int:
         options.wheel_dir = normalize_path(options.wheel_dir)
         ensure_dir(options.wheel_dir)
 
-        build_tracker = self.enter_context(get_build_tracker())
+        req_tracker = self.enter_context(get_requirement_tracker())
 
         directory = TempDirectory(
             delete=not options.no_clean,
@@ -122,33 +119,11 @@ def run(self, options: Values, args: List[str]) -> int:
         )
 
         reqs = self.get_requirements(args, options, finder, session)
-        check_legacy_setup_py_options(
-            options, reqs, LegacySetupPyOptionsCheckMode.WHEEL
-        )
-
-        if "no-binary-enable-wheel-cache" in options.features_enabled:
-            # TODO: remove format_control from WheelCache when the deprecation cycle
-            # is over
-            wheel_cache = WheelCache(options.cache_dir)
-        else:
-            if options.format_control.no_binary:
-                deprecated(
-                    reason=(
-                        "--no-binary currently disables reading from "
-                        "the cache of locally built wheels. In the future "
-                        "--no-binary will not influence the wheel cache."
-                    ),
-                    replacement="to use the --no-cache-dir option",
-                    feature_flag="no-binary-enable-wheel-cache",
-                    issue=11453,
-                    gone_in="23.1",
-                )
-            wheel_cache = WheelCache(options.cache_dir, options.format_control)
 
         preparer = self.make_requirement_preparer(
             temp_build_dir=directory,
             options=options,
-            build_tracker=build_tracker,
+            req_tracker=req_tracker,
             session=session,
             finder=finder,
             download_dir=options.wheel_dir,
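The req_tracker line leans on Command.enter_context(), which pushes a context manager onto the command's exit stack so it is torn down when the command finishes. The underlying stdlib pattern, for reference:

    from contextlib import ExitStack, contextmanager

    @contextmanager
    def get_tracker():
        print("tracker up")
        try:
            yield "tracker"
        finally:
            print("tracker down")

    with ExitStack() as stack:
        tracker = stack.enter_context(get_tracker())  # acquired now...
        print("building with", tracker)
    # ...and released here, along with everything else on the stack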
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/configuration.py b/venv/lib/python3.10/site-packages/pip/_internal/configuration.py
index 8fd46c9..a8092d1 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/configuration.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/configuration.py
@@ -142,19 +142,13 @@ def items(self) -> Iterable[Tuple[str, Any]]:
 
     def get_value(self, key: str) -> Any:
         """Get a value from the configuration."""
-        orig_key = key
-        key = _normalize_name(key)
         try:
             return self._dictionary[key]
         except KeyError:
-            # disassembling triggers a more useful error message than simply
-            # "No such key" in the case that the key isn't in the form command.option
-            _disassemble_key(key)
-            raise ConfigurationError(f"No such key - {orig_key}")
+            raise ConfigurationError(f"No such key - {key}")
 
     def set_value(self, key: str, value: Any) -> None:
         """Modify a value in the configuration."""
-        key = _normalize_name(key)
         self._ensure_have_load_only()
 
         assert self.load_only
@@ -173,13 +167,11 @@ def set_value(self, key: str, value: Any) -> None:
 
     def unset_value(self, key: str) -> None:
         """Unset a value in the configuration."""
-        orig_key = key
-        key = _normalize_name(key)
         self._ensure_have_load_only()
 
         assert self.load_only
         if key not in self._config[self.load_only]:
-            raise ConfigurationError(f"No such key - {orig_key}")
+            raise ConfigurationError(f"No such key - {key}")
 
         fname, parser = self._get_parser_to_modify()
 
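The extra handling on the "-" side depends on two small helpers defined elsewhere in configuration.py. Approximately what they look like (a sketch from pip's source, not guaranteed verbatim):

    from pip._internal.exceptions import ConfigurationError

    def _normalize_name(name: str) -> str:
        # Make "--GLOBAL.Index_URL" and "global.index-url" address the same key.
        name = name.lower().replace("_", "-")
        if name.startswith("--"):
            name = name[2:]  # only prefer long opts
        return name

    def _disassemble_key(name: str) -> list:
        # Keys must be "section.option"; failing early here is what produced
        # the friendlier error message on the "-" side of get_value().
        if "." not in name:
            raise ConfigurationError(
                "Key does not contain dot separated section and key. "
                f"Perhaps you wanted to use 'global.{name}' instead?"
            )
        return name.split(".", 1)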
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/distributions/base.py b/venv/lib/python3.10/site-packages/pip/_internal/distributions/base.py
index 75ce2dc..149fff5 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/distributions/base.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/distributions/base.py
@@ -31,9 +31,6 @@ def get_metadata_distribution(self) -> BaseDistribution:
 
     @abc.abstractmethod
     def prepare_distribution_metadata(
-        self,
-        finder: PackageFinder,
-        build_isolation: bool,
-        check_build_deps: bool,
+        self, finder: PackageFinder, build_isolation: bool
     ) -> None:
         raise NotImplementedError()
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/distributions/installed.py b/venv/lib/python3.10/site-packages/pip/_internal/distributions/installed.py
index edb38aa..be5962f 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/distributions/installed.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/distributions/installed.py
@@ -15,9 +15,6 @@ def get_metadata_distribution(self) -> BaseDistribution:
         return self.req.satisfied_by
 
     def prepare_distribution_metadata(
-        self,
-        finder: PackageFinder,
-        build_isolation: bool,
-        check_build_deps: bool,
+        self, finder: PackageFinder, build_isolation: bool
     ) -> None:
         pass
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/distributions/sdist.py b/venv/lib/python3.10/site-packages/pip/_internal/distributions/sdist.py
index 4c25647..bdaf403 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/distributions/sdist.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/distributions/sdist.py
@@ -22,10 +22,7 @@ def get_metadata_distribution(self) -> BaseDistribution:
         return self.req.get_dist()
 
     def prepare_distribution_metadata(
-        self,
-        finder: PackageFinder,
-        build_isolation: bool,
-        check_build_deps: bool,
+        self, finder: PackageFinder, build_isolation: bool
     ) -> None:
         # Load pyproject.toml, to determine whether PEP 517 is to be used
         self.req.load_pyproject_toml()
@@ -46,18 +43,7 @@ def prepare_distribution_metadata(
             self.req.isolated_editable_sanity_check()
             # Install the dynamic build requirements.
             self._install_build_reqs(finder)
-        # Check if the current environment provides build dependencies
-        should_check_deps = self.req.use_pep517 and check_build_deps
-        if should_check_deps:
-            pyproject_requires = self.req.pyproject_requires
-            assert pyproject_requires is not None
-            conflicting, missing = self.req.build_env.check_requirements(
-                pyproject_requires
-            )
-            if conflicting:
-                self._raise_conflicts("the backend dependencies", conflicting)
-            if missing:
-                self._raise_missing_reqs(missing)
+
         self.req.prepare_metadata()
 
     def _prepare_build_backend(self, finder: PackageFinder) -> None:
@@ -139,12 +125,3 @@ def _raise_conflicts(
             ),
         )
         raise InstallationError(error_message)
-
-    def _raise_missing_reqs(self, missing: Set[str]) -> None:
-        format_string = (
-            "Some build dependencies for {requirement} are missing: {missing}."
-        )
-        error_message = format_string.format(
-            requirement=self.req, missing=", ".join(map(repr, sorted(missing)))
-        )
-        raise InstallationError(error_message)
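The removed block asks the build environment which PEP 517 build requirements are unsatisfied and splits them into "conflicting" and "missing". The core of such a check, sketched with the public packaging library (illustrative names, not pip's API):

    from packaging.requirements import Requirement
    from packaging.version import Version

    def check_requirements(installed, req_strings):
        """installed: mapping of project name -> Version."""
        conflicting, missing = set(), set()
        for req_string in req_strings:
            req = Requirement(req_string)
            version = installed.get(req.name)
            if version is None:
                missing.add(req_string)
            elif not req.specifier.contains(version, prereleases=True):
                conflicting.add((str(version), req_string))
        return conflicting, missing

    print(check_requirements({"setuptools": Version("40.0")},
                             ["setuptools>=42", "wheel"]))
    # ({('40.0', 'setuptools>=42')}, {'wheel'})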
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/distributions/wheel.py b/venv/lib/python3.10/site-packages/pip/_internal/distributions/wheel.py
index 03aac77..340b0f3 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/distributions/wheel.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/distributions/wheel.py
@@ -26,9 +26,6 @@ def get_metadata_distribution(self) -> BaseDistribution:
         return get_wheel_distribution(wheel, canonicalize_name(self.req.name))
 
     def prepare_distribution_metadata(
-        self,
-        finder: PackageFinder,
-        build_isolation: bool,
-        check_build_deps: bool,
+        self, finder: PackageFinder, build_isolation: bool
     ) -> None:
         pass
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/exceptions.py b/venv/lib/python3.10/site-packages/pip/_internal/exceptions.py
index 2ab1f59..97b9612 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/exceptions.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/exceptions.py
@@ -288,10 +288,7 @@ class NetworkConnectionError(PipError):
     """HTTP connection error"""
 
     def __init__(
-        self,
-        error_msg: str,
-        response: Optional[Response] = None,
-        request: Optional[Request] = None,
+        self, error_msg: str, response: Response = None, request: Request = None
     ) -> None:
         """
         Initialize NetworkConnectionError with `request` and `response`
@@ -335,8 +332,8 @@ class MetadataInconsistent(InstallationError):
     """Built metadata contains inconsistent information.
 
     This is raised when the metadata contains values (e.g. name and version)
-    that do not match the information previously obtained from sdist filename,
-    user-supplied ``#egg=`` value, or an install requirement name.
+    that do not match the information previously obtained from sdist filename
+    or user-supplied ``#egg=`` value.
     """
 
     def __init__(
@@ -348,10 +345,11 @@ def __init__(
         self.m_val = m_val
 
     def __str__(self) -> str:
-        return (
-            f"Requested {self.ireq} has inconsistent {self.field}: "
-            f"expected {self.f_val!r}, but metadata has {self.m_val!r}"
+        template = (
+            "Requested {} has inconsistent {}: "
+            "filename has {!r}, but metadata has {!r}"
         )
+        return template.format(self.ireq, self.field, self.f_val, self.m_val)
 
 
 class LegacyInstallFailure(DiagnosticPipError):
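The NetworkConnectionError hunk above is also a typing regression worth noting: a parameter that defaults to None must be annotated Optional under PEP 484, and strict checkers reject the implicit form on the "+" side. In general:

    from typing import Optional

    def bad(response: str = None) -> None: ...   # implicit Optional: rejected
    def good(response: Optional[str] = None) -> None: ...  # explicit: fine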
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/index/collector.py b/venv/lib/python3.10/site-packages/pip/_internal/index/collector.py
index 0120610..4ecbb33 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/index/collector.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/index/collector.py
@@ -2,19 +2,21 @@
 The main purpose of this module is to expose LinkCollector.collect_sources().
 """
 
+import cgi
 import collections
-import email.message
 import functools
 import itertools
-import json
 import logging
 import os
+import re
 import urllib.parse
 import urllib.request
+import xml.etree.ElementTree
 from html.parser import HTMLParser
 from optparse import Values
 from typing import (
     TYPE_CHECKING,
+    Any,
     Callable,
     Dict,
     Iterable,
@@ -27,7 +29,7 @@
     Union,
 )
 
-from pip._vendor import requests
+from pip._vendor import html5lib, requests
 from pip._vendor.requests import Response
 from pip._vendor.requests.exceptions import RetryError, SSLError
 
@@ -36,8 +38,9 @@
 from pip._internal.models.search_scope import SearchScope
 from pip._internal.network.session import PipSession
 from pip._internal.network.utils import raise_for_status
+from pip._internal.utils.deprecation import deprecated
 from pip._internal.utils.filetypes import is_archive_file
-from pip._internal.utils.misc import redact_auth_from_url
+from pip._internal.utils.misc import pairwise, redact_auth_from_url
 from pip._internal.vcs import vcs
 
 from .sources import CandidatesFromPage, LinkSource, build_source
@@ -49,6 +52,7 @@
 
 logger = logging.getLogger(__name__)
 
+HTMLElement = xml.etree.ElementTree.Element
 ResponseHeaders = MutableMapping[str, str]
 
 
@@ -63,46 +67,32 @@ def _match_vcs_scheme(url: str) -> Optional[str]:
     return None
 
 
-class _NotAPIContent(Exception):
+class _NotHTML(Exception):
     def __init__(self, content_type: str, request_desc: str) -> None:
         super().__init__(content_type, request_desc)
         self.content_type = content_type
         self.request_desc = request_desc
 
 
-def _ensure_api_header(response: Response) -> None:
-    """
-    Check the Content-Type header to ensure the response contains a Simple
-    API Response.
+def _ensure_html_header(response: Response) -> None:
+    """Check the Content-Type header to ensure the response contains HTML.
 
-    Raises `_NotAPIContent` if the content type is not a valid content-type.
+    Raises `_NotHTML` if the content type is not text/html.
     """
-    content_type = response.headers.get("Content-Type", "Unknown")
-
-    content_type_l = content_type.lower()
-    if content_type_l.startswith(
-        (
-            "text/html",
-            "application/vnd.pypi.simple.v1+html",
-            "application/vnd.pypi.simple.v1+json",
-        )
-    ):
-        return
-
-    raise _NotAPIContent(content_type, response.request.method)
+    content_type = response.headers.get("Content-Type", "")
+    if not content_type.lower().startswith("text/html"):
+        raise _NotHTML(content_type, response.request.method)
 
 
 class _NotHTTP(Exception):
     pass
 
 
-def _ensure_api_response(url: str, session: PipSession) -> None:
-    """
-    Send a HEAD request to the URL, and ensure the response contains a simple
-    API Response.
+def _ensure_html_response(url: str, session: PipSession) -> None:
+    """Send a HEAD request to the URL, and ensure the response contains HTML.
 
     Raises `_NotHTTP` if the URL is not available for a HEAD request, or
-    `_NotAPIContent` if the content type is not a valid content type.
+    `_NotHTML` if the content type is not text/html.
     """
     scheme, netloc, path, query, fragment = urllib.parse.urlsplit(url)
     if scheme not in {"http", "https"}:
@@ -111,37 +101,31 @@ def _ensure_api_response(url: str, session: PipSession) -> None:
     resp = session.head(url, allow_redirects=True)
     raise_for_status(resp)
 
-    _ensure_api_header(resp)
+    _ensure_html_header(resp)
 
 
-def _get_simple_response(url: str, session: PipSession) -> Response:
-    """Access an Simple API response with GET, and return the response.
+def _get_html_response(url: str, session: PipSession) -> Response:
+    """Access an HTML page with GET, and return the response.
 
     This consists of three parts:
 
     1. If the URL looks suspiciously like an archive, send a HEAD first to
-       check the Content-Type is HTML or Simple API, to avoid downloading a
-       large file. Raise `_NotHTTP` if the content type cannot be determined, or
-       `_NotAPIContent` if it is not HTML or a Simple API.
+       check the Content-Type is HTML, to avoid downloading a large file.
+       Raise `_NotHTTP` if the content type cannot be determined, or
+       `_NotHTML` if it is not HTML.
     2. Actually perform the request. Raise HTTP exceptions on network failures.
-    3. Check the Content-Type header to make sure we got a Simple API response,
-       and raise `_NotAPIContent` otherwise.
+    3. Check the Content-Type header to make sure we got HTML, and raise
+       `_NotHTML` otherwise.
     """
     if is_archive_file(Link(url).filename):
-        _ensure_api_response(url, session=session)
+        _ensure_html_response(url, session=session)
 
     logger.debug("Getting page %s", redact_auth_from_url(url))
 
     resp = session.get(
         url,
         headers={
-            "Accept": ", ".join(
-                [
-                    "application/vnd.pypi.simple.v1+json",
-                    "application/vnd.pypi.simple.v1+html; q=0.1",
-                    "text/html; q=0.01",
-                ]
-            ),
+            "Accept": "text/html",
             # We don't want to blindly return cached data for
             # /simple/, because authors generally expect that
             # twine upload && pip install will function, but if
@@ -163,16 +147,9 @@ def _get_simple_response(url: str, session: PipSession) -> Response:
     # The check for archives above only works if the url ends with
     # something that looks like an archive. However that is not a
     # requirement of an url. Unless we issue a HEAD request on every
-    # url we cannot know ahead of time for sure if something is a
-    # Simple API response or not. However we can check after we've
-    # downloaded it.
-    _ensure_api_header(resp)
-
-    logger.debug(
-        "Fetched page %s as %s",
-        redact_auth_from_url(url),
-        resp.headers.get("Content-Type", "Unknown"),
-    )
+    # url we cannot know ahead of time for sure if something is HTML
+    # or not. However we can check after we've downloaded it.
+    _ensure_html_header(resp)
 
     return resp
 
@@ -180,16 +157,123 @@ def _get_simple_response(url: str, session: PipSession) -> Response:
 def _get_encoding_from_headers(headers: ResponseHeaders) -> Optional[str]:
     """Determine if we have any encoding information in our headers."""
     if headers and "Content-Type" in headers:
-        m = email.message.Message()
-        m["content-type"] = headers["Content-Type"]
-        charset = m.get_param("charset")
-        if charset:
-            return str(charset)
+        content_type, params = cgi.parse_header(headers["Content-Type"])
+        if "charset" in params:
+            return params["charset"]
     return None
 
 
+def _determine_base_url(document: HTMLElement, page_url: str) -> str:
+    """Determine the HTML document's base URL.
+
+    This looks for a ``<base>`` tag in the HTML document. If present, its href
+    attribute denotes the base URL of anchor tags in the document. If there is
+    no such tag (or if it does not have a valid href attribute), the HTML
+    file's URL is used as the base URL.
+
+    :param document: An HTML document representation. The current
+        implementation expects the result of ``html5lib.parse()``.
+    :param page_url: The URL of the HTML document.
+
+    TODO: Remove when `html5lib` is dropped.
+    """
+    for base in document.findall(".//base"):
+        href = base.get("href")
+        if href is not None:
+            return href
+    return page_url
+
+
+def _clean_url_path_part(part: str) -> str:
+    """
+    Clean a "part" of a URL path (i.e. after splitting on "@" characters).
+    """
+    # We unquote prior to quoting to make sure nothing is double quoted.
+    return urllib.parse.quote(urllib.parse.unquote(part))
+
+
+def _clean_file_url_path(part: str) -> str:
+    """
+    Clean the first part of a URL path that corresponds to a local
+    filesystem path (i.e. the first part after splitting on "@" characters).
+    """
+    # We unquote prior to quoting to make sure nothing is double quoted.
+    # Also, on Windows the path part might contain a drive letter which
+    # should not be quoted. On Linux where drive letters do not
+    # exist, the colon should be quoted. We rely on urllib.request
+    # to do the right thing here.
+    return urllib.request.pathname2url(urllib.request.url2pathname(part))
+
+
+# percent-encoded:                   /
+_reserved_chars_re = re.compile("(@|%2F)", re.IGNORECASE)
+
+
+def _clean_url_path(path: str, is_local_path: bool) -> str:
+    """
+    Clean the path portion of a URL.
+    """
+    if is_local_path:
+        clean_func = _clean_file_url_path
+    else:
+        clean_func = _clean_url_path_part
+
+    # Split on the reserved characters prior to cleaning so that
+    # revision strings in VCS URLs are properly preserved.
+    parts = _reserved_chars_re.split(path)
+
+    cleaned_parts = []
+    for to_clean, reserved in pairwise(itertools.chain(parts, [""])):
+        cleaned_parts.append(clean_func(to_clean))
+        # Normalize %xx escapes (e.g. %2f -> %2F)
+        cleaned_parts.append(reserved.upper())
+
+    return "".join(cleaned_parts)
+
+
+def _clean_link(url: str) -> str:
+    """
+    Make sure a link is fully quoted.
+    For example, if ' ' occurs in the URL, it will be replaced with "%20",
+    without double-quoting other characters.
+    """
+    # Split the URL into parts according to the general structure
+    # `scheme://netloc/path;parameters?query#fragment`.
+    result = urllib.parse.urlparse(url)
+    # If the netloc is empty, then the URL refers to a local filesystem path.
+    is_local_path = not result.netloc
+    path = _clean_url_path(result.path, is_local_path=is_local_path)
+    return urllib.parse.urlunparse(result._replace(path=path))
+
+
+def _create_link_from_element(
+    element_attribs: Dict[str, Optional[str]],
+    page_url: str,
+    base_url: str,
+) -> Optional[Link]:
+    """
+    Convert an anchor element's attributes in a simple repository page to a Link.
+    """
+    href = element_attribs.get("href")
+    if not href:
+        return None
+
+    url = _clean_link(urllib.parse.urljoin(base_url, href))
+    pyrequire = element_attribs.get("data-requires-python")
+    yanked_reason = element_attribs.get("data-yanked")
+
+    link = Link(
+        url,
+        comes_from=page_url,
+        requires_python=pyrequire,
+        yanked_reason=yanked_reason,
+    )
+
+    return link
+
+
 class CacheablePageContent:
-    def __init__(self, page: "IndexContent") -> None:
+    def __init__(self, page: "HTMLPage") -> None:
         assert page.cache_link_parsing
         self.page = page
 
@@ -201,66 +285,113 @@ def __hash__(self) -> int:
 
 
 class ParseLinks(Protocol):
-    def __call__(self, page: "IndexContent") -> Iterable[Link]:
+    def __call__(
+        self, page: "HTMLPage", use_deprecated_html5lib: bool
+    ) -> Iterable[Link]:
         ...
 
 
-def with_cached_index_content(fn: ParseLinks) -> ParseLinks:
+def with_cached_html_pages(fn: ParseLinks) -> ParseLinks:
     """
-    Given a function that parses an Iterable[Link] from an IndexContent, cache the
-    function's result (keyed by CacheablePageContent), unless the IndexContent
+    Given a function that parses an Iterable[Link] from an HTMLPage, cache the
+    function's result (keyed by CacheablePageContent), unless the HTMLPage
     `page` has `page.cache_link_parsing == False`.
     """
 
     @functools.lru_cache(maxsize=None)
-    def wrapper(cacheable_page: CacheablePageContent) -> List[Link]:
-        return list(fn(cacheable_page.page))
+    def wrapper(
+        cacheable_page: CacheablePageContent, use_deprecated_html5lib: bool
+    ) -> List[Link]:
+        return list(fn(cacheable_page.page, use_deprecated_html5lib))
 
     @functools.wraps(fn)
-    def wrapper_wrapper(page: "IndexContent") -> List[Link]:
+    def wrapper_wrapper(page: "HTMLPage", use_deprecated_html5lib: bool) -> List[Link]:
         if page.cache_link_parsing:
-            return wrapper(CacheablePageContent(page))
-        return list(fn(page))
+            return wrapper(CacheablePageContent(page), use_deprecated_html5lib)
+        return list(fn(page, use_deprecated_html5lib))
 
     return wrapper_wrapper
 
 
-@with_cached_index_content
-def parse_links(page: "IndexContent") -> Iterable[Link]:
+def _parse_links_html5lib(page: "HTMLPage") -> Iterable[Link]:
     """
-    Parse a Simple API's Index Content, and yield its anchor elements as Link objects.
+    Parse an HTML document, and yield its anchor elements as Link objects.
+
+    TODO: Remove when `html5lib` is dropped.
     """
+    document = html5lib.parse(
+        page.content,
+        transport_encoding=page.encoding,
+        namespaceHTMLElements=False,
+    )
+
+    url = page.url
+    base_url = _determine_base_url(document, url)
+    for anchor in document.findall(".//a"):
+        link = _create_link_from_element(
+            anchor.attrib,
+            page_url=url,
+            base_url=base_url,
+        )
+        if link is None:
+            continue
+        yield link
 
-    content_type_l = page.content_type.lower()
-    if content_type_l.startswith("application/vnd.pypi.simple.v1+json"):
-        data = json.loads(page.content)
-        for file in data.get("files", []):
-            link = Link.from_json(file, page.url)
-            if link is None:
-                continue
-            yield link
-        return
 
-    parser = HTMLLinkParser(page.url)
+@with_cached_html_pages
+def parse_links(page: "HTMLPage", use_deprecated_html5lib: bool) -> Iterable[Link]:
+    """
+    Parse an HTML document, and yield its anchor elements as Link objects.
+    """
     encoding = page.encoding or "utf-8"
+
+    # Check if the page starts with a valid doctype, to decide whether to use
+    # http.parser or (deprecated) html5lib for parsing -- unless explicitly
+    # requested to use html5lib.
+    if not use_deprecated_html5lib:
+        expected_doctype = "".encode(encoding)
+        actual_start = page.content[: len(expected_doctype)]
+        if actual_start.decode(encoding).lower() != "":
+            deprecated(
+                reason=(
+                    f"The HTML index page being used ({page.url}) is not a proper "
+                    "HTML 5 document. This is in violation of PEP 503 which requires "
+                    "these pages to be well-formed HTML 5 documents. Please reach out "
+                    "to the owners of this index page, and ask them to update this "
+                    "index page to a valid HTML 5 document."
+                ),
+                replacement=None,
+                gone_in="22.2",
+                issue=10825,
+            )
+            use_deprecated_html5lib = True
+
+    if use_deprecated_html5lib:
+        yield from _parse_links_html5lib(page)
+        return
+
+    parser = HTMLLinkParser()
     parser.feed(page.content.decode(encoding))
 
     url = page.url
     base_url = parser.base_url or url
     for anchor in parser.anchors:
-        link = Link.from_element(anchor, page_url=url, base_url=base_url)
+        link = _create_link_from_element(
+            anchor,
+            page_url=url,
+            base_url=base_url,
+        )
         if link is None:
             continue
         yield link
 
 
-class IndexContent:
-    """Represents one response (or page), along with its URL"""
+class HTMLPage:
+    """Represents one page, along with its URL"""
 
     def __init__(
         self,
         content: bytes,
-        content_type: str,
         encoding: Optional[str],
         url: str,
         cache_link_parsing: bool = True,
@@ -273,7 +404,6 @@ def __init__(
                                    have this set to False, for example.
         """
         self.content = content
-        self.content_type = content_type
         self.encoding = encoding
         self.url = url
         self.cache_link_parsing = cache_link_parsing
@@ -288,14 +418,21 @@ class HTMLLinkParser(HTMLParser):
     elements' attributes.
     """
 
-    def __init__(self, url: str) -> None:
-        super().__init__(convert_charrefs=True)
-
-        self.url: str = url
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+        self._seen_decl = False
         self.base_url: Optional[str] = None
         self.anchors: List[Dict[str, Optional[str]]] = []
 
+    def handle_decl(self, decl: str) -> None:
+        if decl.lower() != "doctype html":
+            self._raise_error()
+        self._seen_decl = True
+
     def handle_starttag(self, tag: str, attrs: List[Tuple[str, Optional[str]]]) -> None:
+        if not self._seen_decl:
+            self._raise_error()
+
         if tag == "base" and self.base_url is None:
             href = self.get_href(attrs)
             if href is not None:
@@ -309,8 +446,16 @@ def get_href(self, attrs: List[Tuple[str, Optional[str]]]) -> Optional[str]:
                 return value
         return None
 
+    def _raise_error(self) -> None:
+        raise ValueError(
+            "HTML doctype missing or incorrect. Expected .\n\n"
+            "If you believe this error to be incorrect, try passing the "
+            "command line option --use-deprecated=html5lib and please leave "
+            "a comment on the pip issue at https://github.com/pypa/pip/issues/10825."
+        )
+
 
-def _handle_get_simple_fail(
+def _handle_get_page_fail(
     link: Link,
     reason: Union[str, Exception],
     meth: Optional[Callable[..., None]] = None,
@@ -320,20 +465,24 @@ def _handle_get_simple_fail(
     meth("Could not fetch URL %s: %s - skipping", link, reason)
 
 
-def _make_index_content(
-    response: Response, cache_link_parsing: bool = True
-) -> IndexContent:
+def _make_html_page(response: Response, cache_link_parsing: bool = True) -> HTMLPage:
     encoding = _get_encoding_from_headers(response.headers)
-    return IndexContent(
+    return HTMLPage(
         response.content,
-        response.headers["Content-Type"],
         encoding=encoding,
         url=response.url,
         cache_link_parsing=cache_link_parsing,
     )
 
 
-def _get_index_content(link: Link, *, session: PipSession) -> Optional["IndexContent"]:
+def _get_html_page(
+    link: Link, session: Optional[PipSession] = None
+) -> Optional["HTMLPage"]:
+    if session is None:
+        raise TypeError(
+            "_get_html_page() missing 1 required keyword argument: 'session'"
+        )
+
     url = link.url.split("#", 1)[0]
 
     # Check for VCS schemes that do not support lookup as web pages.
@@ -353,44 +502,39 @@ def _get_index_content(link: Link, *, session: PipSession) -> Optional["IndexCon
         # final segment
         if not url.endswith("/"):
             url += "/"
-        # TODO: In the future, it would be nice if pip supported PEP 691
-        #       style responses in file:// URLs, however there's no
-        #       standard file extension for application/vnd.pypi.simple.v1+json
-        #       so we'll need to come up with something on our own.
         url = urllib.parse.urljoin(url, "index.html")
         logger.debug(" file: URL is directory, getting %s", url)
 
     try:
-        resp = _get_simple_response(url, session=session)
+        resp = _get_html_response(url, session=session)
     except _NotHTTP:
         logger.warning(
             "Skipping page %s because it looks like an archive, and cannot "
             "be checked by a HTTP HEAD request.",
             link,
         )
-    except _NotAPIContent as exc:
+    except _NotHTML as exc:
         logger.warning(
-            "Skipping page %s because the %s request got Content-Type: %s. "
-            "The only supported Content-Types are application/vnd.pypi.simple.v1+json, "
-            "application/vnd.pypi.simple.v1+html, and text/html",
+            "Skipping page %s because the %s request got Content-Type: %s."
+            "The only supported Content-Type is text/html",
             link,
             exc.request_desc,
             exc.content_type,
         )
     except NetworkConnectionError as exc:
-        _handle_get_simple_fail(link, exc)
+        _handle_get_page_fail(link, exc)
     except RetryError as exc:
-        _handle_get_simple_fail(link, exc)
+        _handle_get_page_fail(link, exc)
     except SSLError as exc:
         reason = "There was a problem confirming the ssl certificate: "
         reason += str(exc)
-        _handle_get_simple_fail(link, reason, meth=logger.info)
+        _handle_get_page_fail(link, reason, meth=logger.info)
     except requests.ConnectionError as exc:
-        _handle_get_simple_fail(link, f"connection error: {exc}")
+        _handle_get_page_fail(link, f"connection error: {exc}")
     except requests.Timeout:
-        _handle_get_simple_fail(link, "timed out")
+        _handle_get_page_fail(link, "timed out")
     else:
-        return _make_index_content(resp, cache_link_parsing=link.cache_link_parsing)
+        return _make_html_page(resp, cache_link_parsing=link.cache_link_parsing)
     return None
 
 
@@ -442,7 +586,6 @@ def create(
         search_scope = SearchScope.create(
             find_links=find_links,
             index_urls=index_urls,
-            no_index=options.no_index,
         )
         link_collector = LinkCollector(
             session=session,
@@ -454,11 +597,11 @@ def create(
     def find_links(self) -> List[str]:
         return self.search_scope.find_links
 
-    def fetch_response(self, location: Link) -> Optional[IndexContent]:
+    def fetch_page(self, location: Link) -> Optional[HTMLPage]:
         """
         Fetch an HTML page containing package links.
         """
-        return _get_index_content(location, session=self.session)
+        return _get_html_page(location, session=self.session)
 
     def collect_sources(
         self,
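For orientation, the stdlib-parser path the "+" side takes boils down to feeding the page body to an html.parser.HTMLParser subclass and collecting anchor attributes. A stripped-down standalone version (omitting the doctype enforcement and <base> handling the real class adds):

    from html.parser import HTMLParser

    class AnchorCollector(HTMLParser):
        def __init__(self) -> None:
            super().__init__()
            self.anchors = []

        def handle_starttag(self, tag, attrs):
            if tag == "a":
                self.anchors.append(dict(attrs))

    parser = AnchorCollector()
    parser.feed('<!DOCTYPE html>'
                '<a href="pkg-1.0.tar.gz" data-requires-python=">=3.7">pkg</a>')
    print(parser.anchors)
    # [{'href': 'pkg-1.0.tar.gz', 'data-requires-python': '>=3.7'}]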
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/index/package_finder.py b/venv/lib/python3.10/site-packages/pip/_internal/index/package_finder.py
index 9bf247f..223d06d 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/index/package_finder.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/index/package_finder.py
@@ -3,7 +3,6 @@
 # The following comment should be removed at some point in the future.
 # mypy: strict-optional=False
 
-import enum
 import functools
 import itertools
 import logging
@@ -95,16 +94,6 @@ def _check_link_requires_python(
     return True
 
 
-class LinkType(enum.Enum):
-    candidate = enum.auto()
-    different_project = enum.auto()
-    yanked = enum.auto()
-    format_unsupported = enum.auto()
-    format_invalid = enum.auto()
-    platform_mismatch = enum.auto()
-    requires_python_mismatch = enum.auto()
-
-
 class LinkEvaluator:
 
     """
@@ -154,20 +143,19 @@ def __init__(
 
         self.project_name = project_name
 
-    def evaluate_link(self, link: Link) -> Tuple[LinkType, str]:
+    def evaluate_link(self, link: Link) -> Tuple[bool, Optional[str]]:
         """
         Determine whether a link is a candidate for installation.
 
-        :return: A tuple (result, detail), where *result* is an enum
-            representing whether the evaluation found a candidate, or the reason
-            why one is not found. If a candidate is found, *detail* will be the
-            candidate's version string; if one is not found, it contains the
-            reason the link fails to qualify.
+        :return: A tuple (is_candidate, result), where `result` is (1) a
+            version string if `is_candidate` is True, and (2) if
+            `is_candidate` is False, an optional string to log the reason
+            the link fails to qualify.
         """
         version = None
         if link.is_yanked and not self._allow_yanked:
             reason = link.yanked_reason or ""
-            return (LinkType.yanked, f"yanked for reason: {reason}")
+            return (False, f"yanked for reason: {reason}")
 
         if link.egg_fragment:
             egg_info = link.egg_fragment
@@ -175,46 +163,42 @@ def evaluate_link(self, link: Link) -> Tuple[LinkType, str]:
         else:
             egg_info, ext = link.splitext()
             if not ext:
-                return (LinkType.format_unsupported, "not a file")
+                return (False, "not a file")
             if ext not in SUPPORTED_EXTENSIONS:
-                return (
-                    LinkType.format_unsupported,
-                    f"unsupported archive format: {ext}",
-                )
+                return (False, f"unsupported archive format: {ext}")
             if "binary" not in self._formats and ext == WHEEL_EXTENSION:
-                reason = f"No binaries permitted for {self.project_name}"
-                return (LinkType.format_unsupported, reason)
+                reason = "No binaries permitted for {}".format(self.project_name)
+                return (False, reason)
             if "macosx10" in link.path and ext == ".zip":
-                return (LinkType.format_unsupported, "macosx10 one")
+                return (False, "macosx10 one")
             if ext == WHEEL_EXTENSION:
                 try:
                     wheel = Wheel(link.filename)
                 except InvalidWheelFilename:
-                    return (
-                        LinkType.format_invalid,
-                        "invalid wheel filename",
-                    )
+                    return (False, "invalid wheel filename")
                 if canonicalize_name(wheel.name) != self._canonical_name:
-                    reason = f"wrong project name (not {self.project_name})"
-                    return (LinkType.different_project, reason)
+                    reason = "wrong project name (not {})".format(self.project_name)
+                    return (False, reason)
 
                 supported_tags = self._target_python.get_tags()
                 if not wheel.supported(supported_tags):
                     # Include the wheel's tags in the reason string to
                     # simplify troubleshooting compatibility issues.
-                    file_tags = ", ".join(wheel.get_formatted_file_tags())
+                    file_tags = wheel.get_formatted_file_tags()
                     reason = (
-                        f"none of the wheel's tags ({file_tags}) are compatible "
-                        f"(run pip debug --verbose to show compatible tags)"
+                        "none of the wheel's tags ({}) are compatible "
+                        "(run pip debug --verbose to show compatible tags)".format(
+                            ", ".join(file_tags)
+                        )
                     )
-                    return (LinkType.platform_mismatch, reason)
+                    return (False, reason)
 
                 version = wheel.version
 
         # This should be up by the self.ok_binary check, but see issue 2700.
         if "source" not in self._formats and ext != WHEEL_EXTENSION:
             reason = f"No sources permitted for {self.project_name}"
-            return (LinkType.format_unsupported, reason)
+            return (False, reason)
 
         if not version:
             version = _extract_version_from_fragment(
@@ -223,17 +207,14 @@ def evaluate_link(self, link: Link) -> Tuple[LinkType, str]:
             )
         if not version:
             reason = f"Missing project version for {self.project_name}"
-            return (LinkType.format_invalid, reason)
+            return (False, reason)
 
         match = self._py_version_re.search(version)
         if match:
             version = version[: match.start()]
             py_version = match.group(1)
             if py_version != self._target_python.py_version:
-                return (
-                    LinkType.platform_mismatch,
-                    "Python version is incorrect",
-                )
+                return (False, "Python version is incorrect")
 
         supports_python = _check_link_requires_python(
             link,
@@ -241,12 +222,13 @@ def evaluate_link(self, link: Link) -> Tuple[LinkType, str]:
             ignore_requires_python=self._ignore_requires_python,
         )
         if not supports_python:
-            reason = f"{version} Requires-Python {link.requires_python}"
-            return (LinkType.requires_python_mismatch, reason)
+            # Return None for the reason text to suppress calling
+            # _log_skipped_link().
+            return (False, None)
 
         logger.debug("Found link %s, version: %s", link, version)
 
-        return (LinkType.candidate, version)
+        return (True, version)
 
 
 def filter_unallowed_hashes(
@@ -598,6 +580,7 @@ def __init__(
         link_collector: LinkCollector,
         target_python: TargetPython,
         allow_yanked: bool,
+        use_deprecated_html5lib: bool,
         format_control: Optional[FormatControl] = None,
         candidate_prefs: Optional[CandidatePreferences] = None,
         ignore_requires_python: Optional[bool] = None,
@@ -622,11 +605,12 @@ def __init__(
         self._ignore_requires_python = ignore_requires_python
         self._link_collector = link_collector
         self._target_python = target_python
+        self._use_deprecated_html5lib = use_deprecated_html5lib
 
         self.format_control = format_control
 
         # These are boring links that have already been logged somehow.
-        self._logged_links: Set[Tuple[Link, LinkType, str]] = set()
+        self._logged_links: Set[Link] = set()
 
     # Don't include an allow_yanked default value to make sure each call
     # site considers whether yanked releases are allowed. This also causes
@@ -638,6 +622,8 @@ def create(
         link_collector: LinkCollector,
         selection_prefs: SelectionPreferences,
         target_python: Optional[TargetPython] = None,
+        *,
+        use_deprecated_html5lib: bool,
     ) -> "PackageFinder":
         """Create a PackageFinder.
 
@@ -662,6 +648,7 @@ def create(
             allow_yanked=selection_prefs.allow_yanked,
             format_control=selection_prefs.format_control,
             ignore_requires_python=selection_prefs.ignore_requires_python,
+            use_deprecated_html5lib=use_deprecated_html5lib,
         )
 
     @property
@@ -703,14 +690,6 @@ def prefer_binary(self) -> bool:
     def set_prefer_binary(self) -> None:
         self._candidate_prefs.prefer_binary = True
 
-    def requires_python_skipped_reasons(self) -> List[str]:
-        reasons = {
-            detail
-            for _, result, detail in self._logged_links
-            if result == LinkType.requires_python_mismatch
-        }
-        return sorted(reasons)
-
     def make_link_evaluator(self, project_name: str) -> LinkEvaluator:
         canonical_name = canonicalize_name(project_name)
         formats = self.format_control.get_allowed_formats(canonical_name)
@@ -740,13 +719,12 @@ def _sort_links(self, links: Iterable[Link]) -> List[Link]:
                     no_eggs.append(link)
         return no_eggs + eggs
 
-    def _log_skipped_link(self, link: Link, result: LinkType, detail: str) -> None:
-        entry = (link, result, detail)
-        if entry not in self._logged_links:
+    def _log_skipped_link(self, link: Link, reason: str) -> None:
+        if link not in self._logged_links:
             # Put the link at the end so the reason is more visible and because
             # the link string is usually very long.
-            logger.debug("Skipping link: %s: %s", detail, link)
-            self._logged_links.add(entry)
+            logger.debug("Skipping link: %s: %s", reason, link)
+            self._logged_links.add(link)
 
     def get_install_candidate(
         self, link_evaluator: LinkEvaluator, link: Link
@@ -755,15 +733,16 @@ def get_install_candidate(
         If the link is a candidate for install, convert it to an
         InstallationCandidate and return it. Otherwise, return None.
         """
-        result, detail = link_evaluator.evaluate_link(link)
-        if result != LinkType.candidate:
-            self._log_skipped_link(link, result, detail)
+        is_candidate, result = link_evaluator.evaluate_link(link)
+        if not is_candidate:
+            if result:
+                self._log_skipped_link(link, reason=result)
             return None
 
         return InstallationCandidate(
             name=link_evaluator.project_name,
             link=link,
-            version=detail,
+            version=result,
         )
 
     def evaluate_links(
@@ -787,11 +766,11 @@ def process_project_url(
             "Fetching project page and analyzing links: %s",
             project_url,
         )
-        index_response = self._link_collector.fetch_response(project_url)
-        if index_response is None:
+        html_page = self._link_collector.fetch_page(project_url)
+        if html_page is None:
             return []
 
-        page_links = list(parse_links(index_response))
+        page_links = list(parse_links(html_page, self._use_deprecated_html5lib))
 
         with indent_log():
             package_links = self.evaluate_links(
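Both sides of evaluate_link gate wheels on tag compatibility. The test itself is simple set membership between the tags encoded in the wheel filename and the interpreter's supported tags, e.g. with the public packaging library:

    from packaging.tags import sys_tags
    from packaging.utils import parse_wheel_filename

    name, version, build, tags = parse_wheel_filename(
        "pkg-1.0-py3-none-any.whl"
    )
    supported = set(sys_tags())
    print(any(tag in supported for tag in tags))  # True on any CPython 3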
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/locations/__init__.py b/venv/lib/python3.10/site-packages/pip/_internal/locations/__init__.py
index 60afe0a..ac0c166 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/locations/__init__.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/locations/__init__.py
@@ -4,14 +4,14 @@
 import pathlib
 import sys
 import sysconfig
-from typing import Any, Dict, Generator, List, Optional, Tuple
+from typing import Any, Dict, Iterator, List, Optional, Tuple
 
 from pip._internal.models.scheme import SCHEME_KEYS, Scheme
 from pip._internal.utils.compat import WINDOWS
 from pip._internal.utils.deprecation import deprecated
 from pip._internal.utils.virtualenv import running_under_virtualenv
 
-from . import _sysconfig
+from . import _distutils, _sysconfig
 from .base import (
     USER_CACHE_DIR,
     get_major_minor_version,
@@ -60,12 +60,6 @@ def _should_use_sysconfig() -> bool:
 
 _USE_SYSCONFIG = _should_use_sysconfig()
 
-if not _USE_SYSCONFIG:
-    # Import distutils lazily to avoid deprecation warnings,
-    # but import it soon enough that it is in memory and available during
-    # a pip reinstall.
-    from . import _distutils
-
 # Be noisy about incompatibilities if this platforms "should" be using
 # sysconfig, but is explicitly opting out and using distutils instead.
 if _USE_SYSCONFIG_DEFAULT and not _USE_SYSCONFIG:
@@ -79,7 +73,7 @@ def _looks_like_bpo_44860() -> bool:
 
     See <https://bugs.python.org/issue44860>.
     """
-    from distutils.command.install import INSTALL_SCHEMES
+    from distutils.command.install import INSTALL_SCHEMES  # type: ignore
 
     try:
         unix_user_platlib = INSTALL_SCHEMES["unix_user"]["platlib"]
@@ -104,7 +98,7 @@ def _looks_like_red_hat_lib() -> bool:
 
     This is the only way I can see to tell a Red Hat-patched Python.
     """
-    from distutils.command.install import INSTALL_SCHEMES
+    from distutils.command.install import INSTALL_SCHEMES  # type: ignore
 
     return all(
         k in INSTALL_SCHEMES
@@ -116,7 +110,7 @@ def _looks_like_red_hat_lib() -> bool:
 @functools.lru_cache(maxsize=None)
 def _looks_like_debian_scheme() -> bool:
     """Debian adds two additional schemes."""
-    from distutils.command.install import INSTALL_SCHEMES
+    from distutils.command.install import INSTALL_SCHEMES  # type: ignore
 
     return "deb_system" in INSTALL_SCHEMES and "unix_local" in INSTALL_SCHEMES
 
@@ -175,9 +169,9 @@ def _looks_like_msys2_mingw_scheme() -> bool:
     )
 
 
-def _fix_abiflags(parts: Tuple[str]) -> Generator[str, None, None]:
+def _fix_abiflags(parts: Tuple[str]) -> Iterator[str]:
     ldversion = sysconfig.get_config_var("LDVERSION")
-    abiflags = getattr(sys, "abiflags", None)
+    abiflags: str = getattr(sys, "abiflags", None)
 
     # LDVERSION does not end with sys.abiflags. Just return the path unchanged.
     if not ldversion or not abiflags or not ldversion.endswith(abiflags):
@@ -458,8 +452,6 @@ def get_platlib() -> str:
     if _USE_SYSCONFIG:
         return new
 
-    from . import _distutils
-
     old = _distutils.get_platlib()
     if _looks_like_deb_system_dist_packages(old):
         return old
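The "-" side moves "from . import _distutils" into the bodies that need it, the standard trick for keeping an import (and any warning it emits) out of module import time. Schematically, with a stand-in module:

    def lazy_backend():
        # Module-level "import heavy_backend" would pay the import cost (and
        # trigger any DeprecationWarning) whenever this module is imported;
        # importing inside the function defers both until the fallback path
        # is actually taken.
        import json as heavy_backend  # stand-in for "from . import _distutils"
        return heavy_backend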
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/locations/_distutils.py b/venv/lib/python3.10/site-packages/pip/_internal/locations/_distutils.py
index c7712f0..2ec79e6 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/locations/_distutils.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/locations/_distutils.py
@@ -3,17 +3,6 @@
 # The following comment should be removed at some point in the future.
 # mypy: strict-optional=False
 
-# If pip's going to use distutils, it should not be using the copy that setuptools
-# might have injected into the environment. This is done by removing the injected
-# shim, if it's injected.
-#
-# See https://github.com/pypa/pip/issues/8761 for the original discussion and
-# rationale for why this is done within pip.
-try:
-    __import__("_distutils_hack").remove_shim()
-except (ImportError, AttributeError):
-    pass
-
 import logging
 import os
 import sys
@@ -35,10 +24,10 @@
 def distutils_scheme(
     dist_name: str,
     user: bool = False,
-    home: Optional[str] = None,
-    root: Optional[str] = None,
+    home: str = None,
+    root: str = None,
     isolated: bool = False,
-    prefix: Optional[str] = None,
+    prefix: str = None,
     *,
     ignore_config_files: bool = False,
 ) -> Dict[str, str]:
@@ -95,7 +84,7 @@ def distutils_scheme(
         if home:
             prefix = home
         elif user:
-            prefix = i.install_userbase
+            prefix = i.install_userbase  # type: ignore
         else:
             prefix = i.prefix
         scheme["headers"] = os.path.join(
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/locations/_sysconfig.py b/venv/lib/python3.10/site-packages/pip/_internal/locations/_sysconfig.py
index 0bbc928..5e141aa 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/locations/_sysconfig.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/locations/_sysconfig.py
@@ -1,3 +1,4 @@
+import distutils.util  # FIXME: For change_root.
 import logging
 import os
 import sys
@@ -8,7 +9,7 @@
 from pip._internal.models.scheme import SCHEME_KEYS, Scheme
 from pip._internal.utils.virtualenv import running_under_virtualenv
 
-from .base import change_root, get_major_minor_version, is_osx_framework
+from .base import get_major_minor_version, is_osx_framework
 
 logger = logging.getLogger(__name__)
 
@@ -193,7 +194,7 @@ def get_scheme(
     )
     if root is not None:
         for key in SCHEME_KEYS:
-            value = change_root(root, getattr(scheme, key))
+            value = distutils.util.change_root(root, getattr(scheme, key))
             setattr(scheme, key, value)
     return scheme
 
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/locations/base.py b/venv/lib/python3.10/site-packages/pip/_internal/locations/base.py
index 3f7de00..86dad4a 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/locations/base.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/locations/base.py
@@ -5,7 +5,6 @@
 import sysconfig
 import typing
 
-from pip._internal.exceptions import InstallationError
 from pip._internal.utils import appdirs
 from pip._internal.utils.virtualenv import running_under_virtualenv
 
@@ -24,34 +23,6 @@ def get_major_minor_version() -> str:
     return "{}.{}".format(*sys.version_info)
 
 
-def change_root(new_root: str, pathname: str) -> str:
-    """Return 'pathname' with 'new_root' prepended.
-
-    If 'pathname' is relative, this is equivalent to os.path.join(new_root, pathname).
-    Otherwise, it requires making 'pathname' relative and then joining the
-    two, which is tricky on DOS/Windows and Mac OS.
-
-    This is borrowed from Python's standard library's distutils module.
-    """
-    if os.name == "posix":
-        if not os.path.isabs(pathname):
-            return os.path.join(new_root, pathname)
-        else:
-            return os.path.join(new_root, pathname[1:])
-
-    elif os.name == "nt":
-        (drive, path) = os.path.splitdrive(pathname)
-        if path[0] == "\\":
-            path = path[1:]
-        return os.path.join(new_root, path)
-
-    else:
-        raise InstallationError(
-            f"Unknown platform: {os.name}\n"
-            "Can not change root path prefix on unknown platform."
-        )
-
-
 def get_src_prefix() -> str:
     if running_under_virtualenv():
         src_prefix = os.path.join(sys.prefix, "src")
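The removed change_root is easy to pin down with an example; on POSIX it re-roots absolute paths by stripping the leading slash before joining:

    import os

    def change_root(new_root: str, pathname: str) -> str:
        # POSIX branch of the helper deleted above.
        if not os.path.isabs(pathname):
            return os.path.join(new_root, pathname)
        return os.path.join(new_root, pathname[1:])

    print(change_root("/tmp/stage", "/usr/lib/python3.10/site-packages"))
    # /tmp/stage/usr/lib/python3.10/site-packages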
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/metadata/__init__.py b/venv/lib/python3.10/site-packages/pip/_internal/metadata/__init__.py
index 9f73ca7..cc037c1 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/metadata/__init__.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/metadata/__init__.py
@@ -1,18 +1,7 @@
-import contextlib
-import functools
-import os
-import sys
-from typing import TYPE_CHECKING, List, Optional, Type, cast
-
-from pip._internal.utils.misc import strtobool
+from typing import List, Optional
 
 from .base import BaseDistribution, BaseEnvironment, FilesystemWheel, MemoryWheel, Wheel
 
-if TYPE_CHECKING:
-    from typing import Protocol
-else:
-    Protocol = object
-
 __all__ = [
     "BaseDistribution",
     "BaseEnvironment",
@@ -22,49 +11,9 @@
     "get_default_environment",
     "get_environment",
     "get_wheel_distribution",
-    "select_backend",
 ]
 
 
-def _should_use_importlib_metadata() -> bool:
-    """Whether to use the ``importlib.metadata`` or ``pkg_resources`` backend.
-
-    By default, pip uses ``importlib.metadata`` on Python 3.11+, and
-    ``pkg_resources`` otherwise. This can be overridden in a couple of ways:
-
-    * If environment variable ``_PIP_USE_IMPORTLIB_METADATA`` is set, it
-      dictates whether ``importlib.metadata`` is used, regardless of Python
-      version.
-    * On Python 3.11+, Python distributors can patch ``importlib.metadata``
-      to add a global constant ``_PIP_USE_IMPORTLIB_METADATA = False``. This
-      makes pip use ``pkg_resources`` (unless the user set the aforementioned
-      environment variable to *True*).
-    """
-    with contextlib.suppress(KeyError, ValueError):
-        return bool(strtobool(os.environ["_PIP_USE_IMPORTLIB_METADATA"]))
-    if sys.version_info < (3, 11):
-        return False
-    import importlib.metadata
-
-    return bool(getattr(importlib.metadata, "_PIP_USE_IMPORTLIB_METADATA", True))
-
-
-class Backend(Protocol):
-    Distribution: Type[BaseDistribution]
-    Environment: Type[BaseEnvironment]
-
-
-@functools.lru_cache(maxsize=None)
-def select_backend() -> Backend:
-    if _should_use_importlib_metadata():
-        from . import importlib
-
-        return cast(Backend, importlib)
-    from . import pkg_resources
-
-    return cast(Backend, pkg_resources)
-
-
 def get_default_environment() -> BaseEnvironment:
     """Get the default representation for the current environment.
 
@@ -72,7 +21,9 @@ def get_default_environment() -> BaseEnvironment:
     Environment instance should be built from ``sys.path`` and may use caching
     to share instance state across calls.
     """
-    return select_backend().Environment.default()
+    from .pkg_resources import Environment
+
+    return Environment.default()
 
 
 def get_environment(paths: Optional[List[str]]) -> BaseEnvironment:
@@ -82,7 +33,9 @@ def get_environment(paths: Optional[List[str]]) -> BaseEnvironment:
     given import paths. The backend must build a fresh instance representing
     the state of installed distributions when this function is called.
     """
-    return select_backend().Environment.from_paths(paths)
+    from .pkg_resources import Environment
+
+    return Environment.from_paths(paths)
 
 
 def get_directory_distribution(directory: str) -> BaseDistribution:
@@ -91,7 +44,9 @@ def get_directory_distribution(directory: str) -> BaseDistribution:
     This returns a Distribution instance from the chosen backend based on
     the given on-disk ``.dist-info`` directory.
     """
-    return select_backend().Distribution.from_directory(directory)
+    from .pkg_resources import Distribution
+
+    return Distribution.from_directory(directory)
 
 
 def get_wheel_distribution(wheel: Wheel, canonical_name: str) -> BaseDistribution:
@@ -102,26 +57,6 @@ def get_wheel_distribution(wheel: Wheel, canonical_name: str) -> BaseDistributio
 
     :param canonical_name: Normalized project name of the given wheel.
     """
-    return select_backend().Distribution.from_wheel(wheel, canonical_name)
-
-
-def get_metadata_distribution(
-    metadata_contents: bytes,
-    filename: str,
-    canonical_name: str,
-) -> BaseDistribution:
-    """Get the dist representation of the specified METADATA file contents.
+    from .pkg_resources import Distribution
 
-    This returns a Distribution instance from the chosen backend sourced from the data
-    in `metadata_contents`.
-
-    :param metadata_contents: Contents of a METADATA file within a dist, or one served
-                              via PEP 658.
-    :param filename: Filename for the dist this metadata represents.
-    :param canonical_name: Normalized project name of the given dist.
-    """
-    return select_backend().Distribution.from_metadata_file_contents(
-        metadata_contents,
-        filename,
-        canonical_name,
-    )
+    return Distribution.from_wheel(wheel, canonical_name)
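
For contrast, the backend selection this hunk removes can be sketched without pip's internal strtobool; the environment variable and the module-level constant come from the deleted docstring above:

import os
import sys

def should_use_importlib_metadata() -> bool:
    # _PIP_USE_IMPORTLIB_METADATA overrides the default on any Python version.
    env = os.environ.get("_PIP_USE_IMPORTLIB_METADATA")
    if env is not None:
        return env.strip().lower() in ("1", "true", "yes", "on", "y", "t")
    if sys.version_info < (3, 11):
        return False
    import importlib.metadata
    # Distributors may patch importlib.metadata to opt out on 3.11+.
    return bool(getattr(importlib.metadata, "_PIP_USE_IMPORTLIB_METADATA", True))
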
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/metadata/_json.py b/venv/lib/python3.10/site-packages/pip/_internal/metadata/_json.py
deleted file mode 100644
index 336b52f..0000000
--- a/venv/lib/python3.10/site-packages/pip/_internal/metadata/_json.py
+++ /dev/null
@@ -1,84 +0,0 @@
-# Extracted from https://github.com/pfmoore/pkg_metadata
-
-from email.header import Header, decode_header, make_header
-from email.message import Message
-from typing import Any, Dict, List, Union
-
-METADATA_FIELDS = [
-    # Name, Multiple-Use
-    ("Metadata-Version", False),
-    ("Name", False),
-    ("Version", False),
-    ("Dynamic", True),
-    ("Platform", True),
-    ("Supported-Platform", True),
-    ("Summary", False),
-    ("Description", False),
-    ("Description-Content-Type", False),
-    ("Keywords", False),
-    ("Home-page", False),
-    ("Download-URL", False),
-    ("Author", False),
-    ("Author-email", False),
-    ("Maintainer", False),
-    ("Maintainer-email", False),
-    ("License", False),
-    ("Classifier", True),
-    ("Requires-Dist", True),
-    ("Requires-Python", False),
-    ("Requires-External", True),
-    ("Project-URL", True),
-    ("Provides-Extra", True),
-    ("Provides-Dist", True),
-    ("Obsoletes-Dist", True),
-]
-
-
-def json_name(field: str) -> str:
-    return field.lower().replace("-", "_")
-
-
-def msg_to_json(msg: Message) -> Dict[str, Any]:
-    """Convert a Message object into a JSON-compatible dictionary."""
-
-    def sanitise_header(h: Union[Header, str]) -> str:
-        if isinstance(h, Header):
-            chunks = []
-            for bytes, encoding in decode_header(h):
-                if encoding == "unknown-8bit":
-                    try:
-                        # See if UTF-8 works
-                        bytes.decode("utf-8")
-                        encoding = "utf-8"
-                    except UnicodeDecodeError:
-                        # If not, latin1 at least won't fail
-                        encoding = "latin1"
-                chunks.append((bytes, encoding))
-            return str(make_header(chunks))
-        return str(h)
-
-    result = {}
-    for field, multi in METADATA_FIELDS:
-        if field not in msg:
-            continue
-        key = json_name(field)
-        if multi:
-            value: Union[str, List[str]] = [
-                sanitise_header(v) for v in msg.get_all(field)
-            ]
-        else:
-            value = sanitise_header(msg.get(field))
-            if key == "keywords":
-                # Accept both comma-separated and space-separated
-                # forms, for better compatibility with old data.
-                if "," in value:
-                    value = [v.strip() for v in value.split(",")]
-                else:
-                    value = value.split()
-        result[key] = value
-
-    payload = msg.get_payload()
-    if payload:
-        result["description"] = payload
-
-    return result
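
To see what the deleted msg_to_json produced, parse a small METADATA document with the standard library; the field values here are invented:

import email.parser

raw = (
    "Metadata-Version: 2.1\n"
    "Name: example\n"
    "Version: 1.0\n"
    "Classifier: Programming Language :: Python :: 3\n"
    "Keywords: packaging,metadata\n"
    "\n"
    "A longer description.\n"
)
msg = email.parser.Parser().parsestr(raw)
# msg_to_json(msg) would return roughly:
# {"metadata_version": "2.1", "name": "example", "version": "1.0",
#  "classifier": ["Programming Language :: Python :: 3"],
#  "keywords": ["packaging", "metadata"],
#  "description": "A longer description.\n"}
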
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/metadata/base.py b/venv/lib/python3.10/site-packages/pip/_internal/metadata/base.py
index cafb79f..1a5a781 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/metadata/base.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/metadata/base.py
@@ -1,6 +1,5 @@
 import csv
 import email.message
-import functools
 import json
 import logging
 import pathlib
@@ -9,14 +8,11 @@
 from typing import (
     IO,
     TYPE_CHECKING,
-    Any,
     Collection,
     Container,
-    Dict,
     Iterable,
     Iterator,
     List,
-    NamedTuple,
     Optional,
     Tuple,
     Union,
@@ -35,13 +31,13 @@
     DirectUrlValidationError,
 )
 from pip._internal.utils.compat import stdlib_pkgs  # TODO: Move definition here.
-from pip._internal.utils.egg_link import egg_link_path_from_sys_path
+from pip._internal.utils.egg_link import (
+    egg_link_path_from_location,
+    egg_link_path_from_sys_path,
+)
 from pip._internal.utils.misc import is_local, normalize_path
-from pip._internal.utils.packaging import safe_extra
 from pip._internal.utils.urls import url_to_path
 
-from ._json import msg_to_json
-
 if TYPE_CHECKING:
     from typing import Protocol
 else:
@@ -49,7 +45,7 @@
 
 DistributionVersion = Union[LegacyVersion, Version]
 
-InfoPath = Union[str, pathlib.PurePath]
+InfoPath = Union[str, pathlib.PurePosixPath]
 
 logger = logging.getLogger(__name__)
 
@@ -98,53 +94,7 @@ def _convert_installed_files_path(
     return str(pathlib.Path(*info, *entry))
 
 
-class RequiresEntry(NamedTuple):
-    requirement: str
-    extra: str
-    marker: str
-
-
 class BaseDistribution(Protocol):
-    @classmethod
-    def from_directory(cls, directory: str) -> "BaseDistribution":
-        """Load the distribution from a metadata directory.
-
-        :param directory: Path to a metadata directory, e.g. ``.dist-info``.
-        """
-        raise NotImplementedError()
-
-    @classmethod
-    def from_metadata_file_contents(
-        cls,
-        metadata_contents: bytes,
-        filename: str,
-        project_name: str,
-    ) -> "BaseDistribution":
-        """Load the distribution from the contents of a METADATA file.
-
-        This is used to implement PEP 658 by generating a "shallow" dist object that can
-        be used for resolution without downloading or building the actual dist yet.
-
-        :param metadata_contents: The contents of a METADATA file.
-        :param filename: File name for the dist with this metadata.
-        :param project_name: Name of the project this dist represents.
-        """
-        raise NotImplementedError()
-
-    @classmethod
-    def from_wheel(cls, wheel: "Wheel", name: str) -> "BaseDistribution":
-        """Load the distribution from a given wheel.
-
-        :param wheel: A concrete wheel definition.
-        :param name: File name of the wheel.
-
-        :raises InvalidWheel: Whenever loading of the wheel causes a
-            :py:exc:`zipfile.BadZipFile` exception to be thrown.
-        :raises UnsupportedWheel: If the wheel is a valid zip, but malformed
-            internally.
-        """
-        raise NotImplementedError()
-
     def __repr__(self) -> str:
         return f"{self.raw_name} {self.version} ({self.location})"
 
@@ -198,7 +148,14 @@ def installed_location(self) -> Optional[str]:
 
         The returned location is normalized (in particular, with symlinks removed).
         """
-        raise NotImplementedError()
+        egg_link = egg_link_path_from_location(self.raw_name)
+        if egg_link:
+            location = egg_link
+        elif self.location:
+            location = self.location
+        else:
+            return None
+        return normalize_path(location)
 
     @property
     def info_location(self) -> Optional[str]:
@@ -329,10 +286,6 @@ def installer(self) -> str:
                 return cleaned_line
         return ""
 
-    @property
-    def requested(self) -> bool:
-        return self.is_file("REQUESTED")
-
     @property
     def editable(self) -> bool:
         return bool(self.editable_project_location)
@@ -363,19 +316,21 @@ def is_file(self, path: InfoPath) -> bool:
         """Check whether an entry in the info directory is a file."""
         raise NotImplementedError()
 
-    def iter_distutils_script_names(self) -> Iterator[str]:
-        """Find distutils 'scripts' entries metadata.
+    def iterdir(self, path: InfoPath) -> Iterator[pathlib.PurePosixPath]:
+        """Iterate through a directory in the info directory.
+
+        Each item yielded is a path relative to the info directory.
 
-        If 'scripts' is supplied in ``setup.py``, distutils records those in the
-        installed distribution's ``scripts`` directory, a file for each script.
+        :raise FileNotFoundError: If ``path`` does not exist in the directory.
+        :raise NotADirectoryError: If ``path`` does not point to a directory.
         """
         raise NotImplementedError()
 
     def read_text(self, path: InfoPath) -> str:
         """Read a file in the info directory.
 
-        :raise FileNotFoundError: If ``path`` does not exist in the directory.
-        :raise NoneMetadataError: If ``path`` exists in the info directory, but
+        :raise FileNotFoundError: If ``name`` does not exist in the directory.
+        :raise NoneMetadataError: If ``name`` exists in the info directory, but
             cannot be read.
         """
         raise NotImplementedError()
@@ -383,17 +338,6 @@ def read_text(self, path: InfoPath) -> str:
     def iter_entry_points(self) -> Iterable[BaseEntryPoint]:
         raise NotImplementedError()
 
-    def _metadata_impl(self) -> email.message.Message:
-        raise NotImplementedError()
-
-    @functools.lru_cache(maxsize=1)
-    def _metadata_cached(self) -> email.message.Message:
-        # When we drop python 3.7 support, move this to the metadata property and use
-        # functools.cached_property instead of lru_cache.
-        metadata = self._metadata_impl()
-        self._add_egg_info_requires(metadata)
-        return metadata
-
     @property
     def metadata(self) -> email.message.Message:
         """Metadata of distribution parsed from e.g. METADATA or PKG-INFO.
@@ -403,18 +347,7 @@ def metadata(self) -> email.message.Message:
         :raises NoneMetadataError: If the metadata file is available, but does
             not contain valid metadata.
         """
-        return self._metadata_cached()
-
-    @property
-    def metadata_dict(self) -> Dict[str, Any]:
-        """PEP 566 compliant JSON-serializable representation of METADATA or PKG-INFO.
-
-        This should return an empty dict if the metadata file is unavailable.
-
-        :raises NoneMetadataError: If the metadata file is available, but does
-            not contain valid metadata.
-        """
-        return msg_to_json(self.metadata)
+        raise NotImplementedError()
 
     @property
     def metadata_version(self) -> Optional[str]:
@@ -493,7 +426,7 @@ def _iter_declared_entries_from_legacy(self) -> Optional[Iterator[str]]:
         )
 
     def iter_declared_entries(self) -> Optional[Iterator[str]]:
-        """Iterate through file entries declared in this distribution.
+        """Iterate through file entires declared in this distribution.
 
         For modern .dist-info distributions, this is the files listed in the
         ``RECORD`` metadata file. For legacy setuptools distributions, this
@@ -508,76 +441,6 @@ def iter_declared_entries(self) -> Optional[Iterator[str]]:
             or self._iter_declared_entries_from_legacy()
         )
 
-    def _iter_requires_txt_entries(self) -> Iterator[RequiresEntry]:
-        """Parse a ``requires.txt`` in an egg-info directory.
-
-        This is an INI-ish format where an egg-info stores dependencies. A
-        section name encodes an extra and, optionally, an environment marker,
-        while each entry is an arbitrary string (not a key-value pair)
-        representing a dependency as a requirement string (no markers).
-
-        There is a construct in ``importlib.metadata`` called ``Sectioned`` that
-        does mostly the same, but the format is currently considered private.
-        """
-        try:
-            content = self.read_text("requires.txt")
-        except FileNotFoundError:
-            return
-        extra = marker = ""  # Section-less entries don't have markers.
-        for line in content.splitlines():
-            line = line.strip()
-            if not line or line.startswith("#"):  # Comment; ignored.
-                continue
-            if line.startswith("[") and line.endswith("]"):  # A section header.
-                extra, _, marker = line.strip("[]").partition(":")
-                continue
-            yield RequiresEntry(requirement=line, extra=extra, marker=marker)
-
-    def _iter_egg_info_extras(self) -> Iterable[str]:
-        """Get extras from the egg-info directory."""
-        known_extras = {""}
-        for entry in self._iter_requires_txt_entries():
-            if entry.extra in known_extras:
-                continue
-            known_extras.add(entry.extra)
-            yield entry.extra
-
-    def _iter_egg_info_dependencies(self) -> Iterable[str]:
-        """Get distribution dependencies from the egg-info directory.
-
-        To ease parsing, this converts a legacy dependency entry into a PEP 508
-        requirement string. Like ``_iter_requires_txt_entries()``, there is code
-        in ``importlib.metadata`` that does mostly the same, but it does not do
-        exactly what we need.
-
-        Namely, ``importlib.metadata`` does not normalize the extra name before
-        putting it into the requirement string, which causes marker comparison
-        to fail because the dist-info format does normalize. This is consistent in
-        all currently available PEP 517 backends, although not standardized.
-        """
-        for entry in self._iter_requires_txt_entries():
-            if entry.extra and entry.marker:
-                marker = f'({entry.marker}) and extra == "{safe_extra(entry.extra)}"'
-            elif entry.extra:
-                marker = f'extra == "{safe_extra(entry.extra)}"'
-            elif entry.marker:
-                marker = entry.marker
-            else:
-                marker = ""
-            if marker:
-                yield f"{entry.requirement} ; {marker}"
-            else:
-                yield entry.requirement
-
-    def _add_egg_info_requires(self, metadata: email.message.Message) -> None:
-        """Add egg-info requires.txt information to the metadata."""
-        if not metadata.get_all("Requires-Dist"):
-            for dep in self._iter_egg_info_dependencies():
-                metadata["Requires-Dist"] = dep
-        if not metadata.get_all("Provides-Extra"):
-            for extra in self._iter_egg_info_extras():
-                metadata["Provides-Extra"] = extra
-
 
 class BaseEnvironment:
     """An environment containing distributions to introspect."""
@@ -607,8 +470,8 @@ def _iter_distributions(self) -> Iterator["BaseDistribution"]:
         """
         raise NotImplementedError()
 
-    def iter_all_distributions(self) -> Iterator[BaseDistribution]:
-        """Iterate through all installed distributions without any filtering."""
+    def iter_distributions(self) -> Iterator["BaseDistribution"]:
+        """Iterate through installed distributions."""
         for dist in self._iter_distributions():
             # Make sure the distribution actually comes from a valid Python
             # packaging distribution. Pip's AdjacentTempDirectory leaves folders
@@ -638,11 +501,6 @@ def iter_installed_distributions(
     ) -> Iterator[BaseDistribution]:
         """Return a list of installed distributions.
 
-        This is based on ``iter_all_distributions()`` with additional filtering
-        options. Note that ``iter_installed_distributions()`` without arguments
-        is *not* equal to ``iter_all_distributions()``, since some of the
-        configurations exclude packages by default.
-
         :param local_only: If True (default), only return installations
         local to the current virtualenv, if in a virtualenv.
         :param skip: An iterable of canonicalized project names to ignore;
@@ -652,7 +510,7 @@ def iter_installed_distributions(
         :param user_only: If True, only report installations in the user
         site directory.
         """
-        it = self.iter_all_distributions()
+        it = self.iter_distributions()
         if local_only:
             it = (d for d in it if d.local)
         if not include_editables:
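
A worked example makes the removed requires.txt helpers above easier to follow. Given an egg-info whose requires.txt contains (contents invented for illustration):

    requests

    [socks]
    PySocks!=1.5.7

    [:python_version < "3.8"]
    importlib-metadata

the removed _iter_egg_info_dependencies() would yield roughly:

    requests
    PySocks!=1.5.7 ; extra == "socks"
    importlib-metadata ; python_version < "3.8"
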
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/metadata/importlib/__init__.py b/venv/lib/python3.10/site-packages/pip/_internal/metadata/importlib/__init__.py
deleted file mode 100644
index 5e7af9f..0000000
--- a/venv/lib/python3.10/site-packages/pip/_internal/metadata/importlib/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-from ._dists import Distribution
-from ._envs import Environment
-
-__all__ = ["Distribution", "Environment"]
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/metadata/importlib/_compat.py b/venv/lib/python3.10/site-packages/pip/_internal/metadata/importlib/_compat.py
deleted file mode 100644
index 593bff2..0000000
--- a/venv/lib/python3.10/site-packages/pip/_internal/metadata/importlib/_compat.py
+++ /dev/null
@@ -1,55 +0,0 @@
-import importlib.metadata
-from typing import Any, Optional, Protocol, cast
-
-
-class BadMetadata(ValueError):
-    def __init__(self, dist: importlib.metadata.Distribution, *, reason: str) -> None:
-        self.dist = dist
-        self.reason = reason
-
-    def __str__(self) -> str:
-        return f"Bad metadata in {self.dist} ({self.reason})"
-
-
-class BasePath(Protocol):
-    """A protocol that various path objects conform.
-
-    This exists because importlib.metadata uses both ``pathlib.Path`` and
-    ``zipfile.Path``, and we need a common base for type hints (Union does not
-    work well since ``zipfile.Path`` is too new for our linter setup).
-
-    This is not meant to be exhaustive; it only contains things present in
-    both classes *that we need*.
-    """
-
-    @property
-    def name(self) -> str:
-        raise NotImplementedError()
-
-    @property
-    def parent(self) -> "BasePath":
-        raise NotImplementedError()
-
-
-def get_info_location(d: importlib.metadata.Distribution) -> Optional[BasePath]:
-    """Find the path to the distribution's metadata directory.
-
-    HACK: This relies on importlib.metadata's private ``_path`` attribute. Not
-    all distributions exist on disk, so importlib.metadata is correct to not
-    expose the attribute as public. But pip's code base is old and not as clean,
-    so we do this to avoid having to rewrite too many things. Hopefully we can
-    eliminate this some day.
-    """
-    return getattr(d, "_path", None)
-
-
-def get_dist_name(dist: importlib.metadata.Distribution) -> str:
-    """Get the distribution's project name.
-
-    The ``name`` attribute is only available in Python 3.10 or later. We are
-    targeting exactly that, but Mypy does not know this.
-    """
-    name = cast(Any, dist).name
-    if not isinstance(name, str):
-        raise BadMetadata(dist, reason="invalid metadata entry 'name'")
-    return name
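
The _path peek that the deleted get_info_location relied on can be reproduced directly; this assumes a normal on-disk install of pip on CPython 3.10+:

import importlib.metadata

dist = importlib.metadata.distribution("pip")
print(dist.name)                     # the 3.10+ name attribute the deleted code casts to
print(getattr(dist, "_path", None))  # private; typically the .dist-info directory
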
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/metadata/importlib/_dists.py b/venv/lib/python3.10/site-packages/pip/_internal/metadata/importlib/_dists.py
deleted file mode 100644
index 65c043c..0000000
--- a/venv/lib/python3.10/site-packages/pip/_internal/metadata/importlib/_dists.py
+++ /dev/null
@@ -1,224 +0,0 @@
-import email.message
-import importlib.metadata
-import os
-import pathlib
-import zipfile
-from typing import (
-    Collection,
-    Dict,
-    Iterable,
-    Iterator,
-    Mapping,
-    Optional,
-    Sequence,
-    cast,
-)
-
-from pip._vendor.packaging.requirements import Requirement
-from pip._vendor.packaging.utils import NormalizedName, canonicalize_name
-from pip._vendor.packaging.version import parse as parse_version
-
-from pip._internal.exceptions import InvalidWheel, UnsupportedWheel
-from pip._internal.metadata.base import (
-    BaseDistribution,
-    BaseEntryPoint,
-    DistributionVersion,
-    InfoPath,
-    Wheel,
-)
-from pip._internal.utils.misc import normalize_path
-from pip._internal.utils.packaging import safe_extra
-from pip._internal.utils.temp_dir import TempDirectory
-from pip._internal.utils.wheel import parse_wheel, read_wheel_metadata_file
-
-from ._compat import BasePath, get_dist_name
-
-
-class WheelDistribution(importlib.metadata.Distribution):
-    """An ``importlib.metadata.Distribution`` read from a wheel.
-
-    Although ``importlib.metadata.PathDistribution`` accepts ``zipfile.Path``,
-    its implementation is too "lazy" for pip's needs (we can't keep the ZipFile
-    handle open for the entire lifetime of the distribution object).
-
-    This implementation eagerly reads the entire metadata directory into
-    memory instead, and operates on that.
-    """
-
-    def __init__(
-        self,
-        files: Mapping[pathlib.PurePosixPath, bytes],
-        info_location: pathlib.PurePosixPath,
-    ) -> None:
-        self._files = files
-        self.info_location = info_location
-
-    @classmethod
-    def from_zipfile(
-        cls,
-        zf: zipfile.ZipFile,
-        name: str,
-        location: str,
-    ) -> "WheelDistribution":
-        info_dir, _ = parse_wheel(zf, name)
-        paths = (
-            (name, pathlib.PurePosixPath(name.split("/", 1)[-1]))
-            for name in zf.namelist()
-            if name.startswith(f"{info_dir}/")
-        )
-        files = {
-            relpath: read_wheel_metadata_file(zf, fullpath)
-            for fullpath, relpath in paths
-        }
-        info_location = pathlib.PurePosixPath(location, info_dir)
-        return cls(files, info_location)
-
-    def iterdir(self, path: InfoPath) -> Iterator[pathlib.PurePosixPath]:
-        # Only allow iterating through the metadata directory.
-        if pathlib.PurePosixPath(str(path)) in self._files:
-            return iter(self._files)
-        raise FileNotFoundError(path)
-
-    def read_text(self, filename: str) -> Optional[str]:
-        try:
-            data = self._files[pathlib.PurePosixPath(filename)]
-        except KeyError:
-            return None
-        try:
-            text = data.decode("utf-8")
-        except UnicodeDecodeError as e:
-            wheel = self.info_location.parent
-            error = f"Error decoding metadata for {wheel}: {e} in {filename} file"
-            raise UnsupportedWheel(error)
-        return text
-
-
-class Distribution(BaseDistribution):
-    def __init__(
-        self,
-        dist: importlib.metadata.Distribution,
-        info_location: Optional[BasePath],
-        installed_location: Optional[BasePath],
-    ) -> None:
-        self._dist = dist
-        self._info_location = info_location
-        self._installed_location = installed_location
-
-    @classmethod
-    def from_directory(cls, directory: str) -> BaseDistribution:
-        info_location = pathlib.Path(directory)
-        dist = importlib.metadata.Distribution.at(info_location)
-        return cls(dist, info_location, info_location.parent)
-
-    @classmethod
-    def from_metadata_file_contents(
-        cls,
-        metadata_contents: bytes,
-        filename: str,
-        project_name: str,
-    ) -> BaseDistribution:
-        # Generate temp dir to contain the metadata file, and write the file contents.
-        temp_dir = pathlib.Path(
-            TempDirectory(kind="metadata", globally_managed=True).path
-        )
-        metadata_path = temp_dir / "METADATA"
-        metadata_path.write_bytes(metadata_contents)
-        # Construct dist pointing to the newly created directory.
-        dist = importlib.metadata.Distribution.at(metadata_path.parent)
-        return cls(dist, metadata_path.parent, None)
-
-    @classmethod
-    def from_wheel(cls, wheel: Wheel, name: str) -> BaseDistribution:
-        try:
-            with wheel.as_zipfile() as zf:
-                dist = WheelDistribution.from_zipfile(zf, name, wheel.location)
-        except zipfile.BadZipFile as e:
-            raise InvalidWheel(wheel.location, name) from e
-        except UnsupportedWheel as e:
-            raise UnsupportedWheel(f"{name} has an invalid wheel, {e}")
-        return cls(dist, dist.info_location, pathlib.PurePosixPath(wheel.location))
-
-    @property
-    def location(self) -> Optional[str]:
-        if self._info_location is None:
-            return None
-        return str(self._info_location.parent)
-
-    @property
-    def info_location(self) -> Optional[str]:
-        if self._info_location is None:
-            return None
-        return str(self._info_location)
-
-    @property
-    def installed_location(self) -> Optional[str]:
-        if self._installed_location is None:
-            return None
-        return normalize_path(str(self._installed_location))
-
-    def _get_dist_name_from_location(self) -> Optional[str]:
-        """Try to get the name from the metadata directory name.
-
-        This is much faster than reading metadata.
-        """
-        if self._info_location is None:
-            return None
-        stem, suffix = os.path.splitext(self._info_location.name)
-        if suffix not in (".dist-info", ".egg-info"):
-            return None
-        return stem.split("-", 1)[0]
-
-    @property
-    def canonical_name(self) -> NormalizedName:
-        name = self._get_dist_name_from_location() or get_dist_name(self._dist)
-        return canonicalize_name(name)
-
-    @property
-    def version(self) -> DistributionVersion:
-        return parse_version(self._dist.version)
-
-    def is_file(self, path: InfoPath) -> bool:
-        return self._dist.read_text(str(path)) is not None
-
-    def iter_distutils_script_names(self) -> Iterator[str]:
-        # A distutils installation is always "flat" (not in e.g. egg form), so
-        # if this distribution's info location is NOT a pathlib.Path (but e.g.
-        # zipfile.Path), it can never contain any distutils scripts.
-        if not isinstance(self._info_location, pathlib.Path):
-            return
-        for child in self._info_location.joinpath("scripts").iterdir():
-            yield child.name
-
-    def read_text(self, path: InfoPath) -> str:
-        content = self._dist.read_text(str(path))
-        if content is None:
-            raise FileNotFoundError(path)
-        return content
-
-    def iter_entry_points(self) -> Iterable[BaseEntryPoint]:
-        # importlib.metadata's EntryPoint structure satisfies BaseEntryPoint.
-        return self._dist.entry_points
-
-    def _metadata_impl(self) -> email.message.Message:
-        # From Python 3.10+, importlib.metadata declares PackageMetadata as the
-        # return type. This protocol is unfortunately a disaster now and misses
-        # a ton of fields that we need, including get() and get_payload(). We
-        # rely on the implementation that the object is actually a Message now,
-        # until upstream can improve the protocol. (python/cpython#94952)
-        return cast(email.message.Message, self._dist.metadata)
-
-    def iter_provided_extras(self) -> Iterable[str]:
-        return (
-            safe_extra(extra) for extra in self.metadata.get_all("Provides-Extra", [])
-        )
-
-    def iter_dependencies(self, extras: Collection[str] = ()) -> Iterable[Requirement]:
-        contexts: Sequence[Dict[str, str]] = [{"extra": safe_extra(e)} for e in extras]
-        for req_string in self.metadata.get_all("Requires-Dist", []):
-            req = Requirement(req_string)
-            if not req.marker:
-                yield req
-            elif not extras and req.marker.evaluate({"extra": ""}):
-                yield req
-            elif any(req.marker.evaluate(context) for context in contexts):
-                yield req
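
The eager read that WheelDistribution.from_zipfile performed can be sketched with the standard library alone (pip's parse_wheel and read_wheel_metadata_file add validation on top); the wheel filename is hypothetical:

import pathlib
import zipfile

with zipfile.ZipFile("example-1.0-py3-none-any.whl") as zf:
    info_dir = next(
        name.split("/", 1)[0]
        for name in zf.namelist()
        if name.split("/", 1)[0].endswith(".dist-info")
    )
    # Read every file under the .dist-info directory into memory up front,
    # so the ZipFile handle does not need to stay open.
    files = {
        pathlib.PurePosixPath(name.split("/", 1)[-1]): zf.read(name)
        for name in zf.namelist()
        if name.startswith(f"{info_dir}/")
    }

print(files[pathlib.PurePosixPath("METADATA")].decode("utf-8"))
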
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/metadata/importlib/_envs.py b/venv/lib/python3.10/site-packages/pip/_internal/metadata/importlib/_envs.py
deleted file mode 100644
index cbec59e..0000000
--- a/venv/lib/python3.10/site-packages/pip/_internal/metadata/importlib/_envs.py
+++ /dev/null
@@ -1,188 +0,0 @@
-import functools
-import importlib.metadata
-import logging
-import os
-import pathlib
-import sys
-import zipfile
-import zipimport
-from typing import Iterator, List, Optional, Sequence, Set, Tuple
-
-from pip._vendor.packaging.utils import NormalizedName, canonicalize_name
-
-from pip._internal.metadata.base import BaseDistribution, BaseEnvironment
-from pip._internal.models.wheel import Wheel
-from pip._internal.utils.deprecation import deprecated
-from pip._internal.utils.filetypes import WHEEL_EXTENSION
-
-from ._compat import BadMetadata, BasePath, get_dist_name, get_info_location
-from ._dists import Distribution
-
-logger = logging.getLogger(__name__)
-
-
-def _looks_like_wheel(location: str) -> bool:
-    if not location.endswith(WHEEL_EXTENSION):
-        return False
-    if not os.path.isfile(location):
-        return False
-    if not Wheel.wheel_file_re.match(os.path.basename(location)):
-        return False
-    return zipfile.is_zipfile(location)
-
-
-class _DistributionFinder:
-    """Finder to locate distributions.
-
-    The main purpose of this class is to memoize found distributions' names, so
-    only one distribution is returned for each package name. A lot of pip code
-    assumes this (because it is setuptools's behavior), and not doing the same
-    can potentially cause a distribution in a lower precedence path to override a
-    higher precedence one if the caller is not careful.
-
-    Eventually we probably want to make it possible to see lower precedence
-    installations as well. It's a useful feature, after all.
-    """
-
-    FoundResult = Tuple[importlib.metadata.Distribution, Optional[BasePath]]
-
-    def __init__(self) -> None:
-        self._found_names: Set[NormalizedName] = set()
-
-    def _find_impl(self, location: str) -> Iterator[FoundResult]:
-        """Find distributions in a location."""
-        # Skip looking inside a wheel. Since a package inside a wheel is not
-        # always valid (due to .data directories etc.), its .dist-info entry
-        # should not be considered an installed distribution.
-        if _looks_like_wheel(location):
-            return
-        # To know exactly where we find a distribution, we have to feed in the
-        # paths one by one, instead of dumping the list to importlib.metadata.
-        for dist in importlib.metadata.distributions(path=[location]):
-            info_location = get_info_location(dist)
-            try:
-                raw_name = get_dist_name(dist)
-            except BadMetadata as e:
-                logger.warning("Skipping %s due to %s", info_location, e.reason)
-                continue
-            normalized_name = canonicalize_name(raw_name)
-            if normalized_name in self._found_names:
-                continue
-            self._found_names.add(normalized_name)
-            yield dist, info_location
-
-    def find(self, location: str) -> Iterator[BaseDistribution]:
-        """Find distributions in a location.
-
-        The path can be either a directory, or a ZIP archive.
-        """
-        for dist, info_location in self._find_impl(location):
-            if info_location is None:
-                installed_location: Optional[BasePath] = None
-            else:
-                installed_location = info_location.parent
-            yield Distribution(dist, info_location, installed_location)
-
-    def find_linked(self, location: str) -> Iterator[BaseDistribution]:
-        """Read location in egg-link files and return distributions in there.
-
-        The path should be a directory; otherwise this returns nothing. This
-        follows how setuptools does this for compatibility. The first non-empty
-        line in the egg-link is read as a path (resolved against the egg-link's
-        containing directory if relative). Distributions found at that linked
-        location are returned.
-        """
-        path = pathlib.Path(location)
-        if not path.is_dir():
-            return
-        for child in path.iterdir():
-            if child.suffix != ".egg-link":
-                continue
-            with child.open() as f:
-                lines = (line.strip() for line in f)
-                target_rel = next((line for line in lines if line), "")
-            if not target_rel:
-                continue
-            target_location = str(path.joinpath(target_rel))
-            for dist, info_location in self._find_impl(target_location):
-                yield Distribution(dist, info_location, path)
-
-    def _find_eggs_in_dir(self, location: str) -> Iterator[BaseDistribution]:
-        from pip._vendor.pkg_resources import find_distributions
-
-        from pip._internal.metadata import pkg_resources as legacy
-
-        with os.scandir(location) as it:
-            for entry in it:
-                if not entry.name.endswith(".egg"):
-                    continue
-                for dist in find_distributions(entry.path):
-                    yield legacy.Distribution(dist)
-
-    def _find_eggs_in_zip(self, location: str) -> Iterator[BaseDistribution]:
-        from pip._vendor.pkg_resources import find_eggs_in_zip
-
-        from pip._internal.metadata import pkg_resources as legacy
-
-        try:
-            importer = zipimport.zipimporter(location)
-        except zipimport.ZipImportError:
-            return
-        for dist in find_eggs_in_zip(importer, location):
-            yield legacy.Distribution(dist)
-
-    def find_eggs(self, location: str) -> Iterator[BaseDistribution]:
-        """Find eggs in a location.
-
-        This actually uses the old *pkg_resources* backend. We likely want to
-        deprecate this so we can eventually remove the *pkg_resources*
-        dependency entirely. Before that, this should first emit a deprecation
-        warning for a few releases when the fallback is used, since importing
-        *pkg_resources* is slow for those who don't need it.
-        """
-        if os.path.isdir(location):
-            yield from self._find_eggs_in_dir(location)
-        if zipfile.is_zipfile(location):
-            yield from self._find_eggs_in_zip(location)
-
-
-@functools.lru_cache(maxsize=None)  # Warn a distribution exactly once.
-def _emit_egg_deprecation(location: Optional[str]) -> None:
-    deprecated(
-        reason=f"Loading egg at {location} is deprecated.",
-        replacement="to use pip for package installation.",
-        gone_in=None,
-    )
-
-
-class Environment(BaseEnvironment):
-    def __init__(self, paths: Sequence[str]) -> None:
-        self._paths = paths
-
-    @classmethod
-    def default(cls) -> BaseEnvironment:
-        return cls(sys.path)
-
-    @classmethod
-    def from_paths(cls, paths: Optional[List[str]]) -> BaseEnvironment:
-        if paths is None:
-            return cls(sys.path)
-        return cls(paths)
-
-    def _iter_distributions(self) -> Iterator[BaseDistribution]:
-        finder = _DistributionFinder()
-        for location in self._paths:
-            yield from finder.find(location)
-            for dist in finder.find_eggs(location):
-                # _emit_egg_deprecation(dist.location)  # TODO: Enable this.
-                yield dist
-            # This must go last because that's how pkg_resources tie-breaks.
-            yield from finder.find_linked(location)
-
-    def get_distribution(self, name: str) -> Optional[BaseDistribution]:
-        matches = (
-            distribution
-            for distribution in self.iter_all_distributions()
-            if distribution.canonical_name == canonicalize_name(name)
-        )
-        return next(matches, None)
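
The egg-link convention that find_linked followed is simple enough to show standalone; the path below is hypothetical:

import pathlib

link = pathlib.Path("venv/lib/python3.10/site-packages/example.egg-link")
lines = (line.strip() for line in link.read_text().splitlines())
# The first non-empty line names the target, resolved against the
# egg-link's own directory when it is relative.
target_rel = next((line for line in lines if line), "")
if target_rel:
    print(link.parent.joinpath(target_rel))
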
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/metadata/pkg_resources.py b/venv/lib/python3.10/site-packages/pip/_internal/metadata/pkg_resources.py
index f330ef1..d39f0ba 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/metadata/pkg_resources.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/metadata/pkg_resources.py
@@ -2,6 +2,7 @@
 import email.parser
 import logging
 import os
+import pathlib
 import zipfile
 from typing import Collection, Iterable, Iterator, List, Mapping, NamedTuple, Optional
 
@@ -11,8 +12,7 @@
 from pip._vendor.packaging.version import parse as parse_version
 
 from pip._internal.exceptions import InvalidWheel, NoneMetadataError, UnsupportedWheel
-from pip._internal.utils.egg_link import egg_link_path_from_location
-from pip._internal.utils.misc import display_path, normalize_path
+from pip._internal.utils.misc import display_path
 from pip._internal.utils.wheel import parse_wheel, read_wheel_metadata_file
 
 from .base import (
@@ -33,7 +33,7 @@ class EntryPoint(NamedTuple):
     group: str
 
 
-class InMemoryMetadata:
+class WheelMetadata:
     """IMetadataProvider that reads metadata files from a dictionary.
 
     This also maps metadata decoding exceptions to our internal exception type.
@@ -73,7 +73,7 @@ def __init__(self, dist: pkg_resources.Distribution) -> None:
         self._dist = dist
 
     @classmethod
-    def from_directory(cls, directory: str) -> BaseDistribution:
+    def from_directory(cls, directory: str) -> "Distribution":
         dist_dir = directory.rstrip(os.sep)
 
         # Build a PathMetadata object, from path to metadata. :wink:
@@ -93,28 +93,18 @@ def from_directory(cls, directory: str) -> BaseDistribution:
         return cls(dist)
 
     @classmethod
-    def from_metadata_file_contents(
-        cls,
-        metadata_contents: bytes,
-        filename: str,
-        project_name: str,
-    ) -> BaseDistribution:
-        metadata_dict = {
-            "METADATA": metadata_contents,
-        }
-        dist = pkg_resources.DistInfoDistribution(
-            location=filename,
-            metadata=InMemoryMetadata(metadata_dict, filename),
-            project_name=project_name,
-        )
-        return cls(dist)
+    def from_wheel(cls, wheel: Wheel, name: str) -> "Distribution":
+        """Load the distribution from a given wheel.
 
-    @classmethod
-    def from_wheel(cls, wheel: Wheel, name: str) -> BaseDistribution:
+        :raises InvalidWheel: Whenever loading of the wheel causes a
+            :py:exc:`zipfile.BadZipFile` exception to be thrown.
+        :raises UnsupportedWheel: If the wheel is a valid zip, but malformed
+            internally.
+        """
         try:
             with wheel.as_zipfile() as zf:
                 info_dir, _ = parse_wheel(zf, name)
-                metadata_dict = {
+                metadata_text = {
                     path.split("/", 1)[-1]: read_wheel_metadata_file(zf, path)
                     for path in zf.namelist()
                     if path.startswith(f"{info_dir}/")
@@ -125,7 +115,7 @@ def from_wheel(cls, wheel: Wheel, name: str) -> BaseDistribution:
             raise UnsupportedWheel(f"{name} has an invalid wheel, {e}")
         dist = pkg_resources.DistInfoDistribution(
             location=wheel.location,
-            metadata=InMemoryMetadata(metadata_dict, wheel.location),
+            metadata=WheelMetadata(metadata_text, wheel.location),
             project_name=name,
         )
         return cls(dist)
@@ -134,17 +124,6 @@ def from_wheel(cls, wheel: Wheel, name: str) -> BaseDistribution:
     def location(self) -> Optional[str]:
         return self._dist.location
 
-    @property
-    def installed_location(self) -> Optional[str]:
-        egg_link = egg_link_path_from_location(self.raw_name)
-        if egg_link:
-            location = egg_link
-        elif self.location:
-            location = self.location
-        else:
-            return None
-        return normalize_path(location)
-
     @property
     def info_location(self) -> Optional[str]:
         return self._dist.egg_info
@@ -170,8 +149,14 @@ def version(self) -> DistributionVersion:
     def is_file(self, path: InfoPath) -> bool:
         return self._dist.has_metadata(str(path))
 
-    def iter_distutils_script_names(self) -> Iterator[str]:
-        yield from self._dist.metadata_listdir("scripts")
+    def iterdir(self, path: InfoPath) -> Iterator[pathlib.PurePosixPath]:
+        name = str(path)
+        if not self._dist.has_metadata(name):
+            raise FileNotFoundError(name)
+        if not self._dist.isdir(name):
+            raise NotADirectoryError(name)
+        for child in self._dist.metadata_listdir(name):
+            yield pathlib.PurePosixPath(path, child)
 
     def read_text(self, path: InfoPath) -> str:
         name = str(path)
@@ -188,7 +173,8 @@ def iter_entry_points(self) -> Iterable[BaseEntryPoint]:
                 name, _, value = str(entry_point).partition("=")
                 yield EntryPoint(name=name.strip(), value=value.strip(), group=group)
 
-    def _metadata_impl(self) -> email.message.Message:
+    @property
+    def metadata(self) -> email.message.Message:
         """
         :raises NoneMetadataError: if the distribution reports `has_metadata()`
             True but `get_metadata()` returns None.
@@ -231,10 +217,6 @@ def default(cls) -> BaseEnvironment:
     def from_paths(cls, paths: Optional[List[str]]) -> BaseEnvironment:
         return cls(pkg_resources.WorkingSet(paths))
 
-    def _iter_distributions(self) -> Iterator[BaseDistribution]:
-        for dist in self._ws:
-            yield Distribution(dist)
-
     def _search_distribution(self, name: str) -> Optional[BaseDistribution]:
         """Find a distribution matching the ``name`` in the environment.
 
@@ -242,7 +224,7 @@ def _search_distribution(self, name: str) -> Optional[BaseDistribution]:
         match the behavior of ``pkg_resources.get_distribution()``.
         """
         canonical_name = canonicalize_name(name)
-        for dist in self.iter_all_distributions():
+        for dist in self.iter_distributions():
             if dist.canonical_name == canonical_name:
                 return dist
         return None
@@ -268,3 +250,7 @@ def get_distribution(self, name: str) -> Optional[BaseDistribution]:
         except pkg_resources.DistributionNotFound:
             return None
         return self._search_distribution(name)
+
+    def _iter_distributions(self) -> Iterator[BaseDistribution]:
+        for dist in self._ws:
+            yield Distribution(dist)
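
Both sides of this hunk keep the same top-level entry points, so a quick smoke test of the metadata API reads the same before and after the downgrade (pip internals, not a stable interface):

from pip._internal.metadata import get_default_environment

env = get_default_environment()
for dist in env.iter_installed_distributions():
    print(dist.canonical_name, dist.version)
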
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/models/direct_url.py b/venv/lib/python3.10/site-packages/pip/_internal/models/direct_url.py
index e75feda..92060d4 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/models/direct_url.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/models/direct_url.py
@@ -74,10 +74,14 @@ def __init__(
         vcs: str,
         commit_id: str,
         requested_revision: Optional[str] = None,
+        resolved_revision: Optional[str] = None,
+        resolved_revision_type: Optional[str] = None,
     ) -> None:
         self.vcs = vcs
         self.requested_revision = requested_revision
         self.commit_id = commit_id
+        self.resolved_revision = resolved_revision
+        self.resolved_revision_type = resolved_revision_type
 
     @classmethod
     def _from_dict(cls, d: Optional[Dict[str, Any]]) -> Optional["VcsInfo"]:
@@ -87,6 +91,8 @@ def _from_dict(cls, d: Optional[Dict[str, Any]]) -> Optional["VcsInfo"]:
             vcs=_get_required(d, str, "vcs"),
             commit_id=_get_required(d, str, "commit_id"),
             requested_revision=_get(d, str, "requested_revision"),
+            resolved_revision=_get(d, str, "resolved_revision"),
+            resolved_revision_type=_get(d, str, "resolved_revision_type"),
         )
 
     def _to_dict(self) -> Dict[str, Any]:
@@ -94,6 +100,8 @@ def _to_dict(self) -> Dict[str, Any]:
             vcs=self.vcs,
             requested_revision=self.requested_revision,
             commit_id=self.commit_id,
+            resolved_revision=self.resolved_revision,
+            resolved_revision_type=self.resolved_revision_type,
         )
 
 
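
With the two resolved_revision* fields restored, VcsInfo round-trips the vcs_info block of a PEP 610 direct_url.json in roughly this shape (values are placeholders):

from pip._internal.models.direct_url import VcsInfo

info = VcsInfo(
    vcs="git",
    commit_id="0" * 40,          # placeholder for a real 40-hex commit sha
    requested_revision="main",
)
# info._to_dict() feeds the "vcs_info" key of direct_url.json, e.g.:
#   {"vcs": "git", "requested_revision": "main", "commit_id": "000...000"}
print(info._to_dict())
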
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/models/installation_report.py b/venv/lib/python3.10/site-packages/pip/_internal/models/installation_report.py
deleted file mode 100644
index 965f095..0000000
--- a/venv/lib/python3.10/site-packages/pip/_internal/models/installation_report.py
+++ /dev/null
@@ -1,53 +0,0 @@
-from typing import Any, Dict, Sequence
-
-from pip._vendor.packaging.markers import default_environment
-
-from pip import __version__
-from pip._internal.req.req_install import InstallRequirement
-
-
-class InstallationReport:
-    def __init__(self, install_requirements: Sequence[InstallRequirement]):
-        self._install_requirements = install_requirements
-
-    @classmethod
-    def _install_req_to_dict(cls, ireq: InstallRequirement) -> Dict[str, Any]:
-        assert ireq.download_info, f"No download_info for {ireq}"
-        res = {
-            # PEP 610 json for the download URL. download_info.archive_info.hash may
-            # be absent when the requirement was installed from the wheel cache
-            # and the cache entry was populated by an older pip version that did not
-            # record origin.json.
-            "download_info": ireq.download_info.to_dict(),
-            # is_direct is true if the requirement was a direct URL reference (which
-            # includes editable requirements), and false if the requirement was
-            # downloaded from a PEP 503 index or --find-links.
-            "is_direct": bool(ireq.original_link),
-            # requested is true if the requirement was specified by the user (aka
-            # top level requirement), and false if it was installed as a dependency of a
-            # requirement. https://peps.python.org/pep-0376/#requested
-            "requested": ireq.user_supplied,
-            # PEP 566 json encoding for metadata
-            # https://www.python.org/dev/peps/pep-0566/#json-compatible-metadata
-            "metadata": ireq.get_dist().metadata_dict,
-        }
-        if ireq.user_supplied and ireq.extras:
-            # For top level requirements, the list of requested extras, if any.
-            res["requested_extras"] = list(sorted(ireq.extras))
-        return res
-
-    def to_dict(self) -> Dict[str, Any]:
-        return {
-            "version": "0",
-            "pip_version": __version__,
-            "install": [
-                self._install_req_to_dict(ireq) for ireq in self._install_requirements
-            ],
-            # https://peps.python.org/pep-0508/#environment-markers
-            # TODO: currently, the resolver uses the default environment to evaluate
-            # environment markers, so that is what we report here. In the future, it
-            # should also take into account options such as --python-version or
-            # --platform, perhaps under the form of an environment_override field?
-            # https://github.com/pypa/pip/issues/11198
-            "environment": default_environment(),
-        }
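
The deleted InstallationReport backed pip install --report on the newer pip; on a release new enough to include this module, a dry run shows the report's shape without installing anything (the package name is just an example):

import json
import subprocess
import sys

subprocess.run(
    [sys.executable, "-m", "pip", "install", "--dry-run",
     "--report", "report.json", "requests"],
    check=True,
)
with open("report.json") as f:
    report = json.load(f)
# Keys come from to_dict() above: version, pip_version, install, environment.
print(report["version"], report["pip_version"], len(report["install"]))
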
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/models/link.py b/venv/lib/python3.10/site-packages/pip/_internal/models/link.py
index c792d12..6069b27 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/models/link.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/models/link.py
@@ -1,27 +1,14 @@
 import functools
-import itertools
 import logging
 import os
 import posixpath
 import re
 import urllib.parse
-from dataclasses import dataclass
-from typing import (
-    TYPE_CHECKING,
-    Any,
-    Dict,
-    List,
-    Mapping,
-    NamedTuple,
-    Optional,
-    Tuple,
-    Union,
-)
+from typing import TYPE_CHECKING, Dict, List, NamedTuple, Optional, Tuple, Union
 
 from pip._internal.utils.filetypes import WHEEL_EXTENSION
 from pip._internal.utils.hashes import Hashes
 from pip._internal.utils.misc import (
-    pairwise,
     redact_auth_from_url,
     split_auth_from_netloc,
     splitext,
@@ -30,127 +17,12 @@
 from pip._internal.utils.urls import path_to_url, url_to_path
 
 if TYPE_CHECKING:
-    from pip._internal.index.collector import IndexContent
+    from pip._internal.index.collector import HTMLPage
 
 logger = logging.getLogger(__name__)
 
 
-# Order matters, earlier hashes have precedence over later hashes for what
-# we will pick to use.
-_SUPPORTED_HASHES = ("sha512", "sha384", "sha256", "sha224", "sha1", "md5")
-
-
-@dataclass(frozen=True)
-class LinkHash:
-    """Links to content may have embedded hash values. This class parses those.
-
-    `name` must be any member of `_SUPPORTED_HASHES`.
-
-    This class can be converted to and from `ArchiveInfo`. While ArchiveInfo intends to
-    be JSON-serializable to conform to PEP 610, this class contains the logic for
-    parsing a hash name and value for correctness, and then checking whether that hash
-    conforms to a schema with `.is_hash_allowed()`."""
-
-    name: str
-    value: str
-
-    _hash_re = re.compile(
-        # NB: we do not validate that the second group (.*) is a valid hex
-        # digest. Instead, we simply keep that string in this class, and then check it
-        # against Hashes when hash-checking is needed. This is easier to debug than
-        # proactively discarding an invalid hex digest, as we handle incorrect hashes
-        # and malformed hashes in the same place.
-        r"({choices})=(.*)".format(
-            choices="|".join(re.escape(hash_name) for hash_name in _SUPPORTED_HASHES)
-        ),
-    )
-
-    def __post_init__(self) -> None:
-        assert self._hash_re.match(f"{self.name}={self.value}")
-
-    @classmethod
-    @functools.lru_cache(maxsize=None)
-    def split_hash_name_and_value(cls, url: str) -> Optional["LinkHash"]:
-        """Search a string for a checksum algorithm name and encoded output value."""
-        match = cls._hash_re.search(url)
-        if match is None:
-            return None
-        name, value = match.groups()
-        return cls(name=name, value=value)
-
-    def as_hashes(self) -> Hashes:
-        """Return a Hashes instance which checks only for the current hash."""
-        return Hashes({self.name: [self.value]})
-
-    def is_hash_allowed(self, hashes: Optional[Hashes]) -> bool:
-        """
-        Return True if the current hash is allowed by `hashes`.
-        """
-        if hashes is None:
-            return False
-        return hashes.is_hash_allowed(self.name, hex_digest=self.value)
-
-
-def _clean_url_path_part(part: str) -> str:
-    """
-    Clean a "part" of a URL path (i.e. after splitting on "@" characters).
-    """
-    # We unquote prior to quoting to make sure nothing is double quoted.
-    return urllib.parse.quote(urllib.parse.unquote(part))
-
-
-def _clean_file_url_path(part: str) -> str:
-    """
-    Clean the first part of a URL path that corresponds to a local
-    filesystem path (i.e. the first part after splitting on "@" characters).
-    """
-    # We unquote prior to quoting to make sure nothing is double quoted.
-    # Also, on Windows the path part might contain a drive letter which
-    # should not be quoted. On Linux where drive letters do not
-    # exist, the colon should be quoted. We rely on urllib.request
-    # to do the right thing here.
-    return urllib.request.pathname2url(urllib.request.url2pathname(part))
-
-
-# percent-encoded:                   /
-_reserved_chars_re = re.compile("(@|%2F)", re.IGNORECASE)
-
-
-def _clean_url_path(path: str, is_local_path: bool) -> str:
-    """
-    Clean the path portion of a URL.
-    """
-    if is_local_path:
-        clean_func = _clean_file_url_path
-    else:
-        clean_func = _clean_url_path_part
-
-    # Split on the reserved characters prior to cleaning so that
-    # revision strings in VCS URLs are properly preserved.
-    parts = _reserved_chars_re.split(path)
-
-    cleaned_parts = []
-    for to_clean, reserved in pairwise(itertools.chain(parts, [""])):
-        cleaned_parts.append(clean_func(to_clean))
-        # Normalize %xx escapes (e.g. %2f -> %2F)
-        cleaned_parts.append(reserved.upper())
-
-    return "".join(cleaned_parts)
-
-
-def _ensure_quoted_url(url: str) -> str:
-    """
-    Make sure a link is fully quoted.
-    For example, if ' ' occurs in the URL, it will be replaced with "%20"
-    without double-quoting other characters.
-    """
-    # Split the URL into parts according to the general structure
-    # `scheme://netloc/path;parameters?query#fragment`.
-    result = urllib.parse.urlparse(url)
-    # If the netloc is empty, then the URL refers to a local filesystem path.
-    is_local_path = not result.netloc
-    path = _clean_url_path(result.path, is_local_path=is_local_path)
-    return urllib.parse.urlunparse(result._replace(path=path))
+_SUPPORTED_HASHES = ("sha1", "sha224", "sha384", "sha256", "sha512", "md5")
 
 
 class Link(KeyBasedCompareMixin):
@@ -159,29 +31,23 @@ class Link(KeyBasedCompareMixin):
     __slots__ = [
         "_parsed_url",
         "_url",
-        "_hashes",
         "comes_from",
         "requires_python",
         "yanked_reason",
-        "dist_info_metadata",
-        "link_hash",
         "cache_link_parsing",
     ]
 
     def __init__(
         self,
         url: str,
-        comes_from: Optional[Union[str, "IndexContent"]] = None,
+        comes_from: Optional[Union[str, "HTMLPage"]] = None,
         requires_python: Optional[str] = None,
         yanked_reason: Optional[str] = None,
-        dist_info_metadata: Optional[str] = None,
-        link_hash: Optional[LinkHash] = None,
         cache_link_parsing: bool = True,
-        hashes: Optional[Mapping[str, str]] = None,
     ) -> None:
         """
         :param url: url of the resource pointed to (href of the link)
-        :param comes_from: instance of IndexContent where the link was found,
+        :param comes_from: instance of HTMLPage where the link was found,
             or string.
         :param requires_python: String containing the `Requires-Python`
             metadata field, specified in PEP 345. This may be specified by
@@ -193,21 +59,11 @@ def __init__(
             a simple repository HTML link. If the file has been yanked but
             no reason was provided, this should be the empty string. See
             PEP 592 for more information and the specification.
-        :param dist_info_metadata: the metadata attached to the file, or None if no such
-            metadata is provided. This is the value of the "data-dist-info-metadata"
-            attribute, if present, in a simple repository HTML link. This may be parsed
-            into its own `Link` by `self.metadata_link()`. See PEP 658 for more
-            information and the specification.
-        :param link_hash: a checksum for the content the link points to. If not
-            provided, this will be extracted from the link URL, if the URL has
-            any checksum.
         :param cache_link_parsing: A flag that is used elsewhere to determine
                                    whether resources retrieved from this link
                                    should be cached. PyPI index urls should
                                    generally have this set to False, for
                                    example.
-        :param hashes: A mapping of hash names to digests to allow us to
-                       determine the validity of a download.
         """
 
         # url can be a UNC windows share
@@ -218,80 +74,15 @@ def __init__(
         # Store the url as a private attribute to prevent accidentally
         # trying to set a new value.
         self._url = url
-        self._hashes = hashes if hashes is not None else {}
 
         self.comes_from = comes_from
         self.requires_python = requires_python if requires_python else None
         self.yanked_reason = yanked_reason
-        self.dist_info_metadata = dist_info_metadata
-        self.link_hash = link_hash or LinkHash.split_hash_name_and_value(self._url)
 
         super().__init__(key=url, defining_class=Link)
 
         self.cache_link_parsing = cache_link_parsing
 
-    @classmethod
-    def from_json(
-        cls,
-        file_data: Dict[str, Any],
-        page_url: str,
-    ) -> Optional["Link"]:
-        """
-        Convert an pypi json document from a simple repository page into a Link.
-        """
-        file_url = file_data.get("url")
-        if file_url is None:
-            return None
-
-        url = _ensure_quoted_url(urllib.parse.urljoin(page_url, file_url))
-        pyrequire = file_data.get("requires-python")
-        yanked_reason = file_data.get("yanked")
-        dist_info_metadata = file_data.get("dist-info-metadata")
-        hashes = file_data.get("hashes", {})
-
-        # The Link.yanked_reason expects an empty string instead of a boolean.
-        if yanked_reason and not isinstance(yanked_reason, str):
-            yanked_reason = ""
-        # The Link.yanked_reason expects None instead of False.
-        elif not yanked_reason:
-            yanked_reason = None
-
-        return cls(
-            url,
-            comes_from=page_url,
-            requires_python=pyrequire,
-            yanked_reason=yanked_reason,
-            hashes=hashes,
-            dist_info_metadata=dist_info_metadata,
-        )
-
-    @classmethod
-    def from_element(
-        cls,
-        anchor_attribs: Dict[str, Optional[str]],
-        page_url: str,
-        base_url: str,
-    ) -> Optional["Link"]:
-        """
-        Convert an anchor element's attributes in a simple repository page to a Link.
-        """
-        href = anchor_attribs.get("href")
-        if not href:
-            return None
-
-        url = _ensure_quoted_url(urllib.parse.urljoin(base_url, href))
-        pyrequire = anchor_attribs.get("data-requires-python")
-        yanked_reason = anchor_attribs.get("data-yanked")
-        dist_info_metadata = anchor_attribs.get("data-dist-info-metadata")
-
-        return cls(
-            url,
-            comes_from=page_url,
-            requires_python=pyrequire,
-            yanked_reason=yanked_reason,
-            dist_info_metadata=dist_info_metadata,
-        )
-
     def __str__(self) -> str:
         if self.requires_python:
             rp = f" (requires-python:{self.requires_python})"
@@ -374,36 +165,22 @@ def subdirectory_fragment(self) -> Optional[str]:
             return None
         return match.group(1)
 
-    def metadata_link(self) -> Optional["Link"]:
-        """Implementation of PEP 658 parsing."""
-        # Note that Link.from_element() parsing the "data-dist-info-metadata" attribute
-        # from an HTML anchor tag is typically how the Link.dist_info_metadata attribute
-        # gets set.
-        if self.dist_info_metadata is None:
-            return None
-        metadata_url = f"{self.url_without_fragment}.metadata"
-        link_hash: Optional[LinkHash] = None
-        # If data-dist-info-metadata="true" is set, then the metadata file exists,
-        # but there is no information about its checksum or anything else.
-        if self.dist_info_metadata != "true":
-            link_hash = LinkHash.split_hash_name_and_value(self.dist_info_metadata)
-        return Link(metadata_url, link_hash=link_hash)
-
-    def as_hashes(self) -> Optional[Hashes]:
-        if self.link_hash is not None:
-            return self.link_hash.as_hashes()
-        return None
+    _hash_re = re.compile(
+        r"({choices})=([a-f0-9]+)".format(choices="|".join(_SUPPORTED_HASHES))
+    )
 
     @property
     def hash(self) -> Optional[str]:
-        if self.link_hash is not None:
-            return self.link_hash.value
+        match = self._hash_re.search(self._url)
+        if match:
+            return match.group(2)
         return None
 
     @property
     def hash_name(self) -> Optional[str]:
-        if self.link_hash is not None:
-            return self.link_hash.name
+        match = self._hash_re.search(self._url)
+        if match:
+            return match.group(1)
         return None
 
     @property
@@ -433,15 +210,19 @@ def is_yanked(self) -> bool:
 
     @property
     def has_hash(self) -> bool:
-        return self.link_hash is not None
+        return self.hash_name is not None
 
     def is_hash_allowed(self, hashes: Optional[Hashes]) -> bool:
         """
-        Return True if the link has a hash and it is allowed by `hashes`.
+        Return True if the link has a hash and it is allowed by `hashes`.
         """
-        if self.link_hash is None:
+        if hashes is None or not self.has_hash:
             return False
-        return self.link_hash.is_hash_allowed(hashes)
+        # Assert non-None so mypy knows self.hash_name and self.hash are str.
+        assert self.hash_name is not None
+        assert self.hash is not None
+
+        return hashes.is_hash_allowed(self.hash_name, hex_digest=self.hash)
 
 
 class _CleanResult(NamedTuple):
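
The reverted `hash`/`hash_name` properties above pull the checksum straight out of the URL fragment. A minimal standalone sketch of that parsing (the URL is made up; the regex mirrors `_hash_re`):

    import re

    _SUPPORTED_HASHES = ("sha1", "sha224", "sha384", "sha256", "sha512", "md5")
    _hash_re = re.compile(r"({})=([a-f0-9]+)".format("|".join(_SUPPORTED_HASHES)))

    # Hypothetical link with a PyPI-style "#<name>=<hexdigest>" fragment.
    url = "https://example.com/pkg-1.0.tar.gz#sha256=" + "ab" * 32
    match = _hash_re.search(url)
    if match:
        name, value = match.groups()
        print(name, value)  # sha256 abab...ab
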
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/models/search_scope.py b/venv/lib/python3.10/site-packages/pip/_internal/models/search_scope.py
index a64af73..e4e54c2 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/models/search_scope.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/models/search_scope.py
@@ -20,14 +20,13 @@ class SearchScope:
     Encapsulates the locations that pip is configured to search.
     """
 
-    __slots__ = ["find_links", "index_urls", "no_index"]
+    __slots__ = ["find_links", "index_urls"]
 
     @classmethod
     def create(
         cls,
         find_links: List[str],
         index_urls: List[str],
-        no_index: bool,
     ) -> "SearchScope":
         """
         Create a SearchScope object after normalizing the `find_links`.
@@ -61,18 +60,15 @@ def create(
         return cls(
             find_links=built_find_links,
             index_urls=index_urls,
-            no_index=no_index,
         )
 
     def __init__(
         self,
         find_links: List[str],
         index_urls: List[str],
-        no_index: bool,
     ) -> None:
         self.find_links = find_links
         self.index_urls = index_urls
-        self.no_index = no_index
 
     def get_formatted_locations(self) -> str:
         lines = []
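
With `no_index` dropped from the slots and the constructor, callers build the scope from `find_links` and `index_urls` alone. A hedged sketch under the reverted signature (this is pip-internal API; the locations are made up):

    from pip._internal.models.search_scope import SearchScope

    scope = SearchScope.create(
        find_links=["./wheels"],                 # hypothetical local wheel dir
        index_urls=["https://pypi.org/simple"],
    )
    print(scope.get_formatted_locations())
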
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/models/wheel.py b/venv/lib/python3.10/site-packages/pip/_internal/models/wheel.py
index a5dc12b..aaf218d 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/models/wheel.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/models/wheel.py
@@ -58,10 +58,7 @@ def support_index_min(self, tags: List[Tag]) -> int:
         :raises ValueError: If none of the wheel's file tags match one of
             the supported tags.
         """
-        try:
-            return next(i for i, t in enumerate(tags) if t in self.file_tags)
-        except StopIteration:
-            raise ValueError()
+        return min(tags.index(tag) for tag in self.file_tags if tag in tags)
 
     def find_most_preferred_tag(
         self, tags: List[Tag], tag_to_priority: Dict[Tag, int]
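
The rewritten `support_index_min` body is a one-liner: the smallest position in `tags` (most preferred first) occupied by one of the wheel's file tags. Illustrated below with plain strings standing in for `packaging.tags.Tag` objects:

    # Supported tags, most preferred first.
    tags = [
        "cp310-cp310-manylinux1_x86_64",
        "cp310-abi3-manylinux1_x86_64",
        "py3-none-any",
    ]
    file_tags = {"py3-none-any", "cp39-none-any"}

    # Same expression as the new body; an empty generator makes min() raise
    # ValueError, preserving the documented contract.
    print(min(tags.index(tag) for tag in file_tags if tag in tags))  # -> 2
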
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/network/cache.py b/venv/lib/python3.10/site-packages/pip/_internal/network/cache.py
index a81a239..9dba7ed 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/network/cache.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/network/cache.py
@@ -3,7 +3,7 @@
 
 import os
 from contextlib import contextmanager
-from typing import Generator, Optional
+from typing import Iterator, Optional
 
 from pip._vendor.cachecontrol.cache import BaseCache
 from pip._vendor.cachecontrol.caches import FileCache
@@ -18,7 +18,7 @@ def is_from_cache(response: Response) -> bool:
 
 
 @contextmanager
-def suppressed_cache_errors() -> Generator[None, None, None]:
+def suppressed_cache_errors() -> Iterator[None]:
     """If we can't access the cache then we can just skip caching and process
     requests as if caching wasn't enabled.
     """
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/network/download.py b/venv/lib/python3.10/site-packages/pip/_internal/network/download.py
index 79b82a5..35bc970 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/network/download.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/network/download.py
@@ -1,6 +1,6 @@
 """Download files with progress indicators.
 """
-import email.message
+import cgi
 import logging
 import mimetypes
 import os
@@ -81,13 +81,12 @@ def parse_content_disposition(content_disposition: str, default_filename: str) -
     Parse the "filename" value from a Content-Disposition header, and
     return the default filename if the result is empty.
     """
-    m = email.message.Message()
-    m["content-type"] = content_disposition
-    filename = m.get_param("filename")
+    _type, params = cgi.parse_header(content_disposition)
+    filename = params.get("filename")
     if filename:
         # We need to sanitize the filename to prevent directory traversal
         # in case the filename contains ".." path parts.
-        filename = sanitize_content_filename(str(filename))
+        filename = sanitize_content_filename(filename)
     return filename or default_filename
 
 
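
This revert swaps `email.message` back for `cgi.parse_header` (worth noting: the `cgi` module is deprecated since Python 3.11 and removed in 3.13, which this python3.10 venv predates). What the call returns for a typical header:

    import cgi

    header = 'attachment; filename="demo-1.0-py3-none-any.whl"'
    _type, params = cgi.parse_header(header)
    print(_type)               # attachment
    print(params["filename"])  # demo-1.0-py3-none-any.whl
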
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/network/lazy_wheel.py b/venv/lib/python3.10/site-packages/pip/_internal/network/lazy_wheel.py
index 854a6fa..c9e44d5 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/network/lazy_wheel.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/network/lazy_wheel.py
@@ -5,7 +5,7 @@
 from bisect import bisect_left, bisect_right
 from contextlib import contextmanager
 from tempfile import NamedTemporaryFile
-from typing import Any, Dict, Generator, List, Optional, Tuple
+from typing import Any, Dict, Iterator, List, Optional, Tuple
 from zipfile import BadZipfile, ZipFile
 
 from pip._vendor.packaging.utils import canonicalize_name
@@ -23,7 +23,7 @@ class HTTPRangeRequestUnsupported(Exception):
 def dist_from_wheel_url(name: str, url: str, session: PipSession) -> BaseDistribution:
     """Return a distribution object from the given wheel URL.
 
-    This uses HTTP range requests to only fetch the portion of the wheel
+    This uses HTTP range requests to only fetch the portion of the wheel
     containing metadata, just enough for the object to be constructed.
     If such requests are not supported, HTTPRangeRequestUnsupported
     is raised.
@@ -135,11 +135,11 @@ def __enter__(self) -> "LazyZipOverHTTP":
         self._file.__enter__()
         return self
 
-    def __exit__(self, *exc: Any) -> None:
-        self._file.__exit__(*exc)
+    def __exit__(self, *exc: Any) -> Optional[bool]:
+        return self._file.__exit__(*exc)
 
     @contextmanager
-    def _stay(self) -> Generator[None, None, None]:
+    def _stay(self) -> Iterator[None]:
         """Return a context manager keeping the position.
 
         At the end of the block, seek back to original position.
@@ -177,8 +177,8 @@ def _stream_response(
 
     def _merge(
         self, start: int, end: int, left: int, right: int
-    ) -> Generator[Tuple[int, int], None, None]:
-        """Return a generator of intervals to be fetched.
+    ) -> Iterator[Tuple[int, int]]:
+        """Return an iterator of intervals to be fetched.
 
         Args:
             start (int): Start of needed interval
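
For context on what the docstring above describes: a range request fetches only a byte slice of the remote wheel, typically its tail, where the ZIP central directory (and hence the metadata entry) can be located. A rough sketch with plain `requests` standing in for pip's vendored session (URL hypothetical):

    import requests

    url = "https://example.com/pkg-1.0-py3-none-any.whl"  # hypothetical
    head = requests.head(url, allow_redirects=True)
    if head.headers.get("Accept-Ranges") == "bytes":
        # Ask for just the final 8 KiB of the file.
        tail = requests.get(url, headers={"Range": "bytes=-8192"})
        assert tail.status_code == 206  # Partial Content
        print(f"fetched {len(tail.content)} bytes instead of the whole wheel")
    else:
        raise RuntimeError("range requests unsupported")  # cf. HTTPRangeRequestUnsupported
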
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/network/session.py b/venv/lib/python3.10/site-packages/pip/_internal/network/session.py
index e512ac7..cbe743b 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/network/session.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/network/session.py
@@ -15,23 +15,11 @@
 import sys
 import urllib.parse
 import warnings
-from typing import (
-    TYPE_CHECKING,
-    Any,
-    Dict,
-    Generator,
-    List,
-    Mapping,
-    Optional,
-    Sequence,
-    Tuple,
-    Union,
-)
+from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple, Union
 
 from pip._vendor import requests, urllib3
-from pip._vendor.cachecontrol import CacheControlAdapter as _BaseCacheControlAdapter
-from pip._vendor.requests.adapters import DEFAULT_POOLBLOCK, BaseAdapter
-from pip._vendor.requests.adapters import HTTPAdapter as _BaseHTTPAdapter
+from pip._vendor.cachecontrol import CacheControlAdapter
+from pip._vendor.requests.adapters import BaseAdapter, HTTPAdapter
 from pip._vendor.requests.models import PreparedRequest, Response
 from pip._vendor.requests.structures import CaseInsensitiveDict
 from pip._vendor.urllib3.connectionpool import ConnectionPool
@@ -49,12 +37,6 @@
 from pip._internal.utils.misc import build_url_from_netloc, parse_netloc
 from pip._internal.utils.urls import url_to_path
 
-if TYPE_CHECKING:
-    from ssl import SSLContext
-
-    from pip._vendor.urllib3.poolmanager import PoolManager
-
-
 logger = logging.getLogger(__name__)
 
 SecureOrigin = Tuple[str, str, Optional[Union[int, str]]]
@@ -251,48 +233,6 @@ def close(self) -> None:
         pass
 
 
-class _SSLContextAdapterMixin:
-    """Mixin to add the ``ssl_context`` constructor argument to HTTP adapters.
-
-    The additional argument is forwarded directly to the pool manager. This allows us
-    to dynamically decide what SSL store to use at runtime, which is used to implement
-    the optional ``truststore`` backend.
-    """
-
-    def __init__(
-        self,
-        *,
-        ssl_context: Optional["SSLContext"] = None,
-        **kwargs: Any,
-    ) -> None:
-        self._ssl_context = ssl_context
-        super().__init__(**kwargs)
-
-    def init_poolmanager(
-        self,
-        connections: int,
-        maxsize: int,
-        block: bool = DEFAULT_POOLBLOCK,
-        **pool_kwargs: Any,
-    ) -> "PoolManager":
-        if self._ssl_context is not None:
-            pool_kwargs.setdefault("ssl_context", self._ssl_context)
-        return super().init_poolmanager(  # type: ignore[misc]
-            connections=connections,
-            maxsize=maxsize,
-            block=block,
-            **pool_kwargs,
-        )
-
-
-class HTTPAdapter(_SSLContextAdapterMixin, _BaseHTTPAdapter):
-    pass
-
-
-class CacheControlAdapter(_SSLContextAdapterMixin, _BaseCacheControlAdapter):
-    pass
-
-
 class InsecureHTTPAdapter(HTTPAdapter):
     def cert_verify(
         self,
@@ -326,7 +266,6 @@ def __init__(
         cache: Optional[str] = None,
         trusted_hosts: Sequence[str] = (),
         index_urls: Optional[List[str]] = None,
-        ssl_context: Optional["SSLContext"] = None,
         **kwargs: Any,
     ) -> None:
         """
@@ -379,14 +318,13 @@ def __init__(
             secure_adapter = CacheControlAdapter(
                 cache=SafeFileCache(cache),
                 max_retries=retries,
-                ssl_context=ssl_context,
             )
             self._trusted_host_adapter = InsecureCacheControlAdapter(
                 cache=SafeFileCache(cache),
                 max_retries=retries,
             )
         else:
-            secure_adapter = HTTPAdapter(max_retries=retries, ssl_context=ssl_context)
+            secure_adapter = HTTPAdapter(max_retries=retries)
             self._trusted_host_adapter = insecure_adapter
 
         self.mount("https://", secure_adapter)
@@ -436,7 +374,7 @@ def add_trusted_host(
             # Mount wildcard ports for the same host.
             self.mount(build_url_from_netloc(host) + ":", self._trusted_host_adapter)
 
-    def iter_secure_origins(self) -> Generator[SecureOrigin, None, None]:
+    def iter_secure_origins(self) -> Iterator[SecureOrigin]:
         yield from SECURE_ORIGINS
         for host, port in self.pip_trusted_origins:
             yield ("*", host, "*" if port is None else port)
@@ -465,7 +403,7 @@ def is_secure_origin(self, location: Link) -> bool:
                 continue
 
             try:
-                addr = ipaddress.ip_address(origin_host or "")
+                addr = ipaddress.ip_address(origin_host)
                 network = ipaddress.ip_network(secure_host)
             except ValueError:
                 # We don't have both a valid address or a valid network, so
@@ -511,8 +449,6 @@ def is_secure_origin(self, location: Link) -> bool:
     def request(self, method: str, url: str, *args: Any, **kwargs: Any) -> Response:
         # Allow setting a default timeout on a session
         kwargs.setdefault("timeout", self.timeout)
-        # Allow setting a default proxies on a session
-        kwargs.setdefault("proxies", self.proxies)
 
         # Dispatch the actual request
         return super().request(method, url, *args, **kwargs)
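
Aside from the dropped `or ""` guard, the secure-origin address check works as before: parse the origin host and the configured pattern, then test network containment, treating parse failures as "not an IP literal". A standalone version of that test:

    import ipaddress

    origin_host = "192.168.1.20"    # host extracted from the download origin
    secure_host = "192.168.1.0/24"  # pattern from SECURE_ORIGINS / trusted hosts

    try:
        addr = ipaddress.ip_address(origin_host)
        network = ipaddress.ip_network(secure_host)
    except ValueError:
        print("not IP-based; fall back to hostname comparison")
    else:
        print(addr in network)  # True -> origin counts as secure
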
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/network/utils.py b/venv/lib/python3.10/site-packages/pip/_internal/network/utils.py
index 134848a..094cf1b 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/network/utils.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/network/utils.py
@@ -1,4 +1,4 @@
-from typing import Dict, Generator
+from typing import Dict, Iterator
 
 from pip._vendor.requests.models import CONTENT_CHUNK_SIZE, Response
 
@@ -56,7 +56,7 @@ def raise_for_status(resp: Response) -> None:
 
 def response_chunks(
     response: Response, chunk_size: int = CONTENT_CHUNK_SIZE
-) -> Generator[bytes, None, None]:
+) -> Iterator[bytes]:
     """Given a requests Response, provide the data chunks."""
     try:
         # Special case for urllib3.
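
Typical consumption of `response_chunks`: stream the body to disk without holding it all in memory. The sketch below uses plain `requests` and its `iter_content`, the public analogue of what this helper wraps (URL and filename are made up):

    import requests

    resp = requests.get("https://example.com/pkg-1.0.tar.gz", stream=True)
    with open("pkg-1.0.tar.gz", "wb") as f:
        for chunk in resp.iter_content(chunk_size=10240):
            f.write(chunk)  # one ~10 KiB chunk at a time
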
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/operations/build/build_tracker.py b/venv/lib/python3.10/site-packages/pip/_internal/operations/build/build_tracker.py
deleted file mode 100644
index 6621549..0000000
--- a/venv/lib/python3.10/site-packages/pip/_internal/operations/build/build_tracker.py
+++ /dev/null
@@ -1,124 +0,0 @@
-import contextlib
-import hashlib
-import logging
-import os
-from types import TracebackType
-from typing import Dict, Generator, Optional, Set, Type, Union
-
-from pip._internal.models.link import Link
-from pip._internal.req.req_install import InstallRequirement
-from pip._internal.utils.temp_dir import TempDirectory
-
-logger = logging.getLogger(__name__)
-
-
-@contextlib.contextmanager
-def update_env_context_manager(**changes: str) -> Generator[None, None, None]:
-    target = os.environ
-
-    # Save values from the target and change them.
-    non_existent_marker = object()
-    saved_values: Dict[str, Union[object, str]] = {}
-    for name, new_value in changes.items():
-        try:
-            saved_values[name] = target[name]
-        except KeyError:
-            saved_values[name] = non_existent_marker
-        target[name] = new_value
-
-    try:
-        yield
-    finally:
-        # Restore original values in the target.
-        for name, original_value in saved_values.items():
-            if original_value is non_existent_marker:
-                del target[name]
-            else:
-                assert isinstance(original_value, str)  # for mypy
-                target[name] = original_value
-
-
-@contextlib.contextmanager
-def get_build_tracker() -> Generator["BuildTracker", None, None]:
-    root = os.environ.get("PIP_BUILD_TRACKER")
-    with contextlib.ExitStack() as ctx:
-        if root is None:
-            root = ctx.enter_context(TempDirectory(kind="build-tracker")).path
-            ctx.enter_context(update_env_context_manager(PIP_BUILD_TRACKER=root))
-            logger.debug("Initialized build tracking at %s", root)
-
-        with BuildTracker(root) as tracker:
-            yield tracker
-
-
-class BuildTracker:
-    def __init__(self, root: str) -> None:
-        self._root = root
-        self._entries: Set[InstallRequirement] = set()
-        logger.debug("Created build tracker: %s", self._root)
-
-    def __enter__(self) -> "BuildTracker":
-        logger.debug("Entered build tracker: %s", self._root)
-        return self
-
-    def __exit__(
-        self,
-        exc_type: Optional[Type[BaseException]],
-        exc_val: Optional[BaseException],
-        exc_tb: Optional[TracebackType],
-    ) -> None:
-        self.cleanup()
-
-    def _entry_path(self, link: Link) -> str:
-        hashed = hashlib.sha224(link.url_without_fragment.encode()).hexdigest()
-        return os.path.join(self._root, hashed)
-
-    def add(self, req: InstallRequirement) -> None:
-        """Add an InstallRequirement to build tracking."""
-
-        assert req.link
-        # Get the file to write information about this requirement.
-        entry_path = self._entry_path(req.link)
-
-        # Try reading from the file. If it exists and can be read from, a build
-        # is already in progress, so a LookupError is raised.
-        try:
-            with open(entry_path) as fp:
-                contents = fp.read()
-        except FileNotFoundError:
-            pass
-        else:
-            message = "{} is already being built: {}".format(req.link, contents)
-            raise LookupError(message)
-
-        # If we're here, req should really not be building already.
-        assert req not in self._entries
-
-        # Start tracking this requirement.
-        with open(entry_path, "w", encoding="utf-8") as fp:
-            fp.write(str(req))
-        self._entries.add(req)
-
-        logger.debug("Added %s to build tracker %r", req, self._root)
-
-    def remove(self, req: InstallRequirement) -> None:
-        """Remove an InstallRequirement from build tracking."""
-
-        assert req.link
-        # Delete the created file and the corresponding entries.
-        os.unlink(self._entry_path(req.link))
-        self._entries.remove(req)
-
-        logger.debug("Removed %s from build tracker %r", req, self._root)
-
-    def cleanup(self) -> None:
-        for req in set(self._entries):
-            self.remove(req)
-
-        logger.debug("Removed build tracker: %r", self._root)
-
-    @contextlib.contextmanager
-    def track(self, req: InstallRequirement) -> Generator[None, None, None]:
-        self.add(req)
-        yield
-        self.remove(req)
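
The deleted `update_env_context_manager` above is the classic save/patch/restore dance over `os.environ`. A compact equivalent, to make clear what the removal gives up (`set_env` is a made-up name):

    import contextlib
    import os
    from typing import Iterator

    @contextlib.contextmanager
    def set_env(**changes: str) -> Iterator[None]:
        missing = object()  # marks variables that did not exist before
        saved = {name: os.environ.get(name, missing) for name in changes}
        os.environ.update(changes)
        try:
            yield
        finally:
            for name, old in saved.items():
                if old is missing:
                    del os.environ[name]
                else:
                    assert isinstance(old, str)  # for the type checker
                    os.environ[name] = old

    with set_env(PIP_BUILD_TRACKER="/tmp/tracker"):
        print(os.environ["PIP_BUILD_TRACKER"])
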
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/operations/freeze.py b/venv/lib/python3.10/site-packages/pip/_internal/operations/freeze.py
index 930d4c6..4565540 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/operations/freeze.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/operations/freeze.py
@@ -1,7 +1,7 @@
 import collections
 import logging
 import os
-from typing import Container, Dict, Generator, Iterable, List, NamedTuple, Optional, Set
+from typing import Container, Dict, Iterable, Iterator, List, NamedTuple, Optional, Set
 
 from pip._vendor.packaging.utils import canonicalize_name
 from pip._vendor.packaging.version import Version
@@ -31,7 +31,7 @@ def freeze(
     isolated: bool = False,
     exclude_editable: bool = False,
     skip: Container[str] = (),
-) -> Generator[str, None, None]:
+) -> Iterator[str]:
     installations: Dict[str, FrozenRequirement] = {}
 
     dists = get_environment(paths).iter_installed_distributions(
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/operations/install/legacy.py b/venv/lib/python3.10/site-packages/pip/_internal/operations/install/legacy.py
index 290967d..5b7ef90 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/operations/install/legacy.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/operations/install/legacy.py
@@ -3,11 +3,11 @@
 
 import logging
 import os
+from distutils.util import change_root
 from typing import List, Optional, Sequence
 
 from pip._internal.build_env import BuildEnvironment
 from pip._internal.exceptions import InstallationError, LegacyInstallFailure
-from pip._internal.locations.base import change_root
 from pip._internal.models.scheme import Scheme
 from pip._internal.utils.misc import ensure_dir
 from pip._internal.utils.setuptools_build import make_setuptools_install_args
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/operations/install/wheel.py b/venv/lib/python3.10/site-packages/pip/_internal/operations/install/wheel.py
index c799413..e191b13 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/operations/install/wheel.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/operations/install/wheel.py
@@ -22,7 +22,6 @@
     BinaryIO,
     Callable,
     Dict,
-    Generator,
     Iterable,
     Iterator,
     List,
@@ -224,16 +223,19 @@ def _normalized_outrows(
     )
 
 
-def _record_to_fs_path(record_path: RecordPath, lib_dir: str) -> str:
-    return os.path.join(lib_dir, record_path)
+def _record_to_fs_path(record_path: RecordPath) -> str:
+    return record_path
 
 
-def _fs_to_record_path(path: str, lib_dir: str) -> RecordPath:
-    # On Windows, do not handle relative paths if they belong to different
-    # logical disks
-    if os.path.splitdrive(path)[0].lower() == os.path.splitdrive(lib_dir)[0].lower():
-        path = os.path.relpath(path, lib_dir)
-
+def _fs_to_record_path(path: str, relative_to: Optional[str] = None) -> RecordPath:
+    if relative_to is not None:
+        # On Windows, do not handle relative paths if they belong to different
+        # logical disks
+        if (
+            os.path.splitdrive(path)[0].lower()
+            == os.path.splitdrive(relative_to)[0].lower()
+        ):
+            path = os.path.relpath(path, relative_to)
     path = path.replace(os.path.sep, "/")
     return cast("RecordPath", path)
 
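
What the reinstated `relative_to` branch produces: paths under the lib dir are relativized (only when on the same Windows drive; on POSIX `splitdrive` yields '' for both, so the branch always runs) and separators are normalized to '/'. A standalone mirror of the logic:

    import os
    from typing import Optional

    def fs_to_record_path(path: str, relative_to: Optional[str] = None) -> str:
        # Mirror of the reverted _fs_to_record_path, minus the RecordPath cast.
        if relative_to is not None:
            same_drive = (
                os.path.splitdrive(path)[0].lower()
                == os.path.splitdrive(relative_to)[0].lower()
            )
            if same_drive:
                path = os.path.relpath(path, relative_to)
        return path.replace(os.path.sep, "/")

    print(fs_to_record_path("/venv/lib/site-packages/pkg/mod.py", "/venv/lib/site-packages"))
    # -> pkg/mod.py
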
@@ -256,7 +258,7 @@ def get_csv_rows_for_installed(
         old_record_path = cast("RecordPath", row[0])
         new_record_path = installed.pop(old_record_path, old_record_path)
         if new_record_path in changed:
-            digest, length = rehash(_record_to_fs_path(new_record_path, lib_dir))
+            digest, length = rehash(_record_to_fs_path(new_record_path))
         else:
             digest = row[1] if len(row) > 1 else ""
             length = row[2] if len(row) > 2 else ""
@@ -325,7 +327,7 @@ def get_console_script_specs(console: Dict[str, str]) -> List[str]:
 
         scripts_to_generate.append(f"pip{get_major_minor_version()} = {pip_script}")
         # Delete any other versioned pip entry points
-        pip_ep = [k for k in console if re.match(r"pip(\d+(\.\d+)?)?$", k)]
+        pip_ep = [k for k in console if re.match(r"pip(\d+(\.\d+)?)?$", k)]
         for k in pip_ep:
             del console[k]
     easy_install_script = console.pop("easy_install", None)
@@ -340,7 +342,7 @@ def get_console_script_specs(console: Dict[str, str]) -> List[str]:
         )
         # Delete any other versioned easy_install entry points
         easy_install_ep = [
-            k for k in console if re.match(r"easy_install(-\d+\.\d+)?$", k)
+            k for k in console if re.match(r"easy_install(-\d+\.\d+)?$", k)
         ]
         for k in easy_install_ep:
             del console[k]
@@ -420,9 +422,7 @@ def _raise_for_invalid_entrypoint(specification: str) -> None:
 
 
 class PipScriptMaker(ScriptMaker):
-    def make(
-        self, specification: str, options: Optional[Dict[str, Any]] = None
-    ) -> List[str]:
+    def make(self, specification: str, options: Optional[Dict[str, Any]] = None) -> List[str]:
         _raise_for_invalid_entrypoint(specification)
         return super().make(specification, options)
 
@@ -474,7 +474,7 @@ def record_installed(
         newpath = _fs_to_record_path(destfile, lib_dir)
         installed[srcfile] = newpath
         if modified:
-            changed.add(newpath)
+            changed.add(_fs_to_record_path(destfile))
 
     def is_dir_path(path: RecordPath) -> bool:
         return path.endswith("/")
@@ -589,7 +589,7 @@ def is_entrypoint_wrapper(file: "File") -> bool:
         file.save()
         record_installed(file.src_record_path, file.dest_path, file.changed)
 
-    def pyc_source_file_paths() -> Generator[str, None, None]:
+    def pyc_source_file_paths() -> Iterator[str]:
         # We de-duplicate installation paths, since there can be overlap (e.g.
         # file in .data maps to same location as file in wheel root).
         # Sorting installation paths makes it easier to reproduce and debug
@@ -656,7 +656,7 @@ def pyc_output_path(path: str) -> str:
     generated_file_mode = 0o666 & ~current_umask()
 
     @contextlib.contextmanager
-    def _generate_file(path: str, **kwargs: Any) -> Generator[BinaryIO, None, None]:
+    def _generate_file(path: str, **kwargs: Any) -> Iterator[BinaryIO]:
         with adjacent_tmp_file(path, **kwargs) as f:
             yield f
         os.chmod(f.name, generated_file_mode)
@@ -706,7 +706,7 @@ def _generate_file(path: str, **kwargs: Any) -> Generator[BinaryIO, None, None]:
 
 
 @contextlib.contextmanager
-def req_error_context(req_description: str) -> Generator[None, None, None]:
+def req_error_context(req_description: str) -> Iterator[None]:
     try:
         yield
     except InstallationError as e:
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/operations/prepare.py b/venv/lib/python3.10/site-packages/pip/_internal/operations/prepare.py
index 4bf414c..a726f03 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/operations/prepare.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/operations/prepare.py
@@ -19,14 +19,12 @@
     HashMismatch,
     HashUnpinned,
     InstallationError,
-    MetadataInconsistent,
     NetworkConnectionError,
     PreviousBuildDirError,
     VcsHashUnsupported,
 )
 from pip._internal.index.package_finder import PackageFinder
-from pip._internal.metadata import BaseDistribution, get_metadata_distribution
-from pip._internal.models.direct_url import ArchiveInfo
+from pip._internal.metadata import BaseDistribution
 from pip._internal.models.link import Link
 from pip._internal.models.wheel import Wheel
 from pip._internal.network.download import BatchDownloader, Downloader
@@ -35,20 +33,12 @@
     dist_from_wheel_url,
 )
 from pip._internal.network.session import PipSession
-from pip._internal.operations.build.build_tracker import BuildTracker
 from pip._internal.req.req_install import InstallRequirement
-from pip._internal.utils.direct_url_helpers import (
-    direct_url_for_editable,
-    direct_url_from_link,
-)
+from pip._internal.req.req_tracker import RequirementTracker
+from pip._internal.utils.filesystem import copy2_fixed
 from pip._internal.utils.hashes import Hashes, MissingHashes
 from pip._internal.utils.logging import indent_log
-from pip._internal.utils.misc import (
-    display_path,
-    hash_file,
-    hide_url,
-    is_installable_dir,
-)
+from pip._internal.utils.misc import display_path, hide_url, is_installable_dir, rmtree
 from pip._internal.utils.temp_dir import TempDirectory
 from pip._internal.utils.unpacking import unpack_file
 from pip._internal.vcs import vcs
@@ -58,17 +48,14 @@
 
 def _get_prepared_distribution(
     req: InstallRequirement,
-    build_tracker: BuildTracker,
+    req_tracker: RequirementTracker,
     finder: PackageFinder,
     build_isolation: bool,
-    check_build_deps: bool,
 ) -> BaseDistribution:
     """Prepare a distribution for installation."""
     abstract_dist = make_distribution_for_install_requirement(req)
-    with build_tracker.track(req):
-        abstract_dist.prepare_distribution_metadata(
-            finder, build_isolation, check_build_deps
-        )
+    with req_tracker.track(req):
+        abstract_dist.prepare_distribution_metadata(finder, build_isolation)
     return abstract_dist.get_metadata_distribution()
 
 
@@ -111,6 +98,55 @@ def get_http_url(
     return File(from_path, content_type)
 
 
+def _copy2_ignoring_special_files(src: str, dest: str) -> None:
+    """Copying special files is not supported, but as a convenience to users
+    we skip errors copying them. This supports tools that may create e.g.
+    socket files in the project source directory.
+    """
+    try:
+        copy2_fixed(src, dest)
+    except shutil.SpecialFileError as e:
+        # SpecialFileError may be raised due to either the source or
+        # destination. If the destination was the cause then we would actually
+        # care, but since the destination directory is deleted prior to
+        # copy we ignore all of them assuming it is caused by the source.
+        logger.warning(
+            "Ignoring special file error '%s' encountered copying %s to %s.",
+            str(e),
+            src,
+            dest,
+        )
+
+
+def _copy_source_tree(source: str, target: str) -> None:
+    target_abspath = os.path.abspath(target)
+    target_basename = os.path.basename(target_abspath)
+    target_dirname = os.path.dirname(target_abspath)
+
+    def ignore(d: str, names: List[str]) -> List[str]:
+        skipped: List[str] = []
+        if d == source:
+            # Pulling in those directories can potentially be very slow,
+            # exclude the following directories if they appear in the top
+            # level dir (and only it).
+            # See discussion at https://github.com/pypa/pip/pull/6770
+            skipped += [".tox", ".nox"]
+        if os.path.abspath(d) == target_dirname:
+            # Prevent an infinite recursion if the target is in source.
+            # This can happen when TMPDIR is set to ${PWD}/...
+            # and we copy PWD to TMPDIR.
+            skipped += [target_basename]
+        return skipped
+
+    shutil.copytree(
+        source,
+        target,
+        ignore=ignore,
+        symlinks=True,
+        copy_function=_copy2_ignoring_special_files,
+    )
+
+
 def get_file_url(
     link: Link, download_dir: Optional[str] = None, hashes: Optional[Hashes] = None
 ) -> File:
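
How the `ignore` callable above cooperates with `shutil.copytree`: it is invoked once per visited directory with that directory's path and entry names, and whatever names it returns are skipped. A reduced sketch of the same top-level-only exclusion (`project` is a made-up tree):

    import shutil
    from typing import List

    SOURCE = "project"  # hypothetical source directory

    def ignore(d: str, names: List[str]) -> List[str]:
        # Exclude tool caches, but only when listing the top-level directory,
        # matching the .tox/.nox handling in _copy_source_tree.
        if d == SOURCE:
            return [name for name in (".tox", ".nox") if name in names]
        return []

    shutil.copytree(SOURCE, "build/project", symlinks=True, ignore=ignore)
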
@@ -155,7 +191,19 @@ def unpack_url(
         unpack_vcs_link(link, location, verbosity=verbosity)
         return None
 
-    assert not link.is_existing_dir()
+    # Once out-of-tree builds are no longer supported, the condition below
+    # could potentially be replaced with `assert not link.is_existing_dir()`
+    # - unpack_url does not need to be called for in-tree builds.
+    #
+    # As further cleanup, _copy_source_tree and its accompanying tests can
+    # be removed.
+    #
+    # TODO: do this when use-deprecated=out-of-tree-build is removed.
+    if link.is_existing_dir():
+        if os.path.isdir(location):
+            rmtree(location)
+        _copy_source_tree(link.file_path, location)
+        return None
 
     # file urls
     if link.is_file:
@@ -213,8 +261,7 @@ def __init__(
         download_dir: Optional[str],
         src_dir: str,
         build_isolation: bool,
-        check_build_deps: bool,
-        build_tracker: BuildTracker,
+        req_tracker: RequirementTracker,
         session: PipSession,
         progress_bar: str,
         finder: PackageFinder,
@@ -222,12 +269,13 @@ def __init__(
         use_user_site: bool,
         lazy_wheel: bool,
         verbosity: int,
+        in_tree_build: bool,
     ) -> None:
         super().__init__()
 
         self.src_dir = src_dir
         self.build_dir = build_dir
-        self.build_tracker = build_tracker
+        self.req_tracker = req_tracker
         self._session = session
         self._download = Downloader(session, progress_bar)
         self._batch_download = BatchDownloader(session, progress_bar)
@@ -240,9 +288,6 @@ def __init__(
         # Is build isolation allowed?
         self.build_isolation = build_isolation
 
-        # Should check build dependencies?
-        self.check_build_deps = check_build_deps
-
         # Should hash-checking be required?
         self.require_hashes = require_hashes
 
@@ -255,6 +300,9 @@ def __init__(
         # How verbose should underlying tooling be?
         self.verbosity = verbosity
 
+        # Should in-tree builds be used for local paths?
+        self.in_tree_build = in_tree_build
+
         # Memoized downloaded files, as mapping of url: path.
         self._downloaded: Dict[str, str] = {}
 
@@ -288,7 +336,7 @@ def _ensure_link_req_src_dir(
             # directory.
             return
         assert req.source_dir is None
-        if req.link.is_existing_dir():
+        if req.link.is_existing_dir() and self.in_tree_build:
             # build local directories in-tree
             req.source_dir = req.link.file_path
             return
@@ -347,72 +395,19 @@ def _get_linked_req_hashes(self, req: InstallRequirement) -> Hashes:
         # showing the user what the hash should be.
         return req.hashes(trust_internet=False) or MissingHashes()
 
-    def _fetch_metadata_only(
-        self,
-        req: InstallRequirement,
-    ) -> Optional[BaseDistribution]:
-        if self.require_hashes:
-            logger.debug(
-                "Metadata-only fetching is not used as hash checking is required",
-            )
-            return None
-        # Try PEP 658 metadata first, then fall back to lazy wheel if unavailable.
-        return self._fetch_metadata_using_link_data_attr(
-            req
-        ) or self._fetch_metadata_using_lazy_wheel(req.link)
-
-    def _fetch_metadata_using_link_data_attr(
-        self,
-        req: InstallRequirement,
-    ) -> Optional[BaseDistribution]:
-        """Fetch metadata from the data-dist-info-metadata attribute, if possible."""
-        # (1) Get the link to the metadata file, if provided by the backend.
-        metadata_link = req.link.metadata_link()
-        if metadata_link is None:
-            return None
-        assert req.req is not None
-        logger.info(
-            "Obtaining dependency information for %s from %s",
-            req.req,
-            metadata_link,
-        )
-        # (2) Download the contents of the METADATA file, separate from the dist itself.
-        metadata_file = get_http_url(
-            metadata_link,
-            self._download,
-            hashes=metadata_link.as_hashes(),
-        )
-        with open(metadata_file.path, "rb") as f:
-            metadata_contents = f.read()
-        # (3) Generate a dist just from those file contents.
-        metadata_dist = get_metadata_distribution(
-            metadata_contents,
-            req.link.filename,
-            req.req.name,
-        )
-        # (4) Ensure the Name: field from the METADATA file matches the name from the
-        #     install requirement.
-        #
-        #     NB: raw_name will fall back to the name from the install requirement if
-        #     the Name: field is not present, but it's noted in the raw_name docstring
-        #     that that should NEVER happen anyway.
-        if metadata_dist.raw_name != req.req.name:
-            raise MetadataInconsistent(
-                req, "Name", req.req.name, metadata_dist.raw_name
-            )
-        return metadata_dist
-
     def _fetch_metadata_using_lazy_wheel(
         self,
         link: Link,
     ) -> Optional[BaseDistribution]:
         """Fetch metadata using lazy wheel, if possible."""
-        # --use-feature=fast-deps must be provided.
         if not self.use_lazy_wheel:
             return None
+        if self.require_hashes:
+            logger.debug("Lazy wheel is not used as hash checking is required")
+            return None
         if link.is_file or not link.is_wheel:
             logger.debug(
-                "Lazy wheel is not used as %r does not point to a remote wheel",
+                "Lazy wheel is not used as %r does not points to a remote wheel",
                 link,
             )
             return None
@@ -468,12 +463,13 @@ def prepare_linked_requirement(
     ) -> BaseDistribution:
         """Prepare a requirement to be obtained from req.link."""
         assert req.link
+        link = req.link
         self._log_preparing_link(req)
         with indent_log():
             # Check if the relevant file is already available
             # in the download directory
             file_path = None
-            if self.download_dir is not None and req.link.is_wheel:
+            if self.download_dir is not None and link.is_wheel:
                 hashes = self._get_linked_req_hashes(req)
                 file_path = _check_download_dir(req.link, self.download_dir, hashes)
 
@@ -482,10 +478,10 @@ def prepare_linked_requirement(
                 self._downloaded[req.link.url] = file_path
             else:
                 # The file is not available, attempt to fetch only metadata
-                metadata_dist = self._fetch_metadata_only(req)
-                if metadata_dist is not None:
+                wheel_dist = self._fetch_metadata_using_lazy_wheel(link)
+                if wheel_dist is not None:
                     req.needs_more_preparation = True
-                    return metadata_dist
+                    return wheel_dist
 
             # None of the optimizations worked, fully prepare the requirement
             return self._prepare_linked_requirement(req, parallel_builds)
@@ -529,7 +525,7 @@ def _prepare_linked_requirement(
         self._ensure_link_req_src_dir(req, parallel_builds)
         hashes = self._get_linked_req_hashes(req)
 
-        if link.is_existing_dir():
+        if link.is_existing_dir() and self.in_tree_build:
             local_file = None
         elif link.url not in self._downloaded:
             try:
@@ -552,23 +548,6 @@ def _prepare_linked_requirement(
                 hashes.check_against_path(file_path)
             local_file = File(file_path, content_type=None)
 
-        # If download_info is set, we got it from the wheel cache.
-        if req.download_info is None:
-            # Editables don't go through this function (see
-            # prepare_editable_requirement).
-            assert not req.editable
-            req.download_info = direct_url_from_link(link, req.source_dir)
-            # Make sure we have a hash in download_info. If we got it as part of the
-            # URL, it will have been verified and we can rely on it. Otherwise we
-            # compute it from the downloaded file.
-            if (
-                isinstance(req.download_info.info, ArchiveInfo)
-                and not req.download_info.info.hash
-                and local_file
-            ):
-                hash = hash_file(local_file.path)[0].hexdigest()
-                req.download_info.info.hash = f"sha256={hash}"
-
         # For use in later processing,
         # preserve the file path on the requirement.
         if local_file:
@@ -576,10 +555,9 @@ def _prepare_linked_requirement(
 
         dist = _get_prepared_distribution(
             req,
-            self.build_tracker,
+            self.req_tracker,
             self.finder,
             self.build_isolation,
-            self.check_build_deps,
         )
         return dist
 
@@ -627,15 +605,12 @@ def prepare_editable_requirement(
                 )
             req.ensure_has_source_dir(self.src_dir)
             req.update_editable()
-            assert req.source_dir
-            req.download_info = direct_url_for_editable(req.unpacked_source_directory)
 
             dist = _get_prepared_distribution(
                 req,
-                self.build_tracker,
+                self.req_tracker,
                 self.finder,
                 self.build_isolation,
-                self.check_build_deps,
             )
 
             req.check_if_exists(self.use_user_site)
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/pyproject.py b/venv/lib/python3.10/site-packages/pip/_internal/pyproject.py
index 1e9119f..e183eaf 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/pyproject.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/pyproject.py
@@ -1,4 +1,3 @@
-import importlib.util
 import os
 from collections import namedtuple
 from typing import Any, List, Optional
@@ -90,15 +89,9 @@ def load_pyproject_toml(
 
     # If we haven't worked out whether to use PEP 517 yet,
     # and the user hasn't explicitly stated a preference,
-    # we do so if the project has a pyproject.toml file
-    # or if we cannot import setuptools.
-
-    # We fallback to PEP 517 when without setuptools,
-    # so setuptools can be installed as a default build backend.
-    # For more info see:
-    # https://discuss.python.org/t/pip-without-setuptools-could-the-experience-be-improved/11810/9
+    # we do so if the project has a pyproject.toml file.
     elif use_pep517 is None:
-        use_pep517 = has_pyproject or not importlib.util.find_spec("setuptools")
+        use_pep517 = has_pyproject
 
     # At this point, we know whether we're going to use PEP 517.
     assert use_pep517 is not None
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/req/__init__.py b/venv/lib/python3.10/site-packages/pip/_internal/req/__init__.py
index 8d56359..70dea27 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/req/__init__.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/req/__init__.py
@@ -1,6 +1,6 @@
 import collections
 import logging
-from typing import Generator, List, Optional, Sequence, Tuple
+from typing import Iterator, List, Optional, Sequence, Tuple
 
 from pip._internal.utils.logging import indent_log
 
@@ -28,7 +28,7 @@ def __repr__(self) -> str:
 
 def _validate_requirements(
     requirements: List[InstallRequirement],
-) -> Generator[Tuple[str, InstallRequirement], None, None]:
+) -> Iterator[Tuple[str, InstallRequirement]]:
     for req in requirements:
         assert req.name, f"invalid to-be-installed requirement: {req}"
         yield req.name, req
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/req/constructors.py b/venv/lib/python3.10/site-packages/pip/_internal/req/constructors.py
index dea7c3b..25bfb39 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/req/constructors.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/req/constructors.py
@@ -207,7 +207,6 @@ def install_req_from_editable(
     constraint: bool = False,
     user_supplied: bool = False,
     permit_editable_wheels: bool = False,
-    config_settings: Optional[Dict[str, str]] = None,
 ) -> InstallRequirement:
 
     parts = parse_req_from_editable(editable_req)
@@ -225,7 +224,6 @@ def install_req_from_editable(
         install_options=options.get("install_options", []) if options else [],
         global_options=options.get("global_options", []) if options else [],
         hash_options=options.get("hashes", {}) if options else {},
-        config_settings=config_settings,
         extras=parts.extras,
     )
 
@@ -382,7 +380,6 @@ def install_req_from_line(
     constraint: bool = False,
     line_source: Optional[str] = None,
     user_supplied: bool = False,
-    config_settings: Optional[Dict[str, str]] = None,
 ) -> InstallRequirement:
     """Creates an InstallRequirement from a name, which might be a
     requirement, directory containing 'setup.py', filename, or URL.
@@ -402,7 +399,6 @@ def install_req_from_line(
         install_options=options.get("install_options", []) if options else [],
         global_options=options.get("global_options", []) if options else [],
         hash_options=options.get("hashes", {}) if options else {},
-        config_settings=config_settings,
         constraint=constraint,
         extras=parts.extras,
         user_supplied=user_supplied,
@@ -415,7 +411,6 @@ def install_req_from_req_string(
     isolated: bool = False,
     use_pep517: Optional[bool] = None,
     user_supplied: bool = False,
-    config_settings: Optional[Dict[str, str]] = None,
 ) -> InstallRequirement:
     try:
         req = get_requirement(req_string)
@@ -445,7 +440,6 @@ def install_req_from_req_string(
         isolated=isolated,
         use_pep517=use_pep517,
         user_supplied=user_supplied,
-        config_settings=config_settings,
     )
 
 
@@ -454,7 +448,6 @@ def install_req_from_parsed_requirement(
     isolated: bool = False,
     use_pep517: Optional[bool] = None,
     user_supplied: bool = False,
-    config_settings: Optional[Dict[str, str]] = None,
 ) -> InstallRequirement:
     if parsed_req.is_editable:
         req = install_req_from_editable(
@@ -464,7 +457,6 @@ def install_req_from_parsed_requirement(
             constraint=parsed_req.constraint,
             isolated=isolated,
             user_supplied=user_supplied,
-            config_settings=config_settings,
         )
 
     else:
@@ -477,7 +469,6 @@ def install_req_from_parsed_requirement(
             constraint=parsed_req.constraint,
             line_source=parsed_req.line_source,
             user_supplied=user_supplied,
-            config_settings=config_settings,
         )
     return req
 
@@ -496,6 +487,4 @@ def install_req_from_link_and_ireq(
         install_options=ireq.install_options,
         global_options=ireq.global_options,
         hash_options=ireq.hash_options,
-        config_settings=ireq.config_settings,
-        user_supplied=ireq.user_supplied,
     )
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/req/req_file.py b/venv/lib/python3.10/site-packages/pip/_internal/req/req_file.py
index 11ec699..03ae504 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/req/req_file.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/req/req_file.py
@@ -13,8 +13,8 @@
     Any,
     Callable,
     Dict,
-    Generator,
     Iterable,
+    Iterator,
     List,
     Optional,
     Tuple,
@@ -129,7 +129,7 @@ def parse_requirements(
     finder: Optional["PackageFinder"] = None,
     options: Optional[optparse.Values] = None,
     constraint: bool = False,
-) -> Generator[ParsedRequirement, None, None]:
+) -> Iterator[ParsedRequirement]:
     """Parse a requirements file and yield ParsedRequirement instances.
 
     :param filename:    Path or url of requirements file.
@@ -186,6 +186,10 @@ def handle_requirement_line(
             constraint=line.constraint,
         )
     else:
+        if options:
+            # Disable wheels if the user has specified build options
+            cmdoptions.check_install_build_global(options, line.opts)
+
         # get the options that apply to requirements
         req_options = {}
         for dest in SUPPORTED_OPTIONS_REQ_DEST:
@@ -225,13 +229,11 @@ def handle_option_line(
     if finder:
         find_links = finder.find_links
         index_urls = finder.index_urls
-        no_index = finder.search_scope.no_index
+        if opts.index_url:
+            index_urls = [opts.index_url]
         if opts.no_index is True:
-            no_index = True
             index_urls = []
-        if opts.index_url and not no_index:
-            index_urls = [opts.index_url]
-        if opts.extra_index_urls and not no_index:
+        if opts.extra_index_urls:
             index_urls.extend(opts.extra_index_urls)
         if opts.find_links:
             # FIXME: it would be nice to keep track of the source
@@ -251,7 +253,6 @@ def handle_option_line(
         search_scope = SearchScope(
             find_links=find_links,
             index_urls=index_urls,
-            no_index=no_index,
         )
         finder.search_scope = search_scope
 
@@ -320,15 +321,13 @@ def __init__(
         self._session = session
         self._line_parser = line_parser
 
-    def parse(
-        self, filename: str, constraint: bool
-    ) -> Generator[ParsedLine, None, None]:
+    def parse(self, filename: str, constraint: bool) -> Iterator[ParsedLine]:
         """Parse a given file, yielding parsed lines."""
         yield from self._parse_and_recurse(filename, constraint)
 
     def _parse_and_recurse(
         self, filename: str, constraint: bool
-    ) -> Generator[ParsedLine, None, None]:
+    ) -> Iterator[ParsedLine]:
         for line in self._parse_file(filename, constraint):
             if not line.is_requirement and (
                 line.opts.requirements or line.opts.constraints
@@ -357,9 +356,7 @@ def _parse_and_recurse(
             else:
                 yield line
 
-    def _parse_file(
-        self, filename: str, constraint: bool
-    ) -> Generator[ParsedLine, None, None]:
+    def _parse_file(self, filename: str, constraint: bool) -> Iterator[ParsedLine]:
         _, content = get_file_content(filename, self._session)
 
         lines_enum = preprocess(content)
@@ -393,12 +390,7 @@ def parse_line(line: str) -> Tuple[str, Values]:
 
         args_str, options_str = break_args_options(line)
 
-        try:
-            options = shlex.split(options_str)
-        except ValueError as e:
-            raise OptionParsingError(f"Could not split options: {options_str}") from e
-
-        opts, _ = parser.parse_args(options, defaults)
+        opts, _ = parser.parse_args(shlex.split(options_str), defaults)
 
         return args_str, opts
 
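
On the `parse_line` change: the reverted version feeds the options half of a requirements line straight to `shlex.split`, so an unbalanced quote now surfaces as a bare ValueError rather than the wrapped OptionParsingError. How the split behaves (the one-token split below is a made-up stand-in for `break_args_options`):

    import shlex

    line = 'requests>=2.0 --hash=sha256:abc123 --global-option="--no-user-cfg"'
    args_str, _, options_str = line.partition(" ")
    print(args_str)                  # requests>=2.0
    print(shlex.split(options_str))  # ['--hash=sha256:abc123', '--global-option=--no-user-cfg']

    # shlex.split('--global-option="unclosed') -> ValueError: No closing quotation
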
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/req/req_install.py b/venv/lib/python3.10/site-packages/pip/_internal/req/req_install.py
index 5f29261..02dbda1 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/req/req_install.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/req/req_install.py
@@ -8,8 +8,6 @@
 import sys
 import uuid
 import zipfile
-from enum import Enum
-from optparse import Values
 from typing import Any, Collection, Dict, Iterable, List, Optional, Sequence, Union
 
 from pip._vendor.packaging.markers import Marker
@@ -27,10 +25,7 @@
     BaseDistribution,
     get_default_environment,
     get_directory_distribution,
-    get_wheel_distribution,
 )
-from pip._internal.metadata.base import FilesystemWheel
-from pip._internal.models.direct_url import DirectUrl
 from pip._internal.models.link import Link
 from pip._internal.operations.build.metadata import generate_metadata
 from pip._internal.operations.build.metadata_editable import generate_editable_metadata
@@ -44,14 +39,13 @@
 from pip._internal.operations.install.wheel import install_wheel
 from pip._internal.pyproject import load_pyproject_toml, make_pyproject_path
 from pip._internal.req.req_uninstall import UninstallPathSet
-from pip._internal.utils.deprecation import LegacyInstallReason, deprecated
+from pip._internal.utils.deprecation import deprecated
 from pip._internal.utils.direct_url_helpers import (
     direct_url_for_editable,
     direct_url_from_link,
 )
 from pip._internal.utils.hashes import Hashes
 from pip._internal.utils.misc import (
-    ConfiguredPep517HookCaller,
     ask_path_exists,
     backup_dir,
     display_path,
@@ -86,7 +80,6 @@ def __init__(
         install_options: Optional[List[str]] = None,
         global_options: Optional[List[str]] = None,
         hash_options: Optional[Dict[str, List[str]]] = None,
-        config_settings: Optional[Dict[str, str]] = None,
         constraint: bool = False,
         extras: Collection[str] = (),
         user_supplied: bool = False,
@@ -98,7 +91,7 @@ def __init__(
         self.constraint = constraint
         self.editable = editable
         self.permit_editable_wheels = permit_editable_wheels
-        self.legacy_install_reason: Optional[LegacyInstallReason] = None
+        self.legacy_install_reason: Optional[int] = None
 
         # source_dir is the local directory where the linked requirement is
         # located, or unpacked. In case unpacking is needed, creating and
@@ -117,10 +110,6 @@ def __init__(
         self.link = self.original_link = link
         self.original_link_is_in_wheel_cache = False
 
-        # Information about the location of the artifact that was downloaded. This
-        # property is guaranteed to be set in resolver results.
-        self.download_info: Optional[DirectUrl] = None
-
         # Path to any downloaded or already-existing package.
         self.local_file_path: Optional[str] = None
         if self.link and self.link.is_file:
@@ -149,7 +138,6 @@ def __init__(
         self.install_options = install_options if install_options else []
         self.global_options = global_options if global_options else []
         self.hash_options = hash_options if hash_options else {}
-        self.config_settings = config_settings
         # Set to True after successful preparation of this requirement
         self.prepared = False
         # User supplied requirement are explicitly requested for installation
@@ -482,8 +470,7 @@ def load_pyproject_toml(self) -> None:
         requires, backend, check, backend_path = pyproject_toml_data
         self.requirements_to_check = check
         self.pyproject_requires = requires
-        self.pep517_backend = ConfiguredPep517HookCaller(
-            self,
+        self.pep517_backend = Pep517HookCaller(
             self.unpacked_source_directory,
             backend,
             backend_path=backend_path,
@@ -562,16 +549,7 @@ def metadata(self) -> Any:
         return self._metadata
 
     def get_dist(self) -> BaseDistribution:
-        if self.metadata_directory:
-            return get_directory_distribution(self.metadata_directory)
-        elif self.local_file_path and self.is_wheel:
-            return get_wheel_distribution(
-                FilesystemWheel(self.local_file_path), canonicalize_name(self.name)
-            )
-        raise AssertionError(
-            f"InstallRequirement {self} has no metadata directory and no wheel: "
-            f"can't make a distribution."
-        )
+        return get_directory_distribution(self.metadata_directory)
 
     def assert_source_matches_version(self) -> None:
         assert self.source_dir
@@ -780,7 +758,6 @@ def install(
         if self.is_wheel:
             assert self.local_file_path
             direct_url = None
-            # TODO this can be refactored to direct_url = self.download_info
             if self.editable:
                 direct_url = direct_url_for_editable(self.unpacked_source_directory)
             elif self.original_link:
@@ -813,11 +790,6 @@ def install(
         install_options = list(install_options) + self.install_options
 
         try:
-            if (
-                self.legacy_install_reason is not None
-                and self.legacy_install_reason.emit_before_install
-            ):
-                self.legacy_install_reason.emit_deprecation(self.name)
             success = install_legacy(
                 install_options=install_options,
                 global_options=global_options,
@@ -843,12 +815,18 @@ def install(
 
         self.install_succeeded = success
 
-        if (
-            success
-            and self.legacy_install_reason is not None
-            and self.legacy_install_reason.emit_after_success
-        ):
-            self.legacy_install_reason.emit_deprecation(self.name)
+        if success and self.legacy_install_reason == 8368:
+            deprecated(
+                reason=(
+                    "{} was installed using the legacy 'setup.py install' "
+                    "method, because a wheel could not be built for it.".format(
+                        self.name
+                    )
+                ),
+                replacement="to fix the wheel build issue reported above",
+                gone_in=None,
+                issue=8368,
+            )
 
 
 def check_invalid_constraint_type(req: InstallRequirement) -> str:
@@ -878,65 +856,3 @@ def check_invalid_constraint_type(req: InstallRequirement) -> str:
         )
 
     return problem
-
-
-def _has_option(options: Values, reqs: List[InstallRequirement], option: str) -> bool:
-    if getattr(options, option, None):
-        return True
-    for req in reqs:
-        if getattr(req, option, None):
-            return True
-    return False
-
-
-def _install_option_ignored(
-    install_options: List[str], reqs: List[InstallRequirement]
-) -> bool:
-    for req in reqs:
-        if (install_options or req.install_options) and not req.use_pep517:
-            return False
-    return True
-
-
-class LegacySetupPyOptionsCheckMode(Enum):
-    INSTALL = 1
-    WHEEL = 2
-    DOWNLOAD = 3
-
-
-def check_legacy_setup_py_options(
-    options: Values,
-    reqs: List[InstallRequirement],
-    mode: LegacySetupPyOptionsCheckMode,
-) -> None:
-    has_install_options = _has_option(options, reqs, "install_options")
-    has_build_options = _has_option(options, reqs, "build_options")
-    has_global_options = _has_option(options, reqs, "global_options")
-    legacy_setup_py_options_present = (
-        has_install_options or has_build_options or has_global_options
-    )
-    if not legacy_setup_py_options_present:
-        return
-
-    options.format_control.disallow_binaries()
-    logger.warning(
-        "Implying --no-binary=:all: due to the presence of "
-        "--build-option / --global-option / --install-option. "
-        "Consider using --config-settings for more flexibility.",
-    )
-    if mode == LegacySetupPyOptionsCheckMode.INSTALL and has_install_options:
-        if _install_option_ignored(options.install_options, reqs):
-            logger.warning(
-                "Ignoring --install-option when building using PEP 517",
-            )
-        else:
-            deprecated(
-                reason=(
-                    "--install-option is deprecated because "
-                    "it forces pip to use the 'setup.py install' "
-                    "command which is itself deprecated."
-                ),
-                issue=11358,
-                replacement="to use --config-settings",
-                gone_in="23.1",
-            )
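
The get_dist() variant removed in this file dispatched on where metadata can come from: a prepared metadata directory, or the wheel archive itself, asserting when neither exists. The shape of that dispatch, with stand-in functions (hypothetical, for illustration):

from typing import Optional

def get_dist(metadata_directory: Optional[str],
             local_file_path: Optional[str], is_wheel: bool) -> str:
    # Stand-ins for get_directory_distribution / get_wheel_distribution.
    if metadata_directory:
        return f"dist-from-dir:{metadata_directory}"
    if local_file_path and is_wheel:
        return f"dist-from-wheel:{local_file_path}"
    raise AssertionError("no metadata directory and no wheel")

print(get_dist("pkg-1.0.dist-info", None, False))
print(get_dist(None, "pkg-1.0-py3-none-any.whl", True))
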
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/req/req_set.py b/venv/lib/python3.10/site-packages/pip/_internal/req/req_set.py
index ec7a6e0..6626c37 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/req/req_set.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/req/req_set.py
@@ -1,10 +1,13 @@
 import logging
 from collections import OrderedDict
-from typing import Dict, List
+from typing import Dict, Iterable, List, Optional, Tuple
 
 from pip._vendor.packaging.utils import canonicalize_name
 
+from pip._internal.exceptions import InstallationError
+from pip._internal.models.wheel import Wheel
 from pip._internal.req.req_install import InstallRequirement
+from pip._internal.utils import compatibility_tags
 
 logger = logging.getLogger(__name__)
 
@@ -48,6 +51,123 @@ def add_named_requirement(self, install_req: InstallRequirement) -> None:
         project_name = canonicalize_name(install_req.name)
         self.requirements[project_name] = install_req
 
+    def add_requirement(
+        self,
+        install_req: InstallRequirement,
+        parent_req_name: Optional[str] = None,
+        extras_requested: Optional[Iterable[str]] = None,
+    ) -> Tuple[List[InstallRequirement], Optional[InstallRequirement]]:
+        """Add install_req as a requirement to install.
+
+        :param parent_req_name: The name of the requirement that needed this
+            added. The name is used because when multiple unnamed requirements
+            resolve to the same name, we could otherwise end up with dependency
+            links that point outside the Requirements set. parent_req must
+            already be added. Note that None implies that this is a user
+            supplied requirement, vs an inferred one.
+        :param extras_requested: an iterable of extras used to evaluate the
+            environment markers.
+        :return: Additional requirements to scan. That is either [] if
+            the requirement is not applicable, or [install_req] if the
+            requirement is applicable and has just been added.
+        """
+        # If the markers do not match, ignore this requirement.
+        if not install_req.match_markers(extras_requested):
+            logger.info(
+                "Ignoring %s: markers '%s' don't match your environment",
+                install_req.name,
+                install_req.markers,
+            )
+            return [], None
+
+        # If the wheel is not supported, raise an error.
+        # Should check this after filtering out based on environment markers to
+        # allow specifying different wheels based on the environment/OS, in a
+        # single requirements file.
+        if install_req.link and install_req.link.is_wheel:
+            wheel = Wheel(install_req.link.filename)
+            tags = compatibility_tags.get_supported()
+            if self.check_supported_wheels and not wheel.supported(tags):
+                raise InstallationError(
+                    "{} is not a supported wheel on this platform.".format(
+                        wheel.filename
+                    )
+                )
+
+        # This next bit is really a sanity check.
+        assert (
+            not install_req.user_supplied or parent_req_name is None
+        ), "a user supplied req shouldn't have a parent"
+
+        # Unnamed requirements are scanned again and the requirement won't be
+        # added as a dependency until after scanning.
+        if not install_req.name:
+            self.add_unnamed_requirement(install_req)
+            return [install_req], None
+
+        try:
+            existing_req: Optional[InstallRequirement] = self.get_requirement(
+                install_req.name
+            )
+        except KeyError:
+            existing_req = None
+
+        has_conflicting_requirement = (
+            parent_req_name is None
+            and existing_req
+            and not existing_req.constraint
+            and existing_req.extras == install_req.extras
+            and existing_req.req
+            and install_req.req
+            and existing_req.req.specifier != install_req.req.specifier
+        )
+        if has_conflicting_requirement:
+            raise InstallationError(
+                "Double requirement given: {} (already in {}, name={!r})".format(
+                    install_req, existing_req, install_req.name
+                )
+            )
+
+        # When no existing requirement exists, add the requirement as a
+        # dependency and it will be scanned again after.
+        if not existing_req:
+            self.add_named_requirement(install_req)
+            # We'd want to rescan this requirement later
+            return [install_req], install_req
+
+        # Assume there's no need to scan, and that we've already
+        # encountered this for scanning.
+        if install_req.constraint or not existing_req.constraint:
+            return [], existing_req
+
+        does_not_satisfy_constraint = install_req.link and not (
+            existing_req.link and install_req.link.path == existing_req.link.path
+        )
+        if does_not_satisfy_constraint:
+            raise InstallationError(
+                "Could not satisfy constraints for '{}': "
+                "installation from path or url cannot be "
+                "constrained to a version".format(install_req.name)
+            )
+        # If we're now installing a constraint, mark the existing
+        # object for real installation.
+        existing_req.constraint = False
+        # If we're now installing a user supplied requirement,
+        # mark the existing object as such.
+        if install_req.user_supplied:
+            existing_req.user_supplied = True
+        existing_req.extras = tuple(
+            sorted(set(existing_req.extras) | set(install_req.extras))
+        )
+        logger.debug(
+            "Setting %s extras to: %s",
+            existing_req,
+            existing_req.extras,
+        )
+        # Return the existing requirement for addition to the parent and
+        # scanning again.
+        return [existing_req], existing_req
+
     def has_requirement(self, name: str) -> bool:
         project_name = canonicalize_name(name)
 
@@ -67,16 +187,3 @@ def get_requirement(self, name: str) -> InstallRequirement:
     @property
     def all_requirements(self) -> List[InstallRequirement]:
         return self.unnamed_requirements + list(self.requirements.values())
-
-    @property
-    def requirements_to_install(self) -> List[InstallRequirement]:
-        """Return the list of requirements that need to be installed.
-
-        TODO remove this property together with the legacy resolver, since the new
-             resolver only returns requirements that need to be installed.
-        """
-        return [
-            install_req
-            for install_req in self.all_requirements
-            if not install_req.constraint and not install_req.satisfied_by
-        ]
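
add_requirement() communicates through its return value: ([], None) when environment markers rule the requirement out, ([install_req], ...) when something new must be scanned again, and ([existing_req], existing_req) when it merged into an entry already in the set. The extras union performed in that last branch, in isolation:

existing_extras = ("security",)
incoming_extras = ("socks", "security")

merged = tuple(sorted(set(existing_extras) | set(incoming_extras)))
print(merged)  # ('security', 'socks')
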
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/req/req_uninstall.py b/venv/lib/python3.10/site-packages/pip/_internal/req/req_uninstall.py
index 15b6738..472090a 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/req/req_uninstall.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/req/req_uninstall.py
@@ -3,7 +3,7 @@
 import sys
 import sysconfig
 from importlib.util import cache_from_source
-from typing import Any, Callable, Dict, Generator, Iterable, List, Optional, Set, Tuple
+from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Set, Tuple
 
 from pip._internal.exceptions import UninstallationError
 from pip._internal.locations import get_bin_prefix, get_bin_user
@@ -17,9 +17,7 @@
 logger = getLogger(__name__)
 
 
-def _script_names(
-    bin_dir: str, script_name: str, is_gui: bool
-) -> Generator[str, None, None]:
+def _script_names(bin_dir: str, script_name: str, is_gui: bool) -> Iterator[str]:
     """Create the fully qualified name of the files created by
     {console,gui}_scripts for the given ``dist``.
     Returns the list of file names
@@ -36,11 +34,9 @@ def _script_names(
         yield f"{exe_name}-script.py"
 
 
-def _unique(
-    fn: Callable[..., Generator[Any, None, None]]
-) -> Callable[..., Generator[Any, None, None]]:
+def _unique(fn: Callable[..., Iterator[Any]]) -> Callable[..., Iterator[Any]]:
     @functools.wraps(fn)
-    def unique(*args: Any, **kw: Any) -> Generator[Any, None, None]:
+    def unique(*args: Any, **kw: Any) -> Iterator[Any]:
         seen: Set[Any] = set()
         for item in fn(*args, **kw):
             if item not in seen:
@@ -51,7 +47,7 @@ def unique(*args: Any, **kw: Any) -> Generator[Any, None, None]:
 
 
 @_unique
-def uninstallation_paths(dist: BaseDistribution) -> Generator[str, None, None]:
+def uninstallation_paths(dist: BaseDistribution) -> Iterator[str]:
     """
     Yield all the uninstallation paths for dist based on RECORD-without-.py[co]
 
@@ -531,10 +527,7 @@ def from_dist(cls, dist: BaseDistribution) -> "UninstallPathSet":
             # above, so this only covers the setuptools-style editable.
             with open(develop_egg_link) as fh:
                 link_pointer = os.path.normcase(fh.readline().strip())
-                normalized_link_pointer = normalize_path(link_pointer)
-            assert os.path.samefile(
-                normalized_link_pointer, normalized_dist_location
-            ), (
+            assert link_pointer == dist_location, (
                 f"Egg-link {link_pointer} does not match installed location of "
                 f"{dist.raw_name} (at {dist_location})"
             )
@@ -558,10 +551,10 @@ def from_dist(cls, dist: BaseDistribution) -> "UninstallPathSet":
 
         # find distutils scripts= scripts
         try:
-            for script in dist.iter_distutils_script_names():
-                paths_to_remove.add(os.path.join(bin_dir, script))
+            for script in dist.iterdir("scripts"):
+                paths_to_remove.add(os.path.join(bin_dir, script.name))
                 if WINDOWS:
-                    paths_to_remove.add(os.path.join(bin_dir, f"{script}.bat"))
+                    paths_to_remove.add(os.path.join(bin_dir, f"{script.name}.bat"))
         except (FileNotFoundError, NotADirectoryError):
             pass
 
@@ -569,7 +562,7 @@ def from_dist(cls, dist: BaseDistribution) -> "UninstallPathSet":
         def iter_scripts_to_remove(
             dist: BaseDistribution,
             bin_dir: str,
-        ) -> Generator[str, None, None]:
+        ) -> Iterator[str]:
             for entry_point in dist.iter_entry_points():
                 if entry_point.group == "console_scripts":
                     yield from _script_names(bin_dir, entry_point.name, False)
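
The annotation churn in this file swaps Generator[X, None, None] for Iterator[X]; the two spell the same contract whenever a generator neither receives sent values nor returns one. A self-contained version of the _unique decorator under that spelling:

import functools
from typing import Any, Callable, Iterator, Set

def unique(fn: Callable[..., Iterator[Any]]) -> Callable[..., Iterator[Any]]:
    """Drop duplicate items from the wrapped generator, preserving order."""
    @functools.wraps(fn)
    def wrapper(*args: Any, **kw: Any) -> Iterator[Any]:
        seen: Set[Any] = set()
        for item in fn(*args, **kw):
            if item not in seen:
                seen.add(item)
                yield item
    return wrapper

@unique
def names() -> Iterator[str]:
    yield from ["pip", "pip3", "pip", "pip3.10"]

print(list(names()))  # ['pip', 'pip3', 'pip3.10']
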
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/resolution/legacy/resolver.py b/venv/lib/python3.10/site-packages/pip/_internal/resolution/legacy/resolver.py
index fb49d41..8c149d4 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/resolution/legacy/resolver.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/resolution/legacy/resolver.py
@@ -28,14 +28,12 @@
     DistributionNotFound,
     HashError,
     HashErrors,
-    InstallationError,
     NoneMetadataError,
     UnsupportedPythonVersion,
 )
 from pip._internal.index.package_finder import PackageFinder
 from pip._internal.metadata import BaseDistribution
 from pip._internal.models.link import Link
-from pip._internal.models.wheel import Wheel
 from pip._internal.operations.prepare import RequirementPreparer
 from pip._internal.req.req_install import (
     InstallRequirement,
@@ -43,9 +41,7 @@
 )
 from pip._internal.req.req_set import RequirementSet
 from pip._internal.resolution.base import BaseResolver, InstallRequirementProvider
-from pip._internal.utils import compatibility_tags
 from pip._internal.utils.compatibility_tags import get_supported
-from pip._internal.utils.direct_url_helpers import direct_url_from_link
 from pip._internal.utils.logging import indent_log
 from pip._internal.utils.misc import normalize_version_info
 from pip._internal.utils.packaging import check_requires_python
@@ -172,7 +168,7 @@ def resolve(
         for req in root_reqs:
             if req.constraint:
                 check_invalid_constraint_type(req)
-            self._add_requirement_to_set(requirement_set, req)
+            requirement_set.add_requirement(req)
 
         # Actually prepare the files, and collect any exceptions. Most hash
         # exceptions cannot be checked ahead of time, because
@@ -192,124 +188,6 @@ def resolve(
 
         return requirement_set
 
-    def _add_requirement_to_set(
-        self,
-        requirement_set: RequirementSet,
-        install_req: InstallRequirement,
-        parent_req_name: Optional[str] = None,
-        extras_requested: Optional[Iterable[str]] = None,
-    ) -> Tuple[List[InstallRequirement], Optional[InstallRequirement]]:
-        """Add install_req as a requirement to install.
-
-        :param parent_req_name: The name of the requirement that needed this
-            added. The name is used because when multiple unnamed requirements
-            resolve to the same name, we could otherwise end up with dependency
-            links that point outside the Requirements set. parent_req must
-            already be added. Note that None implies that this is a user
-            supplied requirement, vs an inferred one.
-        :param extras_requested: an iterable of extras used to evaluate the
-            environment markers.
-        :return: Additional requirements to scan. That is either [] if
-            the requirement is not applicable, or [install_req] if the
-            requirement is applicable and has just been added.
-        """
-        # If the markers do not match, ignore this requirement.
-        if not install_req.match_markers(extras_requested):
-            logger.info(
-                "Ignoring %s: markers '%s' don't match your environment",
-                install_req.name,
-                install_req.markers,
-            )
-            return [], None
-
-        # If the wheel is not supported, raise an error.
-        # Should check this after filtering out based on environment markers to
-        # allow specifying different wheels based on the environment/OS, in a
-        # single requirements file.
-        if install_req.link and install_req.link.is_wheel:
-            wheel = Wheel(install_req.link.filename)
-            tags = compatibility_tags.get_supported()
-            if requirement_set.check_supported_wheels and not wheel.supported(tags):
-                raise InstallationError(
-                    "{} is not a supported wheel on this platform.".format(
-                        wheel.filename
-                    )
-                )
-
-        # This next bit is really a sanity check.
-        assert (
-            not install_req.user_supplied or parent_req_name is None
-        ), "a user supplied req shouldn't have a parent"
-
-        # Unnamed requirements are scanned again and the requirement won't be
-        # added as a dependency until after scanning.
-        if not install_req.name:
-            requirement_set.add_unnamed_requirement(install_req)
-            return [install_req], None
-
-        try:
-            existing_req: Optional[
-                InstallRequirement
-            ] = requirement_set.get_requirement(install_req.name)
-        except KeyError:
-            existing_req = None
-
-        has_conflicting_requirement = (
-            parent_req_name is None
-            and existing_req
-            and not existing_req.constraint
-            and existing_req.extras == install_req.extras
-            and existing_req.req
-            and install_req.req
-            and existing_req.req.specifier != install_req.req.specifier
-        )
-        if has_conflicting_requirement:
-            raise InstallationError(
-                "Double requirement given: {} (already in {}, name={!r})".format(
-                    install_req, existing_req, install_req.name
-                )
-            )
-
-        # When no existing requirement exists, add the requirement as a
-        # dependency and it will be scanned again after.
-        if not existing_req:
-            requirement_set.add_named_requirement(install_req)
-            # We'd want to rescan this requirement later
-            return [install_req], install_req
-
-        # Assume there's no need to scan, and that we've already
-        # encountered this for scanning.
-        if install_req.constraint or not existing_req.constraint:
-            return [], existing_req
-
-        does_not_satisfy_constraint = install_req.link and not (
-            existing_req.link and install_req.link.path == existing_req.link.path
-        )
-        if does_not_satisfy_constraint:
-            raise InstallationError(
-                "Could not satisfy constraints for '{}': "
-                "installation from path or url cannot be "
-                "constrained to a version".format(install_req.name)
-            )
-        # If we're now installing a constraint, mark the existing
-        # object for real installation.
-        existing_req.constraint = False
-        # If we're now installing a user supplied requirement,
-        # mark the existing object as such.
-        if install_req.user_supplied:
-            existing_req.user_supplied = True
-        existing_req.extras = tuple(
-            sorted(set(existing_req.extras) | set(install_req.extras))
-        )
-        logger.debug(
-            "Setting %s extras to: %s",
-            existing_req,
-            existing_req.extras,
-        )
-        # Return the existing requirement for addition to the parent and
-        # scanning again.
-        return [existing_req], existing_req
-
     def _is_upgrade_allowed(self, req: InstallRequirement) -> bool:
         if self.upgrade_strategy == "to-satisfy-only":
             return False
@@ -432,14 +310,6 @@ def _populate_link(self, req: InstallRequirement) -> None:
             logger.debug("Using cached wheel link: %s", cache_entry.link)
             if req.link is req.original_link and cache_entry.persistent:
                 req.original_link_is_in_wheel_cache = True
-            if cache_entry.origin is not None:
-                req.download_info = cache_entry.origin
-            else:
-                # Legacy cache entry that does not have origin.json.
-                # download_info may miss the archive_info.hash field.
-                req.download_info = direct_url_from_link(
-                    req.link, link_is_in_wheel_cache=cache_entry.persistent
-                )
             req.link = cache_entry.link
 
     def _get_dist_for(self, req: InstallRequirement) -> BaseDistribution:
@@ -523,8 +393,7 @@ def add_req(subreq: Requirement, extras_requested: Iterable[str]) -> None:
             # the legacy resolver so I'm just not going to bother refactoring.
             sub_install_req = self._make_install_req(str(subreq), req_to_install)
             parent_req_name = req_to_install.name
-            to_scan_again, add_to_parent = self._add_requirement_to_set(
-                requirement_set,
+            to_scan_again, add_to_parent = requirement_set.add_requirement(
                 sub_install_req,
                 parent_req_name=parent_req_name,
                 extras_requested=extras_requested,
@@ -541,9 +410,7 @@ def add_req(subreq: Requirement, extras_requested: Iterable[str]) -> None:
                 # 'unnamed' requirements can only come from being directly
                 # provided by the user.
                 assert req_to_install.user_supplied
-                self._add_requirement_to_set(
-                    requirement_set, req_to_install, parent_req_name=None
-                )
+                requirement_set.add_requirement(req_to_install, parent_req_name=None)
 
             if not self.ignore_dependencies:
                 if req_to_install.extras:
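
The wheel-support check that moved out of this resolver parses a wheel's filename into tags and tests them against the interpreter's supported set. Roughly the same check with the standalone packaging distribution (an assumption here: pip itself uses a vendored copy and its own Wheel model):

from packaging.tags import sys_tags
from packaging.utils import parse_wheel_filename

_, _, _, wheel_tags = parse_wheel_filename("pip-22.0.4-py3-none-any.whl")
supported = set(sys_tags())
print(any(tag in supported for tag in wheel_tags))  # True on any CPython 3
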
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/resolution/resolvelib/candidates.py b/venv/lib/python3.10/site-packages/pip/_internal/resolution/resolvelib/candidates.py
index f5bc343..9b8450e 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/resolution/resolvelib/candidates.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/resolution/resolvelib/candidates.py
@@ -18,7 +18,6 @@
     install_req_from_line,
 )
 from pip._internal.req.req_install import InstallRequirement
-from pip._internal.utils.direct_url_helpers import direct_url_from_link
 from pip._internal.utils.misc import normalize_version_info
 
 from .base import Candidate, CandidateVersion, Requirement, format_name
@@ -70,7 +69,6 @@ def make_install_req_from_link(
             global_options=template.global_options,
             hashes=template.hash_options,
         ),
-        config_settings=template.config_settings,
     )
     ireq.original_link = template.original_link
     ireq.link = link
@@ -94,7 +92,6 @@ def make_install_req_from_editable(
             global_options=template.global_options,
             hashes=template.hash_options,
         ),
-        config_settings=template.config_settings,
     )
 
 
@@ -119,7 +116,6 @@ def _make_install_req_from_dist(
             global_options=template.global_options,
             hashes=template.hash_options,
         ),
-        config_settings=template.config_settings,
     )
     ireq.satisfied_by = dist
     return ireq
@@ -282,17 +278,12 @@ def __init__(
                     version, wheel_version, name
                 )
 
-        if cache_entry is not None:
-            if cache_entry.persistent and template.link is template.original_link:
-                ireq.original_link_is_in_wheel_cache = True
-            if cache_entry.origin is not None:
-                ireq.download_info = cache_entry.origin
-            else:
-                # Legacy cache entry that does not have origin.json.
-                # download_info may miss the archive_info.hash field.
-                ireq.download_info = direct_url_from_link(
-                    source_link, link_is_in_wheel_cache=cache_entry.persistent
-                )
+        if (
+            cache_entry is not None
+            and cache_entry.persistent
+            and template.link is template.original_link
+        ):
+            ireq.original_link_is_in_wheel_cache = True
 
         super().__init__(
             link=link,
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/resolution/resolvelib/factory.py b/venv/lib/python3.10/site-packages/pip/_internal/resolution/resolvelib/factory.py
index a4c24b5..261d8d5 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/resolution/resolvelib/factory.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/resolution/resolvelib/factory.py
@@ -27,6 +27,7 @@
 from pip._internal.exceptions import (
     DistributionNotFound,
     InstallationError,
+    InstallationSubprocessError,
     MetadataInconsistent,
     UnsupportedPythonVersion,
     UnsupportedWheel,
@@ -96,6 +97,7 @@ def __init__(
         force_reinstall: bool,
         ignore_installed: bool,
         ignore_requires_python: bool,
+        suppress_build_failures: bool,
         py_version_info: Optional[Tuple[int, ...]] = None,
     ) -> None:
         self._finder = finder
@@ -106,6 +108,7 @@ def __init__(
         self._use_user_site = use_user_site
         self._force_reinstall = force_reinstall
         self._ignore_requires_python = ignore_requires_python
+        self._suppress_build_failures = suppress_build_failures
 
         self._build_failures: Cache[InstallationError] = {}
         self._link_candidate_cache: Cache[LinkCandidate] = {}
@@ -198,6 +201,12 @@ def _make_candidate_from_link(
                     )
                     self._build_failures[link] = e
                     return None
+                except InstallationSubprocessError as e:
+                    if not self._suppress_build_failures:
+                        raise
+                    logger.warning("Discarding %s due to build failure: %s", link, e)
+                    self._build_failures[link] = e
+                    return None
 
             base: BaseCandidate = self._editable_candidate_cache[link]
         else:
@@ -219,6 +228,12 @@ def _make_candidate_from_link(
                     )
                     self._build_failures[link] = e
                     return None
+                except InstallationSubprocessError as e:
+                    if not self._suppress_build_failures:
+                        raise
+                    logger.warning("Discarding %s due to build failure: %s", link, e)
+                    self._build_failures[link] = e
+                    return None
             base = self._link_candidate_cache[link]
 
         if not extras:
@@ -602,15 +617,8 @@ def _report_single_requirement_conflict(
             req_disp = f"{req} (from {parent.name})"
 
         cands = self._finder.find_all_candidates(req.project_name)
-        skipped_by_requires_python = self._finder.requires_python_skipped_reasons()
         versions = [str(v) for v in sorted({c.version for c in cands})]
 
-        if skipped_by_requires_python:
-            logger.critical(
-                "Ignored the following versions that require a different python "
-                "version: %s",
-                "; ".join(skipped_by_requires_python) or "none",
-            )
         logger.critical(
             "Could not find a version that satisfies the requirement %s "
             "(from versions: %s)",
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/resolution/resolvelib/provider.py b/venv/lib/python3.10/site-packages/pip/_internal/resolution/resolvelib/provider.py
index 6300dfc..e6ec959 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/resolution/resolvelib/provider.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/resolution/resolvelib/provider.py
@@ -117,7 +117,7 @@ def get_preference(  # type: ignore
         The lower the return value is, the more preferred this group of
         arguments is.
 
-        Currently pip considers the following in order:
+        Currently pip considers the following in order:
 
         * Prefer if any of the known requirements is "direct", e.g. points to an
           explicit URL.
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/resolution/resolvelib/resolver.py b/venv/lib/python3.10/site-packages/pip/_internal/resolution/resolvelib/resolver.py
index a605d6c..618f1e1 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/resolution/resolvelib/resolver.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/resolution/resolvelib/resolver.py
@@ -47,6 +47,7 @@ def __init__(
         ignore_requires_python: bool,
         force_reinstall: bool,
         upgrade_strategy: str,
+        suppress_build_failures: bool,
         py_version_info: Optional[Tuple[int, ...]] = None,
     ):
         super().__init__()
@@ -61,6 +62,7 @@ def __init__(
             force_reinstall=force_reinstall,
             ignore_installed=ignore_installed,
             ignore_requires_python=ignore_requires_python,
+            suppress_build_failures=suppress_build_failures,
             py_version_info=py_version_info,
         )
         self.ignore_dependencies = ignore_dependencies
@@ -183,7 +185,10 @@ def get_installation_order(
             return []
 
         graph = self._result.graph
-        weights = get_topological_weights(graph, set(req_set.requirements.keys()))
+        weights = get_topological_weights(
+            graph,
+            expected_node_count=len(self._result.mapping) + 1,
+        )
 
         sorted_items = sorted(
             req_set.requirements.items(),
@@ -194,7 +199,7 @@ def get_installation_order(
 
 
 def get_topological_weights(
-    graph: "DirectedGraph[Optional[str]]", requirement_keys: Set[str]
+    graph: "DirectedGraph[Optional[str]]", expected_node_count: int
 ) -> Dict[Optional[str], int]:
     """Assign weights to each node based on how "deep" they are.
 
@@ -217,9 +222,6 @@ def get_topological_weights(
     don't get stuck in a cycle.
 
     When assigning weight, the longer path (i.e. larger length) is preferred.
-
-    We are only interested in the weights of packages that are in the
-    requirement_keys.
     """
     path: Set[Optional[str]] = set()
     weights: Dict[Optional[str], int] = {}
@@ -235,9 +237,6 @@ def visit(node: Optional[str]) -> None:
             visit(child)
         path.remove(node)
 
-        if node not in requirement_keys:
-            return
-
         last_known_parent_count = weights.get(node, 0)
         weights[node] = max(last_known_parent_count, len(path))
 
@@ -263,8 +262,6 @@ def visit(node: Optional[str]) -> None:
         # Calculate the weight for the leaves.
         weight = len(graph) - 1
         for leaf in leaves:
-            if leaf not in requirement_keys:
-                continue
             weights[leaf] = weight
         # Remove the leaves from the graph, making it simpler.
         for leaf in leaves:
@@ -274,10 +271,9 @@ def visit(node: Optional[str]) -> None:
     # `None` is guaranteed to be the root node by resolvelib.
     visit(None)
 
-    # Sanity check: all requirement keys should be in the weights,
-    # and no other keys should be in the weights.
-    difference = set(weights.keys()).difference(requirement_keys)
-    assert not difference, difference
+    # Sanity checks
+    assert weights[None] == 0
+    assert len(weights) == expected_node_count
 
     return weights
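
get_topological_weights drives installation order: it walks the resolution graph depth-first from the virtual None root and records, for each node, the longest ancestor path on which it was seen, so deeper dependencies get larger weights. A toy rerun of that walk over a plain adjacency dict (illustrative; resolvelib's DirectedGraph is not used here):

from typing import Dict, List, Optional, Set

graph: Dict[Optional[str], List[str]] = {
    None: ["requests"],
    "requests": ["urllib3", "idna"],
    "urllib3": [],
    "idna": [],
}

path: Set[Optional[str]] = set()
weights: Dict[Optional[str], int] = {}

def visit(node: Optional[str]) -> None:
    if node in path:  # cycle guard, as in the real implementation
        return
    path.add(node)
    for child in graph[node]:
        visit(child)
    path.remove(node)
    weights[node] = max(weights.get(node, 0), len(path))

visit(None)
print(weights)  # {'urllib3': 2, 'idna': 2, 'requests': 1, None: 0}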
 
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/self_outdated_check.py b/venv/lib/python3.10/site-packages/pip/_internal/self_outdated_check.py
index 9e2149c..7300e0e 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/self_outdated_check.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/self_outdated_check.py
@@ -1,34 +1,23 @@
 import datetime
-import functools
 import hashlib
 import json
 import logging
 import optparse
 import os.path
 import sys
-from dataclasses import dataclass
-from typing import Any, Callable, Dict, Optional
+from typing import Any, Dict
 
 from pip._vendor.packaging.version import parse as parse_version
-from pip._vendor.rich.console import Group
-from pip._vendor.rich.markup import escape
-from pip._vendor.rich.text import Text
 
 from pip._internal.index.collector import LinkCollector
 from pip._internal.index.package_finder import PackageFinder
 from pip._internal.metadata import get_default_environment
-from pip._internal.metadata.base import DistributionVersion
 from pip._internal.models.selection_prefs import SelectionPreferences
 from pip._internal.network.session import PipSession
-from pip._internal.utils.compat import WINDOWS
-from pip._internal.utils.entrypoints import (
-    get_best_invocation_for_this_pip,
-    get_best_invocation_for_this_python,
-)
 from pip._internal.utils.filesystem import adjacent_tmp_file, check_path_owner, replace
 from pip._internal.utils.misc import ensure_dir
 
-_DATE_FMT = "%Y-%m-%dT%H:%M:%SZ"
+SELFCHECK_DATE_FMT = "%Y-%m-%dT%H:%M:%SZ"
 
 
 logger = logging.getLogger(__name__)
@@ -42,17 +31,17 @@ def _get_statefile_name(key: str) -> str:
 
 class SelfCheckState:
     def __init__(self, cache_dir: str) -> None:
-        self._state: Dict[str, Any] = {}
-        self._statefile_path = None
+        self.state: Dict[str, Any] = {}
+        self.statefile_path = None
 
         # Try to load the existing state
         if cache_dir:
-            self._statefile_path = os.path.join(
+            self.statefile_path = os.path.join(
                 cache_dir, "selfcheck", _get_statefile_name(self.key)
             )
             try:
-                with open(self._statefile_path, encoding="utf-8") as statefile:
-                    self._state = json.load(statefile)
+                with open(self.statefile_path, encoding="utf-8") as statefile:
+                    self.state = json.load(statefile)
             except (OSError, ValueError, KeyError):
                 # Explicitly suppressing exceptions, since we don't want to
                 # error out if the cache file is invalid.
@@ -62,87 +51,41 @@ def __init__(self, cache_dir: str) -> None:
     def key(self) -> str:
         return sys.prefix
 
-    def get(self, current_time: datetime.datetime) -> Optional[str]:
-        """Check if we have a not-outdated version loaded already."""
-        if not self._state:
-            return None
-
-        if "last_check" not in self._state:
-            return None
-
-        if "pypi_version" not in self._state:
-            return None
-
-        seven_days_in_seconds = 7 * 24 * 60 * 60
-
-        # Determine if we need to refresh the state
-        last_check = datetime.datetime.strptime(self._state["last_check"], _DATE_FMT)
-        seconds_since_last_check = (current_time - last_check).total_seconds()
-        if seconds_since_last_check > seven_days_in_seconds:
-            return None
-
-        return self._state["pypi_version"]
-
-    def set(self, pypi_version: str, current_time: datetime.datetime) -> None:
+    def save(self, pypi_version: str, current_time: datetime.datetime) -> None:
         # If we do not have a path to cache in, don't bother saving.
-        if not self._statefile_path:
+        if not self.statefile_path:
             return
 
         # Check to make sure that we own the directory
-        if not check_path_owner(os.path.dirname(self._statefile_path)):
+        if not check_path_owner(os.path.dirname(self.statefile_path)):
             return
 
         # Now that we've ensured the directory is owned by this user, we'll go
         # ahead and make sure that all our directories are created.
-        ensure_dir(os.path.dirname(self._statefile_path))
+        ensure_dir(os.path.dirname(self.statefile_path))
 
         state = {
             # Include the key so it's easy to tell which pip wrote the
             # file.
             "key": self.key,
-            "last_check": current_time.strftime(_DATE_FMT),
+            "last_check": current_time.strftime(SELFCHECK_DATE_FMT),
             "pypi_version": pypi_version,
         }
 
         text = json.dumps(state, sort_keys=True, separators=(",", ":"))
 
-        with adjacent_tmp_file(self._statefile_path) as f:
+        with adjacent_tmp_file(self.statefile_path) as f:
             f.write(text.encode())
 
         try:
             # Since we have a prefix-specific state file, we can just
             # overwrite whatever is there, no need to check.
-            replace(f.name, self._statefile_path)
+            replace(f.name, self.statefile_path)
         except OSError:
             # Best effort.
             pass
 
 
-@dataclass
-class UpgradePrompt:
-    old: str
-    new: str
-
-    def __rich__(self) -> Group:
-        if WINDOWS:
-            pip_cmd = f"{get_best_invocation_for_this_python()} -m pip"
-        else:
-            pip_cmd = get_best_invocation_for_this_pip()
-
-        notice = "[bold][[reset][blue]notice[reset][bold]][reset]"
-        return Group(
-            Text(),
-            Text.from_markup(
-                f"{notice} A new release of pip available: "
-                f"[red]{self.old}[reset] -> [green]{self.new}[reset]"
-            ),
-            Text.from_markup(
-                f"{notice} To update, run: "
-                f"[green]{escape(pip_cmd)} install --upgrade pip"
-            ),
-        )
-
-
 def was_installed_by_pip(pkg: str) -> bool:
     """Checks whether pkg was installed by pip
 
@@ -153,65 +96,6 @@ def was_installed_by_pip(pkg: str) -> bool:
     return dist is not None and "pip" == dist.installer
 
 
-def _get_current_remote_pip_version(
-    session: PipSession, options: optparse.Values
-) -> str:
-    # Let's use PackageFinder to see what the latest pip version is
-    link_collector = LinkCollector.create(
-        session,
-        options=options,
-        suppress_no_index=True,
-    )
-
-    # Pass allow_yanked=False so we don't suggest upgrading to a
-    # yanked version.
-    selection_prefs = SelectionPreferences(
-        allow_yanked=False,
-        allow_all_prereleases=False,  # Explicitly set to False
-    )
-
-    finder = PackageFinder.create(
-        link_collector=link_collector,
-        selection_prefs=selection_prefs,
-    )
-    best_candidate = finder.find_best_candidate("pip").best_candidate
-    if best_candidate is None:
-        return
-
-    return str(best_candidate.version)
-
-
-def _self_version_check_logic(
-    *,
-    state: SelfCheckState,
-    current_time: datetime.datetime,
-    local_version: DistributionVersion,
-    get_remote_version: Callable[[], str],
-) -> Optional[UpgradePrompt]:
-    remote_version_str = state.get(current_time)
-    if remote_version_str is None:
-        remote_version_str = get_remote_version()
-        state.set(remote_version_str, current_time)
-
-    remote_version = parse_version(remote_version_str)
-    logger.debug("Remote version of pip: %s", remote_version)
-    logger.debug("Local version of pip:  %s", local_version)
-
-    pip_installed_by_pip = was_installed_by_pip("pip")
-    logger.debug("Was pip installed by pip? %s", pip_installed_by_pip)
-    if not pip_installed_by_pip:
-        return None  # Only suggest upgrade if pip is installed by pip.
-
-    local_version_is_older = (
-        local_version < remote_version
-        and local_version.base_version != remote_version.base_version
-    )
-    if local_version_is_older:
-        return UpgradePrompt(old=str(local_version), new=remote_version_str)
-
-    return None
-
-
 def pip_self_version_check(session: PipSession, options: optparse.Values) -> None:
     """Check for an update for pip.
 
@@ -223,17 +107,83 @@ def pip_self_version_check(session: PipSession, options: optparse.Values) -> Non
     if not installed_dist:
         return
 
+    pip_version = installed_dist.version
+    pypi_version = None
+
     try:
-        upgrade_prompt = _self_version_check_logic(
-            state=SelfCheckState(cache_dir=options.cache_dir),
-            current_time=datetime.datetime.utcnow(),
-            local_version=installed_dist.version,
-            get_remote_version=functools.partial(
-                _get_current_remote_pip_version, session, options
-            ),
+        state = SelfCheckState(cache_dir=options.cache_dir)
+
+        current_time = datetime.datetime.utcnow()
+        # Determine if we need to refresh the state
+        if "last_check" in state.state and "pypi_version" in state.state:
+            last_check = datetime.datetime.strptime(
+                state.state["last_check"], SELFCHECK_DATE_FMT
+            )
+            if (current_time - last_check).total_seconds() < 7 * 24 * 60 * 60:
+                pypi_version = state.state["pypi_version"]
+
+        # Refresh the version if we need to or just see if we need to warn
+        if pypi_version is None:
+            # Let's use PackageFinder to see what the latest pip version is
+            link_collector = LinkCollector.create(
+                session,
+                options=options,
+                suppress_no_index=True,
+            )
+
+            # Pass allow_yanked=False so we don't suggest upgrading to a
+            # yanked version.
+            selection_prefs = SelectionPreferences(
+                allow_yanked=False,
+                allow_all_prereleases=False,  # Explicitly set to False
+            )
+
+            finder = PackageFinder.create(
+                link_collector=link_collector,
+                selection_prefs=selection_prefs,
+                use_deprecated_html5lib=(
+                    "html5lib" in options.deprecated_features_enabled
+                ),
+            )
+            best_candidate = finder.find_best_candidate("pip").best_candidate
+            if best_candidate is None:
+                return
+            pypi_version = str(best_candidate.version)
+
+            # save that we've performed a check
+            state.save(pypi_version, current_time)
+
+        remote_version = parse_version(pypi_version)
+
+        local_version_is_older = (
+            pip_version < remote_version
+            and pip_version.base_version != remote_version.base_version
+            and was_installed_by_pip("pip")
+        )
+
+        # Determine if our pypi_version is older
+        if not local_version_is_older:
+            return
+
+        # We cannot tell how the current pip is available in the current
+        # command context, so be pragmatic here and suggest the command
+        # that's always available. This does not accommodate spaces in
+        # `sys.executable` on purpose as it is not possible to do it
+        # correctly without knowing the user's shell. Thus,
+        # it won't be done until possible through the standard library.
+        # Do not be tempted to use the undocumented subprocess.list2cmdline.
+        # It is considered an internal implementation detail for a reason.
+        pip_cmd = f"{sys.executable} -m pip"
+        logger.warning(
+            "You are using pip version %s; however, version %s is "
+            "available.\nYou should consider upgrading via the "
+            "'%s install --upgrade pip' command.",
+            pip_version,
+            pypi_version,
+            pip_cmd,
         )
-        if upgrade_prompt is not None:
-            logger.warning("[present-rich] %s", upgrade_prompt)
     except Exception:
-        logger.warning("There was an error checking the latest version of pip.")
-        logger.debug("See below for error", exc_info=True)
+        logger.debug(
+            "There was an error checking the latest version of pip",
+            exc_info=True,
+        )
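
Both shapes of the self-check gate the network round-trip on a timestamp in the state file: a cached pypi_version younger than seven days is reused, anything older triggers a fresh PackageFinder query. The freshness test by itself:

import datetime

DATE_FMT = "%Y-%m-%dT%H:%M:%SZ"
state = {"last_check": "2022-03-01T10:00:00Z", "pypi_version": "22.0.4"}

now = datetime.datetime(2022, 3, 5, 10, 0, 0)
last_check = datetime.datetime.strptime(state["last_check"], DATE_FMT)

if (now - last_check).total_seconds() < 7 * 24 * 60 * 60:
    print("cache hit:", state["pypi_version"])  # four days old: reuse
else:
    print("stale: query PyPI again")
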
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/utils/deprecation.py b/venv/lib/python3.10/site-packages/pip/_internal/utils/deprecation.py
index 18e9be9..72bd6f2 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/utils/deprecation.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/utils/deprecation.py
@@ -118,71 +118,3 @@ def deprecated(
         raise PipDeprecationWarning(message)
 
     warnings.warn(message, category=PipDeprecationWarning, stacklevel=2)
-
-
-class LegacyInstallReason:
-    def __init__(
-        self,
-        reason: str,
-        replacement: Optional[str] = None,
-        gone_in: Optional[str] = None,
-        feature_flag: Optional[str] = None,
-        issue: Optional[int] = None,
-        emit_after_success: bool = False,
-        emit_before_install: bool = False,
-    ):
-        self._reason = reason
-        self._replacement = replacement
-        self._gone_in = gone_in
-        self._feature_flag = feature_flag
-        self._issue = issue
-        self.emit_after_success = emit_after_success
-        self.emit_before_install = emit_before_install
-
-    def emit_deprecation(self, name: str) -> None:
-        deprecated(
-            reason=self._reason.format(name=name),
-            replacement=self._replacement,
-            gone_in=self._gone_in,
-            feature_flag=self._feature_flag,
-            issue=self._issue,
-        )
-
-
-LegacyInstallReasonFailedBdistWheel = LegacyInstallReason(
-    reason=(
-        "{name} was installed using the legacy 'setup.py install' "
-        "method, because a wheel could not be built for it."
-    ),
-    replacement="to fix the wheel build issue reported above",
-    gone_in="23.1",
-    issue=8368,
-    emit_after_success=True,
-)
-
-
-LegacyInstallReasonMissingWheelPackage = LegacyInstallReason(
-    reason=(
-        "{name} is being installed using the legacy "
-        "'setup.py install' method, because it does not have a "
-        "'pyproject.toml' and the 'wheel' package "
-        "is not installed."
-    ),
-    replacement="to enable the '--use-pep517' option",
-    gone_in="23.1",
-    issue=8559,
-    emit_before_install=True,
-)
-
-LegacyInstallReasonNoBinaryForcesSetuptoolsInstall = LegacyInstallReason(
-    reason=(
-        "{name} is being installed using the legacy "
-        "'setup.py install' method, because the '--no-binary' option was enabled "
-        "for it and this currently disables local wheel building for projects that "
-        "don't have a 'pyproject.toml' file."
-    ),
-    replacement="to enable the '--use-pep517' option",
-    gone_in="23.1",
-    issue=11451,
-    emit_before_install=True,
-)
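
Each removed LegacyInstallReason bundles a message template with its metadata and timing flags; emit_deprecation() fills the package name into the template and forwards everything to deprecated(). The pattern reduced to a runnable sketch (a simplified stand-in, not pip's class):

from typing import Optional

class Reason:
    def __init__(self, reason: str, issue: Optional[int] = None,
                 emit_after_success: bool = False) -> None:
        self.reason = reason
        self.issue = issue
        self.emit_after_success = emit_after_success

    def render(self, name: str) -> str:
        # Mirrors emit_deprecation(): fill the name into the template.
        return self.reason.format(name=name)

failed_bdist_wheel = Reason(
    reason="{name} was installed using the legacy 'setup.py install' "
           "method, because a wheel could not be built for it.",
    issue=8368,
    emit_after_success=True,
)
print(failed_bdist_wheel.render("example-pkg"))
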
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/utils/distutils_args.py b/venv/lib/python3.10/site-packages/pip/_internal/utils/distutils_args.py
index 2fd1862..e4aa5b8 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/utils/distutils_args.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/utils/distutils_args.py
@@ -1,43 +1,42 @@
-from getopt import GetoptError, getopt
+from distutils.errors import DistutilsArgError
+from distutils.fancy_getopt import FancyGetopt
 from typing import Dict, List
 
 _options = [
-    "exec-prefix=",
-    "home=",
-    "install-base=",
-    "install-data=",
-    "install-headers=",
-    "install-lib=",
-    "install-platlib=",
-    "install-purelib=",
-    "install-scripts=",
-    "prefix=",
-    "root=",
-    "user",
+    ("exec-prefix=", None, ""),
+    ("home=", None, ""),
+    ("install-base=", None, ""),
+    ("install-data=", None, ""),
+    ("install-headers=", None, ""),
+    ("install-lib=", None, ""),
+    ("install-platlib=", None, ""),
+    ("install-purelib=", None, ""),
+    ("install-scripts=", None, ""),
+    ("prefix=", None, ""),
+    ("root=", None, ""),
+    ("user", None, ""),
 ]
 
 
+# typeshed doesn't permit Tuple[str, None, str], see python/typeshed#3469.
+_distutils_getopt = FancyGetopt(_options)  # type: ignore
+
+
 def parse_distutils_args(args: List[str]) -> Dict[str, str]:
-    """Parse provided arguments, returning an object that has the matched arguments.
+    """Parse provided arguments, returning an object that has the
+    matched arguments.
 
     Any unknown arguments are ignored.
     """
     result = {}
     for arg in args:
         try:
-            parsed_opt, _ = getopt(args=[arg], shortopts="", longopts=_options)
-        except GetoptError:
+            _, match = _distutils_getopt.getopt(args=[arg])
+        except DistutilsArgError:
             # We don't care about any other options, which here may be
             # considered unrecognized since our option list is not
             # exhaustive.
-            continue
-
-        if not parsed_opt:
-            continue
-
-        option = parsed_opt[0]
-        name_from_parsed = option[0][2:].replace("-", "_")
-        value_from_parsed = option[1] or "true"
-        result[name_from_parsed] = value_from_parsed
-
+            pass
+        else:
+            result.update(match.__dict__)
     return result
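
Old and new implementations both probe each argument individually against a whitelist of distutils install options and silently skip anything unrecognised. The getopt-based variant, condensed to a runnable sketch with a shortened option list:

from getopt import GetoptError, getopt

LONGOPTS = ["prefix=", "root=", "user"]

def parse(args):
    result = {}
    for arg in args:
        try:
            parsed, _ = getopt(args=[arg], shortopts="", longopts=LONGOPTS)
        except GetoptError:
            continue  # unknown option: ignore, as pip does
        if parsed:
            name, value = parsed[0]
            result[name[2:].replace("-", "_")] = value or "true"
    return result

print(parse(["--prefix=/opt/py", "--user", "--unknown"]))
# {'prefix': '/opt/py', 'user': 'true'}
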
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/utils/encoding.py b/venv/lib/python3.10/site-packages/pip/_internal/utils/encoding.py
index 008f06a..1c73f6c 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/utils/encoding.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/utils/encoding.py
@@ -14,7 +14,7 @@
     (codecs.BOM_UTF32_LE, "utf-32-le"),
 ]
 
-ENCODING_RE = re.compile(rb"coding[:=]\s*([-\w.]+)")
+ENCODING_RE = re.compile(br"coding[:=]\s*([-\w.]+)")
 
 
 def auto_decode(data: bytes) -> str:
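
ENCODING_RE is the PEP 263 coding-declaration pattern that auto_decode() applies to the first two lines of a file before falling back to a default codec; br"..." and rb"..." are the same bytes literal, so the hunk above is purely cosmetic. The regex at work:

import re

ENCODING_RE = re.compile(br"coding[:=]\s*([-\w.]+)")

line = b"# -*- coding: latin-1 -*-"
match = ENCODING_RE.search(line)
assert match is not None
print(match.group(1).decode("ascii"))  # latin-1
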
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/utils/entrypoints.py b/venv/lib/python3.10/site-packages/pip/_internal/utils/entrypoints.py
index 1501369..1504a12 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/utils/entrypoints.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/utils/entrypoints.py
@@ -1,23 +1,7 @@
-import itertools
-import os
-import shutil
 import sys
 from typing import List, Optional
 
 from pip._internal.cli.main import main
-from pip._internal.utils.compat import WINDOWS
-
-_EXECUTABLE_NAMES = [
-    "pip",
-    f"pip{sys.version_info.major}",
-    f"pip{sys.version_info.major}.{sys.version_info.minor}",
-]
-if WINDOWS:
-    _allowed_extensions = {"", ".exe"}
-    _EXECUTABLE_NAMES = [
-        "".join(parts)
-        for parts in itertools.product(_EXECUTABLE_NAMES, _allowed_extensions)
-    ]
 
 
 def _wrapper(args: Optional[List[str]] = None) -> int:
@@ -41,44 +25,3 @@ def _wrapper(args: Optional[List[str]] = None) -> int:
         "running pip directly.\n"
     )
     return main(args)
-
-
-def get_best_invocation_for_this_pip() -> str:
-    """Try to figure out the best way to invoke pip in the current environment."""
-    binary_directory = "Scripts" if WINDOWS else "bin"
-    binary_prefix = os.path.join(sys.prefix, binary_directory)
-
-    # Try to use pip[X[.Y]] names, if those executables for this environment are
-    # the first on PATH with that name.
-    path_parts = os.path.normcase(os.environ.get("PATH", "")).split(os.pathsep)
-    exe_are_in_PATH = os.path.normcase(binary_prefix) in path_parts
-    if exe_are_in_PATH:
-        for exe_name in _EXECUTABLE_NAMES:
-            found_executable = shutil.which(exe_name)
-            binary_executable = os.path.join(binary_prefix, exe_name)
-            if (
-                found_executable
-                and os.path.exists(binary_executable)
-                and os.path.samefile(
-                    found_executable,
-                    binary_executable,
-                )
-            ):
-                return exe_name
-
-    # Use the `-m` invocation, if there's no "nice" invocation.
-    return f"{get_best_invocation_for_this_python()} -m pip"
-
-
-def get_best_invocation_for_this_python() -> str:
-    """Try to figure out the best way to invoke the current Python."""
-    exe = sys.executable
-    exe_name = os.path.basename(exe)
-
-    # Try to use the basename, if it's the first executable.
-    found_executable = shutil.which(exe_name)
-    if found_executable and os.path.samefile(found_executable, exe):
-        return exe_name
-
-    # Use the full executable name, because we couldn't find something simpler.
-    return exe
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/utils/filesystem.py b/venv/lib/python3.10/site-packages/pip/_internal/utils/filesystem.py
index 83c2df7..b7e6191 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/utils/filesystem.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/utils/filesystem.py
@@ -2,10 +2,12 @@
 import os
 import os.path
 import random
+import shutil
+import stat
 import sys
 from contextlib import contextmanager
 from tempfile import NamedTemporaryFile
-from typing import Any, BinaryIO, Generator, List, Union, cast
+from typing import Any, BinaryIO, Iterator, List, Union, cast
 
 from pip._vendor.tenacity import retry, stop_after_delay, wait_fixed
 
@@ -40,8 +42,35 @@ def check_path_owner(path: str) -> bool:
     return False  # assume we don't own the path
 
 
+def copy2_fixed(src: str, dest: str) -> None:
+    """Wrap shutil.copy2() but map errors copying socket files to
+    SpecialFileError as expected.
+
+    See also https://bugs.python.org/issue37700.
+    """
+    try:
+        shutil.copy2(src, dest)
+    except OSError:
+        for f in [src, dest]:
+            try:
+                is_socket_file = is_socket(f)
+            except OSError:
+                # An error has already occurred. Another error here is not
+                # a problem and we can ignore it.
+                pass
+            else:
+                if is_socket_file:
+                    raise shutil.SpecialFileError(f"`{f}` is a socket")
+
+        raise
+
+
+def is_socket(path: str) -> bool:
+    return stat.S_ISSOCK(os.lstat(path).st_mode)
+
+
 @contextmanager
-def adjacent_tmp_file(path: str, **kwargs: Any) -> Generator[BinaryIO, None, None]:
+def adjacent_tmp_file(path: str, **kwargs: Any) -> Iterator[BinaryIO]:
     """Return a file-like object pointing to a tmp file next to path.
 
     The file is created securely and is ensured to be written to disk
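
The new is_socket() is a thin wrapper over os.lstat. On a POSIX system it can be exercised like this (illustrative only; AF_UNIX is not available on Windows):

import os
import socket
import stat
import tempfile

def is_socket(path: str) -> bool:
    return stat.S_ISSOCK(os.lstat(path).st_mode)

with tempfile.TemporaryDirectory() as tmp:
    sock_path = os.path.join(tmp, "demo.sock")
    with socket.socket(socket.AF_UNIX) as s:
        s.bind(sock_path)  # binding creates a socket file on disk
        print(is_socket(sock_path))  # True
    print(is_socket(tmp))  # False: a directory, not a socket
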
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/utils/hashes.py b/venv/lib/python3.10/site-packages/pip/_internal/utils/hashes.py
index 7672730..82eb035 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/utils/hashes.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/utils/hashes.py
@@ -1,5 +1,5 @@
 import hashlib
-from typing import TYPE_CHECKING, BinaryIO, Dict, Iterable, List, Optional
+from typing import TYPE_CHECKING, BinaryIO, Dict, Iterator, List
 
 from pip._internal.exceptions import HashMismatch, HashMissing, InstallationError
 from pip._internal.utils.misc import read_chunks
@@ -28,7 +28,7 @@ class Hashes:
 
     """
 
-    def __init__(self, hashes: Optional[Dict[str, List[str]]] = None) -> None:
+    def __init__(self, hashes: Dict[str, List[str]] = None) -> None:
         """
         :param hashes: A dict of algorithm names pointing to lists of allowed
             hex digests
@@ -67,7 +67,7 @@ def is_hash_allowed(self, hash_name: str, hex_digest: str) -> bool:
         """Return whether the given hex digest is allowed."""
         return hex_digest in self._allowed.get(hash_name, [])
 
-    def check_against_chunks(self, chunks: Iterable[bytes]) -> None:
+    def check_against_chunks(self, chunks: Iterator[bytes]) -> None:
         """Check good hashes against ones built from iterable of chunks of
         data.
 
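check_against_chunks() feeds every chunk into one hashlib object per allowed algorithm and compares hex digests at the end. A simplified, self-contained version of that loop, assuming the allowed digests arrive as a plain dict:

import hashlib
from typing import Dict, Iterator, List

def check_chunks(chunks: Iterator[bytes], allowed: Dict[str, List[str]]) -> bool:
    hashers = {name: hashlib.new(name) for name in allowed}
    for chunk in chunks:
        for hasher in hashers.values():
            hasher.update(chunk)
    return any(h.hexdigest() in allowed[name] for name, h in hashers.items())

expected = hashlib.sha256(b"hello world").hexdigest()
print(check_chunks(iter([b"hello ", b"world"]), {"sha256": [expected]}))  # True
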
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/utils/logging.py b/venv/lib/python3.10/site-packages/pip/_internal/utils/logging.py
index c10e1f4..6e001c5 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/utils/logging.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/utils/logging.py
@@ -6,23 +6,21 @@
 import sys
 import threading
 from dataclasses import dataclass
-from io import TextIOWrapper
 from logging import Filter
-from typing import Any, ClassVar, Generator, List, Optional, TextIO, Type
+from typing import IO, Any, ClassVar, Iterator, List, Optional, TextIO, Type
 
 from pip._vendor.rich.console import (
     Console,
     ConsoleOptions,
     ConsoleRenderable,
-    RenderableType,
     RenderResult,
-    RichCast,
 )
 from pip._vendor.rich.highlighter import NullHighlighter
 from pip._vendor.rich.logging import RichHandler
 from pip._vendor.rich.segment import Segment
 from pip._vendor.rich.style import Style
 
+from pip._internal.exceptions import DiagnosticPipError
 from pip._internal.utils._log import VERBOSE, getLogger
 from pip._internal.utils.compat import WINDOWS
 from pip._internal.utils.deprecation import DEPRECATION_MSG_PREFIX
@@ -52,7 +50,7 @@ def _is_broken_pipe_error(exc_class: Type[BaseException], exc: BaseException) ->
 
 
 @contextlib.contextmanager
-def indent_log(num: int = 2) -> Generator[None, None, None]:
+def indent_log(num: int = 2) -> Iterator[None]:
     """
     A context manager which will cause the log output to be indented for any
     log messages emitted inside it.
@@ -123,7 +121,7 @@ def format(self, record: logging.LogRecord) -> str:
 
 @dataclass
 class IndentedRenderable:
-    renderable: RenderableType
+    renderable: ConsoleRenderable
     indent: int
 
     def __rich_console__(
@@ -154,15 +152,12 @@ def emit(self, record: logging.LogRecord) -> None:
         style: Optional[Style] = None
 
         # If we are given a diagnostic error to present, present it with indentation.
-        assert isinstance(record.args, tuple)
-        if record.msg == "[present-rich] %s" and len(record.args) == 1:
-            rich_renderable = record.args[0]
-            assert isinstance(
-                rich_renderable, (ConsoleRenderable, RichCast, str)
-            ), f"{rich_renderable} is not rich-console-renderable"
-
-            renderable: RenderableType = IndentedRenderable(
-                rich_renderable, indent=get_indentation()
+        if record.msg == "[present-diagnostic] %s" and len(record.args) == 1:
+            diagnostic_error: DiagnosticPipError = record.args[0]  # type: ignore[index]
+            assert isinstance(diagnostic_error, DiagnosticPipError)
+
+            renderable: ConsoleRenderable = IndentedRenderable(
+                diagnostic_error, indent=get_indentation()
             )
         else:
             message = self.format(record)
@@ -198,7 +193,7 @@ def handleError(self, record: logging.LogRecord) -> None:
 
 
 class BetterRotatingFileHandler(logging.handlers.RotatingFileHandler):
-    def _open(self) -> TextIOWrapper:
+    def _open(self) -> IO[Any]:
         ensure_dir(os.path.dirname(self.baseFilename))
         return super()._open()
 
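The "[present-diagnostic] %s" string acts as a sentinel: the handler inspects record.msg and, on a match, renders record.args[0] directly instead of calling format(). A stripped-down sketch of that dispatch pattern (the class and names here are illustrative, not pip's):

import logging

MARKER = "[present-diagnostic] %s"

class MarkerHandler(logging.Handler):
    def emit(self, record: logging.LogRecord) -> None:
        if record.msg == MARKER and record.args and len(record.args) == 1:
            print("diagnostic:", record.args[0])  # special-cased rendering
        else:
            print(self.format(record))

logger = logging.getLogger("demo")
logger.addHandler(MarkerHandler())
logger.error(MARKER, "something went wrong")  # diagnostic: something went wrong
logger.error("plain message")                 # plain message
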
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/utils/misc.py b/venv/lib/python3.10/site-packages/pip/_internal/utils/misc.py
index a8f4cb5..0bf9e99 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/utils/misc.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/utils/misc.py
@@ -21,8 +21,6 @@
     BinaryIO,
     Callable,
     ContextManager,
-    Dict,
-    Generator,
     Iterable,
     Iterator,
     List,
@@ -34,7 +32,6 @@
     cast,
 )
 
-from pip._vendor.pep517 import Pep517HookCaller
 from pip._vendor.tenacity import retry, stop_after_delay, wait_fixed
 
 from pip import __version__
@@ -57,7 +54,6 @@
     "captured_stdout",
     "ensure_dir",
     "remove_auth_from_url",
-    "ConfiguredPep517HookCaller",
 ]
 
 
@@ -268,9 +264,7 @@ def is_installable_dir(path: str) -> bool:
     return False
 
 
-def read_chunks(
-    file: BinaryIO, size: int = io.DEFAULT_BUFFER_SIZE
-) -> Generator[bytes, None, None]:
+def read_chunks(file: BinaryIO, size: int = io.DEFAULT_BUFFER_SIZE) -> Iterator[bytes]:
     """Yield pieces of data from a file-like object until EOF."""
     while True:
         chunk = file.read(size)
@@ -320,16 +314,40 @@ def renames(old: str, new: str) -> None:
 
 def is_local(path: str) -> bool:
     """
-    Return True if path is within sys.prefix, if we're running in a virtualenv.
+    Return True if this is a path pip is allowed to modify.
 
-    If we're not in a virtualenv, all paths are considered "local."
+    If we're in a virtualenv, sys.prefix points to the virtualenv's
+    prefix; only sys.prefix is considered local.
+
+    If we're not in a virtualenv, in general we can modify anything.
+    However, if the OS vendor has configured distutils to install
+    somewhere other than sys.prefix (which could be a subdirectory of
+    sys.prefix, e.g. /usr/local), we consider sys.prefix itself nonlocal
+    and the domain of the OS vendor. (In other words, everything _other
+    than_ sys.prefix is considered local.)
 
     Caution: this function assumes the head of path has been normalized
     with normalize_path.
     """
-    if not running_under_virtualenv():
-        return True
-    return path.startswith(normalize_path(sys.prefix))
+
+    path = normalize_path(path)
+    # Hard-coded because PyPy uses a different sys.prefix on Debian
+    prefix = '/usr'
+
+    if running_under_virtualenv():
+        return path.startswith(normalize_path(sys.prefix))
+    else:
+        from pip._internal.locations import get_scheme
+        from pip._internal.models.scheme import SCHEME_KEYS
+        if path.startswith(prefix):
+            scheme = get_scheme("")
+            for key in SCHEME_KEYS:
+                local_path = getattr(scheme, key)
+                if path.startswith(normalize_path(local_path)):
+                    return True
+            return False
+        else:
+            return True
 
 
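This is the Debian/PyPy downstream variant of is_local(): under /usr it defers to the configured distutils install scheme, and everything outside /usr is treated as local. The core test is a normalized-prefix comparison, roughly:

import os

def normalize_path(path: str) -> str:
    return os.path.normcase(os.path.realpath(path))

def under_prefix(path: str, prefix: str) -> bool:
    """The same startswith test the patched is_local() relies on."""
    return normalize_path(path).startswith(normalize_path(prefix))

print(under_prefix("/usr/local/lib/python3.10/dist-packages", "/usr"))  # True
print(under_prefix("/opt/app", "/usr"))                                 # False
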
 def write_output(msg: Any, *args: Any) -> None:
@@ -352,7 +370,7 @@ def encoding(self):  # type: ignore
 
 
 @contextlib.contextmanager
-def captured_output(stream_name: str) -> Generator[StreamWrapper, None, None]:
+def captured_output(stream_name: str) -> Iterator[StreamWrapper]:
     """Return a context manager used by captured_stdout/stdin/stderr
     that temporarily replaces the sys stream *stream_name* with a StringIO.
 
@@ -562,9 +580,9 @@ def protect_pip_from_modification_on_windows(modifying_pip: bool) -> None:
         python -m pip ...
     """
     pip_names = [
-        "pip",
-        f"pip{sys.version_info.major}",
-        f"pip{sys.version_info.major}.{sys.version_info.minor}",
+        "pip.exe",
+        "pip{}.exe".format(sys.version_info[0]),
+        "pip{}.{}.exe".format(*sys.version_info[:2]),
     ]
 
     # See https://github.com/pypa/pip/issues/1299 for more discussion
@@ -633,91 +651,3 @@ def partition(
     """
     t1, t2 = tee(iterable)
     return filterfalse(pred, t1), filter(pred, t2)
-
-
-class ConfiguredPep517HookCaller(Pep517HookCaller):
-    def __init__(
-        self,
-        config_holder: Any,
-        source_dir: str,
-        build_backend: str,
-        backend_path: Optional[str] = None,
-        runner: Optional[Callable[..., None]] = None,
-        python_executable: Optional[str] = None,
-    ):
-        super().__init__(
-            source_dir, build_backend, backend_path, runner, python_executable
-        )
-        self.config_holder = config_holder
-
-    def build_wheel(
-        self,
-        wheel_directory: str,
-        config_settings: Optional[Dict[str, str]] = None,
-        metadata_directory: Optional[str] = None,
-    ) -> str:
-        cs = self.config_holder.config_settings
-        return super().build_wheel(
-            wheel_directory, config_settings=cs, metadata_directory=metadata_directory
-        )
-
-    def build_sdist(
-        self, sdist_directory: str, config_settings: Optional[Dict[str, str]] = None
-    ) -> str:
-        cs = self.config_holder.config_settings
-        return super().build_sdist(sdist_directory, config_settings=cs)
-
-    def build_editable(
-        self,
-        wheel_directory: str,
-        config_settings: Optional[Dict[str, str]] = None,
-        metadata_directory: Optional[str] = None,
-    ) -> str:
-        cs = self.config_holder.config_settings
-        return super().build_editable(
-            wheel_directory, config_settings=cs, metadata_directory=metadata_directory
-        )
-
-    def get_requires_for_build_wheel(
-        self, config_settings: Optional[Dict[str, str]] = None
-    ) -> List[str]:
-        cs = self.config_holder.config_settings
-        return super().get_requires_for_build_wheel(config_settings=cs)
-
-    def get_requires_for_build_sdist(
-        self, config_settings: Optional[Dict[str, str]] = None
-    ) -> List[str]:
-        cs = self.config_holder.config_settings
-        return super().get_requires_for_build_sdist(config_settings=cs)
-
-    def get_requires_for_build_editable(
-        self, config_settings: Optional[Dict[str, str]] = None
-    ) -> List[str]:
-        cs = self.config_holder.config_settings
-        return super().get_requires_for_build_editable(config_settings=cs)
-
-    def prepare_metadata_for_build_wheel(
-        self,
-        metadata_directory: str,
-        config_settings: Optional[Dict[str, str]] = None,
-        _allow_fallback: bool = True,
-    ) -> str:
-        cs = self.config_holder.config_settings
-        return super().prepare_metadata_for_build_wheel(
-            metadata_directory=metadata_directory,
-            config_settings=cs,
-            _allow_fallback=_allow_fallback,
-        )
-
-    def prepare_metadata_for_build_editable(
-        self,
-        metadata_directory: str,
-        config_settings: Optional[Dict[str, str]] = None,
-        _allow_fallback: bool = True,
-    ) -> str:
-        cs = self.config_holder.config_settings
-        return super().prepare_metadata_for_build_editable(
-            metadata_directory=metadata_directory,
-            config_settings=cs,
-            _allow_fallback=_allow_fallback,
-        )
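
The deleted ConfiguredPep517HookCaller follows a single pattern throughout: override each hook, substitute the stored config_settings, and delegate to super(). A toy illustration of that wrapper shape (Backend is a stand-in here, not pip's class):

from typing import Optional

class Backend:
    def build_wheel(self, out_dir: str, config_settings: Optional[dict] = None) -> str:
        return f"built wheel in {out_dir} with {config_settings}"

class ConfiguredBackend(Backend):
    """Inject a stored config_settings dict into every hook call."""

    def __init__(self, config_settings: dict) -> None:
        self.config_settings = config_settings

    def build_wheel(self, out_dir: str, config_settings: Optional[dict] = None) -> str:
        return super().build_wheel(out_dir, config_settings=self.config_settings)

backend = ConfiguredBackend({"--build-option": "-j4"})
print(backend.build_wheel("dist/"))
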
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/utils/setuptools_build.py b/venv/lib/python3.10/site-packages/pip/_internal/utils/setuptools_build.py
index 01ef4a4..f460c40 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/utils/setuptools_build.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/utils/setuptools_build.py
@@ -48,7 +48,7 @@
 
 def make_setuptools_shim_args(
     setup_py_path: str,
-    global_options: Optional[Sequence[str]] = None,
+    global_options: Sequence[str] = None,
     no_user_config: bool = False,
     unbuffered_output: bool = False,
 ) -> List[str]:
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/utils/subprocess.py b/venv/lib/python3.10/site-packages/pip/_internal/utils/subprocess.py
index cf5bf6b..b5b7624 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/utils/subprocess.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/utils/subprocess.py
@@ -116,7 +116,7 @@ def call_subprocess(
     # replaced by INFO.
     if show_stdout:
         # Then log the subprocess output at INFO level.
-        log_subprocess: Callable[..., None] = subprocess_logger.info
+        log_subprocess = subprocess_logger.info
         used_level = logging.INFO
     else:
         # Then log the subprocess output using VERBOSE.  This also ensures
@@ -209,7 +209,7 @@ def call_subprocess(
                 output_lines=all_output if not showing_subprocess else None,
             )
             if log_failed_cmd:
-                subprocess_logger.error("[present-rich] %s", error)
+                subprocess_logger.error("[present-diagnostic] %s", error)
                 subprocess_logger.verbose(
                     "[bold magenta]full command[/]: [blue]%s[/]",
                     escape(format_command_args(cmd)),
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/utils/temp_dir.py b/venv/lib/python3.10/site-packages/pip/_internal/utils/temp_dir.py
index 8ee8a1c..442679a 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/utils/temp_dir.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/utils/temp_dir.py
@@ -4,7 +4,7 @@
 import os.path
 import tempfile
 from contextlib import ExitStack, contextmanager
-from typing import Any, Dict, Generator, Optional, TypeVar, Union
+from typing import Any, Dict, Iterator, Optional, TypeVar, Union
 
 from pip._internal.utils.misc import enum, rmtree
 
@@ -26,7 +26,7 @@
 
 
 @contextmanager
-def global_tempdir_manager() -> Generator[None, None, None]:
+def global_tempdir_manager() -> Iterator[None]:
     global _tempdir_manager
     with ExitStack() as stack:
         old_tempdir_manager, _tempdir_manager = _tempdir_manager, stack
@@ -59,7 +59,7 @@ def get_delete(self, kind: str) -> bool:
 
 
 @contextmanager
-def tempdir_registry() -> Generator[TempDirectoryTypeRegistry, None, None]:
+def tempdir_registry() -> Iterator[TempDirectoryTypeRegistry]:
     """Provides a scoped global tempdir registry that can be used to dictate
     whether directories should be deleted.
     """
@@ -200,7 +200,7 @@ def __init__(self, original: str, delete: Optional[bool] = None) -> None:
         super().__init__(delete=delete)
 
     @classmethod
-    def _generate_names(cls, name: str) -> Generator[str, None, None]:
+    def _generate_names(cls, name: str) -> Iterator[str]:
         """Generates a series of temporary names.
 
         The algorithm replaces the leading characters in the name
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/utils/unpacking.py b/venv/lib/python3.10/site-packages/pip/_internal/utils/unpacking.py
index 78b5c13..5f63f97 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/utils/unpacking.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/utils/unpacking.py
@@ -188,7 +188,8 @@ def untar_file(filename: str, location: str) -> None:
                 ensure_dir(path)
             elif member.issym():
                 try:
-                    tar._extract_member(member, path)
+                    # https://github.com/python/typeshed/issues/2673
+                    tar._extract_member(member, path)  # type: ignore
                 except Exception as exc:
                     # Some corrupt tar files seem to produce this
                     # (specifically bad symlinks)
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/vcs/bazaar.py b/venv/lib/python3.10/site-packages/pip/_internal/vcs/bazaar.py
index 06c80e4..a7b16e2 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/vcs/bazaar.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/vcs/bazaar.py
@@ -49,25 +49,14 @@ def fetch_new(
             flag = ""
         else:
             flag = f"-{'v'*verbosity}"
-        cmd_args = make_command(
-            "checkout", "--lightweight", flag, rev_options.to_args(), url, dest
-        )
+        cmd_args = make_command("branch", flag, rev_options.to_args(), url, dest)
         self.run_command(cmd_args)
 
     def switch(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None:
         self.run_command(make_command("switch", url), cwd=dest)
 
     def update(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None:
-        output = self.run_command(
-            make_command("info"), show_stdout=False, stdout_only=True, cwd=dest
-        )
-        if output.startswith("Standalone "):
-            # Older versions of pip used to create standalone branches.
-            # Convert the standalone branch to a checkout by calling "bzr bind".
-            cmd_args = make_command("bind", "-q", url)
-            self.run_command(cmd_args, cwd=dest)
-
-        cmd_args = make_command("update", "-q", rev_options.to_args())
+        cmd_args = make_command("pull", "-q", rev_options.to_args())
         self.run_command(cmd_args, cwd=dest)
 
     @classmethod
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/vcs/subversion.py b/venv/lib/python3.10/site-packages/pip/_internal/vcs/subversion.py
index 2cd6f0a..89c8754 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/vcs/subversion.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/vcs/subversion.py
@@ -184,7 +184,7 @@ def is_commit_id_equal(cls, dest: str, name: Optional[str]) -> bool:
         """Always assume the versions don't match"""
         return False
 
-    def __init__(self, use_interactive: Optional[bool] = None) -> None:
+    def __init__(self, use_interactive: bool = None) -> None:
         if use_interactive is None:
             use_interactive = is_console_interactive()
         self.use_interactive = use_interactive
diff --git a/venv/lib/python3.10/site-packages/pip/_internal/wheel_builder.py b/venv/lib/python3.10/site-packages/pip/_internal/wheel_builder.py
index 15b30af..d066344 100644
--- a/venv/lib/python3.10/site-packages/pip/_internal/wheel_builder.py
+++ b/venv/lib/python3.10/site-packages/pip/_internal/wheel_builder.py
@@ -5,7 +5,7 @@
 import os.path
 import re
 import shutil
-from typing import Callable, Iterable, List, Optional, Tuple
+from typing import Any, Callable, Iterable, List, Optional, Tuple
 
 from pip._vendor.packaging.utils import canonicalize_name, canonicalize_version
 from pip._vendor.packaging.version import InvalidVersion, Version
@@ -19,10 +19,6 @@
 from pip._internal.operations.build.wheel_editable import build_wheel_editable
 from pip._internal.operations.build.wheel_legacy import build_wheel_legacy
 from pip._internal.req.req_install import InstallRequirement
-from pip._internal.utils.deprecation import (
-    LegacyInstallReasonMissingWheelPackage,
-    LegacyInstallReasonNoBinaryForcesSetuptoolsInstall,
-)
 from pip._internal.utils.logging import indent_log
 from pip._internal.utils.misc import ensure_dir, hash_file, is_wheel_installed
 from pip._internal.utils.setuptools_build import make_setuptools_clean_args
@@ -35,7 +31,7 @@
 
 _egg_info_re = re.compile(r"([a-z0-9_.]+)-([a-z0-9_.!+-]+)", re.IGNORECASE)
 
-BdistWheelAllowedPredicate = Callable[[InstallRequirement], bool]
+BinaryAllowedPredicate = Callable[[InstallRequirement], bool]
 BuildResult = Tuple[List[InstallRequirement], List[InstallRequirement]]
 
 
@@ -50,7 +46,7 @@ def _contains_egg_info(s: str) -> bool:
 def _should_build(
     req: InstallRequirement,
     need_wheel: bool,
-    check_bdist_wheel: Optional[BdistWheelAllowedPredicate] = None,
+    check_binary_allowed: BinaryAllowedPredicate,
 ) -> bool:
     """Return whether an InstallRequirement should be built into a wheel."""
     if req.constraint:
@@ -81,19 +77,20 @@ def _should_build(
     if req.use_pep517:
         return True
 
-    assert check_bdist_wheel is not None
-    if not check_bdist_wheel(req):
-        # /!\ When we change this to unconditionally return True, we must also remove
-        # support for `--install-option`. Indeed, `--install-option` implies
-        # `--no-binary` so we can return False here and run `setup.py install`.
-        # `--global-option` and `--build-option` can remain until we drop support for
-        # building with `setup.py bdist_wheel`.
-        req.legacy_install_reason = LegacyInstallReasonNoBinaryForcesSetuptoolsInstall
+    if not check_binary_allowed(req):
+        logger.info(
+            "Skipping wheel build for %s, due to binaries being disabled for it.",
+            req.name,
+        )
         return False
 
     if not is_wheel_installed():
         # we don't build legacy requirements if wheel is not installed
-        req.legacy_install_reason = LegacyInstallReasonMissingWheelPackage
+        logger.info(
+            "Using legacy 'setup.py install' for %s, "
+            "since package 'wheel' is not installed.",
+            req.name,
+        )
         return False
 
     return True
@@ -102,15 +99,15 @@ def _should_build(
 def should_build_for_wheel_command(
     req: InstallRequirement,
 ) -> bool:
-    return _should_build(req, need_wheel=True)
+    return _should_build(req, need_wheel=True, check_binary_allowed=_always_true)
 
 
 def should_build_for_install_command(
     req: InstallRequirement,
-    check_bdist_wheel_allowed: BdistWheelAllowedPredicate,
+    check_binary_allowed: BinaryAllowedPredicate,
 ) -> bool:
     return _should_build(
-        req, need_wheel=False, check_bdist_wheel=check_bdist_wheel_allowed
+        req, need_wheel=False, check_binary_allowed=check_binary_allowed
     )
 
 
@@ -162,6 +159,10 @@ def _get_cache_dir(
     return cache_dir
 
 
+def _always_true(_: Any) -> bool:
+    return True
+
+
 def _verify_one(req: InstallRequirement, wheel_path: str) -> None:
     canonical_name = canonicalize_name(req.name or "")
     w = Wheel(os.path.basename(wheel_path))
@@ -353,12 +354,6 @@ def build(
                 req.editable and req.permit_editable_wheels,
             )
             if wheel_file:
-                # Record the download origin in the cache
-                if req.download_info is not None:
-                    # download_info is guaranteed to be set because when we build an
-                    # InstallRequirement it has been through the preparer before, but
-                    # let's be cautious.
-                    wheel_cache.record_download_origin(cache_dir, req.download_info)
                 # Update the link for this.
                 req.link = Link(path_to_url(wheel_file))
                 req.local_file_path = req.link.file_path
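
check_binary_allowed reinstates an explicit predicate argument; callers that always permit binaries pass the trivial _always_true. In miniature, with a bare string standing in for the requirement object:

from typing import Any, Callable

BinaryAllowedPredicate = Callable[[Any], bool]

def _always_true(_: Any) -> bool:
    return True

def should_build(req: Any, check_binary_allowed: BinaryAllowedPredicate) -> bool:
    if not check_binary_allowed(req):
        print(f"Skipping wheel build for {req}, due to binaries being disabled for it.")
        return False
    return True

print(should_build("demo-pkg", _always_true))                   # True
print(should_build("demo-pkg", lambda req: req != "demo-pkg"))  # False, after logging
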
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/__init__.py b/venv/lib/python3.10/site-packages/pip/_vendor/__init__.py
index b22f7ab..3843cb0 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/__init__.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/__init__.py
@@ -63,6 +63,7 @@ def vendored(modulename):
     vendored("colorama")
     vendored("distlib")
     vendored("distro")
+    vendored("html5lib")
     vendored("six")
     vendored("six.moves")
     vendored("six.moves.urllib")
@@ -105,16 +106,6 @@ def vendored(modulename):
     vendored("requests.packages.urllib3.util.timeout")
     vendored("requests.packages.urllib3.util.url")
     vendored("resolvelib")
-    vendored("rich")
-    vendored("rich.console")
-    vendored("rich.highlighter")
-    vendored("rich.logging")
-    vendored("rich.markup")
-    vendored("rich.progress")
-    vendored("rich.segment")
-    vendored("rich.style")
-    vendored("rich.text")
-    vendored("rich.traceback")
     vendored("tenacity")
     vendored("tomli")
     vendored("urllib3")
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/cachecontrol/__init__.py b/venv/lib/python3.10/site-packages/pip/_vendor/cachecontrol/__init__.py
index f631ae6..8435d62 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/cachecontrol/__init__.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/cachecontrol/__init__.py
@@ -8,7 +8,7 @@
 """
 __author__ = "Eric Larson"
 __email__ = "eric@ionrock.org"
-__version__ = "0.12.11"
+__version__ = "0.12.10"
 
 from .wrapper import CacheControl
 from .adapter import CacheControlAdapter
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/cachecontrol/cache.py b/venv/lib/python3.10/site-packages/pip/_vendor/cachecontrol/cache.py
index 2a965f5..44e4309 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/cachecontrol/cache.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/cachecontrol/cache.py
@@ -41,25 +41,3 @@ def delete(self, key):
         with self.lock:
             if key in self.data:
                 self.data.pop(key)
-
-
-class SeparateBodyBaseCache(BaseCache):
-    """
-    In this variant, the body is not stored mixed in with the metadata, but is
-    passed in (as a bytes-like object) in a separate call to ``set_body()``.
-
-    That is, the expected interaction pattern is::
-
-        cache.set(key, serialized_metadata)
-        cache.set_body(key, body)
-
-    Similarly, the body should be loaded separately via ``get_body()``.
-    """
-    def set_body(self, key, body):
-        raise NotImplementedError()
-
-    def get_body(self, key):
-        """
-        Return the body as file-like object.
-        """
-        raise NotImplementedError()
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/cachecontrol/caches/__init__.py b/venv/lib/python3.10/site-packages/pip/_vendor/cachecontrol/caches/__init__.py
index 3782729..44becd6 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/cachecontrol/caches/__init__.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/cachecontrol/caches/__init__.py
@@ -2,8 +2,5 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 
-from .file_cache import FileCache, SeparateBodyFileCache
-from .redis_cache import RedisCache
-
-
-__all__ = ["FileCache", "SeparateBodyFileCache", "RedisCache"]
+from .file_cache import FileCache  # noqa
+from .redis_cache import RedisCache  # noqa
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py b/venv/lib/python3.10/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py
index f1ddb2e..6cd1106 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py
@@ -6,7 +6,7 @@
 import os
 from textwrap import dedent
 
-from ..cache import BaseCache, SeparateBodyBaseCache
+from ..cache import BaseCache
 from ..controller import CacheController
 
 try:
@@ -57,8 +57,7 @@ def _secure_open_write(filename, fmode):
         raise
 
 
-class _FileCacheMixin:
-    """Shared implementation for both FileCache variants."""
+class FileCache(BaseCache):
 
     def __init__(
         self,
@@ -121,25 +120,20 @@ def get(self, key):
 
     def set(self, key, value, expires=None):
         name = self._fn(key)
-        self._write(name, value)
 
-    def _write(self, path, data: bytes):
-        """
-        Safely write the data to the given path.
-        """
         # Make sure the directory exists
         try:
-            os.makedirs(os.path.dirname(path), self.dirmode)
+            os.makedirs(os.path.dirname(name), self.dirmode)
         except (IOError, OSError):
             pass
 
-        with self.lock_class(path) as lock:
+        with self.lock_class(name) as lock:
             # Write our actual file
             with _secure_open_write(lock.path, self.filemode) as fh:
-                fh.write(data)
+                fh.write(value)
 
-    def _delete(self, key, suffix):
-        name = self._fn(key) + suffix
+    def delete(self, key):
+        name = self._fn(key)
         if not self.forever:
             try:
                 os.remove(name)
@@ -147,38 +141,6 @@ def _delete(self, key, suffix):
                 pass
 
 
-class FileCache(_FileCacheMixin, BaseCache):
-    """
-    Traditional FileCache: body is stored in memory, so not suitable for large
-    downloads.
-    """
-
-    def delete(self, key):
-        self._delete(key, "")
-
-
-class SeparateBodyFileCache(_FileCacheMixin, SeparateBodyBaseCache):
-    """
-    Memory-efficient FileCache: body is stored in a separate file, reducing
-    peak memory usage.
-    """
-
-    def get_body(self, key):
-        name = self._fn(key) + ".body"
-        try:
-            return open(name, "rb")
-        except FileNotFoundError:
-            return None
-
-    def set_body(self, key, body):
-        name = self._fn(key) + ".body"
-        self._write(name, body)
-
-    def delete(self, key):
-        self._delete(key, "")
-        self._delete(key, ".body")
-
-
 def url_to_file_path(url, filecache):
     """Return the file cache path based on the URL.
 
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/cachecontrol/caches/redis_cache.py b/venv/lib/python3.10/site-packages/pip/_vendor/cachecontrol/caches/redis_cache.py
index 2cba4b0..720b507 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/cachecontrol/caches/redis_cache.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/cachecontrol/caches/redis_cache.py
@@ -19,11 +19,9 @@ def get(self, key):
     def set(self, key, value, expires=None):
         if not expires:
             self.conn.set(key, value)
-        elif isinstance(expires, datetime):
+        else:
             expires = expires - datetime.utcnow()
             self.conn.setex(key, int(expires.total_seconds()), value)
-        else:
-            self.conn.setex(key, expires, value)
 
     def delete(self, key):
         self.conn.delete(key)
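
The narrowed set() assumes expires is always a datetime and converts it to a TTL in seconds for setex. The arithmetic in isolation:

from datetime import datetime, timedelta

expires = datetime.utcnow() + timedelta(hours=1)
delta = expires - datetime.utcnow()
print(int(delta.total_seconds()))  # 3599: the second utcnow() lands microseconds later
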
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/cachecontrol/controller.py b/venv/lib/python3.10/site-packages/pip/_vendor/cachecontrol/controller.py
index 7f23529..d7e7380 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/cachecontrol/controller.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/cachecontrol/controller.py
@@ -13,7 +13,7 @@
 
 from pip._vendor.requests.structures import CaseInsensitiveDict
 
-from .cache import DictCache, SeparateBodyBaseCache
+from .cache import DictCache
 from .serialize import Serializer
 
 
@@ -27,14 +27,15 @@
 def parse_uri(uri):
     """Parses a URI using the regex given in Appendix B of RFC 3986.
 
-    (scheme, authority, path, query, fragment) = parse_uri(uri)
+        (scheme, authority, path, query, fragment) = parse_uri(uri)
     """
     groups = URI.match(uri).groups()
     return (groups[1], groups[3], groups[4], groups[6], groups[8])
 
 
 class CacheController(object):
-    """An interface to see if request should cached or not."""
+    """An interface to see if request should cached or not.
+    """
 
     def __init__(
         self, cache=None, cache_etags=True, serializer=None, status_codes=None
@@ -146,13 +147,8 @@ def cached_request(self, request):
             logger.debug("No cache entry available")
             return False
 
-        if isinstance(self.cache, SeparateBodyBaseCache):
-            body_file = self.cache.get_body(cache_url)
-        else:
-            body_file = None
-
         # Check whether it can be deserialized
-        resp = self.serializer.loads(request, cache_data, body_file)
+        resp = self.serializer.loads(request, cache_data)
         if not resp:
             logger.warning("Cache entry deserialization failed, entry ignored")
             return False
@@ -255,26 +251,6 @@ def conditional_headers(self, request):
 
         return new_headers
 
-    def _cache_set(self, cache_url, request, response, body=None, expires_time=None):
-        """
-        Store the data in the cache.
-        """
-        if isinstance(self.cache, SeparateBodyBaseCache):
-            # We pass in the body separately; just put a placeholder empty
-            # string in the metadata.
-            self.cache.set(
-                cache_url,
-                self.serializer.dumps(request, response, b""),
-                expires=expires_time,
-            )
-            self.cache.set_body(cache_url, body)
-        else:
-            self.cache.set(
-                cache_url,
-                self.serializer.dumps(request, response, body),
-                expires=expires_time,
-            )
-
     def cache_response(self, request, response, body=None, status_codes=None):
         """
         Algorithm for caching requests.
@@ -350,13 +326,17 @@ def cache_response(self, request, response, body=None, status_codes=None):
 
             logger.debug("etag object cached for {0} seconds".format(expires_time))
             logger.debug("Caching due to etag")
-            self._cache_set(cache_url, request, response, body, expires_time)
+            self.cache.set(
+                cache_url,
+                self.serializer.dumps(request, response, body),
+                expires=expires_time,
+            )
 
         # Add to the cache any permanent redirects. We do this before looking
         # that the Date headers.
         elif int(response.status) in PERMANENT_REDIRECT_STATUSES:
             logger.debug("Caching permanent redirect")
-            self._cache_set(cache_url, request, response, b"")
+            self.cache.set(cache_url, self.serializer.dumps(request, response, b""))
 
         # Add to the cache if the response headers demand it. If there
         # is no date header then we can't do anything about expiring
@@ -367,12 +347,10 @@ def cache_response(self, request, response, body=None, status_codes=None):
             if "max-age" in cc and cc["max-age"] > 0:
                 logger.debug("Caching b/c date exists and max-age > 0")
                 expires_time = cc["max-age"]
-                self._cache_set(
+                self.cache.set(
                     cache_url,
-                    request,
-                    response,
-                    body,
-                    expires_time,
+                    self.serializer.dumps(request, response, body),
+                    expires=expires_time,
                 )
 
             # If the request can expire, it means we should cache it
@@ -390,12 +368,10 @@ def cache_response(self, request, response, body=None, status_codes=None):
                             expires_time
                         )
                     )
-                    self._cache_set(
+                    self.cache.set(
                         cache_url,
-                        request,
-                        response,
-                        body,
-                        expires_time,
+                        self.serializer.dumps(request, response, body=body),
+                        expires=expires_time,
                     )
 
     def update_cached_response(self, request, response):
@@ -434,6 +410,6 @@ def update_cached_response(self, request, response):
         cached_response.status = 200
 
         # update our cache
-        self._cache_set(cache_url, request, cached_response)
+        self.cache.set(cache_url, self.serializer.dumps(request, cached_response))
 
         return cached_response
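
parse_uri() applies the regex from Appendix B of RFC 3986, which its docstring cites. Reconstructed as a runnable snippet (the URI constant below is that Appendix B pattern):

import re

URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?")

def parse_uri(uri):
    groups = URI.match(uri).groups()
    return (groups[1], groups[3], groups[4], groups[6], groups[8])

print(parse_uri("https://example.com/a/b?x=1#frag"))
# ('https', 'example.com', '/a/b', 'x=1', 'frag')
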
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/cachecontrol/serialize.py b/venv/lib/python3.10/site-packages/pip/_vendor/cachecontrol/serialize.py
index 7fe1a3e..b075df1 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/cachecontrol/serialize.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/cachecontrol/serialize.py
@@ -44,7 +44,7 @@ def dumps(self, request, response, body=None):
         #       enough to have msgpack know the difference.
         data = {
             u"response": {
-                u"body": body,  # Empty bytestring if body is stored separately
+                u"body": body,
                 u"headers": dict(
                     (text_type(k), text_type(v)) for k, v in response.headers.items()
                 ),
@@ -69,7 +69,7 @@ def dumps(self, request, response, body=None):
 
         return b",".join([b"cc=4", msgpack.dumps(data, use_bin_type=True)])
 
-    def loads(self, request, data, body_file=None):
+    def loads(self, request, data):
         # Short circuit if we've been given an empty set of data
         if not data:
             return
@@ -92,14 +92,14 @@ def loads(self, request, data, body_file=None):
 
         # Dispatch to the actual load method for the given version
         try:
-            return getattr(self, "_loads_v{}".format(ver))(request, data, body_file)
+            return getattr(self, "_loads_v{}".format(ver))(request, data)
 
         except AttributeError:
             # This is a version we don't have a loads function for, so we'll
             # just treat it as a miss and return None
             return
 
-    def prepare_response(self, request, cached, body_file=None):
+    def prepare_response(self, request, cached):
         """Verify our vary headers match and construct a real urllib3
         HTTPResponse object.
         """
@@ -125,10 +125,7 @@ def prepare_response(self, request, cached, body_file=None):
         cached["response"]["headers"] = headers
 
         try:
-            if body_file is None:
-                body = io.BytesIO(body_raw)
-            else:
-                body = body_file
+            body = io.BytesIO(body_raw)
         except TypeError:
             # This can happen if cachecontrol serialized to v1 format (pickle)
             # using Python 2. A Python 2 str(byte string) will be unpickled as
@@ -140,22 +137,21 @@ def prepare_response(self, request, cached, body_file=None):
 
         return HTTPResponse(body=body, preload_content=False, **cached["response"])
 
-    def _loads_v0(self, request, data, body_file=None):
+    def _loads_v0(self, request, data):
         # The original legacy cache data. This doesn't contain enough
         # information to construct everything we need, so we'll treat this as
         # a miss.
         return
 
-    def _loads_v1(self, request, data, body_file=None):
+    def _loads_v1(self, request, data):
         try:
             cached = pickle.loads(data)
         except ValueError:
             return
 
-        return self.prepare_response(request, cached, body_file)
+        return self.prepare_response(request, cached)
 
-    def _loads_v2(self, request, data, body_file=None):
-        assert body_file is None
+    def _loads_v2(self, request, data):
         try:
             cached = json.loads(zlib.decompress(data).decode("utf8"))
         except (ValueError, zlib.error):
@@ -173,18 +169,18 @@ def _loads_v2(self, request, data, body_file=None):
             for k, v in cached["vary"].items()
         )
 
-        return self.prepare_response(request, cached, body_file)
+        return self.prepare_response(request, cached)
 
-    def _loads_v3(self, request, data, body_file):
+    def _loads_v3(self, request, data):
         # Due to Python 2 encoding issues, it's impossible to know for sure
         # exactly how to load v3 entries, thus we'll treat these as a miss so
         # that they get rewritten out as v4 entries.
         return
 
-    def _loads_v4(self, request, data, body_file=None):
+    def _loads_v4(self, request, data):
         try:
             cached = msgpack.loads(data, raw=False)
         except ValueError:
             return
 
-        return self.prepare_response(request, cached, body_file)
+        return self.prepare_response(request, cached)
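
loads() dispatches on the serialized format version via getattr, treating unknown versions as a cache miss. The idiom on its own:

class Serializer:
    def loads(self, ver: int, data: bytes):
        try:
            return getattr(self, "_loads_v{}".format(ver))(data)
        except AttributeError:
            return None  # no loader for this version: treat as a miss

    def _loads_v4(self, data: bytes) -> bytes:
        return data

s = Serializer()
print(s.loads(4, b"payload"))  # b'payload'
print(s.loads(9, b"payload"))  # None
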
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/certifi/__init__.py b/venv/lib/python3.10/site-packages/pip/_vendor/certifi/__init__.py
index af4bcc1..8db1a0e 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/certifi/__init__.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/certifi/__init__.py
@@ -1,4 +1,3 @@
 from .core import contents, where
 
-__all__ = ["contents", "where"]
-__version__ = "2022.09.24"
+__version__ = "2021.10.08"
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/certifi/cacert.pem b/venv/lib/python3.10/site-packages/pip/_vendor/certifi/cacert.pem
index 4005155..6d0ccc0 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/certifi/cacert.pem
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/certifi/cacert.pem
@@ -28,6 +28,36 @@ DKqC5JlR3XC321Y9YeRq4VzW9v493kHMB65jUr9TU/Qr6cf9tveCX4XSQRjbgbME
 HMUfpIBvFSDJ3gyICh3WZlXi/EjJKSZp4A==
 -----END CERTIFICATE-----
 
+# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R2
+# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R2
+# Label: "GlobalSign Root CA - R2"
+# Serial: 4835703278459682885658125
+# MD5 Fingerprint: 94:14:77:7e:3e:5e:fd:8f:30:bd:41:b0:cf:e7:d0:30
+# SHA1 Fingerprint: 75:e0:ab:b6:13:85:12:27:1c:04:f8:5f:dd:de:38:e4:b7:24:2e:fe
+# SHA256 Fingerprint: ca:42:dd:41:74:5f:d0:b8:1e:b9:02:36:2c:f9:d8:bf:71:9d:a1:bd:1b:1e:fc:94:6f:5b:4c:99:f4:2c:1b:9e
+-----BEGIN CERTIFICATE-----
+MIIDujCCAqKgAwIBAgILBAAAAAABD4Ym5g0wDQYJKoZIhvcNAQEFBQAwTDEgMB4G
+A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjIxEzARBgNVBAoTCkdsb2JhbFNp
+Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDYxMjE1MDgwMDAwWhcNMjExMjE1
+MDgwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMjETMBEG
+A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBAKbPJA6+Lm8omUVCxKs+IVSbC9N/hHD6ErPL
+v4dfxn+G07IwXNb9rfF73OX4YJYJkhD10FPe+3t+c4isUoh7SqbKSaZeqKeMWhG8
+eoLrvozps6yWJQeXSpkqBy+0Hne/ig+1AnwblrjFuTosvNYSuetZfeLQBoZfXklq
+tTleiDTsvHgMCJiEbKjNS7SgfQx5TfC4LcshytVsW33hoCmEofnTlEnLJGKRILzd
+C9XZzPnqJworc5HGnRusyMvo4KD0L5CLTfuwNhv2GXqF4G3yYROIXJ/gkwpRl4pa
+zq+r1feqCapgvdzZX99yqWATXgAByUr6P6TqBwMhAo6CygPCm48CAwEAAaOBnDCB
+mTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUm+IH
+V2ccHsBqBt5ZtJot39wZhi4wNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL2NybC5n
+bG9iYWxzaWduLm5ldC9yb290LXIyLmNybDAfBgNVHSMEGDAWgBSb4gdXZxwewGoG
+3lm0mi3f3BmGLjANBgkqhkiG9w0BAQUFAAOCAQEAmYFThxxol4aR7OBKuEQLq4Gs
+J0/WwbgcQ3izDJr86iw8bmEbTUsp9Z8FHSbBuOmDAGJFtqkIk7mpM0sYmsL4h4hO
+291xNBrBVNpGP+DTKqttVCL1OmLNIG+6KYnX3ZHu01yiPqFbQfXf5WRDLenVOavS
+ot+3i9DAgBkcRcAtjOj4LaR0VknFBbVPFd5uRHg5h6h+u/N5GJG79G+dwfCMNYxd
+AfvDbbnvRG15RjF+Cv6pgsH/76tuIMRQyV+dTZsXjAzlAcmgQWpzU/qlULRuJQ/7
+TBj0/VLZjmmx6BEP3ojY+x1J96relc8geMJgEtslQIxq/H5COEBkEveegeGTLg==
+-----END CERTIFICATE-----
+
 # Issuer: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited
 # Subject: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited
 # Label: "Entrust.net Premium 2048 Secure Server CA"
@@ -461,6 +491,34 @@ vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep
 +OkuE6N36B9K
 -----END CERTIFICATE-----
 
+# Issuer: CN=DST Root CA X3 O=Digital Signature Trust Co.
+# Subject: CN=DST Root CA X3 O=Digital Signature Trust Co.
+# Label: "DST Root CA X3"
+# Serial: 91299735575339953335919266965803778155
+# MD5 Fingerprint: 41:03:52:dc:0f:f7:50:1b:16:f0:02:8e:ba:6f:45:c5
+# SHA1 Fingerprint: da:c9:02:4f:54:d8:f6:df:94:93:5f:b1:73:26:38:ca:6a:d7:7c:13
+# SHA256 Fingerprint: 06:87:26:03:31:a7:24:03:d9:09:f1:05:e6:9b:cf:0d:32:e1:bd:24:93:ff:c6:d9:20:6d:11:bc:d6:77:07:39
+-----BEGIN CERTIFICATE-----
+MIIDSjCCAjKgAwIBAgIQRK+wgNajJ7qJMDmGLvhAazANBgkqhkiG9w0BAQUFADA/
+MSQwIgYDVQQKExtEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdCBDby4xFzAVBgNVBAMT
+DkRTVCBSb290IENBIFgzMB4XDTAwMDkzMDIxMTIxOVoXDTIxMDkzMDE0MDExNVow
+PzEkMCIGA1UEChMbRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3QgQ28uMRcwFQYDVQQD
+Ew5EU1QgUm9vdCBDQSBYMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
+AN+v6ZdQCINXtMxiZfaQguzH0yxrMMpb7NnDfcdAwRgUi+DoM3ZJKuM/IUmTrE4O
+rz5Iy2Xu/NMhD2XSKtkyj4zl93ewEnu1lcCJo6m67XMuegwGMoOifooUMM0RoOEq
+OLl5CjH9UL2AZd+3UWODyOKIYepLYYHsUmu5ouJLGiifSKOeDNoJjj4XLh7dIN9b
+xiqKqy69cK3FCxolkHRyxXtqqzTWMIn/5WgTe1QLyNau7Fqckh49ZLOMxt+/yUFw
+7BZy1SbsOFU5Q9D8/RhcQPGX69Wam40dutolucbY38EVAjqr2m7xPi71XAicPNaD
+aeQQmxkqtilX4+U9m5/wAl0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNV
+HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFMSnsaR7LHH62+FLkHX/xBVghYkQMA0GCSqG
+SIb3DQEBBQUAA4IBAQCjGiybFwBcqR7uKGY3Or+Dxz9LwwmglSBd49lZRNI+DT69
+ikugdB/OEIKcdBodfpga3csTS7MgROSR6cz8faXbauX+5v3gTt23ADq1cEmv8uXr
+AvHRAosZy5Q6XkjEGB5YGV8eAlrwDPGxrancWYaLbumR9YbK+rlmM6pZW87ipxZz
+R8srzJmwN0jP41ZL9c8PDHIyh8bwRLtTcm1D9SZImlJnt1ir/md2cXjbDaJWFBM5
+JDGFoqgCWjBH4d1QB7wCCZAA62RjYJsWvIjJEubSfZGL+T0yjWW06XyxV3bqxbYo
+Ob8VZRzI9neWagqNdwvYkQsEjgfbKbYK7p2CNTUQ
+-----END CERTIFICATE-----
+
 # Issuer: CN=SwissSign Gold CA - G2 O=SwissSign AG
 # Subject: CN=SwissSign Gold CA - G2 O=SwissSign AG
 # Label: "SwissSign Gold CA - G2"
@@ -721,6 +779,36 @@ t0QmwCbAr1UwnjvVNioZBPRcHv/PLLf/0P2HQBHVESO7SMAhqaQoLf0V+LBOK/Qw
 WyH8EZE0vkHve52Xdf+XlcCWWC/qu0bXu+TZLg==
 -----END CERTIFICATE-----
 
+# Issuer: CN=Cybertrust Global Root O=Cybertrust, Inc
+# Subject: CN=Cybertrust Global Root O=Cybertrust, Inc
+# Label: "Cybertrust Global Root"
+# Serial: 4835703278459682877484360
+# MD5 Fingerprint: 72:e4:4a:87:e3:69:40:80:77:ea:bc:e3:f4:ff:f0:e1
+# SHA1 Fingerprint: 5f:43:e5:b1:bf:f8:78:8c:ac:1c:c7:ca:4a:9a:c6:22:2b:cc:34:c6
+# SHA256 Fingerprint: 96:0a:df:00:63:e9:63:56:75:0c:29:65:dd:0a:08:67:da:0b:9c:bd:6e:77:71:4a:ea:fb:23:49:ab:39:3d:a3
+-----BEGIN CERTIFICATE-----
+MIIDoTCCAomgAwIBAgILBAAAAAABD4WqLUgwDQYJKoZIhvcNAQEFBQAwOzEYMBYG
+A1UEChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2Jh
+bCBSb290MB4XDTA2MTIxNTA4MDAwMFoXDTIxMTIxNTA4MDAwMFowOzEYMBYGA1UE
+ChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2JhbCBS
+b290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA+Mi8vRRQZhP/8NN5
+7CPytxrHjoXxEnOmGaoQ25yiZXRadz5RfVb23CO21O1fWLE3TdVJDm71aofW0ozS
+J8bi/zafmGWgE07GKmSb1ZASzxQG9Dvj1Ci+6A74q05IlG2OlTEQXO2iLb3VOm2y
+HLtgwEZLAfVJrn5GitB0jaEMAs7u/OePuGtm839EAL9mJRQr3RAwHQeWP032a7iP
+t3sMpTjr3kfb1V05/Iin89cqdPHoWqI7n1C6poxFNcJQZZXcY4Lv3b93TZxiyWNz
+FtApD0mpSPCzqrdsxacwOUBdrsTiXSZT8M4cIwhhqJQZugRiQOwfOHB3EgZxpzAY
+XSUnpQIDAQABo4GlMIGiMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/
+MB0GA1UdDgQWBBS2CHsNesysIEyGVjJez6tuhS1wVzA/BgNVHR8EODA2MDSgMqAw
+hi5odHRwOi8vd3d3Mi5wdWJsaWMtdHJ1c3QuY29tL2NybC9jdC9jdHJvb3QuY3Js
+MB8GA1UdIwQYMBaAFLYIew16zKwgTIZWMl7Pq26FLXBXMA0GCSqGSIb3DQEBBQUA
+A4IBAQBW7wojoFROlZfJ+InaRcHUowAl9B8Tq7ejhVhpwjCt2BWKLePJzYFa+HMj
+Wqd8BfP9IjsO0QbE2zZMcwSO5bAi5MXzLqXZI+O4Tkogp24CJJ8iYGd7ix1yCcUx
+XOl5n4BHPa2hCwcUPUf/A2kaDAtE52Mlp3+yybh2hO0j9n0Hq0V+09+zv+mKts2o
+omcrUtW3ZfA5TGOgkXmTUg9U3YO7n9GPp1Nzw8v/MOx8BLjYRB+TX3EJIrduPuoc
+A06dGiBh+4E37F78CkWr1+cXVdCg6mCbpvbjjFspwgZgFJ0tl0ypkxWdYcQBX0jW
+WL1WMRJOEcgh4LMRkWXbtKaIOM5V
+-----END CERTIFICATE-----
+
 # Issuer: O=Chunghwa Telecom Co., Ltd. OU=ePKI Root Certification Authority
 # Subject: O=Chunghwa Telecom Co., Ltd. OU=ePKI Root Certification Authority
 # Label: "ePKI Root Certification Authority"
@@ -1323,6 +1411,78 @@ t/2jioSgrGK+KwmHNPBqAbubKVY8/gA3zyNs8U6qtnRGEmyR7jTV7JqR50S+kDFy
 SjnRBUkLp7Y3gaVdjKozXoEofKd9J+sAro03
 -----END CERTIFICATE-----
 
+# Issuer: CN=EC-ACC O=Agencia Catalana de Certificacio (NIF Q-0801176-I) OU=Serveis Publics de Certificacio/Vegeu https://www.catcert.net/verarrel (c)03/Jerarquia Entitats de Certificacio Catalanes
+# Subject: CN=EC-ACC O=Agencia Catalana de Certificacio (NIF Q-0801176-I) OU=Serveis Publics de Certificacio/Vegeu https://www.catcert.net/verarrel (c)03/Jerarquia Entitats de Certificacio Catalanes
+# Label: "EC-ACC"
+# Serial: -23701579247955709139626555126524820479
+# MD5 Fingerprint: eb:f5:9d:29:0d:61:f9:42:1f:7c:c2:ba:6d:e3:15:09
+# SHA1 Fingerprint: 28:90:3a:63:5b:52:80:fa:e6:77:4c:0b:6d:a7:d6:ba:a6:4a:f2:e8
+# SHA256 Fingerprint: 88:49:7f:01:60:2f:31:54:24:6a:e2:8c:4d:5a:ef:10:f1:d8:7e:bb:76:62:6f:4a:e0:b7:f9:5b:a7:96:87:99
+-----BEGIN CERTIFICATE-----
+MIIFVjCCBD6gAwIBAgIQ7is969Qh3hSoYqwE893EATANBgkqhkiG9w0BAQUFADCB
+8zELMAkGA1UEBhMCRVMxOzA5BgNVBAoTMkFnZW5jaWEgQ2F0YWxhbmEgZGUgQ2Vy
+dGlmaWNhY2lvIChOSUYgUS0wODAxMTc2LUkpMSgwJgYDVQQLEx9TZXJ2ZWlzIFB1
+YmxpY3MgZGUgQ2VydGlmaWNhY2lvMTUwMwYDVQQLEyxWZWdldSBodHRwczovL3d3
+dy5jYXRjZXJ0Lm5ldC92ZXJhcnJlbCAoYykwMzE1MDMGA1UECxMsSmVyYXJxdWlh
+IEVudGl0YXRzIGRlIENlcnRpZmljYWNpbyBDYXRhbGFuZXMxDzANBgNVBAMTBkVD
+LUFDQzAeFw0wMzAxMDcyMzAwMDBaFw0zMTAxMDcyMjU5NTlaMIHzMQswCQYDVQQG
+EwJFUzE7MDkGA1UEChMyQWdlbmNpYSBDYXRhbGFuYSBkZSBDZXJ0aWZpY2FjaW8g
+KE5JRiBRLTA4MDExNzYtSSkxKDAmBgNVBAsTH1NlcnZlaXMgUHVibGljcyBkZSBD
+ZXJ0aWZpY2FjaW8xNTAzBgNVBAsTLFZlZ2V1IGh0dHBzOi8vd3d3LmNhdGNlcnQu
+bmV0L3ZlcmFycmVsIChjKTAzMTUwMwYDVQQLEyxKZXJhcnF1aWEgRW50aXRhdHMg
+ZGUgQ2VydGlmaWNhY2lvIENhdGFsYW5lczEPMA0GA1UEAxMGRUMtQUNDMIIBIjAN
+BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsyLHT+KXQpWIR4NA9h0X84NzJB5R
+85iKw5K4/0CQBXCHYMkAqbWUZRkiFRfCQ2xmRJoNBD45b6VLeqpjt4pEndljkYRm
+4CgPukLjbo73FCeTae6RDqNfDrHrZqJyTxIThmV6PttPB/SnCWDaOkKZx7J/sxaV
+HMf5NLWUhdWZXqBIoH7nF2W4onW4HvPlQn2v7fOKSGRdghST2MDk/7NQcvJ29rNd
+QlB50JQ+awwAvthrDk4q7D7SzIKiGGUzE3eeml0aE9jD2z3Il3rucO2n5nzbcc8t
+lGLfbdb1OL4/pYUKGbio2Al1QnDE6u/LDsg0qBIimAy4E5S2S+zw0JDnJwIDAQAB
+o4HjMIHgMB0GA1UdEQQWMBSBEmVjX2FjY0BjYXRjZXJ0Lm5ldDAPBgNVHRMBAf8E
+BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUoMOLRKo3pUW/l4Ba0fF4
+opvpXY0wfwYDVR0gBHgwdjB0BgsrBgEEAfV4AQMBCjBlMCwGCCsGAQUFBwIBFiBo
+dHRwczovL3d3dy5jYXRjZXJ0Lm5ldC92ZXJhcnJlbDA1BggrBgEFBQcCAjApGidW
+ZWdldSBodHRwczovL3d3dy5jYXRjZXJ0Lm5ldC92ZXJhcnJlbCAwDQYJKoZIhvcN
+AQEFBQADggEBAKBIW4IB9k1IuDlVNZyAelOZ1Vr/sXE7zDkJlF7W2u++AVtd0x7Y
+/X1PzaBB4DSTv8vihpw3kpBWHNzrKQXlxJ7HNd+KDM3FIUPpqojlNcAZQmNaAl6k
+SBg6hW/cnbw/nZzBh7h6YQjpdwt/cKt63dmXLGQehb+8dJahw3oS7AwaboMMPOhy
+Rp/7SNVel+axofjk70YllJyJ22k4vuxcDlbHZVHlUIiIv0LVKz3l+bqeLrPK9HOS
+Agu+TGbrIP65y7WZf+a2E/rKS03Z7lNGBjvGTq2TWoF+bCpLagVFjPIhpDGQh2xl
+nJ2lYJU6Un/10asIbvPuW/mIPX64b24D5EI=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Hellenic Academic and Research Institutions RootCA 2011 O=Hellenic Academic and Research Institutions Cert. Authority
+# Subject: CN=Hellenic Academic and Research Institutions RootCA 2011 O=Hellenic Academic and Research Institutions Cert. Authority
+# Label: "Hellenic Academic and Research Institutions RootCA 2011"
+# Serial: 0
+# MD5 Fingerprint: 73:9f:4c:4b:73:5b:79:e9:fa:ba:1c:ef:6e:cb:d5:c9
+# SHA1 Fingerprint: fe:45:65:9b:79:03:5b:98:a1:61:b5:51:2e:ac:da:58:09:48:22:4d
+# SHA256 Fingerprint: bc:10:4f:15:a4:8b:e7:09:dc:a5:42:a7:e1:d4:b9:df:6f:05:45:27:e8:02:ea:a9:2d:59:54:44:25:8a:fe:71
+-----BEGIN CERTIFICATE-----
+MIIEMTCCAxmgAwIBAgIBADANBgkqhkiG9w0BAQUFADCBlTELMAkGA1UEBhMCR1Ix
+RDBCBgNVBAoTO0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1
+dGlvbnMgQ2VydC4gQXV0aG9yaXR5MUAwPgYDVQQDEzdIZWxsZW5pYyBBY2FkZW1p
+YyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25zIFJvb3RDQSAyMDExMB4XDTExMTIw
+NjEzNDk1MloXDTMxMTIwMTEzNDk1MlowgZUxCzAJBgNVBAYTAkdSMUQwQgYDVQQK
+EztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25zIENl
+cnQuIEF1dGhvcml0eTFAMD4GA1UEAxM3SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl
+c2VhcmNoIEluc3RpdHV0aW9ucyBSb290Q0EgMjAxMTCCASIwDQYJKoZIhvcNAQEB
+BQADggEPADCCAQoCggEBAKlTAOMupvaO+mDYLZU++CwqVE7NuYRhlFhPjz2L5EPz
+dYmNUeTDN9KKiE15HrcS3UN4SoqS5tdI1Q+kOilENbgH9mgdVc04UfCMJDGFr4PJ
+fel3r+0ae50X+bOdOFAPplp5kYCvN66m0zH7tSYJnTxa71HFK9+WXesyHgLacEns
+bgzImjeN9/E2YEsmLIKe0HjzDQ9jpFEw4fkrJxIH2Oq9GGKYsFk3fb7u8yBRQlqD
+75O6aRXxYp2fmTmCobd0LovUxQt7L/DICto9eQqakxylKHJzkUOap9FNhYS5qXSP
+FEDH3N6sQWRstBmbAmNtJGSPRLIl6s5ddAxjMlyNh+UCAwEAAaOBiTCBhjAPBgNV
+HRMBAf8EBTADAQH/MAsGA1UdDwQEAwIBBjAdBgNVHQ4EFgQUppFC/RNhSiOeCKQp
+5dgTBCPuQSUwRwYDVR0eBEAwPqA8MAWCAy5ncjAFggMuZXUwBoIELmVkdTAGggQu
+b3JnMAWBAy5ncjAFgQMuZXUwBoEELmVkdTAGgQQub3JnMA0GCSqGSIb3DQEBBQUA
+A4IBAQAf73lB4XtuP7KMhjdCSk4cNx6NZrokgclPEg8hwAOXhiVtXdMiKahsog2p
+6z0GW5k6x8zDmjR/qw7IThzh+uTczQ2+vyT+bOdrwg3IBp5OjWEopmr95fZi6hg8
+TqBTnbI6nOulnJEWtk2C4AwFSKls9cz4y51JtPACpf1wA+2KIaWuE4ZJwzNzvoc7
+dIsXRSZMFpGD/md9zU1jZ/rzAxKWeAaNsWftjj++n08C9bMJL/NMh98qy5V8Acys
+Nnq/onN694/BtZqhFLKPM58N7yLcZnuEvUUXBj08yrl3NI/K6s8/MT7jiOOASSXI
+l7WdmplNsDz4SgCbZN2fOUvRJ9e4
+-----END CERTIFICATE-----
+
 # Issuer: CN=Actalis Authentication Root CA O=Actalis S.p.A./03358520967
 # Subject: CN=Actalis Authentication Root CA O=Actalis S.p.A./03358520967
 # Label: "Actalis Authentication Root CA"
@@ -2182,6 +2342,27 @@ zzuqQhFkoJ2UOQIReVx7Hfpkue4WQrO/isIJxOzksU0CMQDpKmFHjFJKS04YcPbW
 RNZu9YO6bVi9JNlWSOrvxKJGgYhqOkbRqZtNyWHa0V1Xahg=
 -----END CERTIFICATE-----
 
+# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R4
+# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R4
+# Label: "GlobalSign ECC Root CA - R4"
+# Serial: 14367148294922964480859022125800977897474
+# MD5 Fingerprint: 20:f0:27:68:d1:7e:a0:9d:0e:e6:2a:ca:df:5c:89:8e
+# SHA1 Fingerprint: 69:69:56:2e:40:80:f4:24:a1:e7:19:9f:14:ba:f3:ee:58:ab:6a:bb
+# SHA256 Fingerprint: be:c9:49:11:c2:95:56:76:db:6c:0a:55:09:86:d7:6e:3b:a0:05:66:7c:44:2c:97:62:b4:fb:b7:73:de:22:8c
+-----BEGIN CERTIFICATE-----
+MIIB4TCCAYegAwIBAgIRKjikHJYKBN5CsiilC+g0mAIwCgYIKoZIzj0EAwIwUDEk
+MCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI0MRMwEQYDVQQKEwpH
+bG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoX
+DTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBD
+QSAtIFI0MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWdu
+MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuMZ5049sJQ6fLjkZHAOkrprlOQcJ
+FspjsbmG+IpXwVfOQvpzofdlQv8ewQCybnMO/8ch5RikqtlxP6jUuc6MHaNCMEAw
+DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFFSwe61F
+uOJAf/sKbvu+M8k8o4TVMAoGCCqGSM49BAMCA0gAMEUCIQDckqGgE6bPA7DmxCGX
+kPoUVy0D7O48027KqGx2vKLeuwIgJ6iFJzWbVsaj8kfSt24bAgAXqmemFZHe+pTs
+ewv4n4Q=
+-----END CERTIFICATE-----
+
 # Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R5
 # Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R5
 # Label: "GlobalSign ECC Root CA - R5"
@@ -3156,6 +3337,126 @@ rYy0UGYwEAYJKwYBBAGCNxUBBAMCAQAwCgYIKoZIzj0EAwMDaAAwZQIwJsdpW9zV
 Mgj/mkkCtojeFK9dbJlxjRo/i9fgojaGHAeCOnZT/cKi7e97sIBPWA9LUzm9
 -----END CERTIFICATE-----
 
+# Issuer: CN=GTS Root R1 O=Google Trust Services LLC
+# Subject: CN=GTS Root R1 O=Google Trust Services LLC
+# Label: "GTS Root R1"
+# Serial: 146587175971765017618439757810265552097
+# MD5 Fingerprint: 82:1a:ef:d4:d2:4a:f2:9f:e2:3d:97:06:14:70:72:85
+# SHA1 Fingerprint: e1:c9:50:e6:ef:22:f8:4c:56:45:72:8b:92:20:60:d7:d5:a7:a3:e8
+# SHA256 Fingerprint: 2a:57:54:71:e3:13:40:bc:21:58:1c:bd:2c:f1:3e:15:84:63:20:3e:ce:94:bc:f9:d3:cc:19:6b:f0:9a:54:72
+-----BEGIN CERTIFICATE-----
+MIIFWjCCA0KgAwIBAgIQbkepxUtHDA3sM9CJuRz04TANBgkqhkiG9w0BAQwFADBH
+MQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExM
+QzEUMBIGA1UEAxMLR1RTIFJvb3QgUjEwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIy
+MDAwMDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNl
+cnZpY2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjEwggIiMA0GCSqGSIb3DQEB
+AQUAA4ICDwAwggIKAoICAQC2EQKLHuOhd5s73L+UPreVp0A8of2C+X0yBoJx9vaM
+f/vo27xqLpeXo4xL+Sv2sfnOhB2x+cWX3u+58qPpvBKJXqeqUqv4IyfLpLGcY9vX
+mX7wCl7raKb0xlpHDU0QM+NOsROjyBhsS+z8CZDfnWQpJSMHobTSPS5g4M/SCYe7
+zUjwTcLCeoiKu7rPWRnWr4+wB7CeMfGCwcDfLqZtbBkOtdh+JhpFAz2weaSUKK0P
+fyblqAj+lug8aJRT7oM6iCsVlgmy4HqMLnXWnOunVmSPlk9orj2XwoSPwLxAwAtc
+vfaHszVsrBhQf4TgTM2S0yDpM7xSma8ytSmzJSq0SPly4cpk9+aCEI3oncKKiPo4
+Zor8Y/kB+Xj9e1x3+naH+uzfsQ55lVe0vSbv1gHR6xYKu44LtcXFilWr06zqkUsp
+zBmkMiVOKvFlRNACzqrOSbTqn3yDsEB750Orp2yjj32JgfpMpf/VjsPOS+C12LOO
+Rc92wO1AK/1TD7Cn1TsNsYqiA94xrcx36m97PtbfkSIS5r762DL8EGMUUXLeXdYW
+k70paDPvOmbsB4om3xPXV2V4J95eSRQAogB/mqghtqmxlbCluQ0WEdrHbEg8QOB+
+DVrNVjzRlwW5y0vtOUucxD/SVRNuJLDWcfr0wbrM7Rv1/oFB2ACYPTrIrnqYNxgF
+lQIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV
+HQ4EFgQU5K8rJnEaK0gnhS9SZizv8IkTcT4wDQYJKoZIhvcNAQEMBQADggIBADiW
+Cu49tJYeX++dnAsznyvgyv3SjgofQXSlfKqE1OXyHuY3UjKcC9FhHb8owbZEKTV1
+d5iyfNm9dKyKaOOpMQkpAWBz40d8U6iQSifvS9efk+eCNs6aaAyC58/UEBZvXw6Z
+XPYfcX3v73svfuo21pdwCxXu11xWajOl40k4DLh9+42FpLFZXvRq4d2h9mREruZR
+gyFmxhE+885H7pwoHyXa/6xmld01D1zvICxi/ZG6qcz8WpyTgYMpl0p8WnK0OdC3
+d8t5/Wk6kjftbjhlRn7pYL15iJdfOBL07q9bgsiG1eGZbYwE8na6SfZu6W0eX6Dv
+J4J2QPim01hcDyxC2kLGe4g0x8HYRZvBPsVhHdljUEn2NIVq4BjFbkerQUIpm/Zg
+DdIx02OYI5NaAIFItO/Nis3Jz5nu2Z6qNuFoS3FJFDYoOj0dzpqPJeaAcWErtXvM
++SUWgeExX6GjfhaknBZqlxi9dnKlC54dNuYvoS++cJEPqOba+MSSQGwlfnuzCdyy
+F62ARPBopY+Udf90WuioAnwMCeKpSwughQtiue+hMZL77/ZRBIls6Kl0obsXs7X9
+SQ98POyDGCBDTtWTurQ0sR8WNh8M5mQ5Fkzc4P4dyKliPUDqysU0ArSuiYgzNdws
+E3PYJ/HQcu51OyLemGhmW/HGY0dVHLqlCFF1pkgl
+-----END CERTIFICATE-----
+
+# Issuer: CN=GTS Root R2 O=Google Trust Services LLC
+# Subject: CN=GTS Root R2 O=Google Trust Services LLC
+# Label: "GTS Root R2"
+# Serial: 146587176055767053814479386953112547951
+# MD5 Fingerprint: 44:ed:9a:0e:a4:09:3b:00:f2:ae:4c:a3:c6:61:b0:8b
+# SHA1 Fingerprint: d2:73:96:2a:2a:5e:39:9f:73:3f:e1:c7:1e:64:3f:03:38:34:fc:4d
+# SHA256 Fingerprint: c4:5d:7b:b0:8e:6d:67:e6:2e:42:35:11:0b:56:4e:5f:78:fd:92:ef:05:8c:84:0a:ea:4e:64:55:d7:58:5c:60
+-----BEGIN CERTIFICATE-----
+MIIFWjCCA0KgAwIBAgIQbkepxlqz5yDFMJo/aFLybzANBgkqhkiG9w0BAQwFADBH
+MQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExM
+QzEUMBIGA1UEAxMLR1RTIFJvb3QgUjIwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIy
+MDAwMDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNl
+cnZpY2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjIwggIiMA0GCSqGSIb3DQEB
+AQUAA4ICDwAwggIKAoICAQDO3v2m++zsFDQ8BwZabFn3GTXd98GdVarTzTukk3Lv
+CvptnfbwhYBboUhSnznFt+4orO/LdmgUud+tAWyZH8QiHZ/+cnfgLFuv5AS/T3Kg
+GjSY6Dlo7JUle3ah5mm5hRm9iYz+re026nO8/4Piy33B0s5Ks40FnotJk9/BW9Bu
+XvAuMC6C/Pq8tBcKSOWIm8Wba96wyrQD8Nr0kLhlZPdcTK3ofmZemde4wj7I0BOd
+re7kRXuJVfeKH2JShBKzwkCX44ofR5GmdFrS+LFjKBC4swm4VndAoiaYecb+3yXu
+PuWgf9RhD1FLPD+M2uFwdNjCaKH5wQzpoeJ/u1U8dgbuak7MkogwTZq9TwtImoS1
+mKPV+3PBV2HdKFZ1E66HjucMUQkQdYhMvI35ezzUIkgfKtzra7tEscszcTJGr61K
+8YzodDqs5xoic4DSMPclQsciOzsSrZYuxsN2B6ogtzVJV+mSSeh2FnIxZyuWfoqj
+x5RWIr9qS34BIbIjMt/kmkRtWVtd9QCgHJvGeJeNkP+byKq0rxFROV7Z+2et1VsR
+nTKaG73VululycslaVNVJ1zgyjbLiGH7HrfQy+4W+9OmTN6SpdTi3/UGVN4unUu0
+kzCqgc7dGtxRcw1PcOnlthYhGXmy5okLdWTK1au8CcEYof/UVKGFPP0UJAOyh9Ok
+twIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV
+HQ4EFgQUu//KjiOfT5nK2+JopqUVJxce2Q4wDQYJKoZIhvcNAQEMBQADggIBALZp
+8KZ3/p7uC4Gt4cCpx/k1HUCCq+YEtN/L9x0Pg/B+E02NjO7jMyLDOfxA325BS0JT
+vhaI8dI4XsRomRyYUpOM52jtG2pzegVATX9lO9ZY8c6DR2Dj/5epnGB3GFW1fgiT
+z9D2PGcDFWEJ+YF59exTpJ/JjwGLc8R3dtyDovUMSRqodt6Sm2T4syzFJ9MHwAiA
+pJiS4wGWAqoC7o87xdFtCjMwc3i5T1QWvwsHoaRc5svJXISPD+AVdyx+Jn7axEvb
+pxZ3B7DNdehyQtaVhJ2Gg/LkkM0JR9SLA3DaWsYDQvTtN6LwG1BUSw7YhN4ZKJmB
+R64JGz9I0cNv4rBgF/XuIwKl2gBbbZCr7qLpGzvpx0QnRY5rn/WkhLx3+WuXrD5R
+RaIRpsyF7gpo8j5QOHokYh4XIDdtak23CZvJ/KRY9bb7nE4Yu5UC56GtmwfuNmsk
+0jmGwZODUNKBRqhfYlcsu2xkiAhu7xNUX90txGdj08+JN7+dIPT7eoOboB6BAFDC
+5AwiWVIQ7UNWhwD4FFKnHYuTjKJNRn8nxnGbJN7k2oaLDX5rIMHAnuFl2GqjpuiF
+izoHCBy69Y9Vmhh1fuXsgWbRIXOhNUQLgD1bnF5vKheW0YMjiGZt5obicDIvUiLn
+yOd/xCxgXS/Dr55FBcOEArf9LAhST4Ldo/DUhgkC
+-----END CERTIFICATE-----
+
+# Issuer: CN=GTS Root R3 O=Google Trust Services LLC
+# Subject: CN=GTS Root R3 O=Google Trust Services LLC
+# Label: "GTS Root R3"
+# Serial: 146587176140553309517047991083707763997
+# MD5 Fingerprint: 1a:79:5b:6b:04:52:9c:5d:c7:74:33:1b:25:9a:f9:25
+# SHA1 Fingerprint: 30:d4:24:6f:07:ff:db:91:89:8a:0b:e9:49:66:11:eb:8c:5e:46:e5
+# SHA256 Fingerprint: 15:d5:b8:77:46:19:ea:7d:54:ce:1c:a6:d0:b0:c4:03:e0:37:a9:17:f1:31:e8:a0:4e:1e:6b:7a:71:ba:bc:e5
+-----BEGIN CERTIFICATE-----
+MIICDDCCAZGgAwIBAgIQbkepx2ypcyRAiQ8DVd2NHTAKBggqhkjOPQQDAzBHMQsw
+CQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEU
+MBIGA1UEAxMLR1RTIFJvb3QgUjMwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAw
+MDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZp
+Y2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjMwdjAQBgcqhkjOPQIBBgUrgQQA
+IgNiAAQfTzOHMymKoYTey8chWEGJ6ladK0uFxh1MJ7x/JlFyb+Kf1qPKzEUURout
+736GjOyxfi//qXGdGIRFBEFVbivqJn+7kAHjSxm65FSWRQmx1WyRRK2EE46ajA2A
+DDL24CejQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1Ud
+DgQWBBTB8Sa6oC2uhYHP0/EqEr24Cmf9vDAKBggqhkjOPQQDAwNpADBmAjEAgFuk
+fCPAlaUs3L6JbyO5o91lAFJekazInXJ0glMLfalAvWhgxeG4VDvBNhcl2MG9AjEA
+njWSdIUlUfUk7GRSJFClH9voy8l27OyCbvWFGFPouOOaKaqW04MjyaR7YbPMAuhd
+-----END CERTIFICATE-----
+
+# Issuer: CN=GTS Root R4 O=Google Trust Services LLC
+# Subject: CN=GTS Root R4 O=Google Trust Services LLC
+# Label: "GTS Root R4"
+# Serial: 146587176229350439916519468929765261721
+# MD5 Fingerprint: 5d:b6:6a:c4:60:17:24:6a:1a:99:a8:4b:ee:5e:b4:26
+# SHA1 Fingerprint: 2a:1d:60:27:d9:4a:b1:0a:1c:4d:91:5c:cd:33:a0:cb:3e:2d:54:cb
+# SHA256 Fingerprint: 71:cc:a5:39:1f:9e:79:4b:04:80:25:30:b3:63:e1:21:da:8a:30:43:bb:26:66:2f:ea:4d:ca:7f:c9:51:a4:bd
+-----BEGIN CERTIFICATE-----
+MIICCjCCAZGgAwIBAgIQbkepyIuUtui7OyrYorLBmTAKBggqhkjOPQQDAzBHMQsw
+CQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEU
+MBIGA1UEAxMLR1RTIFJvb3QgUjQwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAw
+MDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZp
+Y2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjQwdjAQBgcqhkjOPQIBBgUrgQQA
+IgNiAATzdHOnaItgrkO4NcWBMHtLSZ37wWHO5t5GvWvVYRg1rkDdc/eJkTBa6zzu
+hXyiQHY7qca4R9gq55KRanPpsXI5nymfopjTX15YhmUPoYRlBtHci8nHc8iMai/l
+xKvRHYqjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1Ud
+DgQWBBSATNbrdP9JNqPV2Py1PsVq8JQdjDAKBggqhkjOPQQDAwNnADBkAjBqUFJ0
+CMRw3J5QdCHojXohw0+WbhXRIjVhLfoIN+4Zba3bssx9BzT1YBkstTTZbyACMANx
+sbqjYAuG7ZoIapVon+Kz4ZNkfF6Tpt95LY2F45TPI11xzPKwTdb+mciUqXWi4w==
+-----END CERTIFICATE-----
+
 # Issuer: CN=UCA Global G2 Root O=UniTrust
 # Subject: CN=UCA Global G2 Root O=UniTrust
 # Label: "UCA Global G2 Root"
@@ -4059,650 +4360,3 @@ AgGGMAoGCCqGSM49BAMDA2cAMGQCMBHervjcToiwqfAircJRQO9gcS3ujwLEXQNw
 SaSS6sUUiHCm0w2wqsosQJz76YJumgIwK0eaB8bRwoF8yguWGEEbo/QwCZ61IygN
 nxS2PFOiTAZpffpskcYqSUXm7LcT4Tps
 -----END CERTIFICATE-----
-
-# Issuer: CN=Autoridad de Certificacion Firmaprofesional CIF A62634068
-# Subject: CN=Autoridad de Certificacion Firmaprofesional CIF A62634068
-# Label: "Autoridad de Certificacion Firmaprofesional CIF A62634068"
-# Serial: 1977337328857672817
-# MD5 Fingerprint: 4e:6e:9b:54:4c:ca:b7:fa:48:e4:90:b1:15:4b:1c:a3
-# SHA1 Fingerprint: 0b:be:c2:27:22:49:cb:39:aa:db:35:5c:53:e3:8c:ae:78:ff:b6:fe
-# SHA256 Fingerprint: 57:de:05:83:ef:d2:b2:6e:03:61:da:99:da:9d:f4:64:8d:ef:7e:e8:44:1c:3b:72:8a:fa:9b:cd:e0:f9:b2:6a
------BEGIN CERTIFICATE-----
-MIIGFDCCA/ygAwIBAgIIG3Dp0v+ubHEwDQYJKoZIhvcNAQELBQAwUTELMAkGA1UE
-BhMCRVMxQjBABgNVBAMMOUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1h
-cHJvZmVzaW9uYWwgQ0lGIEE2MjYzNDA2ODAeFw0xNDA5MjMxNTIyMDdaFw0zNjA1
-MDUxNTIyMDdaMFExCzAJBgNVBAYTAkVTMUIwQAYDVQQDDDlBdXRvcmlkYWQgZGUg
-Q2VydGlmaWNhY2lvbiBGaXJtYXByb2Zlc2lvbmFsIENJRiBBNjI2MzQwNjgwggIi
-MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKlmuO6vj78aI14H9M2uDDUtd9
-thDIAl6zQyrET2qyyhxdKJp4ERppWVevtSBC5IsP5t9bpgOSL/UR5GLXMnE42QQM
-cas9UX4PB99jBVzpv5RvwSmCwLTaUbDBPLutN0pcyvFLNg4kq7/DhHf9qFD0sefG
-L9ItWY16Ck6WaVICqjaY7Pz6FIMMNx/Jkjd/14Et5cS54D40/mf0PmbR0/RAz15i
-NA9wBj4gGFrO93IbJWyTdBSTo3OxDqqHECNZXyAFGUftaI6SEspd/NYrspI8IM/h
-X68gvqB2f3bl7BqGYTM+53u0P6APjqK5am+5hyZvQWyIplD9amML9ZMWGxmPsu2b
-m8mQ9QEM3xk9Dz44I8kvjwzRAv4bVdZO0I08r0+k8/6vKtMFnXkIoctXMbScyJCy
-Z/QYFpM6/EfY0XiWMR+6KwxfXZmtY4laJCB22N/9q06mIqqdXuYnin1oKaPnirja
-EbsXLZmdEyRG98Xi2J+Of8ePdG1asuhy9azuJBCtLxTa/y2aRnFHvkLfuwHb9H/T
-KI8xWVvTyQKmtFLKbpf7Q8UIJm+K9Lv9nyiqDdVF8xM6HdjAeI9BZzwelGSuewvF
-6NkBiDkal4ZkQdU7hwxu+g/GvUgUvzlN1J5Bto+WHWOWk9mVBngxaJ43BjuAiUVh
-OSPHG0SjFeUc+JIwuwIDAQABo4HvMIHsMB0GA1UdDgQWBBRlzeurNR4APn7VdMAc
-tHNHDhpkLzASBgNVHRMBAf8ECDAGAQH/AgEBMIGmBgNVHSAEgZ4wgZswgZgGBFUd
-IAAwgY8wLwYIKwYBBQUHAgEWI2h0dHA6Ly93d3cuZmlybWFwcm9mZXNpb25hbC5j
-b20vY3BzMFwGCCsGAQUFBwICMFAeTgBQAGEAcwBlAG8AIABkAGUAIABsAGEAIABC
-AG8AbgBhAG4AbwB2AGEAIAA0ADcAIABCAGEAcgBjAGUAbABvAG4AYQAgADAAOAAw
-ADEANzAOBgNVHQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQELBQADggIBAHSHKAIrdx9m
-iWTtj3QuRhy7qPj4Cx2Dtjqn6EWKB7fgPiDL4QjbEwj4KKE1soCzC1HA01aajTNF
-Sa9J8OA9B3pFE1r/yJfY0xgsfZb43aJlQ3CTkBW6kN/oGbDbLIpgD7dvlAceHabJ
-hfa9NPhAeGIQcDq+fUs5gakQ1JZBu/hfHAsdCPKxsIl68veg4MSPi3i1O1ilI45P
-Vf42O+AMt8oqMEEgtIDNrvx2ZnOorm7hfNoD6JQg5iKj0B+QXSBTFCZX2lSX3xZE
-EAEeiGaPcjiT3SC3NL7X8e5jjkd5KAb881lFJWAiMxujX6i6KtoaPc1A6ozuBRWV
-1aUsIC+nmCjuRfzxuIgALI9C2lHVnOUTaHFFQ4ueCyE8S1wF3BqfmI7avSKecs2t
-CsvMo2ebKHTEm9caPARYpoKdrcd7b/+Alun4jWq9GJAd/0kakFI3ky88Al2CdgtR
-5xbHV/g4+afNmyJU72OwFW1TZQNKXkqgsqeOSQBZONXH9IBk9W6VULgRfhVwOEqw
-f9DEMnDAGf/JOC0ULGb0QkTmVXYbgBVX/8Cnp6o5qtjTcNAuuuuUavpfNIbnYrX9
-ivAwhZTJryQCL2/W3Wf+47BVTwSYT6RBVuKT0Gro1vP7ZeDOdcQxWQzugsgMYDNK
-GbqEZycPvEJdvSRUDewdcAZfpLz6IHxV
------END CERTIFICATE-----
-
-# Issuer: CN=vTrus ECC Root CA O=iTrusChina Co.,Ltd.
-# Subject: CN=vTrus ECC Root CA O=iTrusChina Co.,Ltd.
-# Label: "vTrus ECC Root CA"
-# Serial: 630369271402956006249506845124680065938238527194
-# MD5 Fingerprint: de:4b:c1:f5:52:8c:9b:43:e1:3e:8f:55:54:17:8d:85
-# SHA1 Fingerprint: f6:9c:db:b0:fc:f6:02:13:b6:52:32:a6:a3:91:3f:16:70:da:c3:e1
-# SHA256 Fingerprint: 30:fb:ba:2c:32:23:8e:2a:98:54:7a:f9:79:31:e5:50:42:8b:9b:3f:1c:8e:eb:66:33:dc:fa:86:c5:b2:7d:d3
------BEGIN CERTIFICATE-----
-MIICDzCCAZWgAwIBAgIUbmq8WapTvpg5Z6LSa6Q75m0c1towCgYIKoZIzj0EAwMw
-RzELMAkGA1UEBhMCQ04xHDAaBgNVBAoTE2lUcnVzQ2hpbmEgQ28uLEx0ZC4xGjAY
-BgNVBAMTEXZUcnVzIEVDQyBSb290IENBMB4XDTE4MDczMTA3MjY0NFoXDTQzMDcz
-MTA3MjY0NFowRzELMAkGA1UEBhMCQ04xHDAaBgNVBAoTE2lUcnVzQ2hpbmEgQ28u
-LEx0ZC4xGjAYBgNVBAMTEXZUcnVzIEVDQyBSb290IENBMHYwEAYHKoZIzj0CAQYF
-K4EEACIDYgAEZVBKrox5lkqqHAjDo6LN/llWQXf9JpRCux3NCNtzslt188+cToL0
-v/hhJoVs1oVbcnDS/dtitN9Ti72xRFhiQgnH+n9bEOf+QP3A2MMrMudwpremIFUd
-e4BdS49nTPEQo0IwQDAdBgNVHQ4EFgQUmDnNvtiyjPeyq+GtJK97fKHbH88wDwYD
-VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwCgYIKoZIzj0EAwMDaAAwZQIw
-V53dVvHH4+m4SVBrm2nDb+zDfSXkV5UTQJtS0zvzQBm8JsctBp61ezaf9SXUY2sA
-AjEA6dPGnlaaKsyh2j/IZivTWJwghfqrkYpwcBE4YGQLYgmRWAD5Tfs0aNoJrSEG
-GJTO
------END CERTIFICATE-----
-
-# Issuer: CN=vTrus Root CA O=iTrusChina Co.,Ltd.
-# Subject: CN=vTrus Root CA O=iTrusChina Co.,Ltd.
-# Label: "vTrus Root CA"
-# Serial: 387574501246983434957692974888460947164905180485
-# MD5 Fingerprint: b8:c9:37:df:fa:6b:31:84:64:c5:ea:11:6a:1b:75:fc
-# SHA1 Fingerprint: 84:1a:69:fb:f5:cd:1a:25:34:13:3d:e3:f8:fc:b8:99:d0:c9:14:b7
-# SHA256 Fingerprint: 8a:71:de:65:59:33:6f:42:6c:26:e5:38:80:d0:0d:88:a1:8d:a4:c6:a9:1f:0d:cb:61:94:e2:06:c5:c9:63:87
------BEGIN CERTIFICATE-----
-MIIFVjCCAz6gAwIBAgIUQ+NxE9izWRRdt86M/TX9b7wFjUUwDQYJKoZIhvcNAQEL
-BQAwQzELMAkGA1UEBhMCQ04xHDAaBgNVBAoTE2lUcnVzQ2hpbmEgQ28uLEx0ZC4x
-FjAUBgNVBAMTDXZUcnVzIFJvb3QgQ0EwHhcNMTgwNzMxMDcyNDA1WhcNNDMwNzMx
-MDcyNDA1WjBDMQswCQYDVQQGEwJDTjEcMBoGA1UEChMTaVRydXNDaGluYSBDby4s
-THRkLjEWMBQGA1UEAxMNdlRydXMgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEBBQAD
-ggIPADCCAgoCggIBAL1VfGHTuB0EYgWgrmy3cLRB6ksDXhA/kFocizuwZotsSKYc
-IrrVQJLuM7IjWcmOvFjai57QGfIvWcaMY1q6n6MLsLOaXLoRuBLpDLvPbmyAhykU
-AyyNJJrIZIO1aqwTLDPxn9wsYTwaP3BVm60AUn/PBLn+NvqcwBauYv6WTEN+VRS+
-GrPSbcKvdmaVayqwlHeFXgQPYh1jdfdr58tbmnDsPmcF8P4HCIDPKNsFxhQnL4Z9
-8Cfe/+Z+M0jnCx5Y0ScrUw5XSmXX+6KAYPxMvDVTAWqXcoKv8R1w6Jz1717CbMdH
-flqUhSZNO7rrTOiwCcJlwp2dCZtOtZcFrPUGoPc2BX70kLJrxLT5ZOrpGgrIDajt
-J8nU57O5q4IikCc9Kuh8kO+8T/3iCiSn3mUkpF3qwHYw03dQ+A0Em5Q2AXPKBlim
-0zvc+gRGE1WKyURHuFE5Gi7oNOJ5y1lKCn+8pu8fA2dqWSslYpPZUxlmPCdiKYZN
-pGvu/9ROutW04o5IWgAZCfEF2c6Rsffr6TlP9m8EQ5pV9T4FFL2/s1m02I4zhKOQ
-UqqzApVg+QxMaPnu1RcN+HFXtSXkKe5lXa/R7jwXC1pDxaWG6iSe4gUH3DRCEpHW
-OXSuTEGC2/KmSNGzm/MzqvOmwMVO9fSddmPmAsYiS8GVP1BkLFTltvA8Kc9XAgMB
-AAGjQjBAMB0GA1UdDgQWBBRUYnBj8XWEQ1iO0RYgscasGrz2iTAPBgNVHRMBAf8E
-BTADAQH/MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAKbqSSaet
-8PFww+SX8J+pJdVrnjT+5hpk9jprUrIQeBqfTNqK2uwcN1LgQkv7bHbKJAs5EhWd
-nxEt/Hlk3ODg9d3gV8mlsnZwUKT+twpw1aA08XXXTUm6EdGz2OyC/+sOxL9kLX1j
-bhd47F18iMjrjld22VkE+rxSH0Ws8HqA7Oxvdq6R2xCOBNyS36D25q5J08FsEhvM
-Kar5CKXiNxTKsbhm7xqC5PD48acWabfbqWE8n/Uxy+QARsIvdLGx14HuqCaVvIiv
-TDUHKgLKeBRtRytAVunLKmChZwOgzoy8sHJnxDHO2zTlJQNgJXtxmOTAGytfdELS
-S8VZCAeHvsXDf+eW2eHcKJfWjwXj9ZtOyh1QRwVTsMo554WgicEFOwE30z9J4nfr
-I8iIZjs9OXYhRvHsXyO466JmdXTBQPfYaJqT4i2pLr0cox7IdMakLXogqzu4sEb9
-b91fUlV1YvCXoHzXOP0l382gmxDPi7g4Xl7FtKYCNqEeXxzP4padKar9mK5S4fNB
-UvupLnKWnyfjqnN9+BojZns7q2WwMgFLFT49ok8MKzWixtlnEjUwzXYuFrOZnk1P
-Ti07NEPhmg4NpGaXutIcSkwsKouLgU9xGqndXHt7CMUADTdA43x7VF8vhV929ven
-sBxXVsFy6K2ir40zSbofitzmdHxghm+Hl3s=
------END CERTIFICATE-----
-
-# Issuer: CN=ISRG Root X2 O=Internet Security Research Group
-# Subject: CN=ISRG Root X2 O=Internet Security Research Group
-# Label: "ISRG Root X2"
-# Serial: 87493402998870891108772069816698636114
-# MD5 Fingerprint: d3:9e:c4:1e:23:3c:a6:df:cf:a3:7e:6d:e0:14:e6:e5
-# SHA1 Fingerprint: bd:b1:b9:3c:d5:97:8d:45:c6:26:14:55:f8:db:95:c7:5a:d1:53:af
-# SHA256 Fingerprint: 69:72:9b:8e:15:a8:6e:fc:17:7a:57:af:b7:17:1d:fc:64:ad:d2:8c:2f:ca:8c:f1:50:7e:34:45:3c:cb:14:70
------BEGIN CERTIFICATE-----
-MIICGzCCAaGgAwIBAgIQQdKd0XLq7qeAwSxs6S+HUjAKBggqhkjOPQQDAzBPMQsw
-CQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJuZXQgU2VjdXJpdHkgUmVzZWFyY2gg
-R3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBYMjAeFw0yMDA5MDQwMDAwMDBaFw00
-MDA5MTcxNjAwMDBaME8xCzAJBgNVBAYTAlVTMSkwJwYDVQQKEyBJbnRlcm5ldCBT
-ZWN1cml0eSBSZXNlYXJjaCBHcm91cDEVMBMGA1UEAxMMSVNSRyBSb290IFgyMHYw
-EAYHKoZIzj0CAQYFK4EEACIDYgAEzZvVn4CDCuwJSvMWSj5cz3es3mcFDR0HttwW
-+1qLFNvicWDEukWVEYmO6gbf9yoWHKS5xcUy4APgHoIYOIvXRdgKam7mAHf7AlF9
-ItgKbppbd9/w+kHsOdx1ymgHDB/qo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0T
-AQH/BAUwAwEB/zAdBgNVHQ4EFgQUfEKWrt5LSDv6kviejM9ti6lyN5UwCgYIKoZI
-zj0EAwMDaAAwZQIwe3lORlCEwkSHRhtFcP9Ymd70/aTSVaYgLXTWNLxBo1BfASdW
-tL4ndQavEi51mI38AjEAi/V3bNTIZargCyzuFJ0nN6T5U6VR5CmD1/iQMVtCnwr1
-/q4AaOeMSQ+2b1tbFfLn
------END CERTIFICATE-----
-
-# Issuer: CN=HiPKI Root CA - G1 O=Chunghwa Telecom Co., Ltd.
-# Subject: CN=HiPKI Root CA - G1 O=Chunghwa Telecom Co., Ltd.
-# Label: "HiPKI Root CA - G1"
-# Serial: 60966262342023497858655262305426234976
-# MD5 Fingerprint: 69:45:df:16:65:4b:e8:68:9a:8f:76:5f:ff:80:9e:d3
-# SHA1 Fingerprint: 6a:92:e4:a8:ee:1b:ec:96:45:37:e3:29:57:49:cd:96:e3:e5:d2:60
-# SHA256 Fingerprint: f0:15:ce:3c:c2:39:bf:ef:06:4b:e9:f1:d2:c4:17:e1:a0:26:4a:0a:94:be:1f:0c:8d:12:18:64:eb:69:49:cc
------BEGIN CERTIFICATE-----
-MIIFajCCA1KgAwIBAgIQLd2szmKXlKFD6LDNdmpeYDANBgkqhkiG9w0BAQsFADBP
-MQswCQYDVQQGEwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0
-ZC4xGzAZBgNVBAMMEkhpUEtJIFJvb3QgQ0EgLSBHMTAeFw0xOTAyMjIwOTQ2MDRa
-Fw0zNzEyMzExNTU5NTlaME8xCzAJBgNVBAYTAlRXMSMwIQYDVQQKDBpDaHVuZ2h3
-YSBUZWxlY29tIENvLiwgTHRkLjEbMBkGA1UEAwwSSGlQS0kgUm9vdCBDQSAtIEcx
-MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA9B5/UnMyDHPkvRN0o9Qw
-qNCuS9i233VHZvR85zkEHmpwINJaR3JnVfSl6J3VHiGh8Ge6zCFovkRTv4354twv
-Vcg3Px+kwJyz5HdcoEb+d/oaoDjq7Zpy3iu9lFc6uux55199QmQ5eiY29yTw1S+6
-lZgRZq2XNdZ1AYDgr/SEYYwNHl98h5ZeQa/rh+r4XfEuiAU+TCK72h8q3VJGZDnz
-Qs7ZngyzsHeXZJzA9KMuH5UHsBffMNsAGJZMoYFL3QRtU6M9/Aes1MU3guvklQgZ
-KILSQjqj2FPseYlgSGDIcpJQ3AOPgz+yQlda22rpEZfdhSi8MEyr48KxRURHH+CK
-FgeW0iEPU8DtqX7UTuybCeyvQqww1r/REEXgphaypcXTT3OUM3ECoWqj1jOXTyFj
-HluP2cFeRXF3D4FdXyGarYPM+l7WjSNfGz1BryB1ZlpK9p/7qxj3ccC2HTHsOyDr
-y+K49a6SsvfhhEvyovKTmiKe0xRvNlS9H15ZFblzqMF8b3ti6RZsR1pl8w4Rm0bZ
-/W3c1pzAtH2lsN0/Vm+h+fbkEkj9Bn8SV7apI09bA8PgcSojt/ewsTu8mL3WmKgM
-a/aOEmem8rJY5AIJEzypuxC00jBF8ez3ABHfZfjcK0NVvxaXxA/VLGGEqnKG/uY6
-fsI/fe78LxQ+5oXdUG+3Se0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAdBgNV
-HQ4EFgQU8ncX+l6o/vY9cdVouslGDDjYr7AwDgYDVR0PAQH/BAQDAgGGMA0GCSqG
-SIb3DQEBCwUAA4ICAQBQUfB13HAE4/+qddRxosuej6ip0691x1TPOhwEmSKsxBHi
-7zNKpiMdDg1H2DfHb680f0+BazVP6XKlMeJ45/dOlBhbQH3PayFUhuaVevvGyuqc
-SE5XCV0vrPSltJczWNWseanMX/mF+lLFjfiRFOs6DRfQUsJ748JzjkZ4Bjgs6Fza
-ZsT0pPBWGTMpWmWSBUdGSquEwx4noR8RkpkndZMPvDY7l1ePJlsMu5wP1G4wB9Tc
-XzZoZjmDlicmisjEOf6aIW/Vcobpf2Lll07QJNBAsNB1CI69aO4I1258EHBGG3zg
-iLKecoaZAeO/n0kZtCW+VmWuF2PlHt/o/0elv+EmBYTksMCv5wiZqAxeJoBF1Pho
-L5aPruJKHJwWDBNvOIf2u8g0X5IDUXlwpt/L9ZlNec1OvFefQ05rLisY+GpzjLrF
-Ne85akEez3GoorKGB1s6yeHvP2UEgEcyRHCVTjFnanRbEEV16rCf0OY1/k6fi8wr
-kkVbbiVghUbN0aqwdmaTd5a+g744tiROJgvM7XpWGuDpWsZkrUx6AEhEL7lAuxM+
-vhV4nYWBSipX3tUZQ9rbyltHhoMLP7YNdnhzeSJesYAfz77RP1YQmCuVh6EfnWQU
-YDksswBVLuT1sw5XxJFBAJw/6KXf6vb/yPCtbVKoF6ubYfwSUTXkJf2vqmqGOQ==
------END CERTIFICATE-----
-
-# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R4
-# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R4
-# Label: "GlobalSign ECC Root CA - R4"
-# Serial: 159662223612894884239637590694
-# MD5 Fingerprint: 26:29:f8:6d:e1:88:bf:a2:65:7f:aa:c4:cd:0f:7f:fc
-# SHA1 Fingerprint: 6b:a0:b0:98:e1:71:ef:5a:ad:fe:48:15:80:77:10:f4:bd:6f:0b:28
-# SHA256 Fingerprint: b0:85:d7:0b:96:4f:19:1a:73:e4:af:0d:54:ae:7a:0e:07:aa:fd:af:9b:71:dd:08:62:13:8a:b7:32:5a:24:a2
------BEGIN CERTIFICATE-----
-MIIB3DCCAYOgAwIBAgINAgPlfvU/k/2lCSGypjAKBggqhkjOPQQDAjBQMSQwIgYD
-VQQLExtHbG9iYWxTaWduIEVDQyBSb290IENBIC0gUjQxEzARBgNVBAoTCkdsb2Jh
-bFNpZ24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMTIxMTEzMDAwMDAwWhcNMzgw
-MTE5MDMxNDA3WjBQMSQwIgYDVQQLExtHbG9iYWxTaWduIEVDQyBSb290IENBIC0g
-UjQxEzARBgNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkdsb2JhbFNpZ24wWTAT
-BgcqhkjOPQIBBggqhkjOPQMBBwNCAAS4xnnTj2wlDp8uORkcA6SumuU5BwkWymOx
-uYb4ilfBV85C+nOh92VC/x7BALJucw7/xyHlGKSq2XE/qNS5zowdo0IwQDAOBgNV
-HQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUVLB7rUW44kB/
-+wpu+74zyTyjhNUwCgYIKoZIzj0EAwIDRwAwRAIgIk90crlgr/HmnKAWBVBfw147
-bmF0774BxL4YSFlhgjICICadVGNA3jdgUM/I2O2dgq43mLyjj0xMqTQrbO/7lZsm
------END CERTIFICATE-----
-
-# Issuer: CN=GTS Root R1 O=Google Trust Services LLC
-# Subject: CN=GTS Root R1 O=Google Trust Services LLC
-# Label: "GTS Root R1"
-# Serial: 159662320309726417404178440727
-# MD5 Fingerprint: 05:fe:d0:bf:71:a8:a3:76:63:da:01:e0:d8:52:dc:40
-# SHA1 Fingerprint: e5:8c:1c:c4:91:3b:38:63:4b:e9:10:6e:e3:ad:8e:6b:9d:d9:81:4a
-# SHA256 Fingerprint: d9:47:43:2a:bd:e7:b7:fa:90:fc:2e:6b:59:10:1b:12:80:e0:e1:c7:e4:e4:0f:a3:c6:88:7f:ff:57:a7:f4:cf
------BEGIN CERTIFICATE-----
-MIIFVzCCAz+gAwIBAgINAgPlk28xsBNJiGuiFzANBgkqhkiG9w0BAQwFADBHMQsw
-CQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEU
-MBIGA1UEAxMLR1RTIFJvb3QgUjEwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAw
-MDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZp
-Y2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjEwggIiMA0GCSqGSIb3DQEBAQUA
-A4ICDwAwggIKAoICAQC2EQKLHuOhd5s73L+UPreVp0A8of2C+X0yBoJx9vaMf/vo
-27xqLpeXo4xL+Sv2sfnOhB2x+cWX3u+58qPpvBKJXqeqUqv4IyfLpLGcY9vXmX7w
-Cl7raKb0xlpHDU0QM+NOsROjyBhsS+z8CZDfnWQpJSMHobTSPS5g4M/SCYe7zUjw
-TcLCeoiKu7rPWRnWr4+wB7CeMfGCwcDfLqZtbBkOtdh+JhpFAz2weaSUKK0Pfybl
-qAj+lug8aJRT7oM6iCsVlgmy4HqMLnXWnOunVmSPlk9orj2XwoSPwLxAwAtcvfaH
-szVsrBhQf4TgTM2S0yDpM7xSma8ytSmzJSq0SPly4cpk9+aCEI3oncKKiPo4Zor8
-Y/kB+Xj9e1x3+naH+uzfsQ55lVe0vSbv1gHR6xYKu44LtcXFilWr06zqkUspzBmk
-MiVOKvFlRNACzqrOSbTqn3yDsEB750Orp2yjj32JgfpMpf/VjsPOS+C12LOORc92
-wO1AK/1TD7Cn1TsNsYqiA94xrcx36m97PtbfkSIS5r762DL8EGMUUXLeXdYWk70p
-aDPvOmbsB4om3xPXV2V4J95eSRQAogB/mqghtqmxlbCluQ0WEdrHbEg8QOB+DVrN
-VjzRlwW5y0vtOUucxD/SVRNuJLDWcfr0wbrM7Rv1/oFB2ACYPTrIrnqYNxgFlQID
-AQABo0IwQDAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E
-FgQU5K8rJnEaK0gnhS9SZizv8IkTcT4wDQYJKoZIhvcNAQEMBQADggIBAJ+qQibb
-C5u+/x6Wki4+omVKapi6Ist9wTrYggoGxval3sBOh2Z5ofmmWJyq+bXmYOfg6LEe
-QkEzCzc9zolwFcq1JKjPa7XSQCGYzyI0zzvFIoTgxQ6KfF2I5DUkzps+GlQebtuy
-h6f88/qBVRRiClmpIgUxPoLW7ttXNLwzldMXG+gnoot7TiYaelpkttGsN/H9oPM4
-7HLwEXWdyzRSjeZ2axfG34arJ45JK3VmgRAhpuo+9K4l/3wV3s6MJT/KYnAK9y8J
-ZgfIPxz88NtFMN9iiMG1D53Dn0reWVlHxYciNuaCp+0KueIHoI17eko8cdLiA6Ef
-MgfdG+RCzgwARWGAtQsgWSl4vflVy2PFPEz0tv/bal8xa5meLMFrUKTX5hgUvYU/
-Z6tGn6D/Qqc6f1zLXbBwHSs09dR2CQzreExZBfMzQsNhFRAbd03OIozUhfJFfbdT
-6u9AWpQKXCBfTkBdYiJ23//OYb2MI3jSNwLgjt7RETeJ9r/tSQdirpLsQBqvFAnZ
-0E6yove+7u7Y/9waLd64NnHi/Hm3lCXRSHNboTXns5lndcEZOitHTtNCjv0xyBZm
-2tIMPNuzjsmhDYAPexZ3FL//2wmUspO8IFgV6dtxQ/PeEMMA3KgqlbbC1j+Qa3bb
-bP6MvPJwNQzcmRk13NfIRmPVNnGuV/u3gm3c
------END CERTIFICATE-----
-
-# Issuer: CN=GTS Root R2 O=Google Trust Services LLC
-# Subject: CN=GTS Root R2 O=Google Trust Services LLC
-# Label: "GTS Root R2"
-# Serial: 159662449406622349769042896298
-# MD5 Fingerprint: 1e:39:c0:53:e6:1e:29:82:0b:ca:52:55:36:5d:57:dc
-# SHA1 Fingerprint: 9a:44:49:76:32:db:de:fa:d0:bc:fb:5a:7b:17:bd:9e:56:09:24:94
-# SHA256 Fingerprint: 8d:25:cd:97:22:9d:bf:70:35:6b:da:4e:b3:cc:73:40:31:e2:4c:f0:0f:af:cf:d3:2d:c7:6e:b5:84:1c:7e:a8
------BEGIN CERTIFICATE-----
-MIIFVzCCAz+gAwIBAgINAgPlrsWNBCUaqxElqjANBgkqhkiG9w0BAQwFADBHMQsw
-CQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEU
-MBIGA1UEAxMLR1RTIFJvb3QgUjIwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAw
-MDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZp
-Y2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjIwggIiMA0GCSqGSIb3DQEBAQUA
-A4ICDwAwggIKAoICAQDO3v2m++zsFDQ8BwZabFn3GTXd98GdVarTzTukk3LvCvpt
-nfbwhYBboUhSnznFt+4orO/LdmgUud+tAWyZH8QiHZ/+cnfgLFuv5AS/T3KgGjSY
-6Dlo7JUle3ah5mm5hRm9iYz+re026nO8/4Piy33B0s5Ks40FnotJk9/BW9BuXvAu
-MC6C/Pq8tBcKSOWIm8Wba96wyrQD8Nr0kLhlZPdcTK3ofmZemde4wj7I0BOdre7k
-RXuJVfeKH2JShBKzwkCX44ofR5GmdFrS+LFjKBC4swm4VndAoiaYecb+3yXuPuWg
-f9RhD1FLPD+M2uFwdNjCaKH5wQzpoeJ/u1U8dgbuak7MkogwTZq9TwtImoS1mKPV
-+3PBV2HdKFZ1E66HjucMUQkQdYhMvI35ezzUIkgfKtzra7tEscszcTJGr61K8Yzo
-dDqs5xoic4DSMPclQsciOzsSrZYuxsN2B6ogtzVJV+mSSeh2FnIxZyuWfoqjx5RW
-Ir9qS34BIbIjMt/kmkRtWVtd9QCgHJvGeJeNkP+byKq0rxFROV7Z+2et1VsRnTKa
-G73VululycslaVNVJ1zgyjbLiGH7HrfQy+4W+9OmTN6SpdTi3/UGVN4unUu0kzCq
-gc7dGtxRcw1PcOnlthYhGXmy5okLdWTK1au8CcEYof/UVKGFPP0UJAOyh9OktwID
-AQABo0IwQDAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E
-FgQUu//KjiOfT5nK2+JopqUVJxce2Q4wDQYJKoZIhvcNAQEMBQADggIBAB/Kzt3H
-vqGf2SdMC9wXmBFqiN495nFWcrKeGk6c1SuYJF2ba3uwM4IJvd8lRuqYnrYb/oM8
-0mJhwQTtzuDFycgTE1XnqGOtjHsB/ncw4c5omwX4Eu55MaBBRTUoCnGkJE+M3DyC
-B19m3H0Q/gxhswWV7uGugQ+o+MePTagjAiZrHYNSVc61LwDKgEDg4XSsYPWHgJ2u
-NmSRXbBoGOqKYcl3qJfEycel/FVL8/B/uWU9J2jQzGv6U53hkRrJXRqWbTKH7QMg
-yALOWr7Z6v2yTcQvG99fevX4i8buMTolUVVnjWQye+mew4K6Ki3pHrTgSAai/Gev
-HyICc/sgCq+dVEuhzf9gR7A/Xe8bVr2XIZYtCtFenTgCR2y59PYjJbigapordwj6
-xLEokCZYCDzifqrXPW+6MYgKBesntaFJ7qBFVHvmJ2WZICGoo7z7GJa7Um8M7YNR
-TOlZ4iBgxcJlkoKM8xAfDoqXvneCbT+PHV28SSe9zE8P4c52hgQjxcCMElv924Sg
-JPFI/2R80L5cFtHvma3AH/vLrrw4IgYmZNralw4/KBVEqE8AyvCazM90arQ+POuV
-7LXTWtiBmelDGDfrs7vRWGJB82bSj6p4lVQgw1oudCvV0b4YacCs1aTPObpRhANl
-6WLAYv7YTVWW4tAR+kg0Eeye7QUd5MjWHYbL
------END CERTIFICATE-----
-
-# Issuer: CN=GTS Root R3 O=Google Trust Services LLC
-# Subject: CN=GTS Root R3 O=Google Trust Services LLC
-# Label: "GTS Root R3"
-# Serial: 159662495401136852707857743206
-# MD5 Fingerprint: 3e:e7:9d:58:02:94:46:51:94:e5:e0:22:4a:8b:e7:73
-# SHA1 Fingerprint: ed:e5:71:80:2b:c8:92:b9:5b:83:3c:d2:32:68:3f:09:cd:a0:1e:46
-# SHA256 Fingerprint: 34:d8:a7:3e:e2:08:d9:bc:db:0d:95:65:20:93:4b:4e:40:e6:94:82:59:6e:8b:6f:73:c8:42:6b:01:0a:6f:48
------BEGIN CERTIFICATE-----
-MIICCTCCAY6gAwIBAgINAgPluILrIPglJ209ZjAKBggqhkjOPQQDAzBHMQswCQYD
-VQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIG
-A1UEAxMLR1RTIFJvb3QgUjMwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAwMDAw
-WjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2Vz
-IExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjMwdjAQBgcqhkjOPQIBBgUrgQQAIgNi
-AAQfTzOHMymKoYTey8chWEGJ6ladK0uFxh1MJ7x/JlFyb+Kf1qPKzEUURout736G
-jOyxfi//qXGdGIRFBEFVbivqJn+7kAHjSxm65FSWRQmx1WyRRK2EE46ajA2ADDL2
-4CejQjBAMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW
-BBTB8Sa6oC2uhYHP0/EqEr24Cmf9vDAKBggqhkjOPQQDAwNpADBmAjEA9uEglRR7
-VKOQFhG/hMjqb2sXnh5GmCCbn9MN2azTL818+FsuVbu/3ZL3pAzcMeGiAjEA/Jdm
-ZuVDFhOD3cffL74UOO0BzrEXGhF16b0DjyZ+hOXJYKaV11RZt+cRLInUue4X
------END CERTIFICATE-----
-
-# Issuer: CN=GTS Root R4 O=Google Trust Services LLC
-# Subject: CN=GTS Root R4 O=Google Trust Services LLC
-# Label: "GTS Root R4"
-# Serial: 159662532700760215368942768210
-# MD5 Fingerprint: 43:96:83:77:19:4d:76:b3:9d:65:52:e4:1d:22:a5:e8
-# SHA1 Fingerprint: 77:d3:03:67:b5:e0:0c:15:f6:0c:38:61:df:7c:e1:3b:92:46:4d:47
-# SHA256 Fingerprint: 34:9d:fa:40:58:c5:e2:63:12:3b:39:8a:e7:95:57:3c:4e:13:13:c8:3f:e6:8f:93:55:6c:d5:e8:03:1b:3c:7d
------BEGIN CERTIFICATE-----
-MIICCTCCAY6gAwIBAgINAgPlwGjvYxqccpBQUjAKBggqhkjOPQQDAzBHMQswCQYD
-VQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIG
-A1UEAxMLR1RTIFJvb3QgUjQwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAwMDAw
-WjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2Vz
-IExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjQwdjAQBgcqhkjOPQIBBgUrgQQAIgNi
-AATzdHOnaItgrkO4NcWBMHtLSZ37wWHO5t5GvWvVYRg1rkDdc/eJkTBa6zzuhXyi
-QHY7qca4R9gq55KRanPpsXI5nymfopjTX15YhmUPoYRlBtHci8nHc8iMai/lxKvR
-HYqjQjBAMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW
-BBSATNbrdP9JNqPV2Py1PsVq8JQdjDAKBggqhkjOPQQDAwNpADBmAjEA6ED/g94D
-9J+uHXqnLrmvT/aDHQ4thQEd0dlq7A/Cr8deVl5c1RxYIigL9zC2L7F8AjEA8GE8
-p/SgguMh1YQdc4acLa/KNJvxn7kjNuK8YAOdgLOaVsjh4rsUecrNIdSUtUlD
------END CERTIFICATE-----
-
-# Issuer: CN=Telia Root CA v2 O=Telia Finland Oyj
-# Subject: CN=Telia Root CA v2 O=Telia Finland Oyj
-# Label: "Telia Root CA v2"
-# Serial: 7288924052977061235122729490515358
-# MD5 Fingerprint: 0e:8f:ac:aa:82:df:85:b1:f4:dc:10:1c:fc:99:d9:48
-# SHA1 Fingerprint: b9:99:cd:d1:73:50:8a:c4:47:05:08:9c:8c:88:fb:be:a0:2b:40:cd
-# SHA256 Fingerprint: 24:2b:69:74:2f:cb:1e:5b:2a:bf:98:89:8b:94:57:21:87:54:4e:5b:4d:99:11:78:65:73:62:1f:6a:74:b8:2c
------BEGIN CERTIFICATE-----
-MIIFdDCCA1ygAwIBAgIPAWdfJ9b+euPkrL4JWwWeMA0GCSqGSIb3DQEBCwUAMEQx
-CzAJBgNVBAYTAkZJMRowGAYDVQQKDBFUZWxpYSBGaW5sYW5kIE95ajEZMBcGA1UE
-AwwQVGVsaWEgUm9vdCBDQSB2MjAeFw0xODExMjkxMTU1NTRaFw00MzExMjkxMTU1
-NTRaMEQxCzAJBgNVBAYTAkZJMRowGAYDVQQKDBFUZWxpYSBGaW5sYW5kIE95ajEZ
-MBcGA1UEAwwQVGVsaWEgUm9vdCBDQSB2MjCCAiIwDQYJKoZIhvcNAQEBBQADggIP
-ADCCAgoCggIBALLQPwe84nvQa5n44ndp586dpAO8gm2h/oFlH0wnrI4AuhZ76zBq
-AMCzdGh+sq/H1WKzej9Qyow2RCRj0jbpDIX2Q3bVTKFgcmfiKDOlyzG4OiIjNLh9
-vVYiQJ3q9HsDrWj8soFPmNB06o3lfc1jw6P23pLCWBnglrvFxKk9pXSW/q/5iaq9
-lRdU2HhE8Qx3FZLgmEKnpNaqIJLNwaCzlrI6hEKNfdWV5Nbb6WLEWLN5xYzTNTOD
-n3WhUidhOPFZPY5Q4L15POdslv5e2QJltI5c0BE0312/UqeBAMN/mUWZFdUXyApT
-7GPzmX3MaRKGwhfwAZ6/hLzRUssbkmbOpFPlob/E2wnW5olWK8jjfN7j/4nlNW4o
-6GwLI1GpJQXrSPjdscr6bAhR77cYbETKJuFzxokGgeWKrLDiKca5JLNrRBH0pUPC
-TEPlcDaMtjNXepUugqD0XBCzYYP2AgWGLnwtbNwDRm41k9V6lS/eINhbfpSQBGq6
-WT0EBXWdN6IOLj3rwaRSg/7Qa9RmjtzG6RJOHSpXqhC8fF6CfaamyfItufUXJ63R
-DolUK5X6wK0dmBR4M0KGCqlztft0DbcbMBnEWg4cJ7faGND/isgFuvGqHKI3t+ZI
-pEYslOqodmJHixBTB0hXbOKSTbauBcvcwUpej6w9GU7C7WB1K9vBykLVAgMBAAGj
-YzBhMB8GA1UdIwQYMBaAFHKs5DN5qkWH9v2sHZ7Wxy+G2CQ5MB0GA1UdDgQWBBRy
-rOQzeapFh/b9rB2e1scvhtgkOTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUw
-AwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAoDtZpwmUPjaE0n4vOaWWl/oRrfxn83EJ
-8rKJhGdEr7nv7ZbsnGTbMjBvZ5qsfl+yqwE2foH65IRe0qw24GtixX1LDoJt0nZi
-0f6X+J8wfBj5tFJ3gh1229MdqfDBmgC9bXXYfef6xzijnHDoRnkDry5023X4blMM
-A8iZGok1GTzTyVR8qPAs5m4HeW9q4ebqkYJpCh3DflminmtGFZhb069GHWLIzoBS
-SRE/yQQSwxN8PzuKlts8oB4KtItUsiRnDe+Cy748fdHif64W1lZYudogsYMVoe+K
-TTJvQS8TUoKU1xrBeKJR3Stwbbca+few4GeXVtt8YVMJAygCQMez2P2ccGrGKMOF
-6eLtGpOg3kuYooQ+BXcBlj37tCAPnHICehIv1aO6UXivKitEZU61/Qrowc15h2Er
-3oBXRb9n8ZuRXqWk7FlIEA04x7D6w0RtBPV4UBySllva9bguulvP5fBqnUsvWHMt
-Ty3EHD70sz+rFQ47GUGKpMFXEmZxTPpT41frYpUJnlTd0cI8Vzy9OK2YZLe4A5pT
-VmBds9hCG1xLEooc6+t9xnppxyd/pPiL8uSUZodL6ZQHCRJ5irLrdATczvREWeAW
-ysUsWNc8e89ihmpQfTU2Zqf7N+cox9jQraVplI/owd8k+BsHMYeB2F326CjYSlKA
-rBPuUBQemMc=
------END CERTIFICATE-----
-
-# Issuer: CN=D-TRUST BR Root CA 1 2020 O=D-Trust GmbH
-# Subject: CN=D-TRUST BR Root CA 1 2020 O=D-Trust GmbH
-# Label: "D-TRUST BR Root CA 1 2020"
-# Serial: 165870826978392376648679885835942448534
-# MD5 Fingerprint: b5:aa:4b:d5:ed:f7:e3:55:2e:8f:72:0a:f3:75:b8:ed
-# SHA1 Fingerprint: 1f:5b:98:f0:e3:b5:f7:74:3c:ed:e6:b0:36:7d:32:cd:f4:09:41:67
-# SHA256 Fingerprint: e5:9a:aa:81:60:09:c2:2b:ff:5b:25:ba:d3:7d:f3:06:f0:49:79:7c:1f:81:d8:5a:b0:89:e6:57:bd:8f:00:44
------BEGIN CERTIFICATE-----
-MIIC2zCCAmCgAwIBAgIQfMmPK4TX3+oPyWWa00tNljAKBggqhkjOPQQDAzBIMQsw
-CQYDVQQGEwJERTEVMBMGA1UEChMMRC1UcnVzdCBHbWJIMSIwIAYDVQQDExlELVRS
-VVNUIEJSIFJvb3QgQ0EgMSAyMDIwMB4XDTIwMDIxMTA5NDUwMFoXDTM1MDIxMTA5
-NDQ1OVowSDELMAkGA1UEBhMCREUxFTATBgNVBAoTDEQtVHJ1c3QgR21iSDEiMCAG
-A1UEAxMZRC1UUlVTVCBCUiBSb290IENBIDEgMjAyMDB2MBAGByqGSM49AgEGBSuB
-BAAiA2IABMbLxyjR+4T1mu9CFCDhQ2tuda38KwOE1HaTJddZO0Flax7mNCq7dPYS
-zuht56vkPE4/RAiLzRZxy7+SmfSk1zxQVFKQhYN4lGdnoxwJGT11NIXe7WB9xwy0
-QVK5buXuQqOCAQ0wggEJMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFHOREKv/
-VbNafAkl1bK6CKBrqx9tMA4GA1UdDwEB/wQEAwIBBjCBxgYDVR0fBIG+MIG7MD6g
-PKA6hjhodHRwOi8vY3JsLmQtdHJ1c3QubmV0L2NybC9kLXRydXN0X2JyX3Jvb3Rf
-Y2FfMV8yMDIwLmNybDB5oHegdYZzbGRhcDovL2RpcmVjdG9yeS5kLXRydXN0Lm5l
-dC9DTj1ELVRSVVNUJTIwQlIlMjBSb290JTIwQ0ElMjAxJTIwMjAyMCxPPUQtVHJ1
-c3QlMjBHbWJILEM9REU/Y2VydGlmaWNhdGVyZXZvY2F0aW9ubGlzdDAKBggqhkjO
-PQQDAwNpADBmAjEAlJAtE/rhY/hhY+ithXhUkZy4kzg+GkHaQBZTQgjKL47xPoFW
-wKrY7RjEsK70PvomAjEA8yjixtsrmfu3Ubgko6SUeho/5jbiA1czijDLgsfWFBHV
-dWNbFJWcHwHP2NVypw87
------END CERTIFICATE-----
-
-# Issuer: CN=D-TRUST EV Root CA 1 2020 O=D-Trust GmbH
-# Subject: CN=D-TRUST EV Root CA 1 2020 O=D-Trust GmbH
-# Label: "D-TRUST EV Root CA 1 2020"
-# Serial: 126288379621884218666039612629459926992
-# MD5 Fingerprint: 8c:2d:9d:70:9f:48:99:11:06:11:fb:e9:cb:30:c0:6e
-# SHA1 Fingerprint: 61:db:8c:21:59:69:03:90:d8:7c:9c:12:86:54:cf:9d:3d:f4:dd:07
-# SHA256 Fingerprint: 08:17:0d:1a:a3:64:53:90:1a:2f:95:92:45:e3:47:db:0c:8d:37:ab:aa:bc:56:b8:1a:a1:00:dc:95:89:70:db
------BEGIN CERTIFICATE-----
-MIIC2zCCAmCgAwIBAgIQXwJB13qHfEwDo6yWjfv/0DAKBggqhkjOPQQDAzBIMQsw
-CQYDVQQGEwJERTEVMBMGA1UEChMMRC1UcnVzdCBHbWJIMSIwIAYDVQQDExlELVRS
-VVNUIEVWIFJvb3QgQ0EgMSAyMDIwMB4XDTIwMDIxMTEwMDAwMFoXDTM1MDIxMTA5
-NTk1OVowSDELMAkGA1UEBhMCREUxFTATBgNVBAoTDEQtVHJ1c3QgR21iSDEiMCAG
-A1UEAxMZRC1UUlVTVCBFViBSb290IENBIDEgMjAyMDB2MBAGByqGSM49AgEGBSuB
-BAAiA2IABPEL3YZDIBnfl4XoIkqbz52Yv7QFJsnL46bSj8WeeHsxiamJrSc8ZRCC
-/N/DnU7wMyPE0jL1HLDfMxddxfCxivnvubcUyilKwg+pf3VlSSowZ/Rk99Yad9rD
-wpdhQntJraOCAQ0wggEJMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFH8QARY3
-OqQo5FD4pPfsazK2/umLMA4GA1UdDwEB/wQEAwIBBjCBxgYDVR0fBIG+MIG7MD6g
-PKA6hjhodHRwOi8vY3JsLmQtdHJ1c3QubmV0L2NybC9kLXRydXN0X2V2X3Jvb3Rf
-Y2FfMV8yMDIwLmNybDB5oHegdYZzbGRhcDovL2RpcmVjdG9yeS5kLXRydXN0Lm5l
-dC9DTj1ELVRSVVNUJTIwRVYlMjBSb290JTIwQ0ElMjAxJTIwMjAyMCxPPUQtVHJ1
-c3QlMjBHbWJILEM9REU/Y2VydGlmaWNhdGVyZXZvY2F0aW9ubGlzdDAKBggqhkjO
-PQQDAwNpADBmAjEAyjzGKnXCXnViOTYAYFqLwZOZzNnbQTs7h5kXO9XMT8oi96CA
-y/m0sRtW9XLS/BnRAjEAkfcwkz8QRitxpNA7RJvAKQIFskF3UfN5Wp6OFKBOQtJb
-gfM0agPnIjhQW+0ZT0MW
------END CERTIFICATE-----
-
-# Issuer: CN=DigiCert TLS ECC P384 Root G5 O=DigiCert, Inc.
-# Subject: CN=DigiCert TLS ECC P384 Root G5 O=DigiCert, Inc.
-# Label: "DigiCert TLS ECC P384 Root G5"
-# Serial: 13129116028163249804115411775095713523
-# MD5 Fingerprint: d3:71:04:6a:43:1c:db:a6:59:e1:a8:a3:aa:c5:71:ed
-# SHA1 Fingerprint: 17:f3:de:5e:9f:0f:19:e9:8e:f6:1f:32:26:6e:20:c4:07:ae:30:ee
-# SHA256 Fingerprint: 01:8e:13:f0:77:25:32:cf:80:9b:d1:b1:72:81:86:72:83:fc:48:c6:e1:3b:e9:c6:98:12:85:4a:49:0c:1b:05
------BEGIN CERTIFICATE-----
-MIICGTCCAZ+gAwIBAgIQCeCTZaz32ci5PhwLBCou8zAKBggqhkjOPQQDAzBOMQsw
-CQYDVQQGEwJVUzEXMBUGA1UEChMORGlnaUNlcnQsIEluYy4xJjAkBgNVBAMTHURp
-Z2lDZXJ0IFRMUyBFQ0MgUDM4NCBSb290IEc1MB4XDTIxMDExNTAwMDAwMFoXDTQ2
-MDExNDIzNTk1OVowTjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDkRpZ2lDZXJ0LCBJ
-bmMuMSYwJAYDVQQDEx1EaWdpQ2VydCBUTFMgRUNDIFAzODQgUm9vdCBHNTB2MBAG
-ByqGSM49AgEGBSuBBAAiA2IABMFEoc8Rl1Ca3iOCNQfN0MsYndLxf3c1TzvdlHJS
-7cI7+Oz6e2tYIOyZrsn8aLN1udsJ7MgT9U7GCh1mMEy7H0cKPGEQQil8pQgO4CLp
-0zVozptjn4S1mU1YoI71VOeVyaNCMEAwHQYDVR0OBBYEFMFRRVBZqz7nLFr6ICIS
-B4CIfBFqMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MAoGCCqGSM49
-BAMDA2gAMGUCMQCJao1H5+z8blUD2WdsJk6Dxv3J+ysTvLd6jLRl0mlpYxNjOyZQ
-LgGheQaRnUi/wr4CMEfDFXuxoJGZSZOoPHzoRgaLLPIxAJSdYsiJvRmEFOml+wG4
-DXZDjC5Ty3zfDBeWUA==
------END CERTIFICATE-----
-
-# Issuer: CN=DigiCert TLS RSA4096 Root G5 O=DigiCert, Inc.
-# Subject: CN=DigiCert TLS RSA4096 Root G5 O=DigiCert, Inc.
-# Label: "DigiCert TLS RSA4096 Root G5"
-# Serial: 11930366277458970227240571539258396554
-# MD5 Fingerprint: ac:fe:f7:34:96:a9:f2:b3:b4:12:4b:e4:27:41:6f:e1
-# SHA1 Fingerprint: a7:88:49:dc:5d:7c:75:8c:8c:de:39:98:56:b3:aa:d0:b2:a5:71:35
-# SHA256 Fingerprint: 37:1a:00:dc:05:33:b3:72:1a:7e:eb:40:e8:41:9e:70:79:9d:2b:0a:0f:2c:1d:80:69:31:65:f7:ce:c4:ad:75
------BEGIN CERTIFICATE-----
-MIIFZjCCA06gAwIBAgIQCPm0eKj6ftpqMzeJ3nzPijANBgkqhkiG9w0BAQwFADBN
-MQswCQYDVQQGEwJVUzEXMBUGA1UEChMORGlnaUNlcnQsIEluYy4xJTAjBgNVBAMT
-HERpZ2lDZXJ0IFRMUyBSU0E0MDk2IFJvb3QgRzUwHhcNMjEwMTE1MDAwMDAwWhcN
-NDYwMTE0MjM1OTU5WjBNMQswCQYDVQQGEwJVUzEXMBUGA1UEChMORGlnaUNlcnQs
-IEluYy4xJTAjBgNVBAMTHERpZ2lDZXJ0IFRMUyBSU0E0MDk2IFJvb3QgRzUwggIi
-MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCz0PTJeRGd/fxmgefM1eS87IE+
-ajWOLrfn3q/5B03PMJ3qCQuZvWxX2hhKuHisOjmopkisLnLlvevxGs3npAOpPxG0
-2C+JFvuUAT27L/gTBaF4HI4o4EXgg/RZG5Wzrn4DReW+wkL+7vI8toUTmDKdFqgp
-wgscONyfMXdcvyej/Cestyu9dJsXLfKB2l2w4SMXPohKEiPQ6s+d3gMXsUJKoBZM
-pG2T6T867jp8nVid9E6P/DsjyG244gXazOvswzH016cpVIDPRFtMbzCe88zdH5RD
-nU1/cHAN1DrRN/BsnZvAFJNY781BOHW8EwOVfH/jXOnVDdXifBBiqmvwPXbzP6Po
-sMH976pXTayGpxi0KcEsDr9kvimM2AItzVwv8n/vFfQMFawKsPHTDU9qTXeXAaDx
-Zre3zu/O7Oyldcqs4+Fj97ihBMi8ez9dLRYiVu1ISf6nL3kwJZu6ay0/nTvEF+cd
-Lvvyz6b84xQslpghjLSR6Rlgg/IwKwZzUNWYOwbpx4oMYIwo+FKbbuH2TbsGJJvX
-KyY//SovcfXWJL5/MZ4PbeiPT02jP/816t9JXkGPhvnxd3lLG7SjXi/7RgLQZhNe
-XoVPzthwiHvOAbWWl9fNff2C+MIkwcoBOU+NosEUQB+cZtUMCUbW8tDRSHZWOkPL
-tgoRObqME2wGtZ7P6wIDAQABo0IwQDAdBgNVHQ4EFgQUUTMc7TZArxfTJc1paPKv
-TiM+s0EwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcN
-AQEMBQADggIBAGCmr1tfV9qJ20tQqcQjNSH/0GEwhJG3PxDPJY7Jv0Y02cEhJhxw
-GXIeo8mH/qlDZJY6yFMECrZBu8RHANmfGBg7sg7zNOok992vIGCukihfNudd5N7H
-PNtQOa27PShNlnx2xlv0wdsUpasZYgcYQF+Xkdycx6u1UQ3maVNVzDl92sURVXLF
-O4uJ+DQtpBflF+aZfTCIITfNMBc9uPK8qHWgQ9w+iUuQrm0D4ByjoJYJu32jtyoQ
-REtGBzRj7TG5BO6jm5qu5jF49OokYTurWGT/u4cnYiWB39yhL/btp/96j1EuMPik
-AdKFOV8BmZZvWltwGUb+hmA+rYAQCd05JS9Yf7vSdPD3Rh9GOUrYU9DzLjtxpdRv
-/PNn5AeP3SYZ4Y1b+qOTEZvpyDrDVWiakuFSdjjo4bq9+0/V77PnSIMx8IIh47a+
-p6tv75/fTM8BuGJqIz3nCU2AG3swpMPdB380vqQmsvZB6Akd4yCYqjdP//fx4ilw
-MUc/dNAUFvohigLVigmUdy7yWSiLfFCSCmZ4OIN1xLVaqBHG5cGdZlXPU8Sv13WF
-qUITVuwhd4GTWgzqltlJyqEI8pc7bZsEGCREjnwB8twl2F6GmrE52/WRMmrRpnCK
-ovfepEWFJqgejF0pW8hL2JpqA15w8oVPbEtoL8pU9ozaMv7Da4M/OMZ+
------END CERTIFICATE-----
-
-# Issuer: CN=Certainly Root R1 O=Certainly
-# Subject: CN=Certainly Root R1 O=Certainly
-# Label: "Certainly Root R1"
-# Serial: 188833316161142517227353805653483829216
-# MD5 Fingerprint: 07:70:d4:3e:82:87:a0:fa:33:36:13:f4:fa:33:e7:12
-# SHA1 Fingerprint: a0:50:ee:0f:28:71:f4:27:b2:12:6d:6f:50:96:25:ba:cc:86:42:af
-# SHA256 Fingerprint: 77:b8:2c:d8:64:4c:43:05:f7:ac:c5:cb:15:6b:45:67:50:04:03:3d:51:c6:0c:62:02:a8:e0:c3:34:67:d3:a0
------BEGIN CERTIFICATE-----
-MIIFRzCCAy+gAwIBAgIRAI4P+UuQcWhlM1T01EQ5t+AwDQYJKoZIhvcNAQELBQAw
-PTELMAkGA1UEBhMCVVMxEjAQBgNVBAoTCUNlcnRhaW5seTEaMBgGA1UEAxMRQ2Vy
-dGFpbmx5IFJvb3QgUjEwHhcNMjEwNDAxMDAwMDAwWhcNNDYwNDAxMDAwMDAwWjA9
-MQswCQYDVQQGEwJVUzESMBAGA1UEChMJQ2VydGFpbmx5MRowGAYDVQQDExFDZXJ0
-YWlubHkgUm9vdCBSMTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANA2
-1B/q3avk0bbm+yLA3RMNansiExyXPGhjZjKcA7WNpIGD2ngwEc/csiu+kr+O5MQT
-vqRoTNoCaBZ0vrLdBORrKt03H2As2/X3oXyVtwxwhi7xOu9S98zTm/mLvg7fMbed
-aFySpvXl8wo0tf97ouSHocavFwDvA5HtqRxOcT3Si2yJ9HiG5mpJoM610rCrm/b0
-1C7jcvk2xusVtyWMOvwlDbMicyF0yEqWYZL1LwsYpfSt4u5BvQF5+paMjRcCMLT5
-r3gajLQ2EBAHBXDQ9DGQilHFhiZ5shGIXsXwClTNSaa/ApzSRKft43jvRl5tcdF5
-cBxGX1HpyTfcX35pe0HfNEXgO4T0oYoKNp43zGJS4YkNKPl6I7ENPT2a/Z2B7yyQ
-wHtETrtJ4A5KVpK8y7XdeReJkd5hiXSSqOMyhb5OhaRLWcsrxXiOcVTQAjeZjOVJ
-6uBUcqQRBi8LjMFbvrWhsFNunLhgkR9Za/kt9JQKl7XsxXYDVBtlUrpMklZRNaBA
-2CnbrlJ2Oy0wQJuK0EJWtLeIAaSHO1OWzaMWj/Nmqhexx2DgwUMFDO6bW2BvBlyH
-Wyf5QBGenDPBt+U1VwV/J84XIIwc/PH72jEpSe31C4SnT8H2TsIonPru4K8H+zMR
-eiFPCyEQtkA6qyI6BJyLm4SGcprSp6XEtHWRqSsjAgMBAAGjQjBAMA4GA1UdDwEB
-/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTgqj8ljZ9EXME66C6u
-d0yEPmcM9DANBgkqhkiG9w0BAQsFAAOCAgEAuVevuBLaV4OPaAszHQNTVfSVcOQr
-PbA56/qJYv331hgELyE03fFo8NWWWt7CgKPBjcZq91l3rhVkz1t5BXdm6ozTaw3d
-8VkswTOlMIAVRQdFGjEitpIAq5lNOo93r6kiyi9jyhXWx8bwPWz8HA2YEGGeEaIi
-1wrykXprOQ4vMMM2SZ/g6Q8CRFA3lFV96p/2O7qUpUzpvD5RtOjKkjZUbVwlKNrd
-rRT90+7iIgXr0PK3aBLXWopBGsaSpVo7Y0VPv+E6dyIvXL9G+VoDhRNCX8reU9di
-taY1BMJH/5n9hN9czulegChB8n3nHpDYT3Y+gjwN/KUD+nsa2UUeYNrEjvn8K8l7
-lcUq/6qJ34IxD3L/DCfXCh5WAFAeDJDBlrXYFIW7pw0WwfgHJBu6haEaBQmAupVj
-yTrsJZ9/nbqkRxWbRHDxakvWOF5D8xh+UG7pWijmZeZ3Gzr9Hb4DJqPb1OG7fpYn
-Kx3upPvaJVQTA945xsMfTZDsjxtK0hzthZU4UHlG1sGQUDGpXJpuHfUzVounmdLy
-yCwzk5Iwx06MZTMQZBf9JBeW0Y3COmor6xOLRPIh80oat3df1+2IpHLlOR+Vnb5n
-wXARPbv0+Em34yaXOp/SX3z7wJl8OSngex2/DaeP0ik0biQVy96QXr8axGbqwua6
-OV+KmalBWQewLK8=
------END CERTIFICATE-----
-
-# Issuer: CN=Certainly Root E1 O=Certainly
-# Subject: CN=Certainly Root E1 O=Certainly
-# Label: "Certainly Root E1"
-# Serial: 8168531406727139161245376702891150584
-# MD5 Fingerprint: 0a:9e:ca:cd:3e:52:50:c6:36:f3:4b:a3:ed:a7:53:e9
-# SHA1 Fingerprint: f9:e1:6d:dc:01:89:cf:d5:82:45:63:3e:c5:37:7d:c2:eb:93:6f:2b
-# SHA256 Fingerprint: b4:58:5f:22:e4:ac:75:6a:4e:86:12:a1:36:1c:5d:9d:03:1a:93:fd:84:fe:bb:77:8f:a3:06:8b:0f:c4:2d:c2
------BEGIN CERTIFICATE-----
-MIIB9zCCAX2gAwIBAgIQBiUzsUcDMydc+Y2aub/M+DAKBggqhkjOPQQDAzA9MQsw
-CQYDVQQGEwJVUzESMBAGA1UEChMJQ2VydGFpbmx5MRowGAYDVQQDExFDZXJ0YWlu
-bHkgUm9vdCBFMTAeFw0yMTA0MDEwMDAwMDBaFw00NjA0MDEwMDAwMDBaMD0xCzAJ
-BgNVBAYTAlVTMRIwEAYDVQQKEwlDZXJ0YWlubHkxGjAYBgNVBAMTEUNlcnRhaW5s
-eSBSb290IEUxMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAE3m/4fxzf7flHh4axpMCK
-+IKXgOqPyEpeKn2IaKcBYhSRJHpcnqMXfYqGITQYUBsQ3tA3SybHGWCA6TS9YBk2
-QNYphwk8kXr2vBMj3VlOBF7PyAIcGFPBMdjaIOlEjeR2o0IwQDAOBgNVHQ8BAf8E
-BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU8ygYy2R17ikq6+2uI1g4
-hevIIgcwCgYIKoZIzj0EAwMDaAAwZQIxALGOWiDDshliTd6wT99u0nCK8Z9+aozm
-ut6Dacpps6kFtZaSF4fC0urQe87YQVt8rgIwRt7qy12a7DLCZRawTDBcMPPaTnOG
-BtjOiQRINzf43TNRnXCve1XYAS59BWQOhriR
------END CERTIFICATE-----
-
-# Issuer: CN=E-Tugra Global Root CA RSA v3 O=E-Tugra EBG A.S. OU=E-Tugra Trust Center
-# Subject: CN=E-Tugra Global Root CA RSA v3 O=E-Tugra EBG A.S. OU=E-Tugra Trust Center
-# Label: "E-Tugra Global Root CA RSA v3"
-# Serial: 75951268308633135324246244059508261641472512052
-# MD5 Fingerprint: 22:be:10:f6:c2:f8:03:88:73:5f:33:29:47:28:47:a4
-# SHA1 Fingerprint: e9:a8:5d:22:14:52:1c:5b:aa:0a:b4:be:24:6a:23:8a:c9:ba:e2:a9
-# SHA256 Fingerprint: ef:66:b0:b1:0a:3c:db:9f:2e:36:48:c7:6b:d2:af:18:ea:d2:bf:e6:f1:17:65:5e:28:c4:06:0d:a1:a3:f4:c2
------BEGIN CERTIFICATE-----
-MIIF8zCCA9ugAwIBAgIUDU3FzRYilZYIfrgLfxUGNPt5EDQwDQYJKoZIhvcNAQEL
-BQAwgYAxCzAJBgNVBAYTAlRSMQ8wDQYDVQQHEwZBbmthcmExGTAXBgNVBAoTEEUt
-VHVncmEgRUJHIEEuUy4xHTAbBgNVBAsTFEUtVHVncmEgVHJ1c3QgQ2VudGVyMSYw
-JAYDVQQDEx1FLVR1Z3JhIEdsb2JhbCBSb290IENBIFJTQSB2MzAeFw0yMDAzMTgw
-OTA3MTdaFw00NTAzMTIwOTA3MTdaMIGAMQswCQYDVQQGEwJUUjEPMA0GA1UEBxMG
-QW5rYXJhMRkwFwYDVQQKExBFLVR1Z3JhIEVCRyBBLlMuMR0wGwYDVQQLExRFLVR1
-Z3JhIFRydXN0IENlbnRlcjEmMCQGA1UEAxMdRS1UdWdyYSBHbG9iYWwgUm9vdCBD
-QSBSU0EgdjMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCiZvCJt3J7
-7gnJY9LTQ91ew6aEOErxjYG7FL1H6EAX8z3DeEVypi6Q3po61CBxyryfHUuXCscx
-uj7X/iWpKo429NEvx7epXTPcMHD4QGxLsqYxYdE0PD0xesevxKenhOGXpOhL9hd8
-7jwH7eKKV9y2+/hDJVDqJ4GohryPUkqWOmAalrv9c/SF/YP9f4RtNGx/ardLAQO/
-rWm31zLZ9Vdq6YaCPqVmMbMWPcLzJmAy01IesGykNz709a/r4d+ABs8qQedmCeFL
-l+d3vSFtKbZnwy1+7dZ5ZdHPOrbRsV5WYVB6Ws5OUDGAA5hH5+QYfERaxqSzO8bG
-wzrwbMOLyKSRBfP12baqBqG3q+Sx6iEUXIOk/P+2UNOMEiaZdnDpwA+mdPy70Bt4
-znKS4iicvObpCdg604nmvi533wEKb5b25Y08TVJ2Glbhc34XrD2tbKNSEhhw5oBO
-M/J+JjKsBY04pOZ2PJ8QaQ5tndLBeSBrW88zjdGUdjXnXVXHt6woq0bM5zshtQoK
-5EpZ3IE1S0SVEgpnpaH/WwAH0sDM+T/8nzPyAPiMbIedBi3x7+PmBvrFZhNb/FAH
-nnGGstpvdDDPk1Po3CLW3iAfYY2jLqN4MpBs3KwytQXk9TwzDdbgh3cXTJ2w2Amo
-DVf3RIXwyAS+XF1a4xeOVGNpf0l0ZAWMowIDAQABo2MwYTAPBgNVHRMBAf8EBTAD
-AQH/MB8GA1UdIwQYMBaAFLK0ruYt9ybVqnUtdkvAG1Mh0EjvMB0GA1UdDgQWBBSy
-tK7mLfcm1ap1LXZLwBtTIdBI7zAOBgNVHQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQEL
-BQADggIBAImocn+M684uGMQQgC0QDP/7FM0E4BQ8Tpr7nym/Ip5XuYJzEmMmtcyQ
-6dIqKe6cLcwsmb5FJ+Sxce3kOJUxQfJ9emN438o2Fi+CiJ+8EUdPdk3ILY7r3y18
-Tjvarvbj2l0Upq7ohUSdBm6O++96SmotKygY/r+QLHUWnw/qln0F7psTpURs+APQ
-3SPh/QMSEgj0GDSz4DcLdxEBSL9htLX4GdnLTeqjjO/98Aa1bZL0SmFQhO3sSdPk
-vmjmLuMxC1QLGpLWgti2omU8ZgT5Vdps+9u1FGZNlIM7zR6mK7L+d0CGq+ffCsn9
-9t2HVhjYsCxVYJb6CH5SkPVLpi6HfMsg2wY+oF0Dd32iPBMbKaITVaA9FCKvb7jQ
-mhty3QUBjYZgv6Rn7rWlDdF/5horYmbDB7rnoEgcOMPpRfunf/ztAmgayncSd6YA
-VSgU7NbHEqIbZULpkejLPoeJVF3Zr52XnGnnCv8PWniLYypMfUeUP95L6VPQMPHF
-9p5J3zugkaOj/s1YzOrfr28oO6Bpm4/srK4rVJ2bBLFHIK+WEj5jlB0E5y67hscM
-moi/dkfv97ALl2bSRM9gUgfh1SxKOidhd8rXj+eHDjD/DLsE4mHDosiXYY60MGo8
-bcIHX0pzLz/5FooBZu+6kcpSV3uu1OYP3Qt6f4ueJiDPO++BcYNZ
------END CERTIFICATE-----
-
-# Issuer: CN=E-Tugra Global Root CA ECC v3 O=E-Tugra EBG A.S. OU=E-Tugra Trust Center
-# Subject: CN=E-Tugra Global Root CA ECC v3 O=E-Tugra EBG A.S. OU=E-Tugra Trust Center
-# Label: "E-Tugra Global Root CA ECC v3"
-# Serial: 218504919822255052842371958738296604628416471745
-# MD5 Fingerprint: 46:bc:81:bb:f1:b5:1e:f7:4b:96:bc:14:e2:e7:27:64
-# SHA1 Fingerprint: 8a:2f:af:57:53:b1:b0:e6:a1:04:ec:5b:6a:69:71:6d:f6:1c:e2:84
-# SHA256 Fingerprint: 87:3f:46:85:fa:7f:56:36:25:25:2e:6d:36:bc:d7:f1:6f:c2:49:51:f2:64:e4:7e:1b:95:4f:49:08:cd:ca:13
------BEGIN CERTIFICATE-----
-MIICpTCCAiqgAwIBAgIUJkYZdzHhT28oNt45UYbm1JeIIsEwCgYIKoZIzj0EAwMw
-gYAxCzAJBgNVBAYTAlRSMQ8wDQYDVQQHEwZBbmthcmExGTAXBgNVBAoTEEUtVHVn
-cmEgRUJHIEEuUy4xHTAbBgNVBAsTFEUtVHVncmEgVHJ1c3QgQ2VudGVyMSYwJAYD
-VQQDEx1FLVR1Z3JhIEdsb2JhbCBSb290IENBIEVDQyB2MzAeFw0yMDAzMTgwOTQ2
-NThaFw00NTAzMTIwOTQ2NThaMIGAMQswCQYDVQQGEwJUUjEPMA0GA1UEBxMGQW5r
-YXJhMRkwFwYDVQQKExBFLVR1Z3JhIEVCRyBBLlMuMR0wGwYDVQQLExRFLVR1Z3Jh
-IFRydXN0IENlbnRlcjEmMCQGA1UEAxMdRS1UdWdyYSBHbG9iYWwgUm9vdCBDQSBF
-Q0MgdjMwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAASOmCm/xxAeJ9urA8woLNheSBkQ
-KczLWYHMjLiSF4mDKpL2w6QdTGLVn9agRtwcvHbB40fQWxPa56WzZkjnIZpKT4YK
-fWzqTTKACrJ6CZtpS5iB4i7sAnCWH/31Rs7K3IKjYzBhMA8GA1UdEwEB/wQFMAMB
-Af8wHwYDVR0jBBgwFoAU/4Ixcj75xGZsrTie0bBRiKWQzPUwHQYDVR0OBBYEFP+C
-MXI++cRmbK04ntGwUYilkMz1MA4GA1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNp
-ADBmAjEA5gVYaWHlLcoNy/EZCL3W/VGSGn5jVASQkZo1kTmZ+gepZpO6yGjUij/6
-7W4WAie3AjEA3VoXK3YdZUKWpqxdinlW2Iob35reX8dQj7FbcQwm32pAAOwzkSFx
-vmjkI6TZraE3
------END CERTIFICATE-----
-
-# Issuer: CN=Security Communication RootCA3 O=SECOM Trust Systems CO.,LTD.
-# Subject: CN=Security Communication RootCA3 O=SECOM Trust Systems CO.,LTD.
-# Label: "Security Communication RootCA3"
-# Serial: 16247922307909811815
-# MD5 Fingerprint: 1c:9a:16:ff:9e:5c:e0:4d:8a:14:01:f4:35:5d:29:26
-# SHA1 Fingerprint: c3:03:c8:22:74:92:e5:61:a2:9c:5f:79:91:2b:1e:44:13:91:30:3a
-# SHA256 Fingerprint: 24:a5:5c:2a:b0:51:44:2d:06:17:76:65:41:23:9a:4a:d0:32:d7:c5:51:75:aa:34:ff:de:2f:bc:4f:5c:52:94
------BEGIN CERTIFICATE-----
-MIIFfzCCA2egAwIBAgIJAOF8N0D9G/5nMA0GCSqGSIb3DQEBDAUAMF0xCzAJBgNV
-BAYTAkpQMSUwIwYDVQQKExxTRUNPTSBUcnVzdCBTeXN0ZW1zIENPLixMVEQuMScw
-JQYDVQQDEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTMwHhcNMTYwNjE2
-MDYxNzE2WhcNMzgwMTE4MDYxNzE2WjBdMQswCQYDVQQGEwJKUDElMCMGA1UEChMc
-U0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEnMCUGA1UEAxMeU2VjdXJpdHkg
-Q29tbXVuaWNhdGlvbiBSb290Q0EzMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC
-CgKCAgEA48lySfcw3gl8qUCBWNO0Ot26YQ+TUG5pPDXC7ltzkBtnTCHsXzW7OT4r
-CmDvu20rhvtxosis5FaU+cmvsXLUIKx00rgVrVH+hXShuRD+BYD5UpOzQD11EKzA
-lrenfna84xtSGc4RHwsENPXY9Wk8d/Nk9A2qhd7gCVAEF5aEt8iKvE1y/By7z/MG
-TfmfZPd+pmaGNXHIEYBMwXFAWB6+oHP2/D5Q4eAvJj1+XCO1eXDe+uDRpdYMQXF7
-9+qMHIjH7Iv10S9VlkZ8WjtYO/u62C21Jdp6Ts9EriGmnpjKIG58u4iFW/vAEGK7
-8vknR+/RiTlDxN/e4UG/VHMgly1s2vPUB6PmudhvrvyMGS7TZ2crldtYXLVqAvO4
-g160a75BflcJdURQVc1aEWEhCmHCqYj9E7wtiS/NYeCVvsq1e+F7NGcLH7YMx3we
-GVPKp7FKFSBWFHA9K4IsD50VHUeAR/94mQ4xr28+j+2GaR57GIgUssL8gjMunEst
-+3A7caoreyYn8xrC3PsXuKHqy6C0rtOUfnrQq8PsOC0RLoi/1D+tEjtCrI8Cbn3M
-0V9hvqG8OmpI6iZVIhZdXw3/JzOfGAN0iltSIEdrRU0id4xVJ/CvHozJgyJUt5rQ
-T9nO/NkuHJYosQLTA70lUhw0Zk8jq/R3gpYd0VcwCBEF/VfR2ccCAwEAAaNCMEAw
-HQYDVR0OBBYEFGQUfPxYchamCik0FW8qy7z8r6irMA4GA1UdDwEB/wQEAwIBBjAP
-BgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBDAUAA4ICAQDcAiMI4u8hOscNtybS
-YpOnpSNyByCCYN8Y11StaSWSntkUz5m5UoHPrmyKO1o5yGwBQ8IibQLwYs1OY0PA
-FNr0Y/Dq9HHuTofjcan0yVflLl8cebsjqodEV+m9NU1Bu0soo5iyG9kLFwfl9+qd
-9XbXv8S2gVj/yP9kaWJ5rW4OH3/uHWnlt3Jxs/6lATWUVCvAUm2PVcTJ0rjLyjQI
-UYWg9by0F1jqClx6vWPGOi//lkkZhOpn2ASxYfQAW0q3nHE3GYV5v4GwxxMOdnE+
-OoAGrgYWp421wsTL/0ClXI2lyTrtcoHKXJg80jQDdwj98ClZXSEIx2C/pHF7uNke
-gr4Jr2VvKKu/S7XuPghHJ6APbw+LP6yVGPO5DtxnVW5inkYO0QR4ynKudtml+LLf
-iAlhi+8kTtFZP1rUPcmTPCtk9YENFpb3ksP+MW/oKjJ0DvRMmEoYDjBU1cXrvMUV
-nuiZIesnKwkK2/HmcBhWuwzkvvnoEKQTkrgc4NtnHVMDpCKn3F2SEDzq//wbEBrD
-2NCcnWXL0CsnMQMeNuE9dnUM/0Umud1RvCPHX9jYhxBAEg09ODfnRDwYwFMJZI//
-1ZqmfHAuc1Uh6N//g7kdPjIe1qZ9LPFm6Vwdp6POXiUyK+OVrCoHzrQoeIY8Laad
-TdJ0MN1kURXbg4NR16/9M51NZg==
------END CERTIFICATE-----
-
-# Issuer: CN=Security Communication ECC RootCA1 O=SECOM Trust Systems CO.,LTD.
-# Subject: CN=Security Communication ECC RootCA1 O=SECOM Trust Systems CO.,LTD.
-# Label: "Security Communication ECC RootCA1"
-# Serial: 15446673492073852651
-# MD5 Fingerprint: 7e:43:b0:92:68:ec:05:43:4c:98:ab:5d:35:2e:7e:86
-# SHA1 Fingerprint: b8:0e:26:a9:bf:d2:b2:3b:c0:ef:46:c9:ba:c7:bb:f6:1d:0d:41:41
-# SHA256 Fingerprint: e7:4f:bd:a5:5b:d5:64:c4:73:a3:6b:44:1a:a7:99:c8:a6:8e:07:74:40:e8:28:8b:9f:a1:e5:0e:4b:ba:ca:11
------BEGIN CERTIFICATE-----
-MIICODCCAb6gAwIBAgIJANZdm7N4gS7rMAoGCCqGSM49BAMDMGExCzAJBgNVBAYT
-AkpQMSUwIwYDVQQKExxTRUNPTSBUcnVzdCBTeXN0ZW1zIENPLixMVEQuMSswKQYD
-VQQDEyJTZWN1cml0eSBDb21tdW5pY2F0aW9uIEVDQyBSb290Q0ExMB4XDTE2MDYx
-NjA1MTUyOFoXDTM4MDExODA1MTUyOFowYTELMAkGA1UEBhMCSlAxJTAjBgNVBAoT
-HFNFQ09NIFRydXN0IFN5c3RlbXMgQ08uLExURC4xKzApBgNVBAMTIlNlY3VyaXR5
-IENvbW11bmljYXRpb24gRUNDIFJvb3RDQTEwdjAQBgcqhkjOPQIBBgUrgQQAIgNi
-AASkpW9gAwPDvTH00xecK4R1rOX9PVdu12O/5gSJko6BnOPpR27KkBLIE+Cnnfdl
-dB9sELLo5OnvbYUymUSxXv3MdhDYW72ixvnWQuRXdtyQwjWpS4g8EkdtXP9JTxpK
-ULGjQjBAMB0GA1UdDgQWBBSGHOf+LaVKiwj+KBH6vqNm+GBZLzAOBgNVHQ8BAf8E
-BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjAVXUI9/Lbu
-9zuxNuie9sRGKEkz0FhDKmMpzE2xtHqiuQ04pV1IKv3LsnNdo4gIxwwCMQDAqy0O
-be0YottT6SXbVQjgUMzfRGEWgqtJsLKB7HOHeLRMsmIbEvoWTSVLY70eN9k=
------END CERTIFICATE-----
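Each entry in the cacert.pem hunks above pairs a PEM block with Issuer/Subject/Label/Serial metadata and MD5/SHA1/SHA256 fingerprints computed over the certificate's DER encoding. A minimal sketch, not part of the diff and using only the standard library, of how those fingerprint lines can be recomputed from any of the PEM blocks shown:

import base64
import hashlib

def fingerprint(pem: str, algo: str = "sha256") -> str:
    """Return the colon-separated hex digest of a PEM certificate's DER bytes."""
    body = "".join(
        line for line in pem.strip().splitlines()
        if "-----" not in line            # drop the BEGIN/END armour lines
    )
    der = base64.b64decode(body)          # the fingerprint covers the DER form
    digest = hashlib.new(algo, der).hexdigest()
    return ":".join(digest[i:i + 2] for i in range(0, len(digest), 2))

# fingerprint(pem_text, "md5") / "sha1" / "sha256" should reproduce the
# corresponding "# ... Fingerprint:" header for that block.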
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/certifi/core.py b/venv/lib/python3.10/site-packages/pip/_vendor/certifi/core.py
index c3e5466..f8d4313 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/certifi/core.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/certifi/core.py
@@ -1,20 +1,36 @@
+# -*- coding: utf-8 -*-
+
 """
 certifi.py
 ~~~~~~~~~~
 
 This module returns the installation location of cacert.pem or its contents.
 """
-import sys
+import os
+
+
+class _PipPatchedCertificate(Exception):
+    pass
 
 
-if sys.version_info >= (3, 11):
+DEBIAN_CA_CERTS_PATH = '/etc/ssl/certs/ca-certificates.crt'
 
-    from importlib.resources import as_file, files
+try:
+    # Return a certificate file on disk for a standalone pip zipapp running in
+    # an isolated build environment to use. Passing --cert to the standalone
+    # pip does not work since requests calls where() unconditionally on import.
+    _PIP_STANDALONE_CERT = os.environ.get("_PIP_STANDALONE_CERT")
+    if _PIP_STANDALONE_CERT:
+        def where():
+            return _PIP_STANDALONE_CERT
+        raise _PipPatchedCertificate()
+
+    from importlib.resources import path as get_path, read_text
 
     _CACERT_CTX = None
     _CACERT_PATH = None
 
-    def where() -> str:
+    def where():
         # This is slightly terrible, but we want to delay extracting the file
         # in cases where we're inside of a zipimport situation until someone
         # actually calls where(), but we don't want to re-extract the file
@@ -33,76 +49,28 @@ def where() -> str:
             # We also have to hold onto the actual context manager, because
             # it will do the cleanup whenever it gets garbage collected, so
             # we will also store that at the global level as well.
-            _CACERT_CTX = as_file(files("pip._vendor.certifi").joinpath("cacert.pem"))
-            _CACERT_PATH = str(_CACERT_CTX.__enter__())
-
-        return _CACERT_PATH
-
-    def contents() -> str:
-        return files("pip._vendor.certifi").joinpath("cacert.pem").read_text(encoding="ascii")
-
-elif sys.version_info >= (3, 7):
-
-    from importlib.resources import path as get_path, read_text
-
-    _CACERT_CTX = None
-    _CACERT_PATH = None
-
-    def where() -> str:
-        # This is slightly terrible, but we want to delay extracting the
-        # file in cases where we're inside of a zipimport situation until
-        # someone actually calls where(), but we don't want to re-extract
-        # the file on every call of where(), so we'll do it once then store
-        # it in a global variable.
-        global _CACERT_CTX
-        global _CACERT_PATH
-        if _CACERT_PATH is None:
-            # This is slightly janky, the importlib.resources API wants you
-            # to manage the cleanup of this file, so it doesn't actually
-            # return a path, it returns a context manager that will give
-            # you the path when you enter it and will do any cleanup when
-            # you leave it. In the common case of not needing a temporary
-            # file, it will just return the file system location and the
-            # __exit__() is a no-op.
-            #
-            # We also have to hold onto the actual context manager, because
-            # it will do the cleanup whenever it gets garbage collected, so
-            # we will also store that at the global level as well.
-            _CACERT_CTX = get_path("pip._vendor.certifi", "cacert.pem")
-            _CACERT_PATH = str(_CACERT_CTX.__enter__())
+            _CACERT_PATH = DEBIAN_CA_CERTS_PATH
 
         return _CACERT_PATH
 
-    def contents() -> str:
-        return read_text("pip._vendor.certifi", "cacert.pem", encoding="ascii")
-
-else:
-    import os
-    import types
-    from typing import Union
-
-    Package = Union[types.ModuleType, str]
-    Resource = Union[str, "os.PathLike"]
+except _PipPatchedCertificate:
+    pass
 
+except ImportError:
     # This fallback will work for Python versions prior to 3.7 that lack the
     # importlib.resources module but relies on the existing `where` function
     # so won't address issues with environments like PyOxidizer that don't set
     # __file__ on modules.
-    def read_text(
-        package: Package,
-        resource: Resource,
-        encoding: str = 'utf-8',
-        errors: str = 'strict'
-    ) -> str:
-        with open(where(), encoding=encoding) as data:
+    def read_text(_module, _path, encoding="ascii"):
+        with open(where(), "r", encoding=encoding) as data:
             return data.read()
 
     # If we don't have importlib.resources, then we will just do the old logic
     # of assuming we're on the filesystem and munge the path directly.
-    def where() -> str:
-        f = os.path.dirname(__file__)
+    def where():
+        return DEBIAN_CA_CERTS_PATH
 
-        return os.path.join(f, "cacert.pem")
 
-    def contents() -> str:
-        return read_text("pip._vendor.certifi", "cacert.pem", encoding="ascii")
+def contents():
+    with open(where(), "r", encoding="ascii") as data:
+        return data.read()
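The core.py hunk above is the Debian patch to vendored certifi: rather than extracting the bundled cacert.pem through importlib.resources, where() is short-circuited to the system bundle, and the private _PipPatchedCertificate exception is raised to bail out of the import-time setup as soon as the _PIP_STANDALONE_CERT override is seen. A hedged usage sketch of the patched module; the path shown is what the patch implies on a stock Debian/Ubuntu install:

from pip._vendor.certifi.core import contents, where

print(where())           # /etc/ssl/certs/ca-certificates.crt
print(len(contents()))   # size of the system CA bundle, in characters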
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/__init__.py b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/__init__.py
index e91ad61..80ad254 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/__init__.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/__init__.py
@@ -15,11 +15,13 @@
 # 02110-1301  USA
 ######################### END LICENSE BLOCK #########################
 
-from .enums import InputState
+
 from .universaldetector import UniversalDetector
-from .version import VERSION, __version__
+from .enums import InputState
+from .version import __version__, VERSION
 
-__all__ = ["UniversalDetector", "detect", "detect_all", "__version__", "VERSION"]
+
+__all__ = ['UniversalDetector', 'detect', 'detect_all', '__version__', 'VERSION']
 
 
 def detect(byte_str):
@@ -31,63 +33,51 @@ def detect(byte_str):
     """
     if not isinstance(byte_str, bytearray):
         if not isinstance(byte_str, bytes):
-            raise TypeError(
-                f"Expected object of type bytes or bytearray, got: {type(byte_str)}"
-            )
-        byte_str = bytearray(byte_str)
+            raise TypeError('Expected object of type bytes or bytearray, got: '
+                            '{}'.format(type(byte_str)))
+        else:
+            byte_str = bytearray(byte_str)
     detector = UniversalDetector()
     detector.feed(byte_str)
     return detector.close()
 
 
-def detect_all(byte_str, ignore_threshold=False):
+def detect_all(byte_str):
     """
     Detect all the possible encodings of the given byte string.
 
-    :param byte_str:          The byte sequence to examine.
-    :type byte_str:           ``bytes`` or ``bytearray``
-    :param ignore_threshold:  Include encodings that are below
-                              ``UniversalDetector.MINIMUM_THRESHOLD``
-                              in results.
-    :type ignore_threshold:   ``bool``
+    :param byte_str:     The byte sequence to examine.
+    :type byte_str:      ``bytes`` or ``bytearray``
     """
     if not isinstance(byte_str, bytearray):
         if not isinstance(byte_str, bytes):
-            raise TypeError(
-                f"Expected object of type bytes or bytearray, got: {type(byte_str)}"
-            )
-        byte_str = bytearray(byte_str)
+            raise TypeError('Expected object of type bytes or bytearray, got: '
+                            '{}'.format(type(byte_str)))
+        else:
+            byte_str = bytearray(byte_str)
 
     detector = UniversalDetector()
     detector.feed(byte_str)
     detector.close()
 
-    if detector.input_state == InputState.HIGH_BYTE:
+    if detector._input_state == InputState.HIGH_BYTE:
         results = []
-        probers = []
-        for prober in detector.charset_probers:
-            if hasattr(prober, "probers"):
-                probers.extend(p for p in prober.probers)
-            else:
-                probers.append(prober)
-        for prober in probers:
-            if ignore_threshold or prober.get_confidence() > detector.MINIMUM_THRESHOLD:
-                charset_name = prober.charset_name or ""
-                lower_charset_name = charset_name.lower()
+        for prober in detector._charset_probers:
+            if prober.get_confidence() > detector.MINIMUM_THRESHOLD:
+                charset_name = prober.charset_name
+                lower_charset_name = prober.charset_name.lower()
                 # Use Windows encoding name instead of ISO-8859 if we saw any
                 # extra Windows-specific bytes
-                if lower_charset_name.startswith("iso-8859") and detector.has_win_bytes:
-                    charset_name = detector.ISO_WIN_MAP.get(
-                        lower_charset_name, charset_name
-                    )
-                results.append(
-                    {
-                        "encoding": charset_name,
-                        "confidence": prober.get_confidence(),
-                        "language": prober.language,
-                    }
-                )
+                if lower_charset_name.startswith('iso-8859'):
+                    if detector._has_win_bytes:
+                        charset_name = detector.ISO_WIN_MAP.get(lower_charset_name,
+                                                            charset_name)
+                results.append({
+                    'encoding': charset_name,
+                    'confidence': prober.get_confidence(),
+                    'language': prober.language,
+                })
         if len(results) > 0:
-            return sorted(results, key=lambda result: -result["confidence"])
+            return sorted(results, key=lambda result: -result['confidence'])
 
     return [detector.result]
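
For reference, the two entry points touched above are typically driven as in the minimal sketch below (assuming the standalone chardet package, which exposes the same detect/detect_all names as this vendored copy):

    import chardet

    data = "Это пример текста для определения кодировки.".encode("windows-1251")

    # detect() returns a single best guess:
    # {'encoding': ..., 'confidence': ..., 'language': ...}
    print(chardet.detect(data))

    # detect_all() returns every candidate above MINIMUM_THRESHOLD, sorted
    # by descending confidence, per the code above. Short inputs yield weak
    # confidences; real inputs should be longer.
    for result in chardet.detect_all(data):
        print(result["encoding"], result["confidence"])
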
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/big5freq.py b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/big5freq.py
index 87d9f97..38f3251 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/big5freq.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/big5freq.py
@@ -42,9 +42,9 @@
 
 BIG5_TYPICAL_DISTRIBUTION_RATIO = 0.75
 
-# Char to FreqOrder table
+#Char to FreqOrder table
 BIG5_TABLE_SIZE = 5376
-# fmt: off
+
 BIG5_CHAR_TO_FREQ_ORDER = (
    1,1801,1506, 255,1431, 198,   9,  82,   6,5008, 177, 202,3681,1256,2821, 110, #   16
 3814,  33,3274, 261,  76,  44,2114,  16,2946,2187,1176, 659,3971,  26,3451,2653, #   32
@@ -383,4 +383,4 @@
  890,3669,3943,5791,1878,3798,3439,5792,2186,2358,3440,1652,5793,5794,5795, 941, # 5360
 2299, 208,3546,4161,2020, 330,4438,3944,2906,2499,3799,4439,4811,5796,5797,5798, # 5376
 )
-# fmt: on
+
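
The table above maps a computed character "order" to a frequency rank; the Big5 order formula appears in chardistribution.py later in this diff. A hedged sketch of the lookup (import path assumes the standalone chardet package):

    from chardet.big5freq import BIG5_CHAR_TO_FREQ_ORDER, BIG5_TABLE_SIZE

    # Order of a two-byte Big5 character, using the formula from
    # Big5DistributionAnalysis.get_order() further down in this diff.
    first, second = 0xA4, 0xA1
    order = 157 * (first - 0xA4) + second - 0xA1 + 63
    if order < BIG5_TABLE_SIZE:
        print("frequency rank:", BIG5_CHAR_TO_FREQ_ORDER[order])
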
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/big5prober.py b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/big5prober.py
index e4dfa7a..98f9970 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/big5prober.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/big5prober.py
@@ -25,15 +25,15 @@
 # 02110-1301  USA
 ######################### END LICENSE BLOCK #########################
 
-from .chardistribution import Big5DistributionAnalysis
-from .codingstatemachine import CodingStateMachine
 from .mbcharsetprober import MultiByteCharSetProber
+from .codingstatemachine import CodingStateMachine
+from .chardistribution import Big5DistributionAnalysis
 from .mbcssm import BIG5_SM_MODEL
 
 
 class Big5Prober(MultiByteCharSetProber):
     def __init__(self):
-        super().__init__()
+        super(Big5Prober, self).__init__()
         self.coding_sm = CodingStateMachine(BIG5_SM_MODEL)
         self.distribution_analyzer = Big5DistributionAnalysis()
         self.reset()
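
Big5Prober simply wires a Big5 state machine to a Big5 distribution analyzer; the base class does the rest. A minimal usage sketch (normally UniversalDetector drives the prober; the import path assumes the standalone chardet package):

    from chardet.big5prober import Big5Prober

    prober = Big5Prober()
    prober.feed("繁體中文測試".encode("big5"))
    # Short samples give low confidence; longer text is more reliable.
    print(prober.charset_name, prober.get_confidence())
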
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/chardistribution.py b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/chardistribution.py
index 27b4a29..c0395f4 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/chardistribution.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/chardistribution.py
@@ -25,35 +25,19 @@
 # 02110-1301  USA
 ######################### END LICENSE BLOCK #########################
 
-from .big5freq import (
-    BIG5_CHAR_TO_FREQ_ORDER,
-    BIG5_TABLE_SIZE,
-    BIG5_TYPICAL_DISTRIBUTION_RATIO,
-)
-from .euckrfreq import (
-    EUCKR_CHAR_TO_FREQ_ORDER,
-    EUCKR_TABLE_SIZE,
-    EUCKR_TYPICAL_DISTRIBUTION_RATIO,
-)
-from .euctwfreq import (
-    EUCTW_CHAR_TO_FREQ_ORDER,
-    EUCTW_TABLE_SIZE,
-    EUCTW_TYPICAL_DISTRIBUTION_RATIO,
-)
-from .gb2312freq import (
-    GB2312_CHAR_TO_FREQ_ORDER,
-    GB2312_TABLE_SIZE,
-    GB2312_TYPICAL_DISTRIBUTION_RATIO,
-)
-from .jisfreq import (
-    JIS_CHAR_TO_FREQ_ORDER,
-    JIS_TABLE_SIZE,
-    JIS_TYPICAL_DISTRIBUTION_RATIO,
-)
-from .johabfreq import JOHAB_TO_EUCKR_ORDER_TABLE
-
-
-class CharDistributionAnalysis:
+from .euctwfreq import (EUCTW_CHAR_TO_FREQ_ORDER, EUCTW_TABLE_SIZE,
+                        EUCTW_TYPICAL_DISTRIBUTION_RATIO)
+from .euckrfreq import (EUCKR_CHAR_TO_FREQ_ORDER, EUCKR_TABLE_SIZE,
+                        EUCKR_TYPICAL_DISTRIBUTION_RATIO)
+from .gb2312freq import (GB2312_CHAR_TO_FREQ_ORDER, GB2312_TABLE_SIZE,
+                         GB2312_TYPICAL_DISTRIBUTION_RATIO)
+from .big5freq import (BIG5_CHAR_TO_FREQ_ORDER, BIG5_TABLE_SIZE,
+                       BIG5_TYPICAL_DISTRIBUTION_RATIO)
+from .jisfreq import (JIS_CHAR_TO_FREQ_ORDER, JIS_TABLE_SIZE,
+                      JIS_TYPICAL_DISTRIBUTION_RATIO)
+
+
+class CharDistributionAnalysis(object):
     ENOUGH_DATA_THRESHOLD = 1024
     SURE_YES = 0.99
     SURE_NO = 0.01
@@ -62,7 +46,7 @@ class CharDistributionAnalysis:
     def __init__(self):
         # Mapping table to get frequency order from char order (get from
         # GetOrder())
-        self._char_to_freq_order = tuple()
+        self._char_to_freq_order = None
         self._table_size = None  # Size of above table
         # This is a constant value which varies from language to language,
         # used in calculating confidence.  See
@@ -105,9 +89,8 @@ def get_confidence(self):
             return self.SURE_NO
 
         if self._total_chars != self._freq_chars:
-            r = self._freq_chars / (
-                (self._total_chars - self._freq_chars) * self.typical_distribution_ratio
-            )
+            r = (self._freq_chars / ((self._total_chars - self._freq_chars)
+                 * self.typical_distribution_ratio))
             if r < self.SURE_YES:
                 return r
 
@@ -119,7 +102,7 @@ def got_enough_data(self):
         # For charset detection, certain amount of data is enough
         return self._total_chars > self.ENOUGH_DATA_THRESHOLD
 
-    def get_order(self, _):
+    def get_order(self, byte_str):
         # We do not handle characters based on the original encoding string,
         # but convert this encoding string to a number, here called order.
         # This allows multiple encodings of a language to share one frequency
@@ -129,7 +112,7 @@ def get_order(self, _):
 
 class EUCTWDistributionAnalysis(CharDistributionAnalysis):
     def __init__(self):
-        super().__init__()
+        super(EUCTWDistributionAnalysis, self).__init__()
         self._char_to_freq_order = EUCTW_CHAR_TO_FREQ_ORDER
         self._table_size = EUCTW_TABLE_SIZE
         self.typical_distribution_ratio = EUCTW_TYPICAL_DISTRIBUTION_RATIO
@@ -142,12 +125,13 @@ def get_order(self, byte_str):
         first_char = byte_str[0]
         if first_char >= 0xC4:
             return 94 * (first_char - 0xC4) + byte_str[1] - 0xA1
-        return -1
+        else:
+            return -1
 
 
 class EUCKRDistributionAnalysis(CharDistributionAnalysis):
     def __init__(self):
-        super().__init__()
+        super(EUCKRDistributionAnalysis, self).__init__()
         self._char_to_freq_order = EUCKR_CHAR_TO_FREQ_ORDER
         self._table_size = EUCKR_TABLE_SIZE
         self.typical_distribution_ratio = EUCKR_TYPICAL_DISTRIBUTION_RATIO
@@ -160,27 +144,13 @@ def get_order(self, byte_str):
         first_char = byte_str[0]
         if first_char >= 0xB0:
             return 94 * (first_char - 0xB0) + byte_str[1] - 0xA1
-        return -1
-
-
-class JOHABDistributionAnalysis(CharDistributionAnalysis):
-    def __init__(self):
-        super().__init__()
-        self._char_to_freq_order = EUCKR_CHAR_TO_FREQ_ORDER
-        self._table_size = EUCKR_TABLE_SIZE
-        self.typical_distribution_ratio = EUCKR_TYPICAL_DISTRIBUTION_RATIO
-
-    def get_order(self, byte_str):
-        first_char = byte_str[0]
-        if 0x88 <= first_char < 0xD4:
-            code = first_char * 256 + byte_str[1]
-            return JOHAB_TO_EUCKR_ORDER_TABLE.get(code, -1)
-        return -1
+        else:
+            return -1
 
 
 class GB2312DistributionAnalysis(CharDistributionAnalysis):
     def __init__(self):
-        super().__init__()
+        super(GB2312DistributionAnalysis, self).__init__()
         self._char_to_freq_order = GB2312_CHAR_TO_FREQ_ORDER
         self._table_size = GB2312_TABLE_SIZE
         self.typical_distribution_ratio = GB2312_TYPICAL_DISTRIBUTION_RATIO
@@ -193,12 +163,13 @@ def get_order(self, byte_str):
         first_char, second_char = byte_str[0], byte_str[1]
         if (first_char >= 0xB0) and (second_char >= 0xA1):
             return 94 * (first_char - 0xB0) + second_char - 0xA1
-        return -1
+        else:
+            return -1
 
 
 class Big5DistributionAnalysis(CharDistributionAnalysis):
     def __init__(self):
-        super().__init__()
+        super(Big5DistributionAnalysis, self).__init__()
         self._char_to_freq_order = BIG5_CHAR_TO_FREQ_ORDER
         self._table_size = BIG5_TABLE_SIZE
         self.typical_distribution_ratio = BIG5_TYPICAL_DISTRIBUTION_RATIO
@@ -212,13 +183,15 @@ def get_order(self, byte_str):
         if first_char >= 0xA4:
             if second_char >= 0xA1:
                 return 157 * (first_char - 0xA4) + second_char - 0xA1 + 63
-            return 157 * (first_char - 0xA4) + second_char - 0x40
-        return -1
+            else:
+                return 157 * (first_char - 0xA4) + second_char - 0x40
+        else:
+            return -1
 
 
 class SJISDistributionAnalysis(CharDistributionAnalysis):
     def __init__(self):
-        super().__init__()
+        super(SJISDistributionAnalysis, self).__init__()
         self._char_to_freq_order = JIS_CHAR_TO_FREQ_ORDER
         self._table_size = JIS_TABLE_SIZE
         self.typical_distribution_ratio = JIS_TYPICAL_DISTRIBUTION_RATIO
@@ -229,9 +202,9 @@ def get_order(self, byte_str):
         #   second byte range: 0x40 -- 0x7e,  0x81 -- 0xfe
         # no validation needed here. State machine has done that
         first_char, second_char = byte_str[0], byte_str[1]
-        if 0x81 <= first_char <= 0x9F:
+        if (first_char >= 0x81) and (first_char <= 0x9F):
             order = 188 * (first_char - 0x81)
-        elif 0xE0 <= first_char <= 0xEF:
+        elif (first_char >= 0xE0) and (first_char <= 0xEF):
             order = 188 * (first_char - 0xE0 + 31)
         else:
             return -1
@@ -243,7 +216,7 @@ def get_order(self, byte_str):
 
 class EUCJPDistributionAnalysis(CharDistributionAnalysis):
     def __init__(self):
-        super().__init__()
+        super(EUCJPDistributionAnalysis, self).__init__()
         self._char_to_freq_order = JIS_CHAR_TO_FREQ_ORDER
         self._table_size = JIS_TABLE_SIZE
         self.typical_distribution_ratio = JIS_TYPICAL_DISTRIBUTION_RATIO
@@ -255,5 +228,6 @@ def get_order(self, byte_str):
         # no validation needed here. State machine has done that
         char = byte_str[0]
         if char >= 0xA0:
-            return 94 * (char - 0xA1) + byte_str[1] - 0xA1
-        return -1
+            return 94 * (char - 0xA1) + byte_str[1] - 0xa1
+        else:
+            return -1
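
The confidence arithmetic in get_confidence() above reduces to one ratio capped at SURE_YES; a worked sketch with made-up counts:

    SURE_YES = 0.99
    typical_ratio = 0.75                 # e.g. the Big5 ratio from big5freq.py
    total_chars, freq_chars = 1000, 600  # hypothetical counters

    r = freq_chars / ((total_chars - freq_chars) * typical_ratio)
    print(min(r, SURE_YES))              # 2.0, capped to 0.99
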
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/charsetgroupprober.py b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/charsetgroupprober.py
index 778ff33..5812cef 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/charsetgroupprober.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/charsetgroupprober.py
@@ -25,19 +25,19 @@
 # 02110-1301  USA
 ######################### END LICENSE BLOCK #########################
 
-from .charsetprober import CharSetProber
 from .enums import ProbingState
+from .charsetprober import CharSetProber
 
 
 class CharSetGroupProber(CharSetProber):
     def __init__(self, lang_filter=None):
-        super().__init__(lang_filter=lang_filter)
+        super(CharSetGroupProber, self).__init__(lang_filter=lang_filter)
         self._active_num = 0
         self.probers = []
         self._best_guess_prober = None
 
     def reset(self):
-        super().reset()
+        super(CharSetGroupProber, self).reset()
         self._active_num = 0
         for prober in self.probers:
             if prober:
@@ -75,7 +75,7 @@ def feed(self, byte_str):
                 self._best_guess_prober = prober
                 self._state = ProbingState.FOUND_IT
                 return self.state
-            if state == ProbingState.NOT_ME:
+            elif state == ProbingState.NOT_ME:
                 prober.active = False
                 self._active_num -= 1
                 if self._active_num <= 0:
@@ -87,7 +87,7 @@ def get_confidence(self):
         state = self.state
         if state == ProbingState.FOUND_IT:
             return 0.99
-        if state == ProbingState.NOT_ME:
+        elif state == ProbingState.NOT_ME:
             return 0.01
         best_conf = 0.0
         self._best_guess_prober = None
@@ -95,12 +95,10 @@ def get_confidence(self):
             if not prober:
                 continue
             if not prober.active:
-                self.logger.debug("%s not active", prober.charset_name)
+                self.logger.debug('%s not active', prober.charset_name)
                 continue
             conf = prober.get_confidence()
-            self.logger.debug(
-                "%s %s confidence = %s", prober.charset_name, prober.language, conf
-            )
+            self.logger.debug('%s %s confidence = %s', prober.charset_name, prober.language, conf)
             if best_conf < conf:
                 best_conf = conf
                 self._best_guess_prober = prober
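
The best-guess scan in get_confidence() above keeps the highest-confidence active sub-prober; the same selection over plain tuples, for illustration:

    probers = [("SHIFT_JIS", 0.31, True),
               ("Big5", 0.74, True),
               ("EUC-KR", 0.02, False)]  # inactive probers are skipped

    best_name, best_conf = None, 0.0
    for name, conf, active in probers:
        if not active:
            continue
        if conf > best_conf:
            best_name, best_conf = name, conf
    print(best_name, best_conf)          # Big5 0.74
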
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/charsetprober.py b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/charsetprober.py
index 9f1afd9..eac4e59 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/charsetprober.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/charsetprober.py
@@ -31,12 +31,8 @@
 
 from .enums import ProbingState
 
-INTERNATIONAL_WORDS_PATTERN = re.compile(
-    b"[a-zA-Z]*[\x80-\xFF]+[a-zA-Z]*[^a-zA-Z\x80-\xFF]?"
-)
 
-
-class CharSetProber:
+class CharSetProber(object):
 
     SHORTCUT_THRESHOLD = 0.95
 
@@ -52,8 +48,8 @@ def reset(self):
     def charset_name(self):
         return None
 
-    def feed(self, byte_str):
-        raise NotImplementedError
+    def feed(self, buf):
+        pass
 
     @property
     def state(self):
@@ -64,7 +60,7 @@ def get_confidence(self):
 
     @staticmethod
     def filter_high_byte_only(buf):
-        buf = re.sub(b"([\x00-\x7F])+", b" ", buf)
+        buf = re.sub(b'([\x00-\x7F])+', b' ', buf)
         return buf
 
     @staticmethod
@@ -74,10 +70,12 @@ def filter_international_words(buf):
         alphabet: english alphabets [a-zA-Z]
         international: international characters [\x80-\xFF]
         marker: everything else [^a-zA-Z\x80-\xFF]
+
         The input buffer can be thought to contain a series of words delimited
         by markers. This function works to filter all words that contain at
         least one international character. All contiguous sequences of markers
         are replaced by a single space ascii character.
+
         This filter applies to all scripts which do not use English characters.
         """
         filtered = bytearray()
@@ -85,7 +83,8 @@ def filter_international_words(buf):
         # This regex expression filters out only words that have at-least one
         # international character. The word may include one marker character at
         # the end.
-        words = INTERNATIONAL_WORDS_PATTERN.findall(buf)
+        words = re.findall(b'[a-zA-Z]*[\x80-\xFF]+[a-zA-Z]*[^a-zA-Z\x80-\xFF]?',
+                           buf)
 
         for word in words:
             filtered.extend(word[:-1])
@@ -95,17 +94,20 @@ def filter_international_words(buf):
             # similarly across all languages and may thus have similar
             # frequencies).
             last_char = word[-1:]
-            if not last_char.isalpha() and last_char < b"\x80":
-                last_char = b" "
+            if not last_char.isalpha() and last_char < b'\x80':
+                last_char = b' '
             filtered.extend(last_char)
 
         return filtered
 
     @staticmethod
-    def remove_xml_tags(buf):
+    def filter_with_english_letters(buf):
         """
         Returns a copy of ``buf`` that retains only the sequences of English
         alphabet and high byte characters that are not between <> characters.
+        Also retains English alphabet and high byte characters immediately
+        before occurrences of >.
+
         This filter can be applied to all scripts which contain both English
         characters and extended ASCII characters, but is currently only used by
         ``Latin1Prober``.
@@ -113,21 +115,26 @@ def remove_xml_tags(buf):
         filtered = bytearray()
         in_tag = False
         prev = 0
-        buf = memoryview(buf).cast("c")
 
-        for curr, buf_char in enumerate(buf):
-            # Check if we're coming out of or entering an XML tag
-            if buf_char == b">":
-                prev = curr + 1
+        for curr in range(len(buf)):
+            # Slice here to get bytes instead of an int with Python 3
+            buf_char = buf[curr:curr + 1]
+            # Check if we're coming out of or entering an HTML tag
+            if buf_char == b'>':
                 in_tag = False
-            elif buf_char == b"<":
+            elif buf_char == b'<':
+                in_tag = True
+
+            # If current character is not extended-ASCII and not alphabetic...
+            if buf_char < b'\x80' and not buf_char.isalpha():
+                # ...and we're not in a tag
                 if curr > prev and not in_tag:
                     # Keep everything after last non-extended-ASCII,
                     # non-alphabetic character
                     filtered.extend(buf[prev:curr])
                     # Output a space to delimit stretch we kept
-                    filtered.extend(b" ")
-                in_tag = True
+                    filtered.extend(b' ')
+                prev = curr + 1
 
         # If we're not in a tag...
         if not in_tag:
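
The regex this hunk inlines back into filter_international_words() keeps only letter runs containing at least one high byte, plus an optional trailing marker byte. A small demo:

    import re

    buf = b"plain ascii caf\xe9 again \xc3\xbcber!"
    words = re.findall(b"[a-zA-Z]*[\x80-\xFF]+[a-zA-Z]*[^a-zA-Z\x80-\xFF]?",
                       buf)
    print(words)   # only the two words containing high bytes survive
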
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/cli/__init__.py b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/cli/__init__.py
index e69de29..8b13789 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/cli/__init__.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/cli/__init__.py
@@ -0,0 +1 @@
+
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/cli/chardetect.py b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/cli/chardetect.py
index 7926fa3..6d6f93a 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/cli/chardetect.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/cli/chardetect.py
@@ -12,15 +12,17 @@
 
 """
 
+from __future__ import absolute_import, print_function, unicode_literals
 
 import argparse
 import sys
 
-from .. import __version__
-from ..universaldetector import UniversalDetector
+from pip._vendor.chardet import __version__
+from pip._vendor.chardet.compat import PY2
+from pip._vendor.chardet.universaldetector import UniversalDetector
 
 
-def description_of(lines, name="stdin"):
+def description_of(lines, name='stdin'):
     """
     Return a string describing the probable encoding of a file or
     list of strings.
@@ -39,9 +41,13 @@ def description_of(lines, name="stdin"):
             break
     u.close()
     result = u.result
-    if result["encoding"]:
-        return f'{name}: {result["encoding"]} with confidence {result["confidence"]}'
-    return f"{name}: no result"
+    if PY2:
+        name = name.decode(sys.getfilesystemencoding(), 'ignore')
+    if result['encoding']:
+        return '{}: {} with confidence {}'.format(name, result['encoding'],
+                                                     result['confidence'])
+    else:
+        return '{}: no result'.format(name)
 
 
 def main(argv=None):
@@ -55,32 +61,24 @@ def main(argv=None):
     # Get command line arguments
     parser = argparse.ArgumentParser(
         description="Takes one or more file paths and reports their detected \
-                     encodings"
-    )
-    parser.add_argument(
-        "input",
-        help="File whose encoding we would like to determine. \
-                              (default: stdin)",
-        type=argparse.FileType("rb"),
-        nargs="*",
-        default=[sys.stdin.buffer],
-    )
-    parser.add_argument(
-        "--version", action="version", version=f"%(prog)s {__version__}"
-    )
+                     encodings")
+    parser.add_argument('input',
+                        help='File whose encoding we would like to determine. \
+                              (default: stdin)',
+                        type=argparse.FileType('rb'), nargs='*',
+                        default=[sys.stdin if PY2 else sys.stdin.buffer])
+    parser.add_argument('--version', action='version',
+                        version='%(prog)s {}'.format(__version__))
     args = parser.parse_args(argv)
 
     for f in args.input:
         if f.isatty():
-            print(
-                "You are running chardetect interactively. Press "
-                "CTRL-D twice at the start of a blank line to signal the "
-                "end of your input. If you want help, run chardetect "
-                "--help\n",
-                file=sys.stderr,
-            )
+            print("You are running chardetect interactively. Press " +
+                  "CTRL-D twice at the start of a blank line to signal the " +
+                  "end of your input. If you want help, run chardetect " +
+                  "--help\n", file=sys.stderr)
         print(description_of(f, f.name))
 
 
-if __name__ == "__main__":
+if __name__ == '__main__':
     main()
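
description_of() can also be called directly; a hedged sketch ('sample.txt' is hypothetical, and the import path assumes the standalone chardet package):

    from chardet.cli.chardetect import description_of

    with open("sample.txt", "rb") as f:
        # e.g. "sample.txt: utf-8 with confidence 0.99"
        print(description_of(f, f.name))
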
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/codingstatemachine.py b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/codingstatemachine.py
index d3e3e82..68fba44 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/codingstatemachine.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/codingstatemachine.py
@@ -30,7 +30,7 @@
 from .enums import MachineState
 
 
-class CodingStateMachine:
+class CodingStateMachine(object):
     """
     A state machine to verify a byte sequence for a particular encoding. For
     each byte the detector receives, it will feed that byte to every active
@@ -52,7 +52,6 @@ class CodingStateMachine:
                  negative answer for this encoding. Detector will exclude this
                  encoding from consideration from here on.
     """
-
     def __init__(self, sm):
         self._model = sm
         self._curr_byte_pos = 0
@@ -67,13 +66,14 @@ def reset(self):
     def next_state(self, c):
         # for each byte we get its class
         # if it is first byte, we also get byte length
-        byte_class = self._model["class_table"][c]
+        byte_class = self._model['class_table'][c]
         if self._curr_state == MachineState.START:
             self._curr_byte_pos = 0
-            self._curr_char_len = self._model["char_len_table"][byte_class]
+            self._curr_char_len = self._model['char_len_table'][byte_class]
         # from byte's class and state_table, we get its next state
-        curr_state = self._curr_state * self._model["class_factor"] + byte_class
-        self._curr_state = self._model["state_table"][curr_state]
+        curr_state = (self._curr_state * self._model['class_factor']
+                      + byte_class)
+        self._curr_state = self._model['state_table'][curr_state]
         self._curr_byte_pos += 1
         return self._curr_state
 
@@ -81,8 +81,8 @@ def get_current_charlen(self):
         return self._curr_char_len
 
     def get_coding_state_machine(self):
-        return self._model["name"]
+        return self._model['name']
 
     @property
     def language(self):
-        return self._model["language"]
+        return self._model['language']
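
next_state() consumes one byte at a time and returns the machine's new state; MachineState.ERROR rules the encoding out. A sketch using HZ_SM_MODEL from escsm.py further down in this diff (imports assume the standalone chardet package):

    from chardet.codingstatemachine import CodingStateMachine
    from chardet.enums import MachineState
    from chardet.escsm import HZ_SM_MODEL

    sm = CodingStateMachine(HZ_SM_MODEL)
    state = None
    for byte in b"~{":                   # the HZ-GB-2312 escape sequence
        state = sm.next_state(byte)
    # Still a live intermediate state, not MachineState.ERROR.
    print(state)
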
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/cp949prober.py b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/cp949prober.py
index 28a1f3d..efd793a 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/cp949prober.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/cp949prober.py
@@ -33,7 +33,7 @@
 
 class CP949Prober(MultiByteCharSetProber):
     def __init__(self):
-        super().__init__()
+        super(CP949Prober, self).__init__()
         self.coding_sm = CodingStateMachine(CP949_SM_MODEL)
         # NOTE: CP949 is a superset of EUC-KR, so the distribution should
         #       not be different.
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/enums.py b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/enums.py
index 32a77e7..0451207 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/enums.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/enums.py
@@ -5,22 +5,20 @@
 """
 
 
-class InputState:
+class InputState(object):
     """
     This enum represents the different states a universal detector can be in.
     """
-
     PURE_ASCII = 0
     ESC_ASCII = 1
     HIGH_BYTE = 2
 
 
-class LanguageFilter:
+class LanguageFilter(object):
     """
     This enum represents the different language filters we can apply to a
     ``UniversalDetector``.
     """
-
     CHINESE_SIMPLIFIED = 0x01
     CHINESE_TRADITIONAL = 0x02
     JAPANESE = 0x04
@@ -31,31 +29,28 @@ class LanguageFilter:
     CJK = CHINESE | JAPANESE | KOREAN
 
 
-class ProbingState:
+class ProbingState(object):
     """
     This enum represents the different states a prober can be in.
     """
-
     DETECTING = 0
     FOUND_IT = 1
     NOT_ME = 2
 
 
-class MachineState:
+class MachineState(object):
     """
     This enum represents the different states a state machine can be in.
     """
-
     START = 0
     ERROR = 1
     ITS_ME = 2
 
 
-class SequenceLikelihood:
+class SequenceLikelihood(object):
     """
     This enum represents the likelihood of a character following the previous one.
     """
-
     NEGATIVE = 0
     UNLIKELY = 1
     LIKELY = 2
@@ -67,14 +62,13 @@ def get_num_categories(cls):
         return 4
 
 
-class CharacterCategory:
+class CharacterCategory(object):
     """
     This enum represents the different categories language models for
     ``SingleByteCharsetProber`` put characters into.
 
     Anything less than CONTROL is considered a letter.
     """
-
     UNDEFINED = 255
     LINE_BREAK = 254
     SYMBOL = 253
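
These "enums" are plain integer constants; the LanguageFilter values in particular combine as bit flags, which is how EscCharSetProber (next file) decides which state machines to build. NON_CJK below belongs to the same enum, just elided from this hunk:

    from chardet.enums import LanguageFilter

    lang_filter = LanguageFilter.CJK
    print(bool(lang_filter & LanguageFilter.JAPANESE))  # True
    print(bool(lang_filter & LanguageFilter.NON_CJK))   # False
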
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/escprober.py b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/escprober.py
index d992611..c70493f 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/escprober.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/escprober.py
@@ -27,13 +27,9 @@
 
 from .charsetprober import CharSetProber
 from .codingstatemachine import CodingStateMachine
-from .enums import LanguageFilter, MachineState, ProbingState
-from .escsm import (
-    HZ_SM_MODEL,
-    ISO2022CN_SM_MODEL,
-    ISO2022JP_SM_MODEL,
-    ISO2022KR_SM_MODEL,
-)
+from .enums import LanguageFilter, ProbingState, MachineState
+from .escsm import (HZ_SM_MODEL, ISO2022CN_SM_MODEL, ISO2022JP_SM_MODEL,
+                    ISO2022KR_SM_MODEL)
 
 
 class EscCharSetProber(CharSetProber):
@@ -44,7 +40,7 @@ class EscCharSetProber(CharSetProber):
     """
 
     def __init__(self, lang_filter=None):
-        super().__init__(lang_filter=lang_filter)
+        super(EscCharSetProber, self).__init__(lang_filter=lang_filter)
         self.coding_sm = []
         if self.lang_filter & LanguageFilter.CHINESE_SIMPLIFIED:
             self.coding_sm.append(CodingStateMachine(HZ_SM_MODEL))
@@ -60,7 +56,7 @@ def __init__(self, lang_filter=None):
         self.reset()
 
     def reset(self):
-        super().reset()
+        super(EscCharSetProber, self).reset()
         for coding_sm in self.coding_sm:
             if not coding_sm:
                 continue
@@ -79,7 +75,10 @@ def language(self):
         return self._detected_language
 
     def get_confidence(self):
-        return 0.99 if self._detected_charset else 0.00
+        if self._detected_charset:
+            return 0.99
+        else:
+            return 0.00
 
     def feed(self, byte_str):
         for c in byte_str:
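
A minimal sketch of the escape-sequence prober in action; ISO-2022-JP text opens with ESC $ B, which the ISO2022JP machine recognizes (import paths assume the standalone chardet package):

    from chardet.enums import LanguageFilter
    from chardet.escprober import EscCharSetProber

    prober = EscCharSetProber(LanguageFilter.JAPANESE)
    prober.feed("こんにちは".encode("iso2022_jp"))
    print(prober.charset_name, prober.get_confidence())
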
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/escsm.py b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/escsm.py
index 3aa0f4d..0069523 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/escsm.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/escsm.py
@@ -12,7 +12,7 @@
 # This library is free software; you can redistribute it and/or
 # modify it under the terms of the GNU Lesser General Public
 # License as published by the Free Software Foundation; either
-# version 2.1 of the License,  or (at your option) any later version.
+# version 2.1 of the License, or (at your option) any later version.
 #
 # This library is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -20,241 +20,227 @@
 # Lesser General Public License for more details.
 #
 # You should have received a copy of the GNU Lesser General Public
-# License along with this library; if not,  write to the Free Software
-# Foundation,  Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 # 02110-1301  USA
 ######################### END LICENSE BLOCK #########################
 
 from .enums import MachineState
 
-# fmt: off
 HZ_CLS = (
-    1, 0, 0, 0, 0, 0, 0, 0,  # 00 - 07
-    0, 0, 0, 0, 0, 0, 0, 0,  # 08 - 0f
-    0, 0, 0, 0, 0, 0, 0, 0,  # 10 - 17
-    0, 0, 0, 1, 0, 0, 0, 0,  # 18 - 1f
-    0, 0, 0, 0, 0, 0, 0, 0,  # 20 - 27
-    0, 0, 0, 0, 0, 0, 0, 0,  # 28 - 2f
-    0, 0, 0, 0, 0, 0, 0, 0,  # 30 - 37
-    0, 0, 0, 0, 0, 0, 0, 0,  # 38 - 3f
-    0, 0, 0, 0, 0, 0, 0, 0,  # 40 - 47
-    0, 0, 0, 0, 0, 0, 0, 0,  # 48 - 4f
-    0, 0, 0, 0, 0, 0, 0, 0,  # 50 - 57
-    0, 0, 0, 0, 0, 0, 0, 0,  # 58 - 5f
-    0, 0, 0, 0, 0, 0, 0, 0,  # 60 - 67
-    0, 0, 0, 0, 0, 0, 0, 0,  # 68 - 6f
-    0, 0, 0, 0, 0, 0, 0, 0,  # 70 - 77
-    0, 0, 0, 4, 0, 5, 2, 0,  # 78 - 7f
-    1, 1, 1, 1, 1, 1, 1, 1,  # 80 - 87
-    1, 1, 1, 1, 1, 1, 1, 1,  # 88 - 8f
-    1, 1, 1, 1, 1, 1, 1, 1,  # 90 - 97
-    1, 1, 1, 1, 1, 1, 1, 1,  # 98 - 9f
-    1, 1, 1, 1, 1, 1, 1, 1,  # a0 - a7
-    1, 1, 1, 1, 1, 1, 1, 1,  # a8 - af
-    1, 1, 1, 1, 1, 1, 1, 1,  # b0 - b7
-    1, 1, 1, 1, 1, 1, 1, 1,  # b8 - bf
-    1, 1, 1, 1, 1, 1, 1, 1,  # c0 - c7
-    1, 1, 1, 1, 1, 1, 1, 1,  # c8 - cf
-    1, 1, 1, 1, 1, 1, 1, 1,  # d0 - d7
-    1, 1, 1, 1, 1, 1, 1, 1,  # d8 - df
-    1, 1, 1, 1, 1, 1, 1, 1,  # e0 - e7
-    1, 1, 1, 1, 1, 1, 1, 1,  # e8 - ef
-    1, 1, 1, 1, 1, 1, 1, 1,  # f0 - f7
-    1, 1, 1, 1, 1, 1, 1, 1,  # f8 - ff
+1,0,0,0,0,0,0,0,  # 00 - 07
+0,0,0,0,0,0,0,0,  # 08 - 0f
+0,0,0,0,0,0,0,0,  # 10 - 17
+0,0,0,1,0,0,0,0,  # 18 - 1f
+0,0,0,0,0,0,0,0,  # 20 - 27
+0,0,0,0,0,0,0,0,  # 28 - 2f
+0,0,0,0,0,0,0,0,  # 30 - 37
+0,0,0,0,0,0,0,0,  # 38 - 3f
+0,0,0,0,0,0,0,0,  # 40 - 47
+0,0,0,0,0,0,0,0,  # 48 - 4f
+0,0,0,0,0,0,0,0,  # 50 - 57
+0,0,0,0,0,0,0,0,  # 58 - 5f
+0,0,0,0,0,0,0,0,  # 60 - 67
+0,0,0,0,0,0,0,0,  # 68 - 6f
+0,0,0,0,0,0,0,0,  # 70 - 77
+0,0,0,4,0,5,2,0,  # 78 - 7f
+1,1,1,1,1,1,1,1,  # 80 - 87
+1,1,1,1,1,1,1,1,  # 88 - 8f
+1,1,1,1,1,1,1,1,  # 90 - 97
+1,1,1,1,1,1,1,1,  # 98 - 9f
+1,1,1,1,1,1,1,1,  # a0 - a7
+1,1,1,1,1,1,1,1,  # a8 - af
+1,1,1,1,1,1,1,1,  # b0 - b7
+1,1,1,1,1,1,1,1,  # b8 - bf
+1,1,1,1,1,1,1,1,  # c0 - c7
+1,1,1,1,1,1,1,1,  # c8 - cf
+1,1,1,1,1,1,1,1,  # d0 - d7
+1,1,1,1,1,1,1,1,  # d8 - df
+1,1,1,1,1,1,1,1,  # e0 - e7
+1,1,1,1,1,1,1,1,  # e8 - ef
+1,1,1,1,1,1,1,1,  # f0 - f7
+1,1,1,1,1,1,1,1,  # f8 - ff
 )
 
 HZ_ST = (
-MachineState.START, MachineState.ERROR,      3, MachineState.START, MachineState.START, MachineState.START, MachineState.ERROR, MachineState.ERROR, # 00-07
-MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ITS_ME, # 08-0f
-MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ERROR, MachineState.ERROR, MachineState.START, MachineState.START,      4, MachineState.ERROR, # 10-17
-     5, MachineState.ERROR,      6, MachineState.ERROR,      5,      5,      4, MachineState.ERROR, # 18-1f
-     4, MachineState.ERROR,      4,      4,      4, MachineState.ERROR,      4, MachineState.ERROR, # 20-27
-     4, MachineState.ITS_ME, MachineState.START, MachineState.START, MachineState.START, MachineState.START, MachineState.START, MachineState.START, # 28-2f
+MachineState.START,MachineState.ERROR,     3,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,# 00-07
+MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,# 08-0f
+MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,     4,MachineState.ERROR,# 10-17
+     5,MachineState.ERROR,     6,MachineState.ERROR,     5,     5,     4,MachineState.ERROR,# 18-1f
+     4,MachineState.ERROR,     4,     4,     4,MachineState.ERROR,     4,MachineState.ERROR,# 20-27
+     4,MachineState.ITS_ME,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,# 28-2f
 )
-# fmt: on
 
 HZ_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0)
 
-HZ_SM_MODEL = {
-    "class_table": HZ_CLS,
-    "class_factor": 6,
-    "state_table": HZ_ST,
-    "char_len_table": HZ_CHAR_LEN_TABLE,
-    "name": "HZ-GB-2312",
-    "language": "Chinese",
-}
+HZ_SM_MODEL = {'class_table': HZ_CLS,
+               'class_factor': 6,
+               'state_table': HZ_ST,
+               'char_len_table': HZ_CHAR_LEN_TABLE,
+               'name': "HZ-GB-2312",
+               'language': 'Chinese'}
 
-# fmt: off
 ISO2022CN_CLS = (
-    2, 0, 0, 0, 0, 0, 0, 0,  # 00 - 07
-    0, 0, 0, 0, 0, 0, 0, 0,  # 08 - 0f
-    0, 0, 0, 0, 0, 0, 0, 0,  # 10 - 17
-    0, 0, 0, 1, 0, 0, 0, 0,  # 18 - 1f
-    0, 0, 0, 0, 0, 0, 0, 0,  # 20 - 27
-    0, 3, 0, 0, 0, 0, 0, 0,  # 28 - 2f
-    0, 0, 0, 0, 0, 0, 0, 0,  # 30 - 37
-    0, 0, 0, 0, 0, 0, 0, 0,  # 38 - 3f
-    0, 0, 0, 4, 0, 0, 0, 0,  # 40 - 47
-    0, 0, 0, 0, 0, 0, 0, 0,  # 48 - 4f
-    0, 0, 0, 0, 0, 0, 0, 0,  # 50 - 57
-    0, 0, 0, 0, 0, 0, 0, 0,  # 58 - 5f
-    0, 0, 0, 0, 0, 0, 0, 0,  # 60 - 67
-    0, 0, 0, 0, 0, 0, 0, 0,  # 68 - 6f
-    0, 0, 0, 0, 0, 0, 0, 0,  # 70 - 77
-    0, 0, 0, 0, 0, 0, 0, 0,  # 78 - 7f
-    2, 2, 2, 2, 2, 2, 2, 2,  # 80 - 87
-    2, 2, 2, 2, 2, 2, 2, 2,  # 88 - 8f
-    2, 2, 2, 2, 2, 2, 2, 2,  # 90 - 97
-    2, 2, 2, 2, 2, 2, 2, 2,  # 98 - 9f
-    2, 2, 2, 2, 2, 2, 2, 2,  # a0 - a7
-    2, 2, 2, 2, 2, 2, 2, 2,  # a8 - af
-    2, 2, 2, 2, 2, 2, 2, 2,  # b0 - b7
-    2, 2, 2, 2, 2, 2, 2, 2,  # b8 - bf
-    2, 2, 2, 2, 2, 2, 2, 2,  # c0 - c7
-    2, 2, 2, 2, 2, 2, 2, 2,  # c8 - cf
-    2, 2, 2, 2, 2, 2, 2, 2,  # d0 - d7
-    2, 2, 2, 2, 2, 2, 2, 2,  # d8 - df
-    2, 2, 2, 2, 2, 2, 2, 2,  # e0 - e7
-    2, 2, 2, 2, 2, 2, 2, 2,  # e8 - ef
-    2, 2, 2, 2, 2, 2, 2, 2,  # f0 - f7
-    2, 2, 2, 2, 2, 2, 2, 2,  # f8 - ff
+2,0,0,0,0,0,0,0,  # 00 - 07
+0,0,0,0,0,0,0,0,  # 08 - 0f
+0,0,0,0,0,0,0,0,  # 10 - 17
+0,0,0,1,0,0,0,0,  # 18 - 1f
+0,0,0,0,0,0,0,0,  # 20 - 27
+0,3,0,0,0,0,0,0,  # 28 - 2f
+0,0,0,0,0,0,0,0,  # 30 - 37
+0,0,0,0,0,0,0,0,  # 38 - 3f
+0,0,0,4,0,0,0,0,  # 40 - 47
+0,0,0,0,0,0,0,0,  # 48 - 4f
+0,0,0,0,0,0,0,0,  # 50 - 57
+0,0,0,0,0,0,0,0,  # 58 - 5f
+0,0,0,0,0,0,0,0,  # 60 - 67
+0,0,0,0,0,0,0,0,  # 68 - 6f
+0,0,0,0,0,0,0,0,  # 70 - 77
+0,0,0,0,0,0,0,0,  # 78 - 7f
+2,2,2,2,2,2,2,2,  # 80 - 87
+2,2,2,2,2,2,2,2,  # 88 - 8f
+2,2,2,2,2,2,2,2,  # 90 - 97
+2,2,2,2,2,2,2,2,  # 98 - 9f
+2,2,2,2,2,2,2,2,  # a0 - a7
+2,2,2,2,2,2,2,2,  # a8 - af
+2,2,2,2,2,2,2,2,  # b0 - b7
+2,2,2,2,2,2,2,2,  # b8 - bf
+2,2,2,2,2,2,2,2,  # c0 - c7
+2,2,2,2,2,2,2,2,  # c8 - cf
+2,2,2,2,2,2,2,2,  # d0 - d7
+2,2,2,2,2,2,2,2,  # d8 - df
+2,2,2,2,2,2,2,2,  # e0 - e7
+2,2,2,2,2,2,2,2,  # e8 - ef
+2,2,2,2,2,2,2,2,  # f0 - f7
+2,2,2,2,2,2,2,2,  # f8 - ff
 )
 
 ISO2022CN_ST = (
-    MachineState.START,      3, MachineState.ERROR, MachineState.START, MachineState.START, MachineState.START, MachineState.START, MachineState.START, # 00-07
-    MachineState.START, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, # 08-0f
-    MachineState.ERROR, MachineState.ERROR, MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ITS_ME, # 10-17
-    MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR,      4, MachineState.ERROR, # 18-1f
-    MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ITS_ME, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, # 20-27
-        5,      6, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, # 28-2f
-    MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ITS_ME, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, # 30-37
-    MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ITS_ME, MachineState.ERROR, MachineState.START, # 38-3f
+MachineState.START,     3,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,# 00-07
+MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 08-0f
+MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,# 10-17
+MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,     4,MachineState.ERROR,# 18-1f
+MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 20-27
+     5,     6,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 28-2f
+MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 30-37
+MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.START,# 38-3f
 )
-# fmt: on
 
 ISO2022CN_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0, 0, 0, 0)
 
-ISO2022CN_SM_MODEL = {
-    "class_table": ISO2022CN_CLS,
-    "class_factor": 9,
-    "state_table": ISO2022CN_ST,
-    "char_len_table": ISO2022CN_CHAR_LEN_TABLE,
-    "name": "ISO-2022-CN",
-    "language": "Chinese",
-}
+ISO2022CN_SM_MODEL = {'class_table': ISO2022CN_CLS,
+                      'class_factor': 9,
+                      'state_table': ISO2022CN_ST,
+                      'char_len_table': ISO2022CN_CHAR_LEN_TABLE,
+                      'name': "ISO-2022-CN",
+                      'language': 'Chinese'}
 
-# fmt: off
 ISO2022JP_CLS = (
-    2, 0, 0, 0, 0, 0, 0, 0,  # 00 - 07
-    0, 0, 0, 0, 0, 0, 2, 2,  # 08 - 0f
-    0, 0, 0, 0, 0, 0, 0, 0,  # 10 - 17
-    0, 0, 0, 1, 0, 0, 0, 0,  # 18 - 1f
-    0, 0, 0, 0, 7, 0, 0, 0,  # 20 - 27
-    3, 0, 0, 0, 0, 0, 0, 0,  # 28 - 2f
-    0, 0, 0, 0, 0, 0, 0, 0,  # 30 - 37
-    0, 0, 0, 0, 0, 0, 0, 0,  # 38 - 3f
-    6, 0, 4, 0, 8, 0, 0, 0,  # 40 - 47
-    0, 9, 5, 0, 0, 0, 0, 0,  # 48 - 4f
-    0, 0, 0, 0, 0, 0, 0, 0,  # 50 - 57
-    0, 0, 0, 0, 0, 0, 0, 0,  # 58 - 5f
-    0, 0, 0, 0, 0, 0, 0, 0,  # 60 - 67
-    0, 0, 0, 0, 0, 0, 0, 0,  # 68 - 6f
-    0, 0, 0, 0, 0, 0, 0, 0,  # 70 - 77
-    0, 0, 0, 0, 0, 0, 0, 0,  # 78 - 7f
-    2, 2, 2, 2, 2, 2, 2, 2,  # 80 - 87
-    2, 2, 2, 2, 2, 2, 2, 2,  # 88 - 8f
-    2, 2, 2, 2, 2, 2, 2, 2,  # 90 - 97
-    2, 2, 2, 2, 2, 2, 2, 2,  # 98 - 9f
-    2, 2, 2, 2, 2, 2, 2, 2,  # a0 - a7
-    2, 2, 2, 2, 2, 2, 2, 2,  # a8 - af
-    2, 2, 2, 2, 2, 2, 2, 2,  # b0 - b7
-    2, 2, 2, 2, 2, 2, 2, 2,  # b8 - bf
-    2, 2, 2, 2, 2, 2, 2, 2,  # c0 - c7
-    2, 2, 2, 2, 2, 2, 2, 2,  # c8 - cf
-    2, 2, 2, 2, 2, 2, 2, 2,  # d0 - d7
-    2, 2, 2, 2, 2, 2, 2, 2,  # d8 - df
-    2, 2, 2, 2, 2, 2, 2, 2,  # e0 - e7
-    2, 2, 2, 2, 2, 2, 2, 2,  # e8 - ef
-    2, 2, 2, 2, 2, 2, 2, 2,  # f0 - f7
-    2, 2, 2, 2, 2, 2, 2, 2,  # f8 - ff
+2,0,0,0,0,0,0,0,  # 00 - 07
+0,0,0,0,0,0,2,2,  # 08 - 0f
+0,0,0,0,0,0,0,0,  # 10 - 17
+0,0,0,1,0,0,0,0,  # 18 - 1f
+0,0,0,0,7,0,0,0,  # 20 - 27
+3,0,0,0,0,0,0,0,  # 28 - 2f
+0,0,0,0,0,0,0,0,  # 30 - 37
+0,0,0,0,0,0,0,0,  # 38 - 3f
+6,0,4,0,8,0,0,0,  # 40 - 47
+0,9,5,0,0,0,0,0,  # 48 - 4f
+0,0,0,0,0,0,0,0,  # 50 - 57
+0,0,0,0,0,0,0,0,  # 58 - 5f
+0,0,0,0,0,0,0,0,  # 60 - 67
+0,0,0,0,0,0,0,0,  # 68 - 6f
+0,0,0,0,0,0,0,0,  # 70 - 77
+0,0,0,0,0,0,0,0,  # 78 - 7f
+2,2,2,2,2,2,2,2,  # 80 - 87
+2,2,2,2,2,2,2,2,  # 88 - 8f
+2,2,2,2,2,2,2,2,  # 90 - 97
+2,2,2,2,2,2,2,2,  # 98 - 9f
+2,2,2,2,2,2,2,2,  # a0 - a7
+2,2,2,2,2,2,2,2,  # a8 - af
+2,2,2,2,2,2,2,2,  # b0 - b7
+2,2,2,2,2,2,2,2,  # b8 - bf
+2,2,2,2,2,2,2,2,  # c0 - c7
+2,2,2,2,2,2,2,2,  # c8 - cf
+2,2,2,2,2,2,2,2,  # d0 - d7
+2,2,2,2,2,2,2,2,  # d8 - df
+2,2,2,2,2,2,2,2,  # e0 - e7
+2,2,2,2,2,2,2,2,  # e8 - ef
+2,2,2,2,2,2,2,2,  # f0 - f7
+2,2,2,2,2,2,2,2,  # f8 - ff
 )
 
 ISO2022JP_ST = (
-    MachineState.START,      3, MachineState.ERROR, MachineState.START, MachineState.START, MachineState.START, MachineState.START, MachineState.START, # 00-07
-    MachineState.START, MachineState.START, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, # 08-0f
-    MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ITS_ME, # 10-17
-    MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ERROR, MachineState.ERROR, # 18-1f
-    MachineState.ERROR,      5, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR,      4, MachineState.ERROR, MachineState.ERROR, # 20-27
-    MachineState.ERROR, MachineState.ERROR, MachineState.ERROR,      6, MachineState.ITS_ME, MachineState.ERROR, MachineState.ITS_ME, MachineState.ERROR, # 28-2f
-    MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ITS_ME, MachineState.ITS_ME, # 30-37
-    MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ITS_ME, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, # 38-3f
-    MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ITS_ME, MachineState.ERROR, MachineState.START, MachineState.START, # 40-47
+MachineState.START,     3,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,# 00-07
+MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 08-0f
+MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,# 10-17
+MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,# 18-1f
+MachineState.ERROR,     5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,     4,MachineState.ERROR,MachineState.ERROR,# 20-27
+MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,     6,MachineState.ITS_ME,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,# 28-2f
+MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,# 30-37
+MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 38-3f
+MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.START,MachineState.START,# 40-47
 )
-# fmt: on
 
 ISO2022JP_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
 
-ISO2022JP_SM_MODEL = {
-    "class_table": ISO2022JP_CLS,
-    "class_factor": 10,
-    "state_table": ISO2022JP_ST,
-    "char_len_table": ISO2022JP_CHAR_LEN_TABLE,
-    "name": "ISO-2022-JP",
-    "language": "Japanese",
-}
+ISO2022JP_SM_MODEL = {'class_table': ISO2022JP_CLS,
+                      'class_factor': 10,
+                      'state_table': ISO2022JP_ST,
+                      'char_len_table': ISO2022JP_CHAR_LEN_TABLE,
+                      'name': "ISO-2022-JP",
+                      'language': 'Japanese'}
 
-# fmt: off
 ISO2022KR_CLS = (
-    2, 0, 0, 0, 0, 0, 0, 0,  # 00 - 07
-    0, 0, 0, 0, 0, 0, 0, 0,  # 08 - 0f
-    0, 0, 0, 0, 0, 0, 0, 0,  # 10 - 17
-    0, 0, 0, 1, 0, 0, 0, 0,  # 18 - 1f
-    0, 0, 0, 0, 3, 0, 0, 0,  # 20 - 27
-    0, 4, 0, 0, 0, 0, 0, 0,  # 28 - 2f
-    0, 0, 0, 0, 0, 0, 0, 0,  # 30 - 37
-    0, 0, 0, 0, 0, 0, 0, 0,  # 38 - 3f
-    0, 0, 0, 5, 0, 0, 0, 0,  # 40 - 47
-    0, 0, 0, 0, 0, 0, 0, 0,  # 48 - 4f
-    0, 0, 0, 0, 0, 0, 0, 0,  # 50 - 57
-    0, 0, 0, 0, 0, 0, 0, 0,  # 58 - 5f
-    0, 0, 0, 0, 0, 0, 0, 0,  # 60 - 67
-    0, 0, 0, 0, 0, 0, 0, 0,  # 68 - 6f
-    0, 0, 0, 0, 0, 0, 0, 0,  # 70 - 77
-    0, 0, 0, 0, 0, 0, 0, 0,  # 78 - 7f
-    2, 2, 2, 2, 2, 2, 2, 2,  # 80 - 87
-    2, 2, 2, 2, 2, 2, 2, 2,  # 88 - 8f
-    2, 2, 2, 2, 2, 2, 2, 2,  # 90 - 97
-    2, 2, 2, 2, 2, 2, 2, 2,  # 98 - 9f
-    2, 2, 2, 2, 2, 2, 2, 2,  # a0 - a7
-    2, 2, 2, 2, 2, 2, 2, 2,  # a8 - af
-    2, 2, 2, 2, 2, 2, 2, 2,  # b0 - b7
-    2, 2, 2, 2, 2, 2, 2, 2,  # b8 - bf
-    2, 2, 2, 2, 2, 2, 2, 2,  # c0 - c7
-    2, 2, 2, 2, 2, 2, 2, 2,  # c8 - cf
-    2, 2, 2, 2, 2, 2, 2, 2,  # d0 - d7
-    2, 2, 2, 2, 2, 2, 2, 2,  # d8 - df
-    2, 2, 2, 2, 2, 2, 2, 2,  # e0 - e7
-    2, 2, 2, 2, 2, 2, 2, 2,  # e8 - ef
-    2, 2, 2, 2, 2, 2, 2, 2,  # f0 - f7
-    2, 2, 2, 2, 2, 2, 2, 2,  # f8 - ff
+2,0,0,0,0,0,0,0,  # 00 - 07
+0,0,0,0,0,0,0,0,  # 08 - 0f
+0,0,0,0,0,0,0,0,  # 10 - 17
+0,0,0,1,0,0,0,0,  # 18 - 1f
+0,0,0,0,3,0,0,0,  # 20 - 27
+0,4,0,0,0,0,0,0,  # 28 - 2f
+0,0,0,0,0,0,0,0,  # 30 - 37
+0,0,0,0,0,0,0,0,  # 38 - 3f
+0,0,0,5,0,0,0,0,  # 40 - 47
+0,0,0,0,0,0,0,0,  # 48 - 4f
+0,0,0,0,0,0,0,0,  # 50 - 57
+0,0,0,0,0,0,0,0,  # 58 - 5f
+0,0,0,0,0,0,0,0,  # 60 - 67
+0,0,0,0,0,0,0,0,  # 68 - 6f
+0,0,0,0,0,0,0,0,  # 70 - 77
+0,0,0,0,0,0,0,0,  # 78 - 7f
+2,2,2,2,2,2,2,2,  # 80 - 87
+2,2,2,2,2,2,2,2,  # 88 - 8f
+2,2,2,2,2,2,2,2,  # 90 - 97
+2,2,2,2,2,2,2,2,  # 98 - 9f
+2,2,2,2,2,2,2,2,  # a0 - a7
+2,2,2,2,2,2,2,2,  # a8 - af
+2,2,2,2,2,2,2,2,  # b0 - b7
+2,2,2,2,2,2,2,2,  # b8 - bf
+2,2,2,2,2,2,2,2,  # c0 - c7
+2,2,2,2,2,2,2,2,  # c8 - cf
+2,2,2,2,2,2,2,2,  # d0 - d7
+2,2,2,2,2,2,2,2,  # d8 - df
+2,2,2,2,2,2,2,2,  # e0 - e7
+2,2,2,2,2,2,2,2,  # e8 - ef
+2,2,2,2,2,2,2,2,  # f0 - f7
+2,2,2,2,2,2,2,2,  # f8 - ff
 )
 
 ISO2022KR_ST = (
-    MachineState.START,      3, MachineState.ERROR, MachineState.START, MachineState.START, MachineState.START, MachineState.ERROR, MachineState.ERROR, # 00-07
-    MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ITS_ME, # 08-0f
-    MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR,      4, MachineState.ERROR, MachineState.ERROR, # 10-17
-    MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR,      5, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, # 18-1f
-    MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ITS_ME, MachineState.START, MachineState.START, MachineState.START, MachineState.START, # 20-27
+MachineState.START,     3,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,# 00-07
+MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,# 08-0f
+MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,     4,MachineState.ERROR,MachineState.ERROR,# 10-17
+MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,     5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 18-1f
+MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.START,MachineState.START,MachineState.START,MachineState.START,# 20-27
 )
-# fmt: on
 
 ISO2022KR_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0)
 
-ISO2022KR_SM_MODEL = {
-    "class_table": ISO2022KR_CLS,
-    "class_factor": 6,
-    "state_table": ISO2022KR_ST,
-    "char_len_table": ISO2022KR_CHAR_LEN_TABLE,
-    "name": "ISO-2022-KR",
-    "language": "Korean",
-}
+ISO2022KR_SM_MODEL = {'class_table': ISO2022KR_CLS,
+                      'class_factor': 6,
+                      'state_table': ISO2022KR_ST,
+                      'char_len_table': ISO2022KR_CHAR_LEN_TABLE,
+                      'name': "ISO-2022-KR",
+                      'language': 'Korean'}
+
+
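
Each *_SM_MODEL dict above bundles exactly the tables that CodingStateMachine.next_state() (earlier in this diff) consumes. One transition done by hand, as a sketch:

    from chardet.enums import MachineState
    from chardet.escsm import HZ_SM_MODEL

    state = MachineState.START
    byte_class = HZ_SM_MODEL["class_table"][0x7E]   # '~' maps to class 2
    state = HZ_SM_MODEL["state_table"][state * HZ_SM_MODEL["class_factor"]
                                       + byte_class]
    print(state)   # 3: the intermediate "saw '~'" state
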
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/eucjpprober.py b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/eucjpprober.py
index abf2e66..20ce8f7 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/eucjpprober.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/eucjpprober.py
@@ -25,24 +25,24 @@
 # 02110-1301  USA
 ######################### END LICENSE BLOCK #########################
 
-from .chardistribution import EUCJPDistributionAnalysis
+from .enums import ProbingState, MachineState
+from .mbcharsetprober import MultiByteCharSetProber
 from .codingstatemachine import CodingStateMachine
-from .enums import MachineState, ProbingState
+from .chardistribution import EUCJPDistributionAnalysis
 from .jpcntx import EUCJPContextAnalysis
-from .mbcharsetprober import MultiByteCharSetProber
 from .mbcssm import EUCJP_SM_MODEL
 
 
 class EUCJPProber(MultiByteCharSetProber):
     def __init__(self):
-        super().__init__()
+        super(EUCJPProber, self).__init__()
         self.coding_sm = CodingStateMachine(EUCJP_SM_MODEL)
         self.distribution_analyzer = EUCJPDistributionAnalysis()
         self.context_analyzer = EUCJPContextAnalysis()
         self.reset()
 
     def reset(self):
-        super().reset()
+        super(EUCJPProber, self).reset()
         self.context_analyzer.reset()
 
     @property
@@ -54,37 +54,34 @@ def language(self):
         return "Japanese"
 
     def feed(self, byte_str):
-        for i, byte in enumerate(byte_str):
-            # PY3K: byte_str is a byte array, so byte is an int, not a byte
-            coding_state = self.coding_sm.next_state(byte)
+        for i in range(len(byte_str)):
+            # PY3K: byte_str is a byte array, so byte_str[i] is an int, not a byte
+            coding_state = self.coding_sm.next_state(byte_str[i])
             if coding_state == MachineState.ERROR:
-                self.logger.debug(
-                    "%s %s prober hit error at byte %s",
-                    self.charset_name,
-                    self.language,
-                    i,
-                )
+                self.logger.debug('%s %s prober hit error at byte %s',
+                                  self.charset_name, self.language, i)
                 self._state = ProbingState.NOT_ME
                 break
-            if coding_state == MachineState.ITS_ME:
+            elif coding_state == MachineState.ITS_ME:
                 self._state = ProbingState.FOUND_IT
                 break
-            if coding_state == MachineState.START:
+            elif coding_state == MachineState.START:
                 char_len = self.coding_sm.get_current_charlen()
                 if i == 0:
-                    self._last_char[1] = byte
+                    self._last_char[1] = byte_str[0]
                     self.context_analyzer.feed(self._last_char, char_len)
                     self.distribution_analyzer.feed(self._last_char, char_len)
                 else:
-                    self.context_analyzer.feed(byte_str[i - 1 : i + 1], char_len)
-                    self.distribution_analyzer.feed(byte_str[i - 1 : i + 1], char_len)
+                    self.context_analyzer.feed(byte_str[i - 1:i + 1],
+                                                char_len)
+                    self.distribution_analyzer.feed(byte_str[i - 1:i + 1],
+                                                     char_len)
 
         self._last_char[0] = byte_str[-1]
 
         if self.state == ProbingState.DETECTING:
-            if self.context_analyzer.got_enough_data() and (
-                self.get_confidence() > self.SHORTCUT_THRESHOLD
-            ):
+            if (self.context_analyzer.got_enough_data() and
+               (self.get_confidence() > self.SHORTCUT_THRESHOLD)):
                 self._state = ProbingState.FOUND_IT
 
         return self.state
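
The shortcut at the end of feed() above commits early once the context analyzer has seen enough data and confidence clears SHORTCUT_THRESHOLD (0.95, from charsetprober.py). The same check with made-up numbers:

    SHORTCUT_THRESHOLD = 0.95
    got_enough_data, confidence = True, 0.97  # hypothetical values

    state = "DETECTING"
    if got_enough_data and confidence > SHORTCUT_THRESHOLD:
        state = "FOUND_IT"
    print(state)   # FOUND_IT
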
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/euckrfreq.py b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/euckrfreq.py
index 7dc3b10..b68078c 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/euckrfreq.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/euckrfreq.py
@@ -43,7 +43,6 @@
 EUCKR_TABLE_SIZE = 2352
 
 # Char to FreqOrder table ,
-# fmt: off
 EUCKR_CHAR_TO_FREQ_ORDER = (
   13, 130, 120,1396, 481,1719,1720, 328, 609, 212,1721, 707, 400, 299,1722,  87,
 1397,1723, 104, 536,1117,1203,1724,1267, 685,1268, 508,1725,1726,1727,1728,1398,
@@ -193,4 +192,4 @@
 2629,2630,2631, 924, 648, 863, 603,2632,2633, 934,1540, 864, 865,2634, 642,1042,
  670,1190,2635,2636,2637,2638, 168,2639, 652, 873, 542,1054,1541,2640,2641,2642,  # 512, 256
 )
-# fmt: on
+
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/euckrprober.py b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/euckrprober.py
index 154a6d2..345a060 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/euckrprober.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/euckrprober.py
@@ -25,15 +25,15 @@
 # 02110-1301  USA
 ######################### END LICENSE BLOCK #########################
 
-from .chardistribution import EUCKRDistributionAnalysis
-from .codingstatemachine import CodingStateMachine
 from .mbcharsetprober import MultiByteCharSetProber
+from .codingstatemachine import CodingStateMachine
+from .chardistribution import EUCKRDistributionAnalysis
 from .mbcssm import EUCKR_SM_MODEL
 
 
 class EUCKRProber(MultiByteCharSetProber):
     def __init__(self):
-        super().__init__()
+        super(EUCKRProber, self).__init__()
         self.coding_sm = CodingStateMachine(EUCKR_SM_MODEL)
         self.distribution_analyzer = EUCKRDistributionAnalysis()
         self.reset()
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/euctwfreq.py b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/euctwfreq.py
index 4900ccc..ed7a995 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/euctwfreq.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/euctwfreq.py
@@ -43,346 +43,345 @@
 
 EUCTW_TYPICAL_DISTRIBUTION_RATIO = 0.75
 
-# Char to FreqOrder table
+# Char to FreqOrder table ,
 EUCTW_TABLE_SIZE = 5376
 
-# fmt: off
 EUCTW_CHAR_TO_FREQ_ORDER = (
-    1, 1800, 1506, 255, 1431, 198, 9, 82, 6, 7310, 177, 202, 3615, 1256, 2808, 110,  # 2742
-    3735, 33, 3241, 261, 76, 44, 2113, 16, 2931, 2184, 1176, 659, 3868, 26, 3404, 2643,  # 2758
-    1198, 3869, 3313, 4060, 410, 2211, 302, 590, 361, 1963, 8, 204, 58, 4296, 7311, 1931,  # 2774
-    63, 7312, 7313, 317, 1614, 75, 222, 159, 4061, 2412, 1480, 7314, 3500, 3068, 224, 2809,  # 2790
-    3616, 3, 10, 3870, 1471, 29, 2774, 1135, 2852, 1939, 873, 130, 3242, 1123, 312, 7315,  # 2806
-    4297, 2051, 507, 252, 682, 7316, 142, 1914, 124, 206, 2932, 34, 3501, 3173, 64, 604,  # 2822
-    7317, 2494, 1976, 1977, 155, 1990, 645, 641, 1606, 7318, 3405, 337, 72, 406, 7319, 80,  # 2838
-    630, 238, 3174, 1509, 263, 939, 1092, 2644, 756, 1440, 1094, 3406, 449, 69, 2969, 591,  # 2854
-    179, 2095, 471, 115, 2034, 1843, 60, 50, 2970, 134, 806, 1868, 734, 2035, 3407, 180,  # 2870
-    995, 1607, 156, 537, 2893, 688, 7320, 319, 1305, 779, 2144, 514, 2374, 298, 4298, 359,  # 2886
-    2495, 90, 2707, 1338, 663, 11, 906, 1099, 2545, 20, 2436, 182, 532, 1716, 7321, 732,  # 2902
-    1376, 4062, 1311, 1420, 3175, 25, 2312, 1056, 113, 399, 382, 1949, 242, 3408, 2467, 529,  # 2918
-    3243, 475, 1447, 3617, 7322, 117, 21, 656, 810, 1297, 2295, 2329, 3502, 7323, 126, 4063,  # 2934
-    706, 456, 150, 613, 4299, 71, 1118, 2036, 4064, 145, 3069, 85, 835, 486, 2114, 1246,  # 2950
-    1426, 428, 727, 1285, 1015, 800, 106, 623, 303, 1281, 7324, 2127, 2354, 347, 3736, 221,  # 2966
-    3503, 3110, 7325, 1955, 1153, 4065, 83, 296, 1199, 3070, 192, 624, 93, 7326, 822, 1897,  # 2982
-    2810, 3111, 795, 2064, 991, 1554, 1542, 1592, 27, 43, 2853, 859, 139, 1456, 860, 4300,  # 2998
-    437, 712, 3871, 164, 2392, 3112, 695, 211, 3017, 2096, 195, 3872, 1608, 3504, 3505, 3618,  # 3014
-    3873, 234, 811, 2971, 2097, 3874, 2229, 1441, 3506, 1615, 2375, 668, 2076, 1638, 305, 228,  # 3030
-    1664, 4301, 467, 415, 7327, 262, 2098, 1593, 239, 108, 300, 200, 1033, 512, 1247, 2077,  # 3046
-    7328, 7329, 2173, 3176, 3619, 2673, 593, 845, 1062, 3244, 88, 1723, 2037, 3875, 1950, 212,  # 3062
-    266, 152, 149, 468, 1898, 4066, 4302, 77, 187, 7330, 3018, 37, 5, 2972, 7331, 3876,  # 3078
-    7332, 7333, 39, 2517, 4303, 2894, 3177, 2078, 55, 148, 74, 4304, 545, 483, 1474, 1029,  # 3094
-    1665, 217, 1869, 1531, 3113, 1104, 2645, 4067, 24, 172, 3507, 900, 3877, 3508, 3509, 4305,  # 3110
-    32, 1408, 2811, 1312, 329, 487, 2355, 2247, 2708, 784, 2674, 4, 3019, 3314, 1427, 1788,  # 3126
-    188, 109, 499, 7334, 3620, 1717, 1789, 888, 1217, 3020, 4306, 7335, 3510, 7336, 3315, 1520,  # 3142
-    3621, 3878, 196, 1034, 775, 7337, 7338, 929, 1815, 249, 439, 38, 7339, 1063, 7340, 794,  # 3158
-    3879, 1435, 2296, 46, 178, 3245, 2065, 7341, 2376, 7342, 214, 1709, 4307, 804, 35, 707,  # 3174
-    324, 3622, 1601, 2546, 140, 459, 4068, 7343, 7344, 1365, 839, 272, 978, 2257, 2572, 3409,  # 3190
-    2128, 1363, 3623, 1423, 697, 100, 3071, 48, 70, 1231, 495, 3114, 2193, 7345, 1294, 7346,  # 3206
-    2079, 462, 586, 1042, 3246, 853, 256, 988, 185, 2377, 3410, 1698, 434, 1084, 7347, 3411,  # 3222
-    314, 2615, 2775, 4308, 2330, 2331, 569, 2280, 637, 1816, 2518, 757, 1162, 1878, 1616, 3412,  # 3238
-    287, 1577, 2115, 768, 4309, 1671, 2854, 3511, 2519, 1321, 3737, 909, 2413, 7348, 4069, 933,  # 3254
-    3738, 7349, 2052, 2356, 1222, 4310, 765, 2414, 1322, 786, 4311, 7350, 1919, 1462, 1677, 2895,  # 3270
-    1699, 7351, 4312, 1424, 2437, 3115, 3624, 2590, 3316, 1774, 1940, 3413, 3880, 4070, 309, 1369,  # 3286
-    1130, 2812, 364, 2230, 1653, 1299, 3881, 3512, 3882, 3883, 2646, 525, 1085, 3021, 902, 2000,  # 3302
-    1475, 964, 4313, 421, 1844, 1415, 1057, 2281, 940, 1364, 3116, 376, 4314, 4315, 1381, 7,  # 3318
-    2520, 983, 2378, 336, 1710, 2675, 1845, 321, 3414, 559, 1131, 3022, 2742, 1808, 1132, 1313,  # 3334
-    265, 1481, 1857, 7352, 352, 1203, 2813, 3247, 167, 1089, 420, 2814, 776, 792, 1724, 3513,  # 3350
-    4071, 2438, 3248, 7353, 4072, 7354, 446, 229, 333, 2743, 901, 3739, 1200, 1557, 4316, 2647,  # 3366
-    1920, 395, 2744, 2676, 3740, 4073, 1835, 125, 916, 3178, 2616, 4317, 7355, 7356, 3741, 7357,  # 3382
-    7358, 7359, 4318, 3117, 3625, 1133, 2547, 1757, 3415, 1510, 2313, 1409, 3514, 7360, 2145, 438,  # 3398
-    2591, 2896, 2379, 3317, 1068, 958, 3023, 461, 311, 2855, 2677, 4074, 1915, 3179, 4075, 1978,  # 3414
-    383, 750, 2745, 2617, 4076, 274, 539, 385, 1278, 1442, 7361, 1154, 1964, 384, 561, 210,  # 3430
-    98, 1295, 2548, 3515, 7362, 1711, 2415, 1482, 3416, 3884, 2897, 1257, 129, 7363, 3742, 642,  # 3446
-    523, 2776, 2777, 2648, 7364, 141, 2231, 1333, 68, 176, 441, 876, 907, 4077, 603, 2592,  # 3462
-    710, 171, 3417, 404, 549, 18, 3118, 2393, 1410, 3626, 1666, 7365, 3516, 4319, 2898, 4320,  # 3478
-    7366, 2973, 368, 7367, 146, 366, 99, 871, 3627, 1543, 748, 807, 1586, 1185, 22, 2258,  # 3494
-    379, 3743, 3180, 7368, 3181, 505, 1941, 2618, 1991, 1382, 2314, 7369, 380, 2357, 218, 702,  # 3510
-    1817, 1248, 3418, 3024, 3517, 3318, 3249, 7370, 2974, 3628, 930, 3250, 3744, 7371, 59, 7372,  # 3526
-    585, 601, 4078, 497, 3419, 1112, 1314, 4321, 1801, 7373, 1223, 1472, 2174, 7374, 749, 1836,  # 3542
-    690, 1899, 3745, 1772, 3885, 1476, 429, 1043, 1790, 2232, 2116, 917, 4079, 447, 1086, 1629,  # 3558
-    7375, 556, 7376, 7377, 2020, 1654, 844, 1090, 105, 550, 966, 1758, 2815, 1008, 1782, 686,  # 3574
-    1095, 7378, 2282, 793, 1602, 7379, 3518, 2593, 4322, 4080, 2933, 2297, 4323, 3746, 980, 2496,  # 3590
-    544, 353, 527, 4324, 908, 2678, 2899, 7380, 381, 2619, 1942, 1348, 7381, 1341, 1252, 560,  # 3606
-    3072, 7382, 3420, 2856, 7383, 2053, 973, 886, 2080, 143, 4325, 7384, 7385, 157, 3886, 496,  # 3622
-    4081, 57, 840, 540, 2038, 4326, 4327, 3421, 2117, 1445, 970, 2259, 1748, 1965, 2081, 4082,  # 3638
-    3119, 1234, 1775, 3251, 2816, 3629, 773, 1206, 2129, 1066, 2039, 1326, 3887, 1738, 1725, 4083,  # 3654
-    279, 3120, 51, 1544, 2594, 423, 1578, 2130, 2066, 173, 4328, 1879, 7386, 7387, 1583, 264,  # 3670
-    610, 3630, 4329, 2439, 280, 154, 7388, 7389, 7390, 1739, 338, 1282, 3073, 693, 2857, 1411,  # 3686
-    1074, 3747, 2440, 7391, 4330, 7392, 7393, 1240, 952, 2394, 7394, 2900, 1538, 2679, 685, 1483,  # 3702
-    4084, 2468, 1436, 953, 4085, 2054, 4331, 671, 2395, 79, 4086, 2441, 3252, 608, 567, 2680,  # 3718
-    3422, 4087, 4088, 1691, 393, 1261, 1791, 2396, 7395, 4332, 7396, 7397, 7398, 7399, 1383, 1672,  # 3734
-    3748, 3182, 1464, 522, 1119, 661, 1150, 216, 675, 4333, 3888, 1432, 3519, 609, 4334, 2681,  # 3750
-    2397, 7400, 7401, 7402, 4089, 3025, 0, 7403, 2469, 315, 231, 2442, 301, 3319, 4335, 2380,  # 3766
-    7404, 233, 4090, 3631, 1818, 4336, 4337, 7405, 96, 1776, 1315, 2082, 7406, 257, 7407, 1809,  # 3782
-    3632, 2709, 1139, 1819, 4091, 2021, 1124, 2163, 2778, 1777, 2649, 7408, 3074, 363, 1655, 3183,  # 3798
-    7409, 2975, 7410, 7411, 7412, 3889, 1567, 3890, 718, 103, 3184, 849, 1443, 341, 3320, 2934,  # 3814
-    1484, 7413, 1712, 127, 67, 339, 4092, 2398, 679, 1412, 821, 7414, 7415, 834, 738, 351,  # 3830
-    2976, 2146, 846, 235, 1497, 1880, 418, 1992, 3749, 2710, 186, 1100, 2147, 2746, 3520, 1545,  # 3846
-    1355, 2935, 2858, 1377, 583, 3891, 4093, 2573, 2977, 7416, 1298, 3633, 1078, 2549, 3634, 2358,  # 3862
-    78, 3750, 3751, 267, 1289, 2099, 2001, 1594, 4094, 348, 369, 1274, 2194, 2175, 1837, 4338,  # 3878
-    1820, 2817, 3635, 2747, 2283, 2002, 4339, 2936, 2748, 144, 3321, 882, 4340, 3892, 2749, 3423,  # 3894
-    4341, 2901, 7417, 4095, 1726, 320, 7418, 3893, 3026, 788, 2978, 7419, 2818, 1773, 1327, 2859,  # 3910
-    3894, 2819, 7420, 1306, 4342, 2003, 1700, 3752, 3521, 2359, 2650, 787, 2022, 506, 824, 3636,  # 3926
-    534, 323, 4343, 1044, 3322, 2023, 1900, 946, 3424, 7421, 1778, 1500, 1678, 7422, 1881, 4344,  # 3942
-    165, 243, 4345, 3637, 2521, 123, 683, 4096, 764, 4346, 36, 3895, 1792, 589, 2902, 816,  # 3958
-    626, 1667, 3027, 2233, 1639, 1555, 1622, 3753, 3896, 7423, 3897, 2860, 1370, 1228, 1932, 891,  # 3974
-    2083, 2903, 304, 4097, 7424, 292, 2979, 2711, 3522, 691, 2100, 4098, 1115, 4347, 118, 662,  # 3990
-    7425, 611, 1156, 854, 2381, 1316, 2861, 2, 386, 515, 2904, 7426, 7427, 3253, 868, 2234,  # 4006
-    1486, 855, 2651, 785, 2212, 3028, 7428, 1040, 3185, 3523, 7429, 3121, 448, 7430, 1525, 7431,  # 4022
-    2164, 4348, 7432, 3754, 7433, 4099, 2820, 3524, 3122, 503, 818, 3898, 3123, 1568, 814, 676,  # 4038
-    1444, 306, 1749, 7434, 3755, 1416, 1030, 197, 1428, 805, 2821, 1501, 4349, 7435, 7436, 7437,  # 4054
-    1993, 7438, 4350, 7439, 7440, 2195, 13, 2779, 3638, 2980, 3124, 1229, 1916, 7441, 3756, 2131,  # 4070
-    7442, 4100, 4351, 2399, 3525, 7443, 2213, 1511, 1727, 1120, 7444, 7445, 646, 3757, 2443, 307,  # 4086
-    7446, 7447, 1595, 3186, 7448, 7449, 7450, 3639, 1113, 1356, 3899, 1465, 2522, 2523, 7451, 519,  # 4102
-    7452, 128, 2132, 92, 2284, 1979, 7453, 3900, 1512, 342, 3125, 2196, 7454, 2780, 2214, 1980,  # 4118
-    3323, 7455, 290, 1656, 1317, 789, 827, 2360, 7456, 3758, 4352, 562, 581, 3901, 7457, 401,  # 4134
-    4353, 2248, 94, 4354, 1399, 2781, 7458, 1463, 2024, 4355, 3187, 1943, 7459, 828, 1105, 4101,  # 4150
-    1262, 1394, 7460, 4102, 605, 4356, 7461, 1783, 2862, 7462, 2822, 819, 2101, 578, 2197, 2937,  # 4166
-    7463, 1502, 436, 3254, 4103, 3255, 2823, 3902, 2905, 3425, 3426, 7464, 2712, 2315, 7465, 7466,  # 4182
-    2332, 2067, 23, 4357, 193, 826, 3759, 2102, 699, 1630, 4104, 3075, 390, 1793, 1064, 3526,  # 4198
-    7467, 1579, 3076, 3077, 1400, 7468, 4105, 1838, 1640, 2863, 7469, 4358, 4359, 137, 4106, 598,  # 4214
-    3078, 1966, 780, 104, 974, 2938, 7470, 278, 899, 253, 402, 572, 504, 493, 1339, 7471,  # 4230
-    3903, 1275, 4360, 2574, 2550, 7472, 3640, 3029, 3079, 2249, 565, 1334, 2713, 863, 41, 7473,  # 4246
-    7474, 4361, 7475, 1657, 2333, 19, 463, 2750, 4107, 606, 7476, 2981, 3256, 1087, 2084, 1323,  # 4262
-    2652, 2982, 7477, 1631, 1623, 1750, 4108, 2682, 7478, 2864, 791, 2714, 2653, 2334, 232, 2416,  # 4278
-    7479, 2983, 1498, 7480, 2654, 2620, 755, 1366, 3641, 3257, 3126, 2025, 1609, 119, 1917, 3427,  # 4294
-    862, 1026, 4109, 7481, 3904, 3760, 4362, 3905, 4363, 2260, 1951, 2470, 7482, 1125, 817, 4110,  # 4310
-    4111, 3906, 1513, 1766, 2040, 1487, 4112, 3030, 3258, 2824, 3761, 3127, 7483, 7484, 1507, 7485,  # 4326
-    2683, 733, 40, 1632, 1106, 2865, 345, 4113, 841, 2524, 230, 4364, 2984, 1846, 3259, 3428,  # 4342
-    7486, 1263, 986, 3429, 7487, 735, 879, 254, 1137, 857, 622, 1300, 1180, 1388, 1562, 3907,  # 4358
-    3908, 2939, 967, 2751, 2655, 1349, 592, 2133, 1692, 3324, 2985, 1994, 4114, 1679, 3909, 1901,  # 4374
-    2185, 7488, 739, 3642, 2715, 1296, 1290, 7489, 4115, 2198, 2199, 1921, 1563, 2595, 2551, 1870,  # 4390
-    2752, 2986, 7490, 435, 7491, 343, 1108, 596, 17, 1751, 4365, 2235, 3430, 3643, 7492, 4366,  # 4406
-    294, 3527, 2940, 1693, 477, 979, 281, 2041, 3528, 643, 2042, 3644, 2621, 2782, 2261, 1031,  # 4422
-    2335, 2134, 2298, 3529, 4367, 367, 1249, 2552, 7493, 3530, 7494, 4368, 1283, 3325, 2004, 240,  # 4438
-    1762, 3326, 4369, 4370, 836, 1069, 3128, 474, 7495, 2148, 2525, 268, 3531, 7496, 3188, 1521,  # 4454
-    1284, 7497, 1658, 1546, 4116, 7498, 3532, 3533, 7499, 4117, 3327, 2684, 1685, 4118, 961, 1673,  # 4470
-    2622, 190, 2005, 2200, 3762, 4371, 4372, 7500, 570, 2497, 3645, 1490, 7501, 4373, 2623, 3260,  # 4486
-    1956, 4374, 584, 1514, 396, 1045, 1944, 7502, 4375, 1967, 2444, 7503, 7504, 4376, 3910, 619,  # 4502
-    7505, 3129, 3261, 215, 2006, 2783, 2553, 3189, 4377, 3190, 4378, 763, 4119, 3763, 4379, 7506,  # 4518
-    7507, 1957, 1767, 2941, 3328, 3646, 1174, 452, 1477, 4380, 3329, 3130, 7508, 2825, 1253, 2382,  # 4534
-    2186, 1091, 2285, 4120, 492, 7509, 638, 1169, 1824, 2135, 1752, 3911, 648, 926, 1021, 1324,  # 4550
-    4381, 520, 4382, 997, 847, 1007, 892, 4383, 3764, 2262, 1871, 3647, 7510, 2400, 1784, 4384,  # 4566
-    1952, 2942, 3080, 3191, 1728, 4121, 2043, 3648, 4385, 2007, 1701, 3131, 1551, 30, 2263, 4122,  # 4582
-    7511, 2026, 4386, 3534, 7512, 501, 7513, 4123, 594, 3431, 2165, 1821, 3535, 3432, 3536, 3192,  # 4598
-    829, 2826, 4124, 7514, 1680, 3132, 1225, 4125, 7515, 3262, 4387, 4126, 3133, 2336, 7516, 4388,  # 4614
-    4127, 7517, 3912, 3913, 7518, 1847, 2383, 2596, 3330, 7519, 4389, 374, 3914, 652, 4128, 4129,  # 4630
-    375, 1140, 798, 7520, 7521, 7522, 2361, 4390, 2264, 546, 1659, 138, 3031, 2445, 4391, 7523,  # 4646
-    2250, 612, 1848, 910, 796, 3765, 1740, 1371, 825, 3766, 3767, 7524, 2906, 2554, 7525, 692,  # 4662
-    444, 3032, 2624, 801, 4392, 4130, 7526, 1491, 244, 1053, 3033, 4131, 4132, 340, 7527, 3915,  # 4678
-    1041, 2987, 293, 1168, 87, 1357, 7528, 1539, 959, 7529, 2236, 721, 694, 4133, 3768, 219,  # 4694
-    1478, 644, 1417, 3331, 2656, 1413, 1401, 1335, 1389, 3916, 7530, 7531, 2988, 2362, 3134, 1825,  # 4710
-    730, 1515, 184, 2827, 66, 4393, 7532, 1660, 2943, 246, 3332, 378, 1457, 226, 3433, 975,  # 4726
-    3917, 2944, 1264, 3537, 674, 696, 7533, 163, 7534, 1141, 2417, 2166, 713, 3538, 3333, 4394,  # 4742
-    3918, 7535, 7536, 1186, 15, 7537, 1079, 1070, 7538, 1522, 3193, 3539, 276, 1050, 2716, 758,  # 4758
-    1126, 653, 2945, 3263, 7539, 2337, 889, 3540, 3919, 3081, 2989, 903, 1250, 4395, 3920, 3434,  # 4774
-    3541, 1342, 1681, 1718, 766, 3264, 286, 89, 2946, 3649, 7540, 1713, 7541, 2597, 3334, 2990,  # 4790
-    7542, 2947, 2215, 3194, 2866, 7543, 4396, 2498, 2526, 181, 387, 1075, 3921, 731, 2187, 3335,  # 4806
-    7544, 3265, 310, 313, 3435, 2299, 770, 4134, 54, 3034, 189, 4397, 3082, 3769, 3922, 7545,  # 4822
-    1230, 1617, 1849, 355, 3542, 4135, 4398, 3336, 111, 4136, 3650, 1350, 3135, 3436, 3035, 4137,  # 4838
-    2149, 3266, 3543, 7546, 2784, 3923, 3924, 2991, 722, 2008, 7547, 1071, 247, 1207, 2338, 2471,  # 4854
-    1378, 4399, 2009, 864, 1437, 1214, 4400, 373, 3770, 1142, 2216, 667, 4401, 442, 2753, 2555,  # 4870
-    3771, 3925, 1968, 4138, 3267, 1839, 837, 170, 1107, 934, 1336, 1882, 7548, 7549, 2118, 4139,  # 4886
-    2828, 743, 1569, 7550, 4402, 4140, 582, 2384, 1418, 3437, 7551, 1802, 7552, 357, 1395, 1729,  # 4902
-    3651, 3268, 2418, 1564, 2237, 7553, 3083, 3772, 1633, 4403, 1114, 2085, 4141, 1532, 7554, 482,  # 4918
-    2446, 4404, 7555, 7556, 1492, 833, 1466, 7557, 2717, 3544, 1641, 2829, 7558, 1526, 1272, 3652,  # 4934
-    4142, 1686, 1794, 416, 2556, 1902, 1953, 1803, 7559, 3773, 2785, 3774, 1159, 2316, 7560, 2867,  # 4950
-    4405, 1610, 1584, 3036, 2419, 2754, 443, 3269, 1163, 3136, 7561, 7562, 3926, 7563, 4143, 2499,  # 4966
-    3037, 4406, 3927, 3137, 2103, 1647, 3545, 2010, 1872, 4144, 7564, 4145, 431, 3438, 7565, 250,  # 4982
-    97, 81, 4146, 7566, 1648, 1850, 1558, 160, 848, 7567, 866, 740, 1694, 7568, 2201, 2830,  # 4998
-    3195, 4147, 4407, 3653, 1687, 950, 2472, 426, 469, 3196, 3654, 3655, 3928, 7569, 7570, 1188,  # 5014
-    424, 1995, 861, 3546, 4148, 3775, 2202, 2685, 168, 1235, 3547, 4149, 7571, 2086, 1674, 4408,  # 5030
-    3337, 3270, 220, 2557, 1009, 7572, 3776, 670, 2992, 332, 1208, 717, 7573, 7574, 3548, 2447,  # 5046
-    3929, 3338, 7575, 513, 7576, 1209, 2868, 3339, 3138, 4409, 1080, 7577, 7578, 7579, 7580, 2527,  # 5062
-    3656, 3549, 815, 1587, 3930, 3931, 7581, 3550, 3439, 3777, 1254, 4410, 1328, 3038, 1390, 3932,  # 5078
-    1741, 3933, 3778, 3934, 7582, 236, 3779, 2448, 3271, 7583, 7584, 3657, 3780, 1273, 3781, 4411,  # 5094
-    7585, 308, 7586, 4412, 245, 4413, 1851, 2473, 1307, 2575, 430, 715, 2136, 2449, 7587, 270,  # 5110
-    199, 2869, 3935, 7588, 3551, 2718, 1753, 761, 1754, 725, 1661, 1840, 4414, 3440, 3658, 7589,  # 5126
-    7590, 587, 14, 3272, 227, 2598, 326, 480, 2265, 943, 2755, 3552, 291, 650, 1883, 7591,  # 5142
-    1702, 1226, 102, 1547, 62, 3441, 904, 4415, 3442, 1164, 4150, 7592, 7593, 1224, 1548, 2756,  # 5158
-    391, 498, 1493, 7594, 1386, 1419, 7595, 2055, 1177, 4416, 813, 880, 1081, 2363, 566, 1145,  # 5174
-    4417, 2286, 1001, 1035, 2558, 2599, 2238, 394, 1286, 7596, 7597, 2068, 7598, 86, 1494, 1730,  # 5190
-    3936, 491, 1588, 745, 897, 2948, 843, 3340, 3937, 2757, 2870, 3273, 1768, 998, 2217, 2069,  # 5206
-    397, 1826, 1195, 1969, 3659, 2993, 3341, 284, 7599, 3782, 2500, 2137, 2119, 1903, 7600, 3938,  # 5222
-    2150, 3939, 4151, 1036, 3443, 1904, 114, 2559, 4152, 209, 1527, 7601, 7602, 2949, 2831, 2625,  # 5238
-    2385, 2719, 3139, 812, 2560, 7603, 3274, 7604, 1559, 737, 1884, 3660, 1210, 885, 28, 2686,  # 5254
-    3553, 3783, 7605, 4153, 1004, 1779, 4418, 7606, 346, 1981, 2218, 2687, 4419, 3784, 1742, 797,  # 5270
-    1642, 3940, 1933, 1072, 1384, 2151, 896, 3941, 3275, 3661, 3197, 2871, 3554, 7607, 2561, 1958,  # 5286
-    4420, 2450, 1785, 7608, 7609, 7610, 3942, 4154, 1005, 1308, 3662, 4155, 2720, 4421, 4422, 1528,  # 5302
-    2600, 161, 1178, 4156, 1982, 987, 4423, 1101, 4157, 631, 3943, 1157, 3198, 2420, 1343, 1241,  # 5318
-    1016, 2239, 2562, 372, 877, 2339, 2501, 1160, 555, 1934, 911, 3944, 7611, 466, 1170, 169,  # 5334
-    1051, 2907, 2688, 3663, 2474, 2994, 1182, 2011, 2563, 1251, 2626, 7612, 992, 2340, 3444, 1540,  # 5350
-    2721, 1201, 2070, 2401, 1996, 2475, 7613, 4424, 528, 1922, 2188, 1503, 1873, 1570, 2364, 3342,  # 5366
-    3276, 7614, 557, 1073, 7615, 1827, 3445, 2087, 2266, 3140, 3039, 3084, 767, 3085, 2786, 4425,  # 5382
-    1006, 4158, 4426, 2341, 1267, 2176, 3664, 3199, 778, 3945, 3200, 2722, 1597, 2657, 7616, 4427,  # 5398
-    7617, 3446, 7618, 7619, 7620, 3277, 2689, 1433, 3278, 131, 95, 1504, 3946, 723, 4159, 3141,  # 5414
-    1841, 3555, 2758, 2189, 3947, 2027, 2104, 3665, 7621, 2995, 3948, 1218, 7622, 3343, 3201, 3949,  # 5430
-    4160, 2576, 248, 1634, 3785, 912, 7623, 2832, 3666, 3040, 3786, 654, 53, 7624, 2996, 7625,  # 5446
-    1688, 4428, 777, 3447, 1032, 3950, 1425, 7626, 191, 820, 2120, 2833, 971, 4429, 931, 3202,  # 5462
-    135, 664, 783, 3787, 1997, 772, 2908, 1935, 3951, 3788, 4430, 2909, 3203, 282, 2723, 640,  # 5478
-    1372, 3448, 1127, 922, 325, 3344, 7627, 7628, 711, 2044, 7629, 7630, 3952, 2219, 2787, 1936,  # 5494
-    3953, 3345, 2220, 2251, 3789, 2300, 7631, 4431, 3790, 1258, 3279, 3954, 3204, 2138, 2950, 3955,  # 5510
-    3956, 7632, 2221, 258, 3205, 4432, 101, 1227, 7633, 3280, 1755, 7634, 1391, 3281, 7635, 2910,  # 5526
-    2056, 893, 7636, 7637, 7638, 1402, 4161, 2342, 7639, 7640, 3206, 3556, 7641, 7642, 878, 1325,  # 5542
-    1780, 2788, 4433, 259, 1385, 2577, 744, 1183, 2267, 4434, 7643, 3957, 2502, 7644, 684, 1024,  # 5558
-    4162, 7645, 472, 3557, 3449, 1165, 3282, 3958, 3959, 322, 2152, 881, 455, 1695, 1152, 1340,  # 5574
-    660, 554, 2153, 4435, 1058, 4436, 4163, 830, 1065, 3346, 3960, 4437, 1923, 7646, 1703, 1918,  # 5590
-    7647, 932, 2268, 122, 7648, 4438, 947, 677, 7649, 3791, 2627, 297, 1905, 1924, 2269, 4439,  # 5606
-    2317, 3283, 7650, 7651, 4164, 7652, 4165, 84, 4166, 112, 989, 7653, 547, 1059, 3961, 701,  # 5622
-    3558, 1019, 7654, 4167, 7655, 3450, 942, 639, 457, 2301, 2451, 993, 2951, 407, 851, 494,  # 5638
-    4440, 3347, 927, 7656, 1237, 7657, 2421, 3348, 573, 4168, 680, 921, 2911, 1279, 1874, 285,  # 5654
-    790, 1448, 1983, 719, 2167, 7658, 7659, 4441, 3962, 3963, 1649, 7660, 1541, 563, 7661, 1077,  # 5670
-    7662, 3349, 3041, 3451, 511, 2997, 3964, 3965, 3667, 3966, 1268, 2564, 3350, 3207, 4442, 4443,  # 5686
-    7663, 535, 1048, 1276, 1189, 2912, 2028, 3142, 1438, 1373, 2834, 2952, 1134, 2012, 7664, 4169,  # 5702
-    1238, 2578, 3086, 1259, 7665, 700, 7666, 2953, 3143, 3668, 4170, 7667, 4171, 1146, 1875, 1906,  # 5718
-    4444, 2601, 3967, 781, 2422, 132, 1589, 203, 147, 273, 2789, 2402, 898, 1786, 2154, 3968,  # 5734
-    3969, 7668, 3792, 2790, 7669, 7670, 4445, 4446, 7671, 3208, 7672, 1635, 3793, 965, 7673, 1804,  # 5750
-    2690, 1516, 3559, 1121, 1082, 1329, 3284, 3970, 1449, 3794, 65, 1128, 2835, 2913, 2759, 1590,  # 5766
-    3795, 7674, 7675, 12, 2658, 45, 976, 2579, 3144, 4447, 517, 2528, 1013, 1037, 3209, 7676,  # 5782
-    3796, 2836, 7677, 3797, 7678, 3452, 7679, 2602, 614, 1998, 2318, 3798, 3087, 2724, 2628, 7680,  # 5798
-    2580, 4172, 599, 1269, 7681, 1810, 3669, 7682, 2691, 3088, 759, 1060, 489, 1805, 3351, 3285,  # 5814
-    1358, 7683, 7684, 2386, 1387, 1215, 2629, 2252, 490, 7685, 7686, 4173, 1759, 2387, 2343, 7687,  # 5830
-    4448, 3799, 1907, 3971, 2630, 1806, 3210, 4449, 3453, 3286, 2760, 2344, 874, 7688, 7689, 3454,  # 5846
-    3670, 1858, 91, 2914, 3671, 3042, 3800, 4450, 7690, 3145, 3972, 2659, 7691, 3455, 1202, 1403,  # 5862
-    3801, 2954, 2529, 1517, 2503, 4451, 3456, 2504, 7692, 4452, 7693, 2692, 1885, 1495, 1731, 3973,  # 5878
-    2365, 4453, 7694, 2029, 7695, 7696, 3974, 2693, 1216, 237, 2581, 4174, 2319, 3975, 3802, 4454,  # 5894
-    4455, 2694, 3560, 3457, 445, 4456, 7697, 7698, 7699, 7700, 2761, 61, 3976, 3672, 1822, 3977,  # 5910
-    7701, 687, 2045, 935, 925, 405, 2660, 703, 1096, 1859, 2725, 4457, 3978, 1876, 1367, 2695,  # 5926
-    3352, 918, 2105, 1781, 2476, 334, 3287, 1611, 1093, 4458, 564, 3146, 3458, 3673, 3353, 945,  # 5942
-    2631, 2057, 4459, 7702, 1925, 872, 4175, 7703, 3459, 2696, 3089, 349, 4176, 3674, 3979, 4460,  # 5958
-    3803, 4177, 3675, 2155, 3980, 4461, 4462, 4178, 4463, 2403, 2046, 782, 3981, 400, 251, 4179,  # 5974
-    1624, 7704, 7705, 277, 3676, 299, 1265, 476, 1191, 3804, 2121, 4180, 4181, 1109, 205, 7706,  # 5990
-    2582, 1000, 2156, 3561, 1860, 7707, 7708, 7709, 4464, 7710, 4465, 2565, 107, 2477, 2157, 3982,  # 6006
-    3460, 3147, 7711, 1533, 541, 1301, 158, 753, 4182, 2872, 3562, 7712, 1696, 370, 1088, 4183,  # 6022
-    4466, 3563, 579, 327, 440, 162, 2240, 269, 1937, 1374, 3461, 968, 3043, 56, 1396, 3090,  # 6038
-    2106, 3288, 3354, 7713, 1926, 2158, 4467, 2998, 7714, 3564, 7715, 7716, 3677, 4468, 2478, 7717,  # 6054
-    2791, 7718, 1650, 4469, 7719, 2603, 7720, 7721, 3983, 2661, 3355, 1149, 3356, 3984, 3805, 3985,  # 6070
-    7722, 1076, 49, 7723, 951, 3211, 3289, 3290, 450, 2837, 920, 7724, 1811, 2792, 2366, 4184,  # 6086
-    1908, 1138, 2367, 3806, 3462, 7725, 3212, 4470, 1909, 1147, 1518, 2423, 4471, 3807, 7726, 4472,  # 6102
-    2388, 2604, 260, 1795, 3213, 7727, 7728, 3808, 3291, 708, 7729, 3565, 1704, 7730, 3566, 1351,  # 6118
-    1618, 3357, 2999, 1886, 944, 4185, 3358, 4186, 3044, 3359, 4187, 7731, 3678, 422, 413, 1714,  # 6134
-    3292, 500, 2058, 2345, 4188, 2479, 7732, 1344, 1910, 954, 7733, 1668, 7734, 7735, 3986, 2404,  # 6150
-    4189, 3567, 3809, 4190, 7736, 2302, 1318, 2505, 3091, 133, 3092, 2873, 4473, 629, 31, 2838,  # 6166
-    2697, 3810, 4474, 850, 949, 4475, 3987, 2955, 1732, 2088, 4191, 1496, 1852, 7737, 3988, 620,  # 6182
-    3214, 981, 1242, 3679, 3360, 1619, 3680, 1643, 3293, 2139, 2452, 1970, 1719, 3463, 2168, 7738,  # 6198
-    3215, 7739, 7740, 3361, 1828, 7741, 1277, 4476, 1565, 2047, 7742, 1636, 3568, 3093, 7743, 869,  # 6214
-    2839, 655, 3811, 3812, 3094, 3989, 3000, 3813, 1310, 3569, 4477, 7744, 7745, 7746, 1733, 558,  # 6230
-    4478, 3681, 335, 1549, 3045, 1756, 4192, 3682, 1945, 3464, 1829, 1291, 1192, 470, 2726, 2107,  # 6246
-    2793, 913, 1054, 3990, 7747, 1027, 7748, 3046, 3991, 4479, 982, 2662, 3362, 3148, 3465, 3216,  # 6262
-    3217, 1946, 2794, 7749, 571, 4480, 7750, 1830, 7751, 3570, 2583, 1523, 2424, 7752, 2089, 984,  # 6278
-    4481, 3683, 1959, 7753, 3684, 852, 923, 2795, 3466, 3685, 969, 1519, 999, 2048, 2320, 1705,  # 6294
-    7754, 3095, 615, 1662, 151, 597, 3992, 2405, 2321, 1049, 275, 4482, 3686, 4193, 568, 3687,  # 6310
-    3571, 2480, 4194, 3688, 7755, 2425, 2270, 409, 3218, 7756, 1566, 2874, 3467, 1002, 769, 2840,  # 6326
-    194, 2090, 3149, 3689, 2222, 3294, 4195, 628, 1505, 7757, 7758, 1763, 2177, 3001, 3993, 521,  # 6342
-    1161, 2584, 1787, 2203, 2406, 4483, 3994, 1625, 4196, 4197, 412, 42, 3096, 464, 7759, 2632,  # 6358
-    4484, 3363, 1760, 1571, 2875, 3468, 2530, 1219, 2204, 3814, 2633, 2140, 2368, 4485, 4486, 3295,  # 6374
-    1651, 3364, 3572, 7760, 7761, 3573, 2481, 3469, 7762, 3690, 7763, 7764, 2271, 2091, 460, 7765,  # 6390
-    4487, 7766, 3002, 962, 588, 3574, 289, 3219, 2634, 1116, 52, 7767, 3047, 1796, 7768, 7769,  # 6406
-    7770, 1467, 7771, 1598, 1143, 3691, 4198, 1984, 1734, 1067, 4488, 1280, 3365, 465, 4489, 1572,  # 6422
-    510, 7772, 1927, 2241, 1812, 1644, 3575, 7773, 4490, 3692, 7774, 7775, 2663, 1573, 1534, 7776,  # 6438
-    7777, 4199, 536, 1807, 1761, 3470, 3815, 3150, 2635, 7778, 7779, 7780, 4491, 3471, 2915, 1911,  # 6454
-    2796, 7781, 3296, 1122, 377, 3220, 7782, 360, 7783, 7784, 4200, 1529, 551, 7785, 2059, 3693,  # 6470
-    1769, 2426, 7786, 2916, 4201, 3297, 3097, 2322, 2108, 2030, 4492, 1404, 136, 1468, 1479, 672,  # 6486
-    1171, 3221, 2303, 271, 3151, 7787, 2762, 7788, 2049, 678, 2727, 865, 1947, 4493, 7789, 2013,  # 6502
-    3995, 2956, 7790, 2728, 2223, 1397, 3048, 3694, 4494, 4495, 1735, 2917, 3366, 3576, 7791, 3816,  # 6518
-    509, 2841, 2453, 2876, 3817, 7792, 7793, 3152, 3153, 4496, 4202, 2531, 4497, 2304, 1166, 1010,  # 6534
-    552, 681, 1887, 7794, 7795, 2957, 2958, 3996, 1287, 1596, 1861, 3154, 358, 453, 736, 175,  # 6550
-    478, 1117, 905, 1167, 1097, 7796, 1853, 1530, 7797, 1706, 7798, 2178, 3472, 2287, 3695, 3473,  # 6566
-    3577, 4203, 2092, 4204, 7799, 3367, 1193, 2482, 4205, 1458, 2190, 2205, 1862, 1888, 1421, 3298,  # 6582
-    2918, 3049, 2179, 3474, 595, 2122, 7800, 3997, 7801, 7802, 4206, 1707, 2636, 223, 3696, 1359,  # 6598
-    751, 3098, 183, 3475, 7803, 2797, 3003, 419, 2369, 633, 704, 3818, 2389, 241, 7804, 7805,  # 6614
-    7806, 838, 3004, 3697, 2272, 2763, 2454, 3819, 1938, 2050, 3998, 1309, 3099, 2242, 1181, 7807,  # 6630
-    1136, 2206, 3820, 2370, 1446, 4207, 2305, 4498, 7808, 7809, 4208, 1055, 2605, 484, 3698, 7810,  # 6646
-    3999, 625, 4209, 2273, 3368, 1499, 4210, 4000, 7811, 4001, 4211, 3222, 2274, 2275, 3476, 7812,  # 6662
-    7813, 2764, 808, 2606, 3699, 3369, 4002, 4212, 3100, 2532, 526, 3370, 3821, 4213, 955, 7814,  # 6678
-    1620, 4214, 2637, 2427, 7815, 1429, 3700, 1669, 1831, 994, 928, 7816, 3578, 1260, 7817, 7818,  # 6694
-    7819, 1948, 2288, 741, 2919, 1626, 4215, 2729, 2455, 867, 1184, 362, 3371, 1392, 7820, 7821,  # 6710
-    4003, 4216, 1770, 1736, 3223, 2920, 4499, 4500, 1928, 2698, 1459, 1158, 7822, 3050, 3372, 2877,  # 6726
-    1292, 1929, 2506, 2842, 3701, 1985, 1187, 2071, 2014, 2607, 4217, 7823, 2566, 2507, 2169, 3702,  # 6742
-    2483, 3299, 7824, 3703, 4501, 7825, 7826, 666, 1003, 3005, 1022, 3579, 4218, 7827, 4502, 1813,  # 6758
-    2253, 574, 3822, 1603, 295, 1535, 705, 3823, 4219, 283, 858, 417, 7828, 7829, 3224, 4503,  # 6774
-    4504, 3051, 1220, 1889, 1046, 2276, 2456, 4004, 1393, 1599, 689, 2567, 388, 4220, 7830, 2484,  # 6790
-    802, 7831, 2798, 3824, 2060, 1405, 2254, 7832, 4505, 3825, 2109, 1052, 1345, 3225, 1585, 7833,  # 6806
-    809, 7834, 7835, 7836, 575, 2730, 3477, 956, 1552, 1469, 1144, 2323, 7837, 2324, 1560, 2457,  # 6822
-    3580, 3226, 4005, 616, 2207, 3155, 2180, 2289, 7838, 1832, 7839, 3478, 4506, 7840, 1319, 3704,  # 6838
-    3705, 1211, 3581, 1023, 3227, 1293, 2799, 7841, 7842, 7843, 3826, 607, 2306, 3827, 762, 2878,  # 6854
-    1439, 4221, 1360, 7844, 1485, 3052, 7845, 4507, 1038, 4222, 1450, 2061, 2638, 4223, 1379, 4508,  # 6870
-    2585, 7846, 7847, 4224, 1352, 1414, 2325, 2921, 1172, 7848, 7849, 3828, 3829, 7850, 1797, 1451,  # 6886
-    7851, 7852, 7853, 7854, 2922, 4006, 4007, 2485, 2346, 411, 4008, 4009, 3582, 3300, 3101, 4509,  # 6902
-    1561, 2664, 1452, 4010, 1375, 7855, 7856, 47, 2959, 316, 7857, 1406, 1591, 2923, 3156, 7858,  # 6918
-    1025, 2141, 3102, 3157, 354, 2731, 884, 2224, 4225, 2407, 508, 3706, 726, 3583, 996, 2428,  # 6934
-    3584, 729, 7859, 392, 2191, 1453, 4011, 4510, 3707, 7860, 7861, 2458, 3585, 2608, 1675, 2800,  # 6950
-    919, 2347, 2960, 2348, 1270, 4511, 4012, 73, 7862, 7863, 647, 7864, 3228, 2843, 2255, 1550,  # 6966
-    1346, 3006, 7865, 1332, 883, 3479, 7866, 7867, 7868, 7869, 3301, 2765, 7870, 1212, 831, 1347,  # 6982
-    4226, 4512, 2326, 3830, 1863, 3053, 720, 3831, 4513, 4514, 3832, 7871, 4227, 7872, 7873, 4515,  # 6998
-    7874, 7875, 1798, 4516, 3708, 2609, 4517, 3586, 1645, 2371, 7876, 7877, 2924, 669, 2208, 2665,  # 7014
-    2429, 7878, 2879, 7879, 7880, 1028, 3229, 7881, 4228, 2408, 7882, 2256, 1353, 7883, 7884, 4518,  # 7030
-    3158, 518, 7885, 4013, 7886, 4229, 1960, 7887, 2142, 4230, 7888, 7889, 3007, 2349, 2350, 3833,  # 7046
-    516, 1833, 1454, 4014, 2699, 4231, 4519, 2225, 2610, 1971, 1129, 3587, 7890, 2766, 7891, 2961,  # 7062
-    1422, 577, 1470, 3008, 1524, 3373, 7892, 7893, 432, 4232, 3054, 3480, 7894, 2586, 1455, 2508,  # 7078
-    2226, 1972, 1175, 7895, 1020, 2732, 4015, 3481, 4520, 7896, 2733, 7897, 1743, 1361, 3055, 3482,  # 7094
-    2639, 4016, 4233, 4521, 2290, 895, 924, 4234, 2170, 331, 2243, 3056, 166, 1627, 3057, 1098,  # 7110
-    7898, 1232, 2880, 2227, 3374, 4522, 657, 403, 1196, 2372, 542, 3709, 3375, 1600, 4235, 3483,  # 7126
-    7899, 4523, 2767, 3230, 576, 530, 1362, 7900, 4524, 2533, 2666, 3710, 4017, 7901, 842, 3834,  # 7142
-    7902, 2801, 2031, 1014, 4018, 213, 2700, 3376, 665, 621, 4236, 7903, 3711, 2925, 2430, 7904,  # 7158
-    2431, 3302, 3588, 3377, 7905, 4237, 2534, 4238, 4525, 3589, 1682, 4239, 3484, 1380, 7906, 724,  # 7174
-    2277, 600, 1670, 7907, 1337, 1233, 4526, 3103, 2244, 7908, 1621, 4527, 7909, 651, 4240, 7910,  # 7190
-    1612, 4241, 2611, 7911, 2844, 7912, 2734, 2307, 3058, 7913, 716, 2459, 3059, 174, 1255, 2701,  # 7206
-    4019, 3590, 548, 1320, 1398, 728, 4020, 1574, 7914, 1890, 1197, 3060, 4021, 7915, 3061, 3062,  # 7222
-    3712, 3591, 3713, 747, 7916, 635, 4242, 4528, 7917, 7918, 7919, 4243, 7920, 7921, 4529, 7922,  # 7238
-    3378, 4530, 2432, 451, 7923, 3714, 2535, 2072, 4244, 2735, 4245, 4022, 7924, 1764, 4531, 7925,  # 7254
-    4246, 350, 7926, 2278, 2390, 2486, 7927, 4247, 4023, 2245, 1434, 4024, 488, 4532, 458, 4248,  # 7270
-    4025, 3715, 771, 1330, 2391, 3835, 2568, 3159, 2159, 2409, 1553, 2667, 3160, 4249, 7928, 2487,  # 7286
-    2881, 2612, 1720, 2702, 4250, 3379, 4533, 7929, 2536, 4251, 7930, 3231, 4252, 2768, 7931, 2015,  # 7302
-    2736, 7932, 1155, 1017, 3716, 3836, 7933, 3303, 2308, 201, 1864, 4253, 1430, 7934, 4026, 7935,  # 7318
-    7936, 7937, 7938, 7939, 4254, 1604, 7940, 414, 1865, 371, 2587, 4534, 4535, 3485, 2016, 3104,  # 7334
-    4536, 1708, 960, 4255, 887, 389, 2171, 1536, 1663, 1721, 7941, 2228, 4027, 2351, 2926, 1580,  # 7350
-    7942, 7943, 7944, 1744, 7945, 2537, 4537, 4538, 7946, 4539, 7947, 2073, 7948, 7949, 3592, 3380,  # 7366
-    2882, 4256, 7950, 4257, 2640, 3381, 2802, 673, 2703, 2460, 709, 3486, 4028, 3593, 4258, 7951,  # 7382
-    1148, 502, 634, 7952, 7953, 1204, 4540, 3594, 1575, 4541, 2613, 3717, 7954, 3718, 3105, 948,  # 7398
-    3232, 121, 1745, 3837, 1110, 7955, 4259, 3063, 2509, 3009, 4029, 3719, 1151, 1771, 3838, 1488,  # 7414
-    4030, 1986, 7956, 2433, 3487, 7957, 7958, 2093, 7959, 4260, 3839, 1213, 1407, 2803, 531, 2737,  # 7430
-    2538, 3233, 1011, 1537, 7960, 2769, 4261, 3106, 1061, 7961, 3720, 3721, 1866, 2883, 7962, 2017,  # 7446
-    120, 4262, 4263, 2062, 3595, 3234, 2309, 3840, 2668, 3382, 1954, 4542, 7963, 7964, 3488, 1047,  # 7462
-    2704, 1266, 7965, 1368, 4543, 2845, 649, 3383, 3841, 2539, 2738, 1102, 2846, 2669, 7966, 7967,  # 7478
-    1999, 7968, 1111, 3596, 2962, 7969, 2488, 3842, 3597, 2804, 1854, 3384, 3722, 7970, 7971, 3385,  # 7494
-    2410, 2884, 3304, 3235, 3598, 7972, 2569, 7973, 3599, 2805, 4031, 1460, 856, 7974, 3600, 7975,  # 7510
-    2885, 2963, 7976, 2886, 3843, 7977, 4264, 632, 2510, 875, 3844, 1697, 3845, 2291, 7978, 7979,  # 7526
-    4544, 3010, 1239, 580, 4545, 4265, 7980, 914, 936, 2074, 1190, 4032, 1039, 2123, 7981, 7982,  # 7542
-    7983, 3386, 1473, 7984, 1354, 4266, 3846, 7985, 2172, 3064, 4033, 915, 3305, 4267, 4268, 3306,  # 7558
-    1605, 1834, 7986, 2739, 398, 3601, 4269, 3847, 4034, 328, 1912, 2847, 4035, 3848, 1331, 4270,  # 7574
-    3011, 937, 4271, 7987, 3602, 4036, 4037, 3387, 2160, 4546, 3388, 524, 742, 538, 3065, 1012,  # 7590
-    7988, 7989, 3849, 2461, 7990, 658, 1103, 225, 3850, 7991, 7992, 4547, 7993, 4548, 7994, 3236,  # 7606
-    1243, 7995, 4038, 963, 2246, 4549, 7996, 2705, 3603, 3161, 7997, 7998, 2588, 2327, 7999, 4550,  # 7622
-    8000, 8001, 8002, 3489, 3307, 957, 3389, 2540, 2032, 1930, 2927, 2462, 870, 2018, 3604, 1746,  # 7638
-    2770, 2771, 2434, 2463, 8003, 3851, 8004, 3723, 3107, 3724, 3490, 3390, 3725, 8005, 1179, 3066,  # 7654
-    8006, 3162, 2373, 4272, 3726, 2541, 3163, 3108, 2740, 4039, 8007, 3391, 1556, 2542, 2292, 977,  # 7670
-    2887, 2033, 4040, 1205, 3392, 8008, 1765, 3393, 3164, 2124, 1271, 1689, 714, 4551, 3491, 8009,  # 7686
-    2328, 3852, 533, 4273, 3605, 2181, 617, 8010, 2464, 3308, 3492, 2310, 8011, 8012, 3165, 8013,  # 7702
-    8014, 3853, 1987, 618, 427, 2641, 3493, 3394, 8015, 8016, 1244, 1690, 8017, 2806, 4274, 4552,  # 7718
-    8018, 3494, 8019, 8020, 2279, 1576, 473, 3606, 4275, 3395, 972, 8021, 3607, 8022, 3067, 8023,  # 7734
-    8024, 4553, 4554, 8025, 3727, 4041, 4042, 8026, 153, 4555, 356, 8027, 1891, 2888, 4276, 2143,  # 7750
-    408, 803, 2352, 8028, 3854, 8029, 4277, 1646, 2570, 2511, 4556, 4557, 3855, 8030, 3856, 4278,  # 7766
-    8031, 2411, 3396, 752, 8032, 8033, 1961, 2964, 8034, 746, 3012, 2465, 8035, 4279, 3728, 698,  # 7782
-    4558, 1892, 4280, 3608, 2543, 4559, 3609, 3857, 8036, 3166, 3397, 8037, 1823, 1302, 4043, 2706,  # 7798
-    3858, 1973, 4281, 8038, 4282, 3167, 823, 1303, 1288, 1236, 2848, 3495, 4044, 3398, 774, 3859,  # 7814
-    8039, 1581, 4560, 1304, 2849, 3860, 4561, 8040, 2435, 2161, 1083, 3237, 4283, 4045, 4284, 344,  # 7830
-    1173, 288, 2311, 454, 1683, 8041, 8042, 1461, 4562, 4046, 2589, 8043, 8044, 4563, 985, 894,  # 7846
-    8045, 3399, 3168, 8046, 1913, 2928, 3729, 1988, 8047, 2110, 1974, 8048, 4047, 8049, 2571, 1194,  # 7862
-    425, 8050, 4564, 3169, 1245, 3730, 4285, 8051, 8052, 2850, 8053, 636, 4565, 1855, 3861, 760,  # 7878
-    1799, 8054, 4286, 2209, 1508, 4566, 4048, 1893, 1684, 2293, 8055, 8056, 8057, 4287, 4288, 2210,  # 7894
-    479, 8058, 8059, 832, 8060, 4049, 2489, 8061, 2965, 2490, 3731, 990, 3109, 627, 1814, 2642,  # 7910
-    4289, 1582, 4290, 2125, 2111, 3496, 4567, 8062, 799, 4291, 3170, 8063, 4568, 2112, 1737, 3013,  # 7926
-    1018, 543, 754, 4292, 3309, 1676, 4569, 4570, 4050, 8064, 1489, 8065, 3497, 8066, 2614, 2889,  # 7942
-    4051, 8067, 8068, 2966, 8069, 8070, 8071, 8072, 3171, 4571, 4572, 2182, 1722, 8073, 3238, 3239,  # 7958
-    1842, 3610, 1715, 481, 365, 1975, 1856, 8074, 8075, 1962, 2491, 4573, 8076, 2126, 3611, 3240,  # 7974
-    433, 1894, 2063, 2075, 8077, 602, 2741, 8078, 8079, 8080, 8081, 8082, 3014, 1628, 3400, 8083,  # 7990
-    3172, 4574, 4052, 2890, 4575, 2512, 8084, 2544, 2772, 8085, 8086, 8087, 3310, 4576, 2891, 8088,  # 8006
-    4577, 8089, 2851, 4578, 4579, 1221, 2967, 4053, 2513, 8090, 8091, 8092, 1867, 1989, 8093, 8094,  # 8022
-    8095, 1895, 8096, 8097, 4580, 1896, 4054, 318, 8098, 2094, 4055, 4293, 8099, 8100, 485, 8101,  # 8038
-    938, 3862, 553, 2670, 116, 8102, 3863, 3612, 8103, 3498, 2671, 2773, 3401, 3311, 2807, 8104,  # 8054
-    3613, 2929, 4056, 1747, 2930, 2968, 8105, 8106, 207, 8107, 8108, 2672, 4581, 2514, 8109, 3015,  # 8070
-    890, 3614, 3864, 8110, 1877, 3732, 3402, 8111, 2183, 2353, 3403, 1652, 8112, 8113, 8114, 941,  # 8086
-    2294, 208, 3499, 4057, 2019, 330, 4294, 3865, 2892, 2492, 3733, 4295, 8115, 8116, 8117, 8118,  # 8102
+   1,1800,1506, 255,1431, 198,   9,  82,   6,7310, 177, 202,3615,1256,2808, 110,  # 2742
+3735,  33,3241, 261,  76,  44,2113,  16,2931,2184,1176, 659,3868,  26,3404,2643,  # 2758
+1198,3869,3313,4060, 410,2211, 302, 590, 361,1963,   8, 204,  58,4296,7311,1931,  # 2774
+  63,7312,7313, 317,1614,  75, 222, 159,4061,2412,1480,7314,3500,3068, 224,2809,  # 2790
+3616,   3,  10,3870,1471,  29,2774,1135,2852,1939, 873, 130,3242,1123, 312,7315,  # 2806
+4297,2051, 507, 252, 682,7316, 142,1914, 124, 206,2932,  34,3501,3173,  64, 604,  # 2822
+7317,2494,1976,1977, 155,1990, 645, 641,1606,7318,3405, 337,  72, 406,7319,  80,  # 2838
+ 630, 238,3174,1509, 263, 939,1092,2644, 756,1440,1094,3406, 449,  69,2969, 591,  # 2854
+ 179,2095, 471, 115,2034,1843,  60,  50,2970, 134, 806,1868, 734,2035,3407, 180,  # 2870
+ 995,1607, 156, 537,2893, 688,7320, 319,1305, 779,2144, 514,2374, 298,4298, 359,  # 2886
+2495,  90,2707,1338, 663,  11, 906,1099,2545,  20,2436, 182, 532,1716,7321, 732,  # 2902
+1376,4062,1311,1420,3175,  25,2312,1056, 113, 399, 382,1949, 242,3408,2467, 529,  # 2918
+3243, 475,1447,3617,7322, 117,  21, 656, 810,1297,2295,2329,3502,7323, 126,4063,  # 2934
+ 706, 456, 150, 613,4299,  71,1118,2036,4064, 145,3069,  85, 835, 486,2114,1246,  # 2950
+1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,7324,2127,2354, 347,3736, 221,  # 2966
+3503,3110,7325,1955,1153,4065,  83, 296,1199,3070, 192, 624,  93,7326, 822,1897,  # 2982
+2810,3111, 795,2064, 991,1554,1542,1592,  27,  43,2853, 859, 139,1456, 860,4300,  # 2998
+ 437, 712,3871, 164,2392,3112, 695, 211,3017,2096, 195,3872,1608,3504,3505,3618,  # 3014
+3873, 234, 811,2971,2097,3874,2229,1441,3506,1615,2375, 668,2076,1638, 305, 228,  # 3030
+1664,4301, 467, 415,7327, 262,2098,1593, 239, 108, 300, 200,1033, 512,1247,2077,  # 3046
+7328,7329,2173,3176,3619,2673, 593, 845,1062,3244,  88,1723,2037,3875,1950, 212,  # 3062
+ 266, 152, 149, 468,1898,4066,4302,  77, 187,7330,3018,  37,   5,2972,7331,3876,  # 3078
+7332,7333,  39,2517,4303,2894,3177,2078,  55, 148,  74,4304, 545, 483,1474,1029,  # 3094
+1665, 217,1869,1531,3113,1104,2645,4067,  24, 172,3507, 900,3877,3508,3509,4305,  # 3110
+  32,1408,2811,1312, 329, 487,2355,2247,2708, 784,2674,   4,3019,3314,1427,1788,  # 3126
+ 188, 109, 499,7334,3620,1717,1789, 888,1217,3020,4306,7335,3510,7336,3315,1520,  # 3142
+3621,3878, 196,1034, 775,7337,7338, 929,1815, 249, 439,  38,7339,1063,7340, 794,  # 3158
+3879,1435,2296,  46, 178,3245,2065,7341,2376,7342, 214,1709,4307, 804,  35, 707,  # 3174
+ 324,3622,1601,2546, 140, 459,4068,7343,7344,1365, 839, 272, 978,2257,2572,3409,  # 3190
+2128,1363,3623,1423, 697, 100,3071,  48,  70,1231, 495,3114,2193,7345,1294,7346,  # 3206
+2079, 462, 586,1042,3246, 853, 256, 988, 185,2377,3410,1698, 434,1084,7347,3411,  # 3222
+ 314,2615,2775,4308,2330,2331, 569,2280, 637,1816,2518, 757,1162,1878,1616,3412,  # 3238
+ 287,1577,2115, 768,4309,1671,2854,3511,2519,1321,3737, 909,2413,7348,4069, 933,  # 3254
+3738,7349,2052,2356,1222,4310, 765,2414,1322, 786,4311,7350,1919,1462,1677,2895,  # 3270
+1699,7351,4312,1424,2437,3115,3624,2590,3316,1774,1940,3413,3880,4070, 309,1369,  # 3286
+1130,2812, 364,2230,1653,1299,3881,3512,3882,3883,2646, 525,1085,3021, 902,2000,  # 3302
+1475, 964,4313, 421,1844,1415,1057,2281, 940,1364,3116, 376,4314,4315,1381,   7,  # 3318
+2520, 983,2378, 336,1710,2675,1845, 321,3414, 559,1131,3022,2742,1808,1132,1313,  # 3334
+ 265,1481,1857,7352, 352,1203,2813,3247, 167,1089, 420,2814, 776, 792,1724,3513,  # 3350
+4071,2438,3248,7353,4072,7354, 446, 229, 333,2743, 901,3739,1200,1557,4316,2647,  # 3366
+1920, 395,2744,2676,3740,4073,1835, 125, 916,3178,2616,4317,7355,7356,3741,7357,  # 3382
+7358,7359,4318,3117,3625,1133,2547,1757,3415,1510,2313,1409,3514,7360,2145, 438,  # 3398
+2591,2896,2379,3317,1068, 958,3023, 461, 311,2855,2677,4074,1915,3179,4075,1978,  # 3414
+ 383, 750,2745,2617,4076, 274, 539, 385,1278,1442,7361,1154,1964, 384, 561, 210,  # 3430
+  98,1295,2548,3515,7362,1711,2415,1482,3416,3884,2897,1257, 129,7363,3742, 642,  # 3446
+ 523,2776,2777,2648,7364, 141,2231,1333,  68, 176, 441, 876, 907,4077, 603,2592,  # 3462
+ 710, 171,3417, 404, 549,  18,3118,2393,1410,3626,1666,7365,3516,4319,2898,4320,  # 3478
+7366,2973, 368,7367, 146, 366,  99, 871,3627,1543, 748, 807,1586,1185,  22,2258,  # 3494
+ 379,3743,3180,7368,3181, 505,1941,2618,1991,1382,2314,7369, 380,2357, 218, 702,  # 3510
+1817,1248,3418,3024,3517,3318,3249,7370,2974,3628, 930,3250,3744,7371,  59,7372,  # 3526
+ 585, 601,4078, 497,3419,1112,1314,4321,1801,7373,1223,1472,2174,7374, 749,1836,  # 3542
+ 690,1899,3745,1772,3885,1476, 429,1043,1790,2232,2116, 917,4079, 447,1086,1629,  # 3558
+7375, 556,7376,7377,2020,1654, 844,1090, 105, 550, 966,1758,2815,1008,1782, 686,  # 3574
+1095,7378,2282, 793,1602,7379,3518,2593,4322,4080,2933,2297,4323,3746, 980,2496,  # 3590
+ 544, 353, 527,4324, 908,2678,2899,7380, 381,2619,1942,1348,7381,1341,1252, 560,  # 3606
+3072,7382,3420,2856,7383,2053, 973, 886,2080, 143,4325,7384,7385, 157,3886, 496,  # 3622
+4081,  57, 840, 540,2038,4326,4327,3421,2117,1445, 970,2259,1748,1965,2081,4082,  # 3638
+3119,1234,1775,3251,2816,3629, 773,1206,2129,1066,2039,1326,3887,1738,1725,4083,  # 3654
+ 279,3120,  51,1544,2594, 423,1578,2130,2066, 173,4328,1879,7386,7387,1583, 264,  # 3670
+ 610,3630,4329,2439, 280, 154,7388,7389,7390,1739, 338,1282,3073, 693,2857,1411,  # 3686
+1074,3747,2440,7391,4330,7392,7393,1240, 952,2394,7394,2900,1538,2679, 685,1483,  # 3702
+4084,2468,1436, 953,4085,2054,4331, 671,2395,  79,4086,2441,3252, 608, 567,2680,  # 3718
+3422,4087,4088,1691, 393,1261,1791,2396,7395,4332,7396,7397,7398,7399,1383,1672,  # 3734
+3748,3182,1464, 522,1119, 661,1150, 216, 675,4333,3888,1432,3519, 609,4334,2681,  # 3750
+2397,7400,7401,7402,4089,3025,   0,7403,2469, 315, 231,2442, 301,3319,4335,2380,  # 3766
+7404, 233,4090,3631,1818,4336,4337,7405,  96,1776,1315,2082,7406, 257,7407,1809,  # 3782
+3632,2709,1139,1819,4091,2021,1124,2163,2778,1777,2649,7408,3074, 363,1655,3183,  # 3798
+7409,2975,7410,7411,7412,3889,1567,3890, 718, 103,3184, 849,1443, 341,3320,2934,  # 3814
+1484,7413,1712, 127,  67, 339,4092,2398, 679,1412, 821,7414,7415, 834, 738, 351,  # 3830
+2976,2146, 846, 235,1497,1880, 418,1992,3749,2710, 186,1100,2147,2746,3520,1545,  # 3846
+1355,2935,2858,1377, 583,3891,4093,2573,2977,7416,1298,3633,1078,2549,3634,2358,  # 3862
+  78,3750,3751, 267,1289,2099,2001,1594,4094, 348, 369,1274,2194,2175,1837,4338,  # 3878
+1820,2817,3635,2747,2283,2002,4339,2936,2748, 144,3321, 882,4340,3892,2749,3423,  # 3894
+4341,2901,7417,4095,1726, 320,7418,3893,3026, 788,2978,7419,2818,1773,1327,2859,  # 3910
+3894,2819,7420,1306,4342,2003,1700,3752,3521,2359,2650, 787,2022, 506, 824,3636,  # 3926
+ 534, 323,4343,1044,3322,2023,1900, 946,3424,7421,1778,1500,1678,7422,1881,4344,  # 3942
+ 165, 243,4345,3637,2521, 123, 683,4096, 764,4346,  36,3895,1792, 589,2902, 816,  # 3958
+ 626,1667,3027,2233,1639,1555,1622,3753,3896,7423,3897,2860,1370,1228,1932, 891,  # 3974
+2083,2903, 304,4097,7424, 292,2979,2711,3522, 691,2100,4098,1115,4347, 118, 662,  # 3990
+7425, 611,1156, 854,2381,1316,2861,   2, 386, 515,2904,7426,7427,3253, 868,2234,  # 4006
+1486, 855,2651, 785,2212,3028,7428,1040,3185,3523,7429,3121, 448,7430,1525,7431,  # 4022
+2164,4348,7432,3754,7433,4099,2820,3524,3122, 503, 818,3898,3123,1568, 814, 676,  # 4038
+1444, 306,1749,7434,3755,1416,1030, 197,1428, 805,2821,1501,4349,7435,7436,7437,  # 4054
+1993,7438,4350,7439,7440,2195,  13,2779,3638,2980,3124,1229,1916,7441,3756,2131,  # 4070
+7442,4100,4351,2399,3525,7443,2213,1511,1727,1120,7444,7445, 646,3757,2443, 307,  # 4086
+7446,7447,1595,3186,7448,7449,7450,3639,1113,1356,3899,1465,2522,2523,7451, 519,  # 4102
+7452, 128,2132,  92,2284,1979,7453,3900,1512, 342,3125,2196,7454,2780,2214,1980,  # 4118
+3323,7455, 290,1656,1317, 789, 827,2360,7456,3758,4352, 562, 581,3901,7457, 401,  # 4134
+4353,2248,  94,4354,1399,2781,7458,1463,2024,4355,3187,1943,7459, 828,1105,4101,  # 4150
+1262,1394,7460,4102, 605,4356,7461,1783,2862,7462,2822, 819,2101, 578,2197,2937,  # 4166
+7463,1502, 436,3254,4103,3255,2823,3902,2905,3425,3426,7464,2712,2315,7465,7466,  # 4182
+2332,2067,  23,4357, 193, 826,3759,2102, 699,1630,4104,3075, 390,1793,1064,3526,  # 4198
+7467,1579,3076,3077,1400,7468,4105,1838,1640,2863,7469,4358,4359, 137,4106, 598,  # 4214
+3078,1966, 780, 104, 974,2938,7470, 278, 899, 253, 402, 572, 504, 493,1339,7471,  # 4230
+3903,1275,4360,2574,2550,7472,3640,3029,3079,2249, 565,1334,2713, 863,  41,7473,  # 4246
+7474,4361,7475,1657,2333,  19, 463,2750,4107, 606,7476,2981,3256,1087,2084,1323,  # 4262
+2652,2982,7477,1631,1623,1750,4108,2682,7478,2864, 791,2714,2653,2334, 232,2416,  # 4278
+7479,2983,1498,7480,2654,2620, 755,1366,3641,3257,3126,2025,1609, 119,1917,3427,  # 4294
+ 862,1026,4109,7481,3904,3760,4362,3905,4363,2260,1951,2470,7482,1125, 817,4110,  # 4310
+4111,3906,1513,1766,2040,1487,4112,3030,3258,2824,3761,3127,7483,7484,1507,7485,  # 4326
+2683, 733,  40,1632,1106,2865, 345,4113, 841,2524, 230,4364,2984,1846,3259,3428,  # 4342
+7486,1263, 986,3429,7487, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562,3907,  # 4358
+3908,2939, 967,2751,2655,1349, 592,2133,1692,3324,2985,1994,4114,1679,3909,1901,  # 4374
+2185,7488, 739,3642,2715,1296,1290,7489,4115,2198,2199,1921,1563,2595,2551,1870,  # 4390
+2752,2986,7490, 435,7491, 343,1108, 596,  17,1751,4365,2235,3430,3643,7492,4366,  # 4406
+ 294,3527,2940,1693, 477, 979, 281,2041,3528, 643,2042,3644,2621,2782,2261,1031,  # 4422
+2335,2134,2298,3529,4367, 367,1249,2552,7493,3530,7494,4368,1283,3325,2004, 240,  # 4438
+1762,3326,4369,4370, 836,1069,3128, 474,7495,2148,2525, 268,3531,7496,3188,1521,  # 4454
+1284,7497,1658,1546,4116,7498,3532,3533,7499,4117,3327,2684,1685,4118, 961,1673,  # 4470
+2622, 190,2005,2200,3762,4371,4372,7500, 570,2497,3645,1490,7501,4373,2623,3260,  # 4486
+1956,4374, 584,1514, 396,1045,1944,7502,4375,1967,2444,7503,7504,4376,3910, 619,  # 4502
+7505,3129,3261, 215,2006,2783,2553,3189,4377,3190,4378, 763,4119,3763,4379,7506,  # 4518
+7507,1957,1767,2941,3328,3646,1174, 452,1477,4380,3329,3130,7508,2825,1253,2382,  # 4534
+2186,1091,2285,4120, 492,7509, 638,1169,1824,2135,1752,3911, 648, 926,1021,1324,  # 4550
+4381, 520,4382, 997, 847,1007, 892,4383,3764,2262,1871,3647,7510,2400,1784,4384,  # 4566
+1952,2942,3080,3191,1728,4121,2043,3648,4385,2007,1701,3131,1551,  30,2263,4122,  # 4582
+7511,2026,4386,3534,7512, 501,7513,4123, 594,3431,2165,1821,3535,3432,3536,3192,  # 4598
+ 829,2826,4124,7514,1680,3132,1225,4125,7515,3262,4387,4126,3133,2336,7516,4388,  # 4614
+4127,7517,3912,3913,7518,1847,2383,2596,3330,7519,4389, 374,3914, 652,4128,4129,  # 4630
+ 375,1140, 798,7520,7521,7522,2361,4390,2264, 546,1659, 138,3031,2445,4391,7523,  # 4646
+2250, 612,1848, 910, 796,3765,1740,1371, 825,3766,3767,7524,2906,2554,7525, 692,  # 4662
+ 444,3032,2624, 801,4392,4130,7526,1491, 244,1053,3033,4131,4132, 340,7527,3915,  # 4678
+1041,2987, 293,1168,  87,1357,7528,1539, 959,7529,2236, 721, 694,4133,3768, 219,  # 4694
+1478, 644,1417,3331,2656,1413,1401,1335,1389,3916,7530,7531,2988,2362,3134,1825,  # 4710
+ 730,1515, 184,2827,  66,4393,7532,1660,2943, 246,3332, 378,1457, 226,3433, 975,  # 4726
+3917,2944,1264,3537, 674, 696,7533, 163,7534,1141,2417,2166, 713,3538,3333,4394,  # 4742
+3918,7535,7536,1186,  15,7537,1079,1070,7538,1522,3193,3539, 276,1050,2716, 758,  # 4758
+1126, 653,2945,3263,7539,2337, 889,3540,3919,3081,2989, 903,1250,4395,3920,3434,  # 4774
+3541,1342,1681,1718, 766,3264, 286,  89,2946,3649,7540,1713,7541,2597,3334,2990,  # 4790
+7542,2947,2215,3194,2866,7543,4396,2498,2526, 181, 387,1075,3921, 731,2187,3335,  # 4806
+7544,3265, 310, 313,3435,2299, 770,4134,  54,3034, 189,4397,3082,3769,3922,7545,  # 4822
+1230,1617,1849, 355,3542,4135,4398,3336, 111,4136,3650,1350,3135,3436,3035,4137,  # 4838
+2149,3266,3543,7546,2784,3923,3924,2991, 722,2008,7547,1071, 247,1207,2338,2471,  # 4854
+1378,4399,2009, 864,1437,1214,4400, 373,3770,1142,2216, 667,4401, 442,2753,2555,  # 4870
+3771,3925,1968,4138,3267,1839, 837, 170,1107, 934,1336,1882,7548,7549,2118,4139,  # 4886
+2828, 743,1569,7550,4402,4140, 582,2384,1418,3437,7551,1802,7552, 357,1395,1729,  # 4902
+3651,3268,2418,1564,2237,7553,3083,3772,1633,4403,1114,2085,4141,1532,7554, 482,  # 4918
+2446,4404,7555,7556,1492, 833,1466,7557,2717,3544,1641,2829,7558,1526,1272,3652,  # 4934
+4142,1686,1794, 416,2556,1902,1953,1803,7559,3773,2785,3774,1159,2316,7560,2867,  # 4950
+4405,1610,1584,3036,2419,2754, 443,3269,1163,3136,7561,7562,3926,7563,4143,2499,  # 4966
+3037,4406,3927,3137,2103,1647,3545,2010,1872,4144,7564,4145, 431,3438,7565, 250,  # 4982
+  97,  81,4146,7566,1648,1850,1558, 160, 848,7567, 866, 740,1694,7568,2201,2830,  # 4998
+3195,4147,4407,3653,1687, 950,2472, 426, 469,3196,3654,3655,3928,7569,7570,1188,  # 5014
+ 424,1995, 861,3546,4148,3775,2202,2685, 168,1235,3547,4149,7571,2086,1674,4408,  # 5030
+3337,3270, 220,2557,1009,7572,3776, 670,2992, 332,1208, 717,7573,7574,3548,2447,  # 5046
+3929,3338,7575, 513,7576,1209,2868,3339,3138,4409,1080,7577,7578,7579,7580,2527,  # 5062
+3656,3549, 815,1587,3930,3931,7581,3550,3439,3777,1254,4410,1328,3038,1390,3932,  # 5078
+1741,3933,3778,3934,7582, 236,3779,2448,3271,7583,7584,3657,3780,1273,3781,4411,  # 5094
+7585, 308,7586,4412, 245,4413,1851,2473,1307,2575, 430, 715,2136,2449,7587, 270,  # 5110
+ 199,2869,3935,7588,3551,2718,1753, 761,1754, 725,1661,1840,4414,3440,3658,7589,  # 5126
+7590, 587,  14,3272, 227,2598, 326, 480,2265, 943,2755,3552, 291, 650,1883,7591,  # 5142
+1702,1226, 102,1547,  62,3441, 904,4415,3442,1164,4150,7592,7593,1224,1548,2756,  # 5158
+ 391, 498,1493,7594,1386,1419,7595,2055,1177,4416, 813, 880,1081,2363, 566,1145,  # 5174
+4417,2286,1001,1035,2558,2599,2238, 394,1286,7596,7597,2068,7598,  86,1494,1730,  # 5190
+3936, 491,1588, 745, 897,2948, 843,3340,3937,2757,2870,3273,1768, 998,2217,2069,  # 5206
+ 397,1826,1195,1969,3659,2993,3341, 284,7599,3782,2500,2137,2119,1903,7600,3938,  # 5222
+2150,3939,4151,1036,3443,1904, 114,2559,4152, 209,1527,7601,7602,2949,2831,2625,  # 5238
+2385,2719,3139, 812,2560,7603,3274,7604,1559, 737,1884,3660,1210, 885,  28,2686,  # 5254
+3553,3783,7605,4153,1004,1779,4418,7606, 346,1981,2218,2687,4419,3784,1742, 797,  # 5270
+1642,3940,1933,1072,1384,2151, 896,3941,3275,3661,3197,2871,3554,7607,2561,1958,  # 5286
+4420,2450,1785,7608,7609,7610,3942,4154,1005,1308,3662,4155,2720,4421,4422,1528,  # 5302
+2600, 161,1178,4156,1982, 987,4423,1101,4157, 631,3943,1157,3198,2420,1343,1241,  # 5318
+1016,2239,2562, 372, 877,2339,2501,1160, 555,1934, 911,3944,7611, 466,1170, 169,  # 5334
+1051,2907,2688,3663,2474,2994,1182,2011,2563,1251,2626,7612, 992,2340,3444,1540,  # 5350
+2721,1201,2070,2401,1996,2475,7613,4424, 528,1922,2188,1503,1873,1570,2364,3342,  # 5366
+3276,7614, 557,1073,7615,1827,3445,2087,2266,3140,3039,3084, 767,3085,2786,4425,  # 5382
+1006,4158,4426,2341,1267,2176,3664,3199, 778,3945,3200,2722,1597,2657,7616,4427,  # 5398
+7617,3446,7618,7619,7620,3277,2689,1433,3278, 131,  95,1504,3946, 723,4159,3141,  # 5414
+1841,3555,2758,2189,3947,2027,2104,3665,7621,2995,3948,1218,7622,3343,3201,3949,  # 5430
+4160,2576, 248,1634,3785, 912,7623,2832,3666,3040,3786, 654,  53,7624,2996,7625,  # 5446
+1688,4428, 777,3447,1032,3950,1425,7626, 191, 820,2120,2833, 971,4429, 931,3202,  # 5462
+ 135, 664, 783,3787,1997, 772,2908,1935,3951,3788,4430,2909,3203, 282,2723, 640,  # 5478
+1372,3448,1127, 922, 325,3344,7627,7628, 711,2044,7629,7630,3952,2219,2787,1936,  # 5494
+3953,3345,2220,2251,3789,2300,7631,4431,3790,1258,3279,3954,3204,2138,2950,3955,  # 5510
+3956,7632,2221, 258,3205,4432, 101,1227,7633,3280,1755,7634,1391,3281,7635,2910,  # 5526
+2056, 893,7636,7637,7638,1402,4161,2342,7639,7640,3206,3556,7641,7642, 878,1325,  # 5542
+1780,2788,4433, 259,1385,2577, 744,1183,2267,4434,7643,3957,2502,7644, 684,1024,  # 5558
+4162,7645, 472,3557,3449,1165,3282,3958,3959, 322,2152, 881, 455,1695,1152,1340,  # 5574
+ 660, 554,2153,4435,1058,4436,4163, 830,1065,3346,3960,4437,1923,7646,1703,1918,  # 5590
+7647, 932,2268, 122,7648,4438, 947, 677,7649,3791,2627, 297,1905,1924,2269,4439,  # 5606
+2317,3283,7650,7651,4164,7652,4165,  84,4166, 112, 989,7653, 547,1059,3961, 701,  # 5622
+3558,1019,7654,4167,7655,3450, 942, 639, 457,2301,2451, 993,2951, 407, 851, 494,  # 5638
+4440,3347, 927,7656,1237,7657,2421,3348, 573,4168, 680, 921,2911,1279,1874, 285,  # 5654
+ 790,1448,1983, 719,2167,7658,7659,4441,3962,3963,1649,7660,1541, 563,7661,1077,  # 5670
+7662,3349,3041,3451, 511,2997,3964,3965,3667,3966,1268,2564,3350,3207,4442,4443,  # 5686
+7663, 535,1048,1276,1189,2912,2028,3142,1438,1373,2834,2952,1134,2012,7664,4169,  # 5702
+1238,2578,3086,1259,7665, 700,7666,2953,3143,3668,4170,7667,4171,1146,1875,1906,  # 5718
+4444,2601,3967, 781,2422, 132,1589, 203, 147, 273,2789,2402, 898,1786,2154,3968,  # 5734
+3969,7668,3792,2790,7669,7670,4445,4446,7671,3208,7672,1635,3793, 965,7673,1804,  # 5750
+2690,1516,3559,1121,1082,1329,3284,3970,1449,3794,  65,1128,2835,2913,2759,1590,  # 5766
+3795,7674,7675,  12,2658,  45, 976,2579,3144,4447, 517,2528,1013,1037,3209,7676,  # 5782
+3796,2836,7677,3797,7678,3452,7679,2602, 614,1998,2318,3798,3087,2724,2628,7680,  # 5798
+2580,4172, 599,1269,7681,1810,3669,7682,2691,3088, 759,1060, 489,1805,3351,3285,  # 5814
+1358,7683,7684,2386,1387,1215,2629,2252, 490,7685,7686,4173,1759,2387,2343,7687,  # 5830
+4448,3799,1907,3971,2630,1806,3210,4449,3453,3286,2760,2344, 874,7688,7689,3454,  # 5846
+3670,1858,  91,2914,3671,3042,3800,4450,7690,3145,3972,2659,7691,3455,1202,1403,  # 5862
+3801,2954,2529,1517,2503,4451,3456,2504,7692,4452,7693,2692,1885,1495,1731,3973,  # 5878
+2365,4453,7694,2029,7695,7696,3974,2693,1216, 237,2581,4174,2319,3975,3802,4454,  # 5894
+4455,2694,3560,3457, 445,4456,7697,7698,7699,7700,2761,  61,3976,3672,1822,3977,  # 5910
+7701, 687,2045, 935, 925, 405,2660, 703,1096,1859,2725,4457,3978,1876,1367,2695,  # 5926
+3352, 918,2105,1781,2476, 334,3287,1611,1093,4458, 564,3146,3458,3673,3353, 945,  # 5942
+2631,2057,4459,7702,1925, 872,4175,7703,3459,2696,3089, 349,4176,3674,3979,4460,  # 5958
+3803,4177,3675,2155,3980,4461,4462,4178,4463,2403,2046, 782,3981, 400, 251,4179,  # 5974
+1624,7704,7705, 277,3676, 299,1265, 476,1191,3804,2121,4180,4181,1109, 205,7706,  # 5990
+2582,1000,2156,3561,1860,7707,7708,7709,4464,7710,4465,2565, 107,2477,2157,3982,  # 6006
+3460,3147,7711,1533, 541,1301, 158, 753,4182,2872,3562,7712,1696, 370,1088,4183,  # 6022
+4466,3563, 579, 327, 440, 162,2240, 269,1937,1374,3461, 968,3043,  56,1396,3090,  # 6038
+2106,3288,3354,7713,1926,2158,4467,2998,7714,3564,7715,7716,3677,4468,2478,7717,  # 6054
+2791,7718,1650,4469,7719,2603,7720,7721,3983,2661,3355,1149,3356,3984,3805,3985,  # 6070
+7722,1076,  49,7723, 951,3211,3289,3290, 450,2837, 920,7724,1811,2792,2366,4184,  # 6086
+1908,1138,2367,3806,3462,7725,3212,4470,1909,1147,1518,2423,4471,3807,7726,4472,  # 6102
+2388,2604, 260,1795,3213,7727,7728,3808,3291, 708,7729,3565,1704,7730,3566,1351,  # 6118
+1618,3357,2999,1886, 944,4185,3358,4186,3044,3359,4187,7731,3678, 422, 413,1714,  # 6134
+3292, 500,2058,2345,4188,2479,7732,1344,1910, 954,7733,1668,7734,7735,3986,2404,  # 6150
+4189,3567,3809,4190,7736,2302,1318,2505,3091, 133,3092,2873,4473, 629,  31,2838,  # 6166
+2697,3810,4474, 850, 949,4475,3987,2955,1732,2088,4191,1496,1852,7737,3988, 620,  # 6182
+3214, 981,1242,3679,3360,1619,3680,1643,3293,2139,2452,1970,1719,3463,2168,7738,  # 6198
+3215,7739,7740,3361,1828,7741,1277,4476,1565,2047,7742,1636,3568,3093,7743, 869,  # 6214
+2839, 655,3811,3812,3094,3989,3000,3813,1310,3569,4477,7744,7745,7746,1733, 558,  # 6230
+4478,3681, 335,1549,3045,1756,4192,3682,1945,3464,1829,1291,1192, 470,2726,2107,  # 6246
+2793, 913,1054,3990,7747,1027,7748,3046,3991,4479, 982,2662,3362,3148,3465,3216,  # 6262
+3217,1946,2794,7749, 571,4480,7750,1830,7751,3570,2583,1523,2424,7752,2089, 984,  # 6278
+4481,3683,1959,7753,3684, 852, 923,2795,3466,3685, 969,1519, 999,2048,2320,1705,  # 6294
+7754,3095, 615,1662, 151, 597,3992,2405,2321,1049, 275,4482,3686,4193, 568,3687,  # 6310
+3571,2480,4194,3688,7755,2425,2270, 409,3218,7756,1566,2874,3467,1002, 769,2840,  # 6326
+ 194,2090,3149,3689,2222,3294,4195, 628,1505,7757,7758,1763,2177,3001,3993, 521,  # 6342
+1161,2584,1787,2203,2406,4483,3994,1625,4196,4197, 412,  42,3096, 464,7759,2632,  # 6358
+4484,3363,1760,1571,2875,3468,2530,1219,2204,3814,2633,2140,2368,4485,4486,3295,  # 6374
+1651,3364,3572,7760,7761,3573,2481,3469,7762,3690,7763,7764,2271,2091, 460,7765,  # 6390
+4487,7766,3002, 962, 588,3574, 289,3219,2634,1116,  52,7767,3047,1796,7768,7769,  # 6406
+7770,1467,7771,1598,1143,3691,4198,1984,1734,1067,4488,1280,3365, 465,4489,1572,  # 6422
+ 510,7772,1927,2241,1812,1644,3575,7773,4490,3692,7774,7775,2663,1573,1534,7776,  # 6438
+7777,4199, 536,1807,1761,3470,3815,3150,2635,7778,7779,7780,4491,3471,2915,1911,  # 6454
+2796,7781,3296,1122, 377,3220,7782, 360,7783,7784,4200,1529, 551,7785,2059,3693,  # 6470
+1769,2426,7786,2916,4201,3297,3097,2322,2108,2030,4492,1404, 136,1468,1479, 672,  # 6486
+1171,3221,2303, 271,3151,7787,2762,7788,2049, 678,2727, 865,1947,4493,7789,2013,  # 6502
+3995,2956,7790,2728,2223,1397,3048,3694,4494,4495,1735,2917,3366,3576,7791,3816,  # 6518
+ 509,2841,2453,2876,3817,7792,7793,3152,3153,4496,4202,2531,4497,2304,1166,1010,  # 6534
+ 552, 681,1887,7794,7795,2957,2958,3996,1287,1596,1861,3154, 358, 453, 736, 175,  # 6550
+ 478,1117, 905,1167,1097,7796,1853,1530,7797,1706,7798,2178,3472,2287,3695,3473,  # 6566
+3577,4203,2092,4204,7799,3367,1193,2482,4205,1458,2190,2205,1862,1888,1421,3298,  # 6582
+2918,3049,2179,3474, 595,2122,7800,3997,7801,7802,4206,1707,2636, 223,3696,1359,  # 6598
+ 751,3098, 183,3475,7803,2797,3003, 419,2369, 633, 704,3818,2389, 241,7804,7805,  # 6614
+7806, 838,3004,3697,2272,2763,2454,3819,1938,2050,3998,1309,3099,2242,1181,7807,  # 6630
+1136,2206,3820,2370,1446,4207,2305,4498,7808,7809,4208,1055,2605, 484,3698,7810,  # 6646
+3999, 625,4209,2273,3368,1499,4210,4000,7811,4001,4211,3222,2274,2275,3476,7812,  # 6662
+7813,2764, 808,2606,3699,3369,4002,4212,3100,2532, 526,3370,3821,4213, 955,7814,  # 6678
+1620,4214,2637,2427,7815,1429,3700,1669,1831, 994, 928,7816,3578,1260,7817,7818,  # 6694
+7819,1948,2288, 741,2919,1626,4215,2729,2455, 867,1184, 362,3371,1392,7820,7821,  # 6710
+4003,4216,1770,1736,3223,2920,4499,4500,1928,2698,1459,1158,7822,3050,3372,2877,  # 6726
+1292,1929,2506,2842,3701,1985,1187,2071,2014,2607,4217,7823,2566,2507,2169,3702,  # 6742
+2483,3299,7824,3703,4501,7825,7826, 666,1003,3005,1022,3579,4218,7827,4502,1813,  # 6758
+2253, 574,3822,1603, 295,1535, 705,3823,4219, 283, 858, 417,7828,7829,3224,4503,  # 6774
+4504,3051,1220,1889,1046,2276,2456,4004,1393,1599, 689,2567, 388,4220,7830,2484,  # 6790
+ 802,7831,2798,3824,2060,1405,2254,7832,4505,3825,2109,1052,1345,3225,1585,7833,  # 6806
+ 809,7834,7835,7836, 575,2730,3477, 956,1552,1469,1144,2323,7837,2324,1560,2457,  # 6822
+3580,3226,4005, 616,2207,3155,2180,2289,7838,1832,7839,3478,4506,7840,1319,3704,  # 6838
+3705,1211,3581,1023,3227,1293,2799,7841,7842,7843,3826, 607,2306,3827, 762,2878,  # 6854
+1439,4221,1360,7844,1485,3052,7845,4507,1038,4222,1450,2061,2638,4223,1379,4508,  # 6870
+2585,7846,7847,4224,1352,1414,2325,2921,1172,7848,7849,3828,3829,7850,1797,1451,  # 6886
+7851,7852,7853,7854,2922,4006,4007,2485,2346, 411,4008,4009,3582,3300,3101,4509,  # 6902
+1561,2664,1452,4010,1375,7855,7856,  47,2959, 316,7857,1406,1591,2923,3156,7858,  # 6918
+1025,2141,3102,3157, 354,2731, 884,2224,4225,2407, 508,3706, 726,3583, 996,2428,  # 6934
+3584, 729,7859, 392,2191,1453,4011,4510,3707,7860,7861,2458,3585,2608,1675,2800,  # 6950
+ 919,2347,2960,2348,1270,4511,4012,  73,7862,7863, 647,7864,3228,2843,2255,1550,  # 6966
+1346,3006,7865,1332, 883,3479,7866,7867,7868,7869,3301,2765,7870,1212, 831,1347,  # 6982
+4226,4512,2326,3830,1863,3053, 720,3831,4513,4514,3832,7871,4227,7872,7873,4515,  # 6998
+7874,7875,1798,4516,3708,2609,4517,3586,1645,2371,7876,7877,2924, 669,2208,2665,  # 7014
+2429,7878,2879,7879,7880,1028,3229,7881,4228,2408,7882,2256,1353,7883,7884,4518,  # 7030
+3158, 518,7885,4013,7886,4229,1960,7887,2142,4230,7888,7889,3007,2349,2350,3833,  # 7046
+ 516,1833,1454,4014,2699,4231,4519,2225,2610,1971,1129,3587,7890,2766,7891,2961,  # 7062
+1422, 577,1470,3008,1524,3373,7892,7893, 432,4232,3054,3480,7894,2586,1455,2508,  # 7078
+2226,1972,1175,7895,1020,2732,4015,3481,4520,7896,2733,7897,1743,1361,3055,3482,  # 7094
+2639,4016,4233,4521,2290, 895, 924,4234,2170, 331,2243,3056, 166,1627,3057,1098,  # 7110
+7898,1232,2880,2227,3374,4522, 657, 403,1196,2372, 542,3709,3375,1600,4235,3483,  # 7126
+7899,4523,2767,3230, 576, 530,1362,7900,4524,2533,2666,3710,4017,7901, 842,3834,  # 7142
+7902,2801,2031,1014,4018, 213,2700,3376, 665, 621,4236,7903,3711,2925,2430,7904,  # 7158
+2431,3302,3588,3377,7905,4237,2534,4238,4525,3589,1682,4239,3484,1380,7906, 724,  # 7174
+2277, 600,1670,7907,1337,1233,4526,3103,2244,7908,1621,4527,7909, 651,4240,7910,  # 7190
+1612,4241,2611,7911,2844,7912,2734,2307,3058,7913, 716,2459,3059, 174,1255,2701,  # 7206
+4019,3590, 548,1320,1398, 728,4020,1574,7914,1890,1197,3060,4021,7915,3061,3062,  # 7222
+3712,3591,3713, 747,7916, 635,4242,4528,7917,7918,7919,4243,7920,7921,4529,7922,  # 7238
+3378,4530,2432, 451,7923,3714,2535,2072,4244,2735,4245,4022,7924,1764,4531,7925,  # 7254
+4246, 350,7926,2278,2390,2486,7927,4247,4023,2245,1434,4024, 488,4532, 458,4248,  # 7270
+4025,3715, 771,1330,2391,3835,2568,3159,2159,2409,1553,2667,3160,4249,7928,2487,  # 7286
+2881,2612,1720,2702,4250,3379,4533,7929,2536,4251,7930,3231,4252,2768,7931,2015,  # 7302
+2736,7932,1155,1017,3716,3836,7933,3303,2308, 201,1864,4253,1430,7934,4026,7935,  # 7318
+7936,7937,7938,7939,4254,1604,7940, 414,1865, 371,2587,4534,4535,3485,2016,3104,  # 7334
+4536,1708, 960,4255, 887, 389,2171,1536,1663,1721,7941,2228,4027,2351,2926,1580,  # 7350
+7942,7943,7944,1744,7945,2537,4537,4538,7946,4539,7947,2073,7948,7949,3592,3380,  # 7366
+2882,4256,7950,4257,2640,3381,2802, 673,2703,2460, 709,3486,4028,3593,4258,7951,  # 7382
+1148, 502, 634,7952,7953,1204,4540,3594,1575,4541,2613,3717,7954,3718,3105, 948,  # 7398
+3232, 121,1745,3837,1110,7955,4259,3063,2509,3009,4029,3719,1151,1771,3838,1488,  # 7414
+4030,1986,7956,2433,3487,7957,7958,2093,7959,4260,3839,1213,1407,2803, 531,2737,  # 7430
+2538,3233,1011,1537,7960,2769,4261,3106,1061,7961,3720,3721,1866,2883,7962,2017,  # 7446
+ 120,4262,4263,2062,3595,3234,2309,3840,2668,3382,1954,4542,7963,7964,3488,1047,  # 7462
+2704,1266,7965,1368,4543,2845, 649,3383,3841,2539,2738,1102,2846,2669,7966,7967,  # 7478
+1999,7968,1111,3596,2962,7969,2488,3842,3597,2804,1854,3384,3722,7970,7971,3385,  # 7494
+2410,2884,3304,3235,3598,7972,2569,7973,3599,2805,4031,1460, 856,7974,3600,7975,  # 7510
+2885,2963,7976,2886,3843,7977,4264, 632,2510, 875,3844,1697,3845,2291,7978,7979,  # 7526
+4544,3010,1239, 580,4545,4265,7980, 914, 936,2074,1190,4032,1039,2123,7981,7982,  # 7542
+7983,3386,1473,7984,1354,4266,3846,7985,2172,3064,4033, 915,3305,4267,4268,3306,  # 7558
+1605,1834,7986,2739, 398,3601,4269,3847,4034, 328,1912,2847,4035,3848,1331,4270,  # 7574
+3011, 937,4271,7987,3602,4036,4037,3387,2160,4546,3388, 524, 742, 538,3065,1012,  # 7590
+7988,7989,3849,2461,7990, 658,1103, 225,3850,7991,7992,4547,7993,4548,7994,3236,  # 7606
+1243,7995,4038, 963,2246,4549,7996,2705,3603,3161,7997,7998,2588,2327,7999,4550,  # 7622
+8000,8001,8002,3489,3307, 957,3389,2540,2032,1930,2927,2462, 870,2018,3604,1746,  # 7638
+2770,2771,2434,2463,8003,3851,8004,3723,3107,3724,3490,3390,3725,8005,1179,3066,  # 7654
+8006,3162,2373,4272,3726,2541,3163,3108,2740,4039,8007,3391,1556,2542,2292, 977,  # 7670
+2887,2033,4040,1205,3392,8008,1765,3393,3164,2124,1271,1689, 714,4551,3491,8009,  # 7686
+2328,3852, 533,4273,3605,2181, 617,8010,2464,3308,3492,2310,8011,8012,3165,8013,  # 7702
+8014,3853,1987, 618, 427,2641,3493,3394,8015,8016,1244,1690,8017,2806,4274,4552,  # 7718
+8018,3494,8019,8020,2279,1576, 473,3606,4275,3395, 972,8021,3607,8022,3067,8023,  # 7734
+8024,4553,4554,8025,3727,4041,4042,8026, 153,4555, 356,8027,1891,2888,4276,2143,  # 7750
+ 408, 803,2352,8028,3854,8029,4277,1646,2570,2511,4556,4557,3855,8030,3856,4278,  # 7766
+8031,2411,3396, 752,8032,8033,1961,2964,8034, 746,3012,2465,8035,4279,3728, 698,  # 7782
+4558,1892,4280,3608,2543,4559,3609,3857,8036,3166,3397,8037,1823,1302,4043,2706,  # 7798
+3858,1973,4281,8038,4282,3167, 823,1303,1288,1236,2848,3495,4044,3398, 774,3859,  # 7814
+8039,1581,4560,1304,2849,3860,4561,8040,2435,2161,1083,3237,4283,4045,4284, 344,  # 7830
+1173, 288,2311, 454,1683,8041,8042,1461,4562,4046,2589,8043,8044,4563, 985, 894,  # 7846
+8045,3399,3168,8046,1913,2928,3729,1988,8047,2110,1974,8048,4047,8049,2571,1194,  # 7862
+ 425,8050,4564,3169,1245,3730,4285,8051,8052,2850,8053, 636,4565,1855,3861, 760,  # 7878
+1799,8054,4286,2209,1508,4566,4048,1893,1684,2293,8055,8056,8057,4287,4288,2210,  # 7894
+ 479,8058,8059, 832,8060,4049,2489,8061,2965,2490,3731, 990,3109, 627,1814,2642,  # 7910
+4289,1582,4290,2125,2111,3496,4567,8062, 799,4291,3170,8063,4568,2112,1737,3013,  # 7926
+1018, 543, 754,4292,3309,1676,4569,4570,4050,8064,1489,8065,3497,8066,2614,2889,  # 7942
+4051,8067,8068,2966,8069,8070,8071,8072,3171,4571,4572,2182,1722,8073,3238,3239,  # 7958
+1842,3610,1715, 481, 365,1975,1856,8074,8075,1962,2491,4573,8076,2126,3611,3240,  # 7974
+ 433,1894,2063,2075,8077, 602,2741,8078,8079,8080,8081,8082,3014,1628,3400,8083,  # 7990
+3172,4574,4052,2890,4575,2512,8084,2544,2772,8085,8086,8087,3310,4576,2891,8088,  # 8006
+4577,8089,2851,4578,4579,1221,2967,4053,2513,8090,8091,8092,1867,1989,8093,8094,  # 8022
+8095,1895,8096,8097,4580,1896,4054, 318,8098,2094,4055,4293,8099,8100, 485,8101,  # 8038
+ 938,3862, 553,2670, 116,8102,3863,3612,8103,3498,2671,2773,3401,3311,2807,8104,  # 8054
+3613,2929,4056,1747,2930,2968,8105,8106, 207,8107,8108,2672,4581,2514,8109,3015,  # 8070
+ 890,3614,3864,8110,1877,3732,3402,8111,2183,2353,3403,1652,8112,8113,8114, 941,  # 8086
+2294, 208,3499,4057,2019, 330,4294,3865,2892,2492,3733,4295,8115,8116,8117,8118,  # 8102
 )
-# fmt: on
+
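
The "# fmt: off" / "# fmt: on" pair dropped by this hunk (and by the other frequency-table hunks below) are Black formatter directives: Black leaves everything between them untouched, which is what preserves the hand-aligned columns of these tables. Their removal suggests the committed venv carries an older chardet release that predates Black formatting. A minimal sketch of the idiom, with illustrative values:

    # fmt: off
    EXAMPLE_FREQ_ORDER = (
          40,    1,    6,  182,   # hand-aligned columns survive reformatting
         152,  180,  295, 2127,
    )
    # fmt: on
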
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/euctwprober.py b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/euctwprober.py
index ca10a23..35669cc 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/euctwprober.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/euctwprober.py
@@ -25,15 +25,14 @@
 # 02110-1301  USA
 ######################### END LICENSE BLOCK #########################
 
-from .chardistribution import EUCTWDistributionAnalysis
-from .codingstatemachine import CodingStateMachine
 from .mbcharsetprober import MultiByteCharSetProber
+from .codingstatemachine import CodingStateMachine
+from .chardistribution import EUCTWDistributionAnalysis
 from .mbcssm import EUCTW_SM_MODEL
 
-
 class EUCTWProber(MultiByteCharSetProber):
     def __init__(self):
-        super().__init__()
+        super(EUCTWProber, self).__init__()
         self.coding_sm = CodingStateMachine(EUCTW_SM_MODEL)
         self.distribution_analyzer = EUCTWDistributionAnalysis()
         self.reset()
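
A note on the constructor change above: on Python 3 the zero-argument super().__init__() and the explicit super(EUCTWProber, self).__init__() resolve to the same base-class call; the explicit spelling is simply the Python 2 compatible form used by older chardet releases, so the vendored copy is older, not different in behaviour. A minimal sketch of the equivalence:

    class Base:
        def __init__(self):
            self.ready = True

    class Child(Base):
        def __init__(self):
            # On Python 3 both spellings call Base.__init__;
            # the explicit form also runs on Python 2.
            super().__init__()
            # super(Child, self).__init__()  # equivalent

    assert Child().ready
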
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/gb2312freq.py b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/gb2312freq.py
index b32bfc7..697837b 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/gb2312freq.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/gb2312freq.py
@@ -43,7 +43,6 @@
 
 GB2312_TABLE_SIZE = 3760
 
-# fmt: off
 GB2312_CHAR_TO_FREQ_ORDER = (
 1671, 749,1443,2364,3924,3807,2330,3921,1704,3463,2691,1511,1515, 572,3191,2205,
 2361, 224,2558, 479,1711, 963,3162, 440,4060,1905,2966,2947,3580,2647,3961,3842,
@@ -281,4 +280,4 @@
  381,1638,4592,1020, 516,3214, 458, 947,4575,1432, 211,1514,2926,1865,2142, 189,
  852,1221,1400,1486, 882,2299,4036, 351,  28,1122, 700,6479,6480,6481,6482,6483,  #last 512
 )
-# fmt: on
+
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/gb2312prober.py b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/gb2312prober.py
index 251c042..8446d2d 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/gb2312prober.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/gb2312prober.py
@@ -25,15 +25,14 @@
 # 02110-1301  USA
 ######################### END LICENSE BLOCK #########################
 
-from .chardistribution import GB2312DistributionAnalysis
-from .codingstatemachine import CodingStateMachine
 from .mbcharsetprober import MultiByteCharSetProber
+from .codingstatemachine import CodingStateMachine
+from .chardistribution import GB2312DistributionAnalysis
 from .mbcssm import GB2312_SM_MODEL
 
-
 class GB2312Prober(MultiByteCharSetProber):
     def __init__(self):
-        super().__init__()
+        super(GB2312Prober, self).__init__()
         self.coding_sm = CodingStateMachine(GB2312_SM_MODEL)
         self.distribution_analyzer = GB2312DistributionAnalysis()
         self.reset()
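
The same downgrade pattern repeats for GB2312Prober. None of these probers are called directly; they sit behind chardet's top-level detect() API, which feeds the bytes to every registered prober and reports the best-scoring charset. A minimal usage sketch, assuming the standalone chardet package rather than pip's vendored copy (the reported confidence below is illustrative):

    import chardet  # standalone package; pip's copy lives under pip._vendor

    sample = "汉字的使用频率决定了检测的置信度".encode("gb2312")
    print(chardet.detect(sample))
    # e.g. {'encoding': 'GB2312', 'confidence': 0.99, 'language': 'Chinese'}
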
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/hebrewprober.py b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/hebrewprober.py
index 3ca634b..b0e1bf4 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/hebrewprober.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/hebrewprober.py
@@ -125,19 +125,18 @@
 # model probers scores. The answer is returned in the form of the name of the
 # charset identified, either "windows-1255" or "ISO-8859-8".
 
-
 class HebrewProber(CharSetProber):
     # windows-1255 / ISO-8859-8 code points of interest
-    FINAL_KAF = 0xEA
-    NORMAL_KAF = 0xEB
-    FINAL_MEM = 0xED
-    NORMAL_MEM = 0xEE
-    FINAL_NUN = 0xEF
-    NORMAL_NUN = 0xF0
-    FINAL_PE = 0xF3
-    NORMAL_PE = 0xF4
-    FINAL_TSADI = 0xF5
-    NORMAL_TSADI = 0xF6
+    FINAL_KAF = 0xea
+    NORMAL_KAF = 0xeb
+    FINAL_MEM = 0xed
+    NORMAL_MEM = 0xee
+    FINAL_NUN = 0xef
+    NORMAL_NUN = 0xf0
+    FINAL_PE = 0xf3
+    NORMAL_PE = 0xf4
+    FINAL_TSADI = 0xf5
+    NORMAL_TSADI = 0xf6
 
     # Minimum Visual vs Logical final letter score difference.
     # If the difference is below this, don't rely solely on the final letter score
@@ -153,7 +152,7 @@ class HebrewProber(CharSetProber):
     LOGICAL_HEBREW_NAME = "windows-1255"
 
     def __init__(self):
-        super().__init__()
+        super(HebrewProber, self).__init__()
         self._final_char_logical_score = None
         self._final_char_visual_score = None
         self._prev = None
@@ -168,22 +167,17 @@ def reset(self):
         # The two last characters seen in the previous buffer,
         # mPrev and mBeforePrev are initialized to space in order to simulate
         # a word delimiter at the beginning of the data
-        self._prev = " "
-        self._before_prev = " "
+        self._prev = ' '
+        self._before_prev = ' '
         # These probers are owned by the group prober.
 
-    def set_model_probers(self, logical_prober, visual_prober):
-        self._logical_prober = logical_prober
-        self._visual_prober = visual_prober
+    def set_model_probers(self, logicalProber, visualProber):
+        self._logical_prober = logicalProber
+        self._visual_prober = visualProber
 
     def is_final(self, c):
-        return c in [
-            self.FINAL_KAF,
-            self.FINAL_MEM,
-            self.FINAL_NUN,
-            self.FINAL_PE,
-            self.FINAL_TSADI,
-        ]
+        return c in [self.FINAL_KAF, self.FINAL_MEM, self.FINAL_NUN,
+                     self.FINAL_PE, self.FINAL_TSADI]
 
     def is_non_final(self, c):
         # The normal Tsadi is not a good Non-Final letter due to words like
@@ -196,7 +190,8 @@ def is_non_final(self, c):
         # for example legally end with a Non-Final Pe or Kaf. However, the
         # benefit of these letters as Non-Final letters outweighs the damage
         # since these words are quite rare.
-        return c in [self.NORMAL_KAF, self.NORMAL_MEM, self.NORMAL_NUN, self.NORMAL_PE]
+        return c in [self.NORMAL_KAF, self.NORMAL_MEM,
+                     self.NORMAL_NUN, self.NORMAL_PE]
 
     def feed(self, byte_str):
         # Final letter analysis for logical-visual decision.
@@ -232,9 +227,9 @@ def feed(self, byte_str):
         byte_str = self.filter_high_byte_only(byte_str)
 
         for cur in byte_str:
-            if cur == " ":
+            if cur == ' ':
                 # We stand on a space - a word just ended
-                if self._before_prev != " ":
+                if self._before_prev != ' ':
                     # next-to-last char was not a space so self._prev is not a
                     # 1 letter word
                     if self.is_final(self._prev):
@@ -246,11 +241,8 @@ def feed(self, byte_str):
                         self._final_char_visual_score += 1
             else:
                 # Not standing on a space
-                if (
-                    (self._before_prev == " ")
-                    and (self.is_final(self._prev))
-                    and (cur != " ")
-                ):
+                if ((self._before_prev == ' ') and
+                        (self.is_final(self._prev)) and (cur != ' ')):
                     # case (3) [-2:space][-1:final letter][cur:not space]
                     self._final_char_visual_score += 1
             self._before_prev = self._prev
@@ -271,9 +263,8 @@ def charset_name(self):
             return self.VISUAL_HEBREW_NAME
 
         # It's not dominant enough, try to rely on the model scores instead.
-        modelsub = (
-            self._logical_prober.get_confidence() - self._visual_prober.get_confidence()
-        )
+        modelsub = (self._logical_prober.get_confidence()
+                    - self._visual_prober.get_confidence())
         if modelsub > self.MIN_MODEL_DISTANCE:
             return self.LOGICAL_HEBREW_NAME
         if modelsub < -self.MIN_MODEL_DISTANCE:
@@ -290,13 +281,12 @@ def charset_name(self):
 
     @property
     def language(self):
-        return "Hebrew"
+        return 'Hebrew'
 
     @property
     def state(self):
         # Remain active as long as any of the model probers are active.
-        if (self._logical_prober.state == ProbingState.NOT_ME) and (
-            self._visual_prober.state == ProbingState.NOT_ME
-        ):
+        if (self._logical_prober.state == ProbingState.NOT_ME) and \
+           (self._visual_prober.state == ProbingState.NOT_ME):
             return ProbingState.NOT_ME
         return ProbingState.DETECTING
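
Quote-style and line-wrapping churn aside, the hebrewprober.py hunks preserve the heuristic the comments describe: logical Hebrew (windows-1255) places final letters at word ends, while visual Hebrew (ISO-8859-8, stored in display order) effectively places them at word starts. A toy re-statement of the three scoring cases on decoded text, for readability; the real prober scores raw single-byte code points:

    # Hedged sketch of the final-letter heuristic; not the vendored code.
    FINAL = set("ךםןףץ")       # final kaf, mem, nun, pe, tsadi
    NON_FINAL = set("כמנפ")     # normal forms (normal tsadi excluded, as above)

    def score(text):
        logical = visual = 0
        prev = before_prev = " "
        for cur in text:
            if cur == " ":
                if before_prev != " ":       # prev is not a one-letter word
                    if prev in FINAL:        # case (1): final letter ends a word
                        logical += 1
                    elif prev in NON_FINAL:  # case (2): normal form ends a word
                        visual += 1
            elif before_prev == " " and prev in FINAL and cur != " ":
                visual += 1                  # case (3): final letter starts a word
            before_prev, prev = prev, cur
        return logical, visual
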
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/jisfreq.py b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/jisfreq.py
index 3293576..83fc082 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/jisfreq.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/jisfreq.py
@@ -46,7 +46,6 @@
 # Char to FreqOrder table ,
 JIS_TABLE_SIZE = 4368
 
-# fmt: off
 JIS_CHAR_TO_FREQ_ORDER = (
   40,   1,   6, 182, 152, 180, 295,2127, 285, 381,3295,4304,3068,4606,3165,3510, #   16
 3511,1822,2785,4607,1193,2226,5070,4608, 171,2996,1247,  18, 179,5071, 856,1661, #   32
@@ -322,4 +321,5 @@
 1444,1698,2385,2251,3729,1365,2281,2235,1717,6188, 864,3841,2515, 444, 527,2767, # 4352
 2922,3625, 544, 461,6189, 566, 209,2437,3398,2098,1065,2068,3331,3626,3257,2137, # 4368  #last 512
 )
-# fmt: on
+
+
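
The jisfreq.py hunk is the same fmt-marker and blank-line churn as above. Tables like JIS_CHAR_TO_FREQ_ORDER map each character to its rank in a frequency-ordered list, and the distribution analyser mostly needs to know whether that rank falls inside the most-common slice (note the "#last 512" marker on the final row) to build up a confidence ratio. A minimal sketch of that idea with made-up ranks:

    # Illustrative only: toy rank table and cutoff, not chardet's data.
    FREQ_ORDER = {"a": 0, "b": 1, "c": 2, "z": 4000}  # char -> frequency rank
    COMMON_CUTOFF = 512                                # ranks below count as frequent

    def frequent_ratio(text):
        total = freq = 0
        for ch in text:
            rank = FREQ_ORDER.get(ch)
            if rank is None:
                continue                  # unmapped characters are skipped
            total += 1
            if rank < COMMON_CUTOFF:
                freq += 1
        return freq / total if total else 0.0

    print(frequent_ratio("abcz"))  # 0.75: three of four mapped chars are common
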
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/johabfreq.py b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/johabfreq.py
deleted file mode 100644
index c129699..0000000
--- a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/johabfreq.py
+++ /dev/null
@@ -1,2382 +0,0 @@
-######################## BEGIN LICENSE BLOCK ########################
-# The Original Code is Mozilla Communicator client code.
-#
-# The Initial Developer of the Original Code is
-# Netscape Communications Corporation.
-# Portions created by the Initial Developer are Copyright (C) 1998
-# the Initial Developer. All Rights Reserved.
-#
-# Contributor(s):
-#   Mark Pilgrim - port to Python
-#
-# This library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License, or (at your option) any later version.
-#
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-# Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
-# 02110-1301  USA
-######################### END LICENSE BLOCK #########################
-
-# The frequency data itself is the same as euc-kr.
-# This is just a mapping table to euc-kr.
-
-JOHAB_TO_EUCKR_ORDER_TABLE = {
-    0x8861: 0,
-    0x8862: 1,
-    0x8865: 2,
-    0x8868: 3,
-    0x8869: 4,
-    0x886A: 5,
-    0x886B: 6,
-    0x8871: 7,
-    0x8873: 8,
-    0x8874: 9,
-    0x8875: 10,
-    0x8876: 11,
-    0x8877: 12,
-    0x8878: 13,
-    0x8879: 14,
-    0x887B: 15,
-    0x887C: 16,
-    0x887D: 17,
-    0x8881: 18,
-    0x8882: 19,
-    0x8885: 20,
-    0x8889: 21,
-    0x8891: 22,
-    0x8893: 23,
-    0x8895: 24,
-    0x8896: 25,
-    0x8897: 26,
-    0x88A1: 27,
-    0x88A2: 28,
-    0x88A5: 29,
-    0x88A9: 30,
-    0x88B5: 31,
-    0x88B7: 32,
-    0x88C1: 33,
-    0x88C5: 34,
-    0x88C9: 35,
-    0x88E1: 36,
-    0x88E2: 37,
-    0x88E5: 38,
-    0x88E8: 39,
-    0x88E9: 40,
-    0x88EB: 41,
-    0x88F1: 42,
-    0x88F3: 43,
-    0x88F5: 44,
-    0x88F6: 45,
-    0x88F7: 46,
-    0x88F8: 47,
-    0x88FB: 48,
-    0x88FC: 49,
-    0x88FD: 50,
-    0x8941: 51,
-    0x8945: 52,
-    0x8949: 53,
-    0x8951: 54,
-    0x8953: 55,
-    0x8955: 56,
-    0x8956: 57,
-    0x8957: 58,
-    0x8961: 59,
-    0x8962: 60,
-    0x8963: 61,
-    0x8965: 62,
-    0x8968: 63,
-    0x8969: 64,
-    0x8971: 65,
-    0x8973: 66,
-    0x8975: 67,
-    0x8976: 68,
-    0x8977: 69,
-    0x897B: 70,
-    0x8981: 71,
-    0x8985: 72,
-    0x8989: 73,
-    0x8993: 74,
-    0x8995: 75,
-    0x89A1: 76,
-    0x89A2: 77,
-    0x89A5: 78,
-    0x89A8: 79,
-    0x89A9: 80,
-    0x89AB: 81,
-    0x89AD: 82,
-    0x89B0: 83,
-    0x89B1: 84,
-    0x89B3: 85,
-    0x89B5: 86,
-    0x89B7: 87,
-    0x89B8: 88,
-    0x89C1: 89,
-    0x89C2: 90,
-    0x89C5: 91,
-    0x89C9: 92,
-    0x89CB: 93,
-    0x89D1: 94,
-    0x89D3: 95,
-    0x89D5: 96,
-    0x89D7: 97,
-    0x89E1: 98,
-    0x89E5: 99,
-    0x89E9: 100,
-    0x89F3: 101,
-    0x89F6: 102,
-    0x89F7: 103,
-    0x8A41: 104,
-    0x8A42: 105,
-    0x8A45: 106,
-    0x8A49: 107,
-    0x8A51: 108,
-    0x8A53: 109,
-    0x8A55: 110,
-    0x8A57: 111,
-    0x8A61: 112,
-    0x8A65: 113,
-    0x8A69: 114,
-    0x8A73: 115,
-    0x8A75: 116,
-    0x8A81: 117,
-    0x8A82: 118,
-    0x8A85: 119,
-    0x8A88: 120,
-    0x8A89: 121,
-    0x8A8A: 122,
-    0x8A8B: 123,
-    0x8A90: 124,
-    0x8A91: 125,
-    0x8A93: 126,
-    0x8A95: 127,
-    0x8A97: 128,
-    0x8A98: 129,
-    0x8AA1: 130,
-    0x8AA2: 131,
-    0x8AA5: 132,
-    0x8AA9: 133,
-    0x8AB6: 134,
-    0x8AB7: 135,
-    0x8AC1: 136,
-    0x8AD5: 137,
-    0x8AE1: 138,
-    0x8AE2: 139,
-    0x8AE5: 140,
-    0x8AE9: 141,
-    0x8AF1: 142,
-    0x8AF3: 143,
-    0x8AF5: 144,
-    0x8B41: 145,
-    0x8B45: 146,
-    0x8B49: 147,
-    0x8B61: 148,
-    0x8B62: 149,
-    0x8B65: 150,
-    0x8B68: 151,
-    0x8B69: 152,
-    0x8B6A: 153,
-    0x8B71: 154,
-    0x8B73: 155,
-    0x8B75: 156,
-    0x8B77: 157,
-    0x8B81: 158,
-    0x8BA1: 159,
-    0x8BA2: 160,
-    0x8BA5: 161,
-    0x8BA8: 162,
-    0x8BA9: 163,
-    0x8BAB: 164,
-    0x8BB1: 165,
-    0x8BB3: 166,
-    0x8BB5: 167,
-    0x8BB7: 168,
-    0x8BB8: 169,
-    0x8BBC: 170,
-    0x8C61: 171,
-    0x8C62: 172,
-    0x8C63: 173,
-    0x8C65: 174,
-    0x8C69: 175,
-    0x8C6B: 176,
-    0x8C71: 177,
-    0x8C73: 178,
-    0x8C75: 179,
-    0x8C76: 180,
-    0x8C77: 181,
-    0x8C7B: 182,
-    0x8C81: 183,
-    0x8C82: 184,
-    0x8C85: 185,
-    0x8C89: 186,
-    0x8C91: 187,
-    0x8C93: 188,
-    0x8C95: 189,
-    0x8C96: 190,
-    0x8C97: 191,
-    0x8CA1: 192,
-    0x8CA2: 193,
-    0x8CA9: 194,
-    0x8CE1: 195,
-    0x8CE2: 196,
-    0x8CE3: 197,
-    0x8CE5: 198,
-    0x8CE9: 199,
-    0x8CF1: 200,
-    0x8CF3: 201,
-    0x8CF5: 202,
-    0x8CF6: 203,
-    0x8CF7: 204,
-    0x8D41: 205,
-    0x8D42: 206,
-    0x8D45: 207,
-    0x8D51: 208,
-    0x8D55: 209,
-    0x8D57: 210,
-    0x8D61: 211,
-    0x8D65: 212,
-    0x8D69: 213,
-    0x8D75: 214,
-    0x8D76: 215,
-    0x8D7B: 216,
-    0x8D81: 217,
-    0x8DA1: 218,
-    0x8DA2: 219,
-    0x8DA5: 220,
-    0x8DA7: 221,
-    0x8DA9: 222,
-    0x8DB1: 223,
-    0x8DB3: 224,
-    0x8DB5: 225,
-    0x8DB7: 226,
-    0x8DB8: 227,
-    0x8DB9: 228,
-    0x8DC1: 229,
-    0x8DC2: 230,
-    0x8DC9: 231,
-    0x8DD6: 232,
-    0x8DD7: 233,
-    0x8DE1: 234,
-    0x8DE2: 235,
-    0x8DF7: 236,
-    0x8E41: 237,
-    0x8E45: 238,
-    0x8E49: 239,
-    0x8E51: 240,
-    0x8E53: 241,
-    0x8E57: 242,
-    0x8E61: 243,
-    0x8E81: 244,
-    0x8E82: 245,
-    0x8E85: 246,
-    0x8E89: 247,
-    0x8E90: 248,
-    0x8E91: 249,
-    0x8E93: 250,
-    0x8E95: 251,
-    0x8E97: 252,
-    0x8E98: 253,
-    0x8EA1: 254,
-    0x8EA9: 255,
-    0x8EB6: 256,
-    0x8EB7: 257,
-    0x8EC1: 258,
-    0x8EC2: 259,
-    0x8EC5: 260,
-    0x8EC9: 261,
-    0x8ED1: 262,
-    0x8ED3: 263,
-    0x8ED6: 264,
-    0x8EE1: 265,
-    0x8EE5: 266,
-    0x8EE9: 267,
-    0x8EF1: 268,
-    0x8EF3: 269,
-    0x8F41: 270,
-    0x8F61: 271,
-    0x8F62: 272,
-    0x8F65: 273,
-    0x8F67: 274,
-    0x8F69: 275,
-    0x8F6B: 276,
-    0x8F70: 277,
-    0x8F71: 278,
-    0x8F73: 279,
-    0x8F75: 280,
-    0x8F77: 281,
-    0x8F7B: 282,
-    0x8FA1: 283,
-    0x8FA2: 284,
-    0x8FA5: 285,
-    0x8FA9: 286,
-    0x8FB1: 287,
-    0x8FB3: 288,
-    0x8FB5: 289,
-    0x8FB7: 290,
-    0x9061: 291,
-    0x9062: 292,
-    0x9063: 293,
-    0x9065: 294,
-    0x9068: 295,
-    0x9069: 296,
-    0x906A: 297,
-    0x906B: 298,
-    0x9071: 299,
-    0x9073: 300,
-    0x9075: 301,
-    0x9076: 302,
-    0x9077: 303,
-    0x9078: 304,
-    0x9079: 305,
-    0x907B: 306,
-    0x907D: 307,
-    0x9081: 308,
-    0x9082: 309,
-    0x9085: 310,
-    0x9089: 311,
-    0x9091: 312,
-    0x9093: 313,
-    0x9095: 314,
-    0x9096: 315,
-    0x9097: 316,
-    0x90A1: 317,
-    0x90A2: 318,
-    0x90A5: 319,
-    0x90A9: 320,
-    0x90B1: 321,
-    0x90B7: 322,
-    0x90E1: 323,
-    0x90E2: 324,
-    0x90E4: 325,
-    0x90E5: 326,
-    0x90E9: 327,
-    0x90EB: 328,
-    0x90EC: 329,
-    0x90F1: 330,
-    0x90F3: 331,
-    0x90F5: 332,
-    0x90F6: 333,
-    0x90F7: 334,
-    0x90FD: 335,
-    0x9141: 336,
-    0x9142: 337,
-    0x9145: 338,
-    0x9149: 339,
-    0x9151: 340,
-    0x9153: 341,
-    0x9155: 342,
-    0x9156: 343,
-    0x9157: 344,
-    0x9161: 345,
-    0x9162: 346,
-    0x9165: 347,
-    0x9169: 348,
-    0x9171: 349,
-    0x9173: 350,
-    0x9176: 351,
-    0x9177: 352,
-    0x917A: 353,
-    0x9181: 354,
-    0x9185: 355,
-    0x91A1: 356,
-    0x91A2: 357,
-    0x91A5: 358,
-    0x91A9: 359,
-    0x91AB: 360,
-    0x91B1: 361,
-    0x91B3: 362,
-    0x91B5: 363,
-    0x91B7: 364,
-    0x91BC: 365,
-    0x91BD: 366,
-    0x91C1: 367,
-    0x91C5: 368,
-    0x91C9: 369,
-    0x91D6: 370,
-    0x9241: 371,
-    0x9245: 372,
-    0x9249: 373,
-    0x9251: 374,
-    0x9253: 375,
-    0x9255: 376,
-    0x9261: 377,
-    0x9262: 378,
-    0x9265: 379,
-    0x9269: 380,
-    0x9273: 381,
-    0x9275: 382,
-    0x9277: 383,
-    0x9281: 384,
-    0x9282: 385,
-    0x9285: 386,
-    0x9288: 387,
-    0x9289: 388,
-    0x9291: 389,
-    0x9293: 390,
-    0x9295: 391,
-    0x9297: 392,
-    0x92A1: 393,
-    0x92B6: 394,
-    0x92C1: 395,
-    0x92E1: 396,
-    0x92E5: 397,
-    0x92E9: 398,
-    0x92F1: 399,
-    0x92F3: 400,
-    0x9341: 401,
-    0x9342: 402,
-    0x9349: 403,
-    0x9351: 404,
-    0x9353: 405,
-    0x9357: 406,
-    0x9361: 407,
-    0x9362: 408,
-    0x9365: 409,
-    0x9369: 410,
-    0x936A: 411,
-    0x936B: 412,
-    0x9371: 413,
-    0x9373: 414,
-    0x9375: 415,
-    0x9377: 416,
-    0x9378: 417,
-    0x937C: 418,
-    0x9381: 419,
-    0x9385: 420,
-    0x9389: 421,
-    0x93A1: 422,
-    0x93A2: 423,
-    0x93A5: 424,
-    0x93A9: 425,
-    0x93AB: 426,
-    0x93B1: 427,
-    0x93B3: 428,
-    0x93B5: 429,
-    0x93B7: 430,
-    0x93BC: 431,
-    0x9461: 432,
-    0x9462: 433,
-    0x9463: 434,
-    0x9465: 435,
-    0x9468: 436,
-    0x9469: 437,
-    0x946A: 438,
-    0x946B: 439,
-    0x946C: 440,
-    0x9470: 441,
-    0x9471: 442,
-    0x9473: 443,
-    0x9475: 444,
-    0x9476: 445,
-    0x9477: 446,
-    0x9478: 447,
-    0x9479: 448,
-    0x947D: 449,
-    0x9481: 450,
-    0x9482: 451,
-    0x9485: 452,
-    0x9489: 453,
-    0x9491: 454,
-    0x9493: 455,
-    0x9495: 456,
-    0x9496: 457,
-    0x9497: 458,
-    0x94A1: 459,
-    0x94E1: 460,
-    0x94E2: 461,
-    0x94E3: 462,
-    0x94E5: 463,
-    0x94E8: 464,
-    0x94E9: 465,
-    0x94EB: 466,
-    0x94EC: 467,
-    0x94F1: 468,
-    0x94F3: 469,
-    0x94F5: 470,
-    0x94F7: 471,
-    0x94F9: 472,
-    0x94FC: 473,
-    0x9541: 474,
-    0x9542: 475,
-    0x9545: 476,
-    0x9549: 477,
-    0x9551: 478,
-    0x9553: 479,
-    0x9555: 480,
-    0x9556: 481,
-    0x9557: 482,
-    0x9561: 483,
-    0x9565: 484,
-    0x9569: 485,
-    0x9576: 486,
-    0x9577: 487,
-    0x9581: 488,
-    0x9585: 489,
-    0x95A1: 490,
-    0x95A2: 491,
-    0x95A5: 492,
-    0x95A8: 493,
-    0x95A9: 494,
-    0x95AB: 495,
-    0x95AD: 496,
-    0x95B1: 497,
-    0x95B3: 498,
-    0x95B5: 499,
-    0x95B7: 500,
-    0x95B9: 501,
-    0x95BB: 502,
-    0x95C1: 503,
-    0x95C5: 504,
-    0x95C9: 505,
-    0x95E1: 506,
-    0x95F6: 507,
-    0x9641: 508,
-    0x9645: 509,
-    0x9649: 510,
-    0x9651: 511,
-    0x9653: 512,
-    0x9655: 513,
-    0x9661: 514,
-    0x9681: 515,
-    0x9682: 516,
-    0x9685: 517,
-    0x9689: 518,
-    0x9691: 519,
-    0x9693: 520,
-    0x9695: 521,
-    0x9697: 522,
-    0x96A1: 523,
-    0x96B6: 524,
-    0x96C1: 525,
-    0x96D7: 526,
-    0x96E1: 527,
-    0x96E5: 528,
-    0x96E9: 529,
-    0x96F3: 530,
-    0x96F5: 531,
-    0x96F7: 532,
-    0x9741: 533,
-    0x9745: 534,
-    0x9749: 535,
-    0x9751: 536,
-    0x9757: 537,
-    0x9761: 538,
-    0x9762: 539,
-    0x9765: 540,
-    0x9768: 541,
-    0x9769: 542,
-    0x976B: 543,
-    0x9771: 544,
-    0x9773: 545,
-    0x9775: 546,
-    0x9777: 547,
-    0x9781: 548,
-    0x97A1: 549,
-    0x97A2: 550,
-    0x97A5: 551,
-    0x97A8: 552,
-    0x97A9: 553,
-    0x97B1: 554,
-    0x97B3: 555,
-    0x97B5: 556,
-    0x97B6: 557,
-    0x97B7: 558,
-    0x97B8: 559,
-    0x9861: 560,
-    0x9862: 561,
-    0x9865: 562,
-    0x9869: 563,
-    0x9871: 564,
-    0x9873: 565,
-    0x9875: 566,
-    0x9876: 567,
-    0x9877: 568,
-    0x987D: 569,
-    0x9881: 570,
-    0x9882: 571,
-    0x9885: 572,
-    0x9889: 573,
-    0x9891: 574,
-    0x9893: 575,
-    0x9895: 576,
-    0x9896: 577,
-    0x9897: 578,
-    0x98E1: 579,
-    0x98E2: 580,
-    0x98E5: 581,
-    0x98E9: 582,
-    0x98EB: 583,
-    0x98EC: 584,
-    0x98F1: 585,
-    0x98F3: 586,
-    0x98F5: 587,
-    0x98F6: 588,
-    0x98F7: 589,
-    0x98FD: 590,
-    0x9941: 591,
-    0x9942: 592,
-    0x9945: 593,
-    0x9949: 594,
-    0x9951: 595,
-    0x9953: 596,
-    0x9955: 597,
-    0x9956: 598,
-    0x9957: 599,
-    0x9961: 600,
-    0x9976: 601,
-    0x99A1: 602,
-    0x99A2: 603,
-    0x99A5: 604,
-    0x99A9: 605,
-    0x99B7: 606,
-    0x99C1: 607,
-    0x99C9: 608,
-    0x99E1: 609,
-    0x9A41: 610,
-    0x9A45: 611,
-    0x9A81: 612,
-    0x9A82: 613,
-    0x9A85: 614,
-    0x9A89: 615,
-    0x9A90: 616,
-    0x9A91: 617,
-    0x9A97: 618,
-    0x9AC1: 619,
-    0x9AE1: 620,
-    0x9AE5: 621,
-    0x9AE9: 622,
-    0x9AF1: 623,
-    0x9AF3: 624,
-    0x9AF7: 625,
-    0x9B61: 626,
-    0x9B62: 627,
-    0x9B65: 628,
-    0x9B68: 629,
-    0x9B69: 630,
-    0x9B71: 631,
-    0x9B73: 632,
-    0x9B75: 633,
-    0x9B81: 634,
-    0x9B85: 635,
-    0x9B89: 636,
-    0x9B91: 637,
-    0x9B93: 638,
-    0x9BA1: 639,
-    0x9BA5: 640,
-    0x9BA9: 641,
-    0x9BB1: 642,
-    0x9BB3: 643,
-    0x9BB5: 644,
-    0x9BB7: 645,
-    0x9C61: 646,
-    0x9C62: 647,
-    0x9C65: 648,
-    0x9C69: 649,
-    0x9C71: 650,
-    0x9C73: 651,
-    0x9C75: 652,
-    0x9C76: 653,
-    0x9C77: 654,
-    0x9C78: 655,
-    0x9C7C: 656,
-    0x9C7D: 657,
-    0x9C81: 658,
-    0x9C82: 659,
-    0x9C85: 660,
-    0x9C89: 661,
-    0x9C91: 662,
-    0x9C93: 663,
-    0x9C95: 664,
-    0x9C96: 665,
-    0x9C97: 666,
-    0x9CA1: 667,
-    0x9CA2: 668,
-    0x9CA5: 669,
-    0x9CB5: 670,
-    0x9CB7: 671,
-    0x9CE1: 672,
-    0x9CE2: 673,
-    0x9CE5: 674,
-    0x9CE9: 675,
-    0x9CF1: 676,
-    0x9CF3: 677,
-    0x9CF5: 678,
-    0x9CF6: 679,
-    0x9CF7: 680,
-    0x9CFD: 681,
-    0x9D41: 682,
-    0x9D42: 683,
-    0x9D45: 684,
-    0x9D49: 685,
-    0x9D51: 686,
-    0x9D53: 687,
-    0x9D55: 688,
-    0x9D57: 689,
-    0x9D61: 690,
-    0x9D62: 691,
-    0x9D65: 692,
-    0x9D69: 693,
-    0x9D71: 694,
-    0x9D73: 695,
-    0x9D75: 696,
-    0x9D76: 697,
-    0x9D77: 698,
-    0x9D81: 699,
-    0x9D85: 700,
-    0x9D93: 701,
-    0x9D95: 702,
-    0x9DA1: 703,
-    0x9DA2: 704,
-    0x9DA5: 705,
-    0x9DA9: 706,
-    0x9DB1: 707,
-    0x9DB3: 708,
-    0x9DB5: 709,
-    0x9DB7: 710,
-    0x9DC1: 711,
-    0x9DC5: 712,
-    0x9DD7: 713,
-    0x9DF6: 714,
-    0x9E41: 715,
-    0x9E45: 716,
-    0x9E49: 717,
-    0x9E51: 718,
-    0x9E53: 719,
-    0x9E55: 720,
-    0x9E57: 721,
-    0x9E61: 722,
-    0x9E65: 723,
-    0x9E69: 724,
-    0x9E73: 725,
-    0x9E75: 726,
-    0x9E77: 727,
-    0x9E81: 728,
-    0x9E82: 729,
-    0x9E85: 730,
-    0x9E89: 731,
-    0x9E91: 732,
-    0x9E93: 733,
-    0x9E95: 734,
-    0x9E97: 735,
-    0x9EA1: 736,
-    0x9EB6: 737,
-    0x9EC1: 738,
-    0x9EE1: 739,
-    0x9EE2: 740,
-    0x9EE5: 741,
-    0x9EE9: 742,
-    0x9EF1: 743,
-    0x9EF5: 744,
-    0x9EF7: 745,
-    0x9F41: 746,
-    0x9F42: 747,
-    0x9F45: 748,
-    0x9F49: 749,
-    0x9F51: 750,
-    0x9F53: 751,
-    0x9F55: 752,
-    0x9F57: 753,
-    0x9F61: 754,
-    0x9F62: 755,
-    0x9F65: 756,
-    0x9F69: 757,
-    0x9F71: 758,
-    0x9F73: 759,
-    0x9F75: 760,
-    0x9F77: 761,
-    0x9F78: 762,
-    0x9F7B: 763,
-    0x9F7C: 764,
-    0x9FA1: 765,
-    0x9FA2: 766,
-    0x9FA5: 767,
-    0x9FA9: 768,
-    0x9FB1: 769,
-    0x9FB3: 770,
-    0x9FB5: 771,
-    0x9FB7: 772,
-    0xA061: 773,
-    0xA062: 774,
-    0xA065: 775,
-    0xA067: 776,
-    0xA068: 777,
-    0xA069: 778,
-    0xA06A: 779,
-    0xA06B: 780,
-    0xA071: 781,
-    0xA073: 782,
-    0xA075: 783,
-    0xA077: 784,
-    0xA078: 785,
-    0xA07B: 786,
-    0xA07D: 787,
-    0xA081: 788,
-    0xA082: 789,
-    0xA085: 790,
-    0xA089: 791,
-    0xA091: 792,
-    0xA093: 793,
-    0xA095: 794,
-    0xA096: 795,
-    0xA097: 796,
-    0xA098: 797,
-    0xA0A1: 798,
-    0xA0A2: 799,
-    0xA0A9: 800,
-    0xA0B7: 801,
-    0xA0E1: 802,
-    0xA0E2: 803,
-    0xA0E5: 804,
-    0xA0E9: 805,
-    0xA0EB: 806,
-    0xA0F1: 807,
-    0xA0F3: 808,
-    0xA0F5: 809,
-    0xA0F7: 810,
-    0xA0F8: 811,
-    0xA0FD: 812,
-    0xA141: 813,
-    0xA142: 814,
-    0xA145: 815,
-    0xA149: 816,
-    0xA151: 817,
-    0xA153: 818,
-    0xA155: 819,
-    0xA156: 820,
-    0xA157: 821,
-    0xA161: 822,
-    0xA162: 823,
-    0xA165: 824,
-    0xA169: 825,
-    0xA175: 826,
-    0xA176: 827,
-    0xA177: 828,
-    0xA179: 829,
-    0xA181: 830,
-    0xA1A1: 831,
-    0xA1A2: 832,
-    0xA1A4: 833,
-    0xA1A5: 834,
-    0xA1A9: 835,
-    0xA1AB: 836,
-    0xA1B1: 837,
-    0xA1B3: 838,
-    0xA1B5: 839,
-    0xA1B7: 840,
-    0xA1C1: 841,
-    0xA1C5: 842,
-    0xA1D6: 843,
-    0xA1D7: 844,
-    0xA241: 845,
-    0xA245: 846,
-    0xA249: 847,
-    0xA253: 848,
-    0xA255: 849,
-    0xA257: 850,
-    0xA261: 851,
-    0xA265: 852,
-    0xA269: 853,
-    0xA273: 854,
-    0xA275: 855,
-    0xA281: 856,
-    0xA282: 857,
-    0xA283: 858,
-    0xA285: 859,
-    0xA288: 860,
-    0xA289: 861,
-    0xA28A: 862,
-    0xA28B: 863,
-    0xA291: 864,
-    0xA293: 865,
-    0xA295: 866,
-    0xA297: 867,
-    0xA29B: 868,
-    0xA29D: 869,
-    0xA2A1: 870,
-    0xA2A5: 871,
-    0xA2A9: 872,
-    0xA2B3: 873,
-    0xA2B5: 874,
-    0xA2C1: 875,
-    0xA2E1: 876,
-    0xA2E5: 877,
-    0xA2E9: 878,
-    0xA341: 879,
-    0xA345: 880,
-    0xA349: 881,
-    0xA351: 882,
-    0xA355: 883,
-    0xA361: 884,
-    0xA365: 885,
-    0xA369: 886,
-    0xA371: 887,
-    0xA375: 888,
-    0xA3A1: 889,
-    0xA3A2: 890,
-    0xA3A5: 891,
-    0xA3A8: 892,
-    0xA3A9: 893,
-    0xA3AB: 894,
-    0xA3B1: 895,
-    0xA3B3: 896,
-    0xA3B5: 897,
-    0xA3B6: 898,
-    0xA3B7: 899,
-    0xA3B9: 900,
-    0xA3BB: 901,
-    0xA461: 902,
-    0xA462: 903,
-    0xA463: 904,
-    0xA464: 905,
-    0xA465: 906,
-    0xA468: 907,
-    0xA469: 908,
-    0xA46A: 909,
-    0xA46B: 910,
-    0xA46C: 911,
-    0xA471: 912,
-    0xA473: 913,
-    0xA475: 914,
-    0xA477: 915,
-    0xA47B: 916,
-    0xA481: 917,
-    0xA482: 918,
-    0xA485: 919,
-    0xA489: 920,
-    0xA491: 921,
-    0xA493: 922,
-    0xA495: 923,
-    0xA496: 924,
-    0xA497: 925,
-    0xA49B: 926,
-    0xA4A1: 927,
-    0xA4A2: 928,
-    0xA4A5: 929,
-    0xA4B3: 930,
-    0xA4E1: 931,
-    0xA4E2: 932,
-    0xA4E5: 933,
-    0xA4E8: 934,
-    0xA4E9: 935,
-    0xA4EB: 936,
-    0xA4F1: 937,
-    0xA4F3: 938,
-    0xA4F5: 939,
-    0xA4F7: 940,
-    0xA4F8: 941,
-    0xA541: 942,
-    0xA542: 943,
-    0xA545: 944,
-    0xA548: 945,
-    0xA549: 946,
-    0xA551: 947,
-    0xA553: 948,
-    0xA555: 949,
-    0xA556: 950,
-    0xA557: 951,
-    0xA561: 952,
-    0xA562: 953,
-    0xA565: 954,
-    0xA569: 955,
-    0xA573: 956,
-    0xA575: 957,
-    0xA576: 958,
-    0xA577: 959,
-    0xA57B: 960,
-    0xA581: 961,
-    0xA585: 962,
-    0xA5A1: 963,
-    0xA5A2: 964,
-    0xA5A3: 965,
-    0xA5A5: 966,
-    0xA5A9: 967,
-    0xA5B1: 968,
-    0xA5B3: 969,
-    0xA5B5: 970,
-    0xA5B7: 971,
-    0xA5C1: 972,
-    0xA5C5: 973,
-    0xA5D6: 974,
-    0xA5E1: 975,
-    0xA5F6: 976,
-    0xA641: 977,
-    0xA642: 978,
-    0xA645: 979,
-    0xA649: 980,
-    0xA651: 981,
-    0xA653: 982,
-    0xA661: 983,
-    0xA665: 984,
-    0xA681: 985,
-    0xA682: 986,
-    0xA685: 987,
-    0xA688: 988,
-    0xA689: 989,
-    0xA68A: 990,
-    0xA68B: 991,
-    0xA691: 992,
-    0xA693: 993,
-    0xA695: 994,
-    0xA697: 995,
-    0xA69B: 996,
-    0xA69C: 997,
-    0xA6A1: 998,
-    0xA6A9: 999,
-    0xA6B6: 1000,
-    0xA6C1: 1001,
-    0xA6E1: 1002,
-    0xA6E2: 1003,
-    0xA6E5: 1004,
-    0xA6E9: 1005,
-    0xA6F7: 1006,
-    0xA741: 1007,
-    0xA745: 1008,
-    0xA749: 1009,
-    0xA751: 1010,
-    0xA755: 1011,
-    0xA757: 1012,
-    0xA761: 1013,
-    0xA762: 1014,
-    0xA765: 1015,
-    0xA769: 1016,
-    0xA771: 1017,
-    0xA773: 1018,
-    0xA775: 1019,
-    0xA7A1: 1020,
-    0xA7A2: 1021,
-    0xA7A5: 1022,
-    0xA7A9: 1023,
-    0xA7AB: 1024,
-    0xA7B1: 1025,
-    0xA7B3: 1026,
-    0xA7B5: 1027,
-    0xA7B7: 1028,
-    0xA7B8: 1029,
-    0xA7B9: 1030,
-    0xA861: 1031,
-    0xA862: 1032,
-    0xA865: 1033,
-    0xA869: 1034,
-    0xA86B: 1035,
-    0xA871: 1036,
-    0xA873: 1037,
-    0xA875: 1038,
-    0xA876: 1039,
-    0xA877: 1040,
-    0xA87D: 1041,
-    0xA881: 1042,
-    0xA882: 1043,
-    0xA885: 1044,
-    0xA889: 1045,
-    0xA891: 1046,
-    0xA893: 1047,
-    0xA895: 1048,
-    0xA896: 1049,
-    0xA897: 1050,
-    0xA8A1: 1051,
-    0xA8A2: 1052,
-    0xA8B1: 1053,
-    0xA8E1: 1054,
-    0xA8E2: 1055,
-    0xA8E5: 1056,
-    0xA8E8: 1057,
-    0xA8E9: 1058,
-    0xA8F1: 1059,
-    0xA8F5: 1060,
-    0xA8F6: 1061,
-    0xA8F7: 1062,
-    0xA941: 1063,
-    0xA957: 1064,
-    0xA961: 1065,
-    0xA962: 1066,
-    0xA971: 1067,
-    0xA973: 1068,
-    0xA975: 1069,
-    0xA976: 1070,
-    0xA977: 1071,
-    0xA9A1: 1072,
-    0xA9A2: 1073,
-    0xA9A5: 1074,
-    0xA9A9: 1075,
-    0xA9B1: 1076,
-    0xA9B3: 1077,
-    0xA9B7: 1078,
-    0xAA41: 1079,
-    0xAA61: 1080,
-    0xAA77: 1081,
-    0xAA81: 1082,
-    0xAA82: 1083,
-    0xAA85: 1084,
-    0xAA89: 1085,
-    0xAA91: 1086,
-    0xAA95: 1087,
-    0xAA97: 1088,
-    0xAB41: 1089,
-    0xAB57: 1090,
-    0xAB61: 1091,
-    0xAB65: 1092,
-    0xAB69: 1093,
-    0xAB71: 1094,
-    0xAB73: 1095,
-    0xABA1: 1096,
-    0xABA2: 1097,
-    0xABA5: 1098,
-    0xABA9: 1099,
-    0xABB1: 1100,
-    0xABB3: 1101,
-    0xABB5: 1102,
-    0xABB7: 1103,
-    0xAC61: 1104,
-    0xAC62: 1105,
-    0xAC64: 1106,
-    0xAC65: 1107,
-    0xAC68: 1108,
-    0xAC69: 1109,
-    0xAC6A: 1110,
-    0xAC6B: 1111,
-    0xAC71: 1112,
-    0xAC73: 1113,
-    0xAC75: 1114,
-    0xAC76: 1115,
-    0xAC77: 1116,
-    0xAC7B: 1117,
-    0xAC81: 1118,
-    0xAC82: 1119,
-    0xAC85: 1120,
-    0xAC89: 1121,
-    0xAC91: 1122,
-    0xAC93: 1123,
-    0xAC95: 1124,
-    0xAC96: 1125,
-    0xAC97: 1126,
-    0xACA1: 1127,
-    0xACA2: 1128,
-    0xACA5: 1129,
-    0xACA9: 1130,
-    0xACB1: 1131,
-    0xACB3: 1132,
-    0xACB5: 1133,
-    0xACB7: 1134,
-    0xACC1: 1135,
-    0xACC5: 1136,
-    0xACC9: 1137,
-    0xACD1: 1138,
-    0xACD7: 1139,
-    0xACE1: 1140,
-    0xACE2: 1141,
-    0xACE3: 1142,
-    0xACE4: 1143,
-    0xACE5: 1144,
-    0xACE8: 1145,
-    0xACE9: 1146,
-    0xACEB: 1147,
-    0xACEC: 1148,
-    0xACF1: 1149,
-    0xACF3: 1150,
-    0xACF5: 1151,
-    0xACF6: 1152,
-    0xACF7: 1153,
-    0xACFC: 1154,
-    0xAD41: 1155,
-    0xAD42: 1156,
-    0xAD45: 1157,
-    0xAD49: 1158,
-    0xAD51: 1159,
-    0xAD53: 1160,
-    0xAD55: 1161,
-    0xAD56: 1162,
-    0xAD57: 1163,
-    0xAD61: 1164,
-    0xAD62: 1165,
-    0xAD65: 1166,
-    0xAD69: 1167,
-    0xAD71: 1168,
-    0xAD73: 1169,
-    0xAD75: 1170,
-    0xAD76: 1171,
-    0xAD77: 1172,
-    0xAD81: 1173,
-    0xAD85: 1174,
-    0xAD89: 1175,
-    0xAD97: 1176,
-    0xADA1: 1177,
-    0xADA2: 1178,
-    0xADA3: 1179,
-    0xADA5: 1180,
-    0xADA9: 1181,
-    0xADAB: 1182,
-    0xADB1: 1183,
-    0xADB3: 1184,
-    0xADB5: 1185,
-    0xADB7: 1186,
-    0xADBB: 1187,
-    0xADC1: 1188,
-    0xADC2: 1189,
-    0xADC5: 1190,
-    0xADC9: 1191,
-    0xADD7: 1192,
-    0xADE1: 1193,
-    0xADE5: 1194,
-    0xADE9: 1195,
-    0xADF1: 1196,
-    0xADF5: 1197,
-    0xADF6: 1198,
-    0xAE41: 1199,
-    0xAE45: 1200,
-    0xAE49: 1201,
-    0xAE51: 1202,
-    0xAE53: 1203,
-    0xAE55: 1204,
-    0xAE61: 1205,
-    0xAE62: 1206,
-    0xAE65: 1207,
-    0xAE69: 1208,
-    0xAE71: 1209,
-    0xAE73: 1210,
-    0xAE75: 1211,
-    0xAE77: 1212,
-    0xAE81: 1213,
-    0xAE82: 1214,
-    0xAE85: 1215,
-    0xAE88: 1216,
-    0xAE89: 1217,
-    0xAE91: 1218,
-    0xAE93: 1219,
-    0xAE95: 1220,
-    0xAE97: 1221,
-    0xAE99: 1222,
-    0xAE9B: 1223,
-    0xAE9C: 1224,
-    0xAEA1: 1225,
-    0xAEB6: 1226,
-    0xAEC1: 1227,
-    0xAEC2: 1228,
-    0xAEC5: 1229,
-    0xAEC9: 1230,
-    0xAED1: 1231,
-    0xAED7: 1232,
-    0xAEE1: 1233,
-    0xAEE2: 1234,
-    0xAEE5: 1235,
-    0xAEE9: 1236,
-    0xAEF1: 1237,
-    0xAEF3: 1238,
-    0xAEF5: 1239,
-    0xAEF7: 1240,
-    0xAF41: 1241,
-    0xAF42: 1242,
-    0xAF49: 1243,
-    0xAF51: 1244,
-    0xAF55: 1245,
-    0xAF57: 1246,
-    0xAF61: 1247,
-    0xAF62: 1248,
-    0xAF65: 1249,
-    0xAF69: 1250,
-    0xAF6A: 1251,
-    0xAF71: 1252,
-    0xAF73: 1253,
-    0xAF75: 1254,
-    0xAF77: 1255,
-    0xAFA1: 1256,
-    0xAFA2: 1257,
-    0xAFA5: 1258,
-    0xAFA8: 1259,
-    0xAFA9: 1260,
-    0xAFB0: 1261,
-    0xAFB1: 1262,
-    0xAFB3: 1263,
-    0xAFB5: 1264,
-    0xAFB7: 1265,
-    0xAFBC: 1266,
-    0xB061: 1267,
-    0xB062: 1268,
-    0xB064: 1269,
-    0xB065: 1270,
-    0xB069: 1271,
-    0xB071: 1272,
-    0xB073: 1273,
-    0xB076: 1274,
-    0xB077: 1275,
-    0xB07D: 1276,
-    0xB081: 1277,
-    0xB082: 1278,
-    0xB085: 1279,
-    0xB089: 1280,
-    0xB091: 1281,
-    0xB093: 1282,
-    0xB096: 1283,
-    0xB097: 1284,
-    0xB0B7: 1285,
-    0xB0E1: 1286,
-    0xB0E2: 1287,
-    0xB0E5: 1288,
-    0xB0E9: 1289,
-    0xB0EB: 1290,
-    0xB0F1: 1291,
-    0xB0F3: 1292,
-    0xB0F6: 1293,
-    0xB0F7: 1294,
-    0xB141: 1295,
-    0xB145: 1296,
-    0xB149: 1297,
-    0xB185: 1298,
-    0xB1A1: 1299,
-    0xB1A2: 1300,
-    0xB1A5: 1301,
-    0xB1A8: 1302,
-    0xB1A9: 1303,
-    0xB1AB: 1304,
-    0xB1B1: 1305,
-    0xB1B3: 1306,
-    0xB1B7: 1307,
-    0xB1C1: 1308,
-    0xB1C2: 1309,
-    0xB1C5: 1310,
-    0xB1D6: 1311,
-    0xB1E1: 1312,
-    0xB1F6: 1313,
-    0xB241: 1314,
-    0xB245: 1315,
-    0xB249: 1316,
-    0xB251: 1317,
-    0xB253: 1318,
-    0xB261: 1319,
-    0xB281: 1320,
-    0xB282: 1321,
-    0xB285: 1322,
-    0xB289: 1323,
-    0xB291: 1324,
-    0xB293: 1325,
-    0xB297: 1326,
-    0xB2A1: 1327,
-    0xB2B6: 1328,
-    0xB2C1: 1329,
-    0xB2E1: 1330,
-    0xB2E5: 1331,
-    0xB357: 1332,
-    0xB361: 1333,
-    0xB362: 1334,
-    0xB365: 1335,
-    0xB369: 1336,
-    0xB36B: 1337,
-    0xB370: 1338,
-    0xB371: 1339,
-    0xB373: 1340,
-    0xB381: 1341,
-    0xB385: 1342,
-    0xB389: 1343,
-    0xB391: 1344,
-    0xB3A1: 1345,
-    0xB3A2: 1346,
-    0xB3A5: 1347,
-    0xB3A9: 1348,
-    0xB3B1: 1349,
-    0xB3B3: 1350,
-    0xB3B5: 1351,
-    0xB3B7: 1352,
-    0xB461: 1353,
-    0xB462: 1354,
-    0xB465: 1355,
-    0xB466: 1356,
-    0xB467: 1357,
-    0xB469: 1358,
-    0xB46A: 1359,
-    0xB46B: 1360,
-    0xB470: 1361,
-    0xB471: 1362,
-    0xB473: 1363,
-    0xB475: 1364,
-    0xB476: 1365,
-    0xB477: 1366,
-    0xB47B: 1367,
-    0xB47C: 1368,
-    0xB481: 1369,
-    0xB482: 1370,
-    0xB485: 1371,
-    0xB489: 1372,
-    0xB491: 1373,
-    0xB493: 1374,
-    0xB495: 1375,
-    0xB496: 1376,
-    0xB497: 1377,
-    0xB4A1: 1378,
-    0xB4A2: 1379,
-    0xB4A5: 1380,
-    0xB4A9: 1381,
-    0xB4AC: 1382,
-    0xB4B1: 1383,
-    0xB4B3: 1384,
-    0xB4B5: 1385,
-    0xB4B7: 1386,
-    0xB4BB: 1387,
-    0xB4BD: 1388,
-    0xB4C1: 1389,
-    0xB4C5: 1390,
-    0xB4C9: 1391,
-    0xB4D3: 1392,
-    0xB4E1: 1393,
-    0xB4E2: 1394,
-    0xB4E5: 1395,
-    0xB4E6: 1396,
-    0xB4E8: 1397,
-    0xB4E9: 1398,
-    0xB4EA: 1399,
-    0xB4EB: 1400,
-    0xB4F1: 1401,
-    0xB4F3: 1402,
-    0xB4F4: 1403,
-    0xB4F5: 1404,
-    0xB4F6: 1405,
-    0xB4F7: 1406,
-    0xB4F8: 1407,
-    0xB4FA: 1408,
-    0xB4FC: 1409,
-    0xB541: 1410,
-    0xB542: 1411,
-    0xB545: 1412,
-    0xB549: 1413,
-    0xB551: 1414,
-    0xB553: 1415,
-    0xB555: 1416,
-    0xB557: 1417,
-    0xB561: 1418,
-    0xB562: 1419,
-    0xB563: 1420,
-    0xB565: 1421,
-    0xB569: 1422,
-    0xB56B: 1423,
-    0xB56C: 1424,
-    0xB571: 1425,
-    0xB573: 1426,
-    0xB574: 1427,
-    0xB575: 1428,
-    0xB576: 1429,
-    0xB577: 1430,
-    0xB57B: 1431,
-    0xB57C: 1432,
-    0xB57D: 1433,
-    0xB581: 1434,
-    0xB585: 1435,
-    0xB589: 1436,
-    0xB591: 1437,
-    0xB593: 1438,
-    0xB595: 1439,
-    0xB596: 1440,
-    0xB5A1: 1441,
-    0xB5A2: 1442,
-    0xB5A5: 1443,
-    0xB5A9: 1444,
-    0xB5AA: 1445,
-    0xB5AB: 1446,
-    0xB5AD: 1447,
-    0xB5B0: 1448,
-    0xB5B1: 1449,
-    0xB5B3: 1450,
-    0xB5B5: 1451,
-    0xB5B7: 1452,
-    0xB5B9: 1453,
-    0xB5C1: 1454,
-    0xB5C2: 1455,
-    0xB5C5: 1456,
-    0xB5C9: 1457,
-    0xB5D1: 1458,
-    0xB5D3: 1459,
-    0xB5D5: 1460,
-    0xB5D6: 1461,
-    0xB5D7: 1462,
-    0xB5E1: 1463,
-    0xB5E2: 1464,
-    0xB5E5: 1465,
-    0xB5F1: 1466,
-    0xB5F5: 1467,
-    0xB5F7: 1468,
-    0xB641: 1469,
-    0xB642: 1470,
-    0xB645: 1471,
-    0xB649: 1472,
-    0xB651: 1473,
-    0xB653: 1474,
-    0xB655: 1475,
-    0xB657: 1476,
-    0xB661: 1477,
-    0xB662: 1478,
-    0xB665: 1479,
-    0xB669: 1480,
-    0xB671: 1481,
-    0xB673: 1482,
-    0xB675: 1483,
-    0xB677: 1484,
-    0xB681: 1485,
-    0xB682: 1486,
-    0xB685: 1487,
-    0xB689: 1488,
-    0xB68A: 1489,
-    0xB68B: 1490,
-    0xB691: 1491,
-    0xB693: 1492,
-    0xB695: 1493,
-    0xB697: 1494,
-    0xB6A1: 1495,
-    0xB6A2: 1496,
-    0xB6A5: 1497,
-    0xB6A9: 1498,
-    0xB6B1: 1499,
-    0xB6B3: 1500,
-    0xB6B6: 1501,
-    0xB6B7: 1502,
-    0xB6C1: 1503,
-    0xB6C2: 1504,
-    0xB6C5: 1505,
-    0xB6C9: 1506,
-    0xB6D1: 1507,
-    0xB6D3: 1508,
-    0xB6D7: 1509,
-    0xB6E1: 1510,
-    0xB6E2: 1511,
-    0xB6E5: 1512,
-    0xB6E9: 1513,
-    0xB6F1: 1514,
-    0xB6F3: 1515,
-    0xB6F5: 1516,
-    0xB6F7: 1517,
-    0xB741: 1518,
-    0xB742: 1519,
-    0xB745: 1520,
-    0xB749: 1521,
-    0xB751: 1522,
-    0xB753: 1523,
-    0xB755: 1524,
-    0xB757: 1525,
-    0xB759: 1526,
-    0xB761: 1527,
-    0xB762: 1528,
-    0xB765: 1529,
-    0xB769: 1530,
-    0xB76F: 1531,
-    0xB771: 1532,
-    0xB773: 1533,
-    0xB775: 1534,
-    0xB777: 1535,
-    0xB778: 1536,
-    0xB779: 1537,
-    0xB77A: 1538,
-    0xB77B: 1539,
-    0xB77C: 1540,
-    0xB77D: 1541,
-    0xB781: 1542,
-    0xB785: 1543,
-    0xB789: 1544,
-    0xB791: 1545,
-    0xB795: 1546,
-    0xB7A1: 1547,
-    0xB7A2: 1548,
-    0xB7A5: 1549,
-    0xB7A9: 1550,
-    0xB7AA: 1551,
-    0xB7AB: 1552,
-    0xB7B0: 1553,
-    0xB7B1: 1554,
-    0xB7B3: 1555,
-    0xB7B5: 1556,
-    0xB7B6: 1557,
-    0xB7B7: 1558,
-    0xB7B8: 1559,
-    0xB7BC: 1560,
-    0xB861: 1561,
-    0xB862: 1562,
-    0xB865: 1563,
-    0xB867: 1564,
-    0xB868: 1565,
-    0xB869: 1566,
-    0xB86B: 1567,
-    0xB871: 1568,
-    0xB873: 1569,
-    0xB875: 1570,
-    0xB876: 1571,
-    0xB877: 1572,
-    0xB878: 1573,
-    0xB881: 1574,
-    0xB882: 1575,
-    0xB885: 1576,
-    0xB889: 1577,
-    0xB891: 1578,
-    0xB893: 1579,
-    0xB895: 1580,
-    0xB896: 1581,
-    0xB897: 1582,
-    0xB8A1: 1583,
-    0xB8A2: 1584,
-    0xB8A5: 1585,
-    0xB8A7: 1586,
-    0xB8A9: 1587,
-    0xB8B1: 1588,
-    0xB8B7: 1589,
-    0xB8C1: 1590,
-    0xB8C5: 1591,
-    0xB8C9: 1592,
-    0xB8E1: 1593,
-    0xB8E2: 1594,
-    0xB8E5: 1595,
-    0xB8E9: 1596,
-    0xB8EB: 1597,
-    0xB8F1: 1598,
-    0xB8F3: 1599,
-    0xB8F5: 1600,
-    0xB8F7: 1601,
-    0xB8F8: 1602,
-    0xB941: 1603,
-    0xB942: 1604,
-    0xB945: 1605,
-    0xB949: 1606,
-    0xB951: 1607,
-    0xB953: 1608,
-    0xB955: 1609,
-    0xB957: 1610,
-    0xB961: 1611,
-    0xB965: 1612,
-    0xB969: 1613,
-    0xB971: 1614,
-    0xB973: 1615,
-    0xB976: 1616,
-    0xB977: 1617,
-    0xB981: 1618,
-    0xB9A1: 1619,
-    0xB9A2: 1620,
-    0xB9A5: 1621,
-    0xB9A9: 1622,
-    0xB9AB: 1623,
-    0xB9B1: 1624,
-    0xB9B3: 1625,
-    0xB9B5: 1626,
-    0xB9B7: 1627,
-    0xB9B8: 1628,
-    0xB9B9: 1629,
-    0xB9BD: 1630,
-    0xB9C1: 1631,
-    0xB9C2: 1632,
-    0xB9C9: 1633,
-    0xB9D3: 1634,
-    0xB9D5: 1635,
-    0xB9D7: 1636,
-    0xB9E1: 1637,
-    0xB9F6: 1638,
-    0xB9F7: 1639,
-    0xBA41: 1640,
-    0xBA45: 1641,
-    0xBA49: 1642,
-    0xBA51: 1643,
-    0xBA53: 1644,
-    0xBA55: 1645,
-    0xBA57: 1646,
-    0xBA61: 1647,
-    0xBA62: 1648,
-    0xBA65: 1649,
-    0xBA77: 1650,
-    0xBA81: 1651,
-    0xBA82: 1652,
-    0xBA85: 1653,
-    0xBA89: 1654,
-    0xBA8A: 1655,
-    0xBA8B: 1656,
-    0xBA91: 1657,
-    0xBA93: 1658,
-    0xBA95: 1659,
-    0xBA97: 1660,
-    0xBAA1: 1661,
-    0xBAB6: 1662,
-    0xBAC1: 1663,
-    0xBAE1: 1664,
-    0xBAE2: 1665,
-    0xBAE5: 1666,
-    0xBAE9: 1667,
-    0xBAF1: 1668,
-    0xBAF3: 1669,
-    0xBAF5: 1670,
-    0xBB41: 1671,
-    0xBB45: 1672,
-    0xBB49: 1673,
-    0xBB51: 1674,
-    0xBB61: 1675,
-    0xBB62: 1676,
-    0xBB65: 1677,
-    0xBB69: 1678,
-    0xBB71: 1679,
-    0xBB73: 1680,
-    0xBB75: 1681,
-    0xBB77: 1682,
-    0xBBA1: 1683,
-    0xBBA2: 1684,
-    0xBBA5: 1685,
-    0xBBA8: 1686,
-    0xBBA9: 1687,
-    0xBBAB: 1688,
-    0xBBB1: 1689,
-    0xBBB3: 1690,
-    0xBBB5: 1691,
-    0xBBB7: 1692,
-    0xBBB8: 1693,
-    0xBBBB: 1694,
-    0xBBBC: 1695,
-    0xBC61: 1696,
-    0xBC62: 1697,
-    0xBC65: 1698,
-    0xBC67: 1699,
-    0xBC69: 1700,
-    0xBC6C: 1701,
-    0xBC71: 1702,
-    0xBC73: 1703,
-    0xBC75: 1704,
-    0xBC76: 1705,
-    0xBC77: 1706,
-    0xBC81: 1707,
-    0xBC82: 1708,
-    0xBC85: 1709,
-    0xBC89: 1710,
-    0xBC91: 1711,
-    0xBC93: 1712,
-    0xBC95: 1713,
-    0xBC96: 1714,
-    0xBC97: 1715,
-    0xBCA1: 1716,
-    0xBCA5: 1717,
-    0xBCB7: 1718,
-    0xBCE1: 1719,
-    0xBCE2: 1720,
-    0xBCE5: 1721,
-    0xBCE9: 1722,
-    0xBCF1: 1723,
-    0xBCF3: 1724,
-    0xBCF5: 1725,
-    0xBCF6: 1726,
-    0xBCF7: 1727,
-    0xBD41: 1728,
-    0xBD57: 1729,
-    0xBD61: 1730,
-    0xBD76: 1731,
-    0xBDA1: 1732,
-    0xBDA2: 1733,
-    0xBDA5: 1734,
-    0xBDA9: 1735,
-    0xBDB1: 1736,
-    0xBDB3: 1737,
-    0xBDB5: 1738,
-    0xBDB7: 1739,
-    0xBDB9: 1740,
-    0xBDC1: 1741,
-    0xBDC2: 1742,
-    0xBDC9: 1743,
-    0xBDD6: 1744,
-    0xBDE1: 1745,
-    0xBDF6: 1746,
-    0xBE41: 1747,
-    0xBE45: 1748,
-    0xBE49: 1749,
-    0xBE51: 1750,
-    0xBE53: 1751,
-    0xBE77: 1752,
-    0xBE81: 1753,
-    0xBE82: 1754,
-    0xBE85: 1755,
-    0xBE89: 1756,
-    0xBE91: 1757,
-    0xBE93: 1758,
-    0xBE97: 1759,
-    0xBEA1: 1760,
-    0xBEB6: 1761,
-    0xBEB7: 1762,
-    0xBEE1: 1763,
-    0xBF41: 1764,
-    0xBF61: 1765,
-    0xBF71: 1766,
-    0xBF75: 1767,
-    0xBF77: 1768,
-    0xBFA1: 1769,
-    0xBFA2: 1770,
-    0xBFA5: 1771,
-    0xBFA9: 1772,
-    0xBFB1: 1773,
-    0xBFB3: 1774,
-    0xBFB7: 1775,
-    0xBFB8: 1776,
-    0xBFBD: 1777,
-    0xC061: 1778,
-    0xC062: 1779,
-    0xC065: 1780,
-    0xC067: 1781,
-    0xC069: 1782,
-    0xC071: 1783,
-    0xC073: 1784,
-    0xC075: 1785,
-    0xC076: 1786,
-    0xC077: 1787,
-    0xC078: 1788,
-    0xC081: 1789,
-    0xC082: 1790,
-    0xC085: 1791,
-    0xC089: 1792,
-    0xC091: 1793,
-    0xC093: 1794,
-    0xC095: 1795,
-    0xC096: 1796,
-    0xC097: 1797,
-    0xC0A1: 1798,
-    0xC0A5: 1799,
-    0xC0A7: 1800,
-    0xC0A9: 1801,
-    0xC0B1: 1802,
-    0xC0B7: 1803,
-    0xC0E1: 1804,
-    0xC0E2: 1805,
-    0xC0E5: 1806,
-    0xC0E9: 1807,
-    0xC0F1: 1808,
-    0xC0F3: 1809,
-    0xC0F5: 1810,
-    0xC0F6: 1811,
-    0xC0F7: 1812,
-    0xC141: 1813,
-    0xC142: 1814,
-    0xC145: 1815,
-    0xC149: 1816,
-    0xC151: 1817,
-    0xC153: 1818,
-    0xC155: 1819,
-    0xC157: 1820,
-    0xC161: 1821,
-    0xC165: 1822,
-    0xC176: 1823,
-    0xC181: 1824,
-    0xC185: 1825,
-    0xC197: 1826,
-    0xC1A1: 1827,
-    0xC1A2: 1828,
-    0xC1A5: 1829,
-    0xC1A9: 1830,
-    0xC1B1: 1831,
-    0xC1B3: 1832,
-    0xC1B5: 1833,
-    0xC1B7: 1834,
-    0xC1C1: 1835,
-    0xC1C5: 1836,
-    0xC1C9: 1837,
-    0xC1D7: 1838,
-    0xC241: 1839,
-    0xC245: 1840,
-    0xC249: 1841,
-    0xC251: 1842,
-    0xC253: 1843,
-    0xC255: 1844,
-    0xC257: 1845,
-    0xC261: 1846,
-    0xC271: 1847,
-    0xC281: 1848,
-    0xC282: 1849,
-    0xC285: 1850,
-    0xC289: 1851,
-    0xC291: 1852,
-    0xC293: 1853,
-    0xC295: 1854,
-    0xC297: 1855,
-    0xC2A1: 1856,
-    0xC2B6: 1857,
-    0xC2C1: 1858,
-    0xC2C5: 1859,
-    0xC2E1: 1860,
-    0xC2E5: 1861,
-    0xC2E9: 1862,
-    0xC2F1: 1863,
-    0xC2F3: 1864,
-    0xC2F5: 1865,
-    0xC2F7: 1866,
-    0xC341: 1867,
-    0xC345: 1868,
-    0xC349: 1869,
-    0xC351: 1870,
-    0xC357: 1871,
-    0xC361: 1872,
-    0xC362: 1873,
-    0xC365: 1874,
-    0xC369: 1875,
-    0xC371: 1876,
-    0xC373: 1877,
-    0xC375: 1878,
-    0xC377: 1879,
-    0xC3A1: 1880,
-    0xC3A2: 1881,
-    0xC3A5: 1882,
-    0xC3A8: 1883,
-    0xC3A9: 1884,
-    0xC3AA: 1885,
-    0xC3B1: 1886,
-    0xC3B3: 1887,
-    0xC3B5: 1888,
-    0xC3B7: 1889,
-    0xC461: 1890,
-    0xC462: 1891,
-    0xC465: 1892,
-    0xC469: 1893,
-    0xC471: 1894,
-    0xC473: 1895,
-    0xC475: 1896,
-    0xC477: 1897,
-    0xC481: 1898,
-    0xC482: 1899,
-    0xC485: 1900,
-    0xC489: 1901,
-    0xC491: 1902,
-    0xC493: 1903,
-    0xC495: 1904,
-    0xC496: 1905,
-    0xC497: 1906,
-    0xC4A1: 1907,
-    0xC4A2: 1908,
-    0xC4B7: 1909,
-    0xC4E1: 1910,
-    0xC4E2: 1911,
-    0xC4E5: 1912,
-    0xC4E8: 1913,
-    0xC4E9: 1914,
-    0xC4F1: 1915,
-    0xC4F3: 1916,
-    0xC4F5: 1917,
-    0xC4F6: 1918,
-    0xC4F7: 1919,
-    0xC541: 1920,
-    0xC542: 1921,
-    0xC545: 1922,
-    0xC549: 1923,
-    0xC551: 1924,
-    0xC553: 1925,
-    0xC555: 1926,
-    0xC557: 1927,
-    0xC561: 1928,
-    0xC565: 1929,
-    0xC569: 1930,
-    0xC571: 1931,
-    0xC573: 1932,
-    0xC575: 1933,
-    0xC576: 1934,
-    0xC577: 1935,
-    0xC581: 1936,
-    0xC5A1: 1937,
-    0xC5A2: 1938,
-    0xC5A5: 1939,
-    0xC5A9: 1940,
-    0xC5B1: 1941,
-    0xC5B3: 1942,
-    0xC5B5: 1943,
-    0xC5B7: 1944,
-    0xC5C1: 1945,
-    0xC5C2: 1946,
-    0xC5C5: 1947,
-    0xC5C9: 1948,
-    0xC5D1: 1949,
-    0xC5D7: 1950,
-    0xC5E1: 1951,
-    0xC5F7: 1952,
-    0xC641: 1953,
-    0xC649: 1954,
-    0xC661: 1955,
-    0xC681: 1956,
-    0xC682: 1957,
-    0xC685: 1958,
-    0xC689: 1959,
-    0xC691: 1960,
-    0xC693: 1961,
-    0xC695: 1962,
-    0xC697: 1963,
-    0xC6A1: 1964,
-    0xC6A5: 1965,
-    0xC6A9: 1966,
-    0xC6B7: 1967,
-    0xC6C1: 1968,
-    0xC6D7: 1969,
-    0xC6E1: 1970,
-    0xC6E2: 1971,
-    0xC6E5: 1972,
-    0xC6E9: 1973,
-    0xC6F1: 1974,
-    0xC6F3: 1975,
-    0xC6F5: 1976,
-    0xC6F7: 1977,
-    0xC741: 1978,
-    0xC745: 1979,
-    0xC749: 1980,
-    0xC751: 1981,
-    0xC761: 1982,
-    0xC762: 1983,
-    0xC765: 1984,
-    0xC769: 1985,
-    0xC771: 1986,
-    0xC773: 1987,
-    0xC777: 1988,
-    0xC7A1: 1989,
-    0xC7A2: 1990,
-    0xC7A5: 1991,
-    0xC7A9: 1992,
-    0xC7B1: 1993,
-    0xC7B3: 1994,
-    0xC7B5: 1995,
-    0xC7B7: 1996,
-    0xC861: 1997,
-    0xC862: 1998,
-    0xC865: 1999,
-    0xC869: 2000,
-    0xC86A: 2001,
-    0xC871: 2002,
-    0xC873: 2003,
-    0xC875: 2004,
-    0xC876: 2005,
-    0xC877: 2006,
-    0xC881: 2007,
-    0xC882: 2008,
-    0xC885: 2009,
-    0xC889: 2010,
-    0xC891: 2011,
-    0xC893: 2012,
-    0xC895: 2013,
-    0xC896: 2014,
-    0xC897: 2015,
-    0xC8A1: 2016,
-    0xC8B7: 2017,
-    0xC8E1: 2018,
-    0xC8E2: 2019,
-    0xC8E5: 2020,
-    0xC8E9: 2021,
-    0xC8EB: 2022,
-    0xC8F1: 2023,
-    0xC8F3: 2024,
-    0xC8F5: 2025,
-    0xC8F6: 2026,
-    0xC8F7: 2027,
-    0xC941: 2028,
-    0xC942: 2029,
-    0xC945: 2030,
-    0xC949: 2031,
-    0xC951: 2032,
-    0xC953: 2033,
-    0xC955: 2034,
-    0xC957: 2035,
-    0xC961: 2036,
-    0xC965: 2037,
-    0xC976: 2038,
-    0xC981: 2039,
-    0xC985: 2040,
-    0xC9A1: 2041,
-    0xC9A2: 2042,
-    0xC9A5: 2043,
-    0xC9A9: 2044,
-    0xC9B1: 2045,
-    0xC9B3: 2046,
-    0xC9B5: 2047,
-    0xC9B7: 2048,
-    0xC9BC: 2049,
-    0xC9C1: 2050,
-    0xC9C5: 2051,
-    0xC9E1: 2052,
-    0xCA41: 2053,
-    0xCA45: 2054,
-    0xCA55: 2055,
-    0xCA57: 2056,
-    0xCA61: 2057,
-    0xCA81: 2058,
-    0xCA82: 2059,
-    0xCA85: 2060,
-    0xCA89: 2061,
-    0xCA91: 2062,
-    0xCA93: 2063,
-    0xCA95: 2064,
-    0xCA97: 2065,
-    0xCAA1: 2066,
-    0xCAB6: 2067,
-    0xCAC1: 2068,
-    0xCAE1: 2069,
-    0xCAE2: 2070,
-    0xCAE5: 2071,
-    0xCAE9: 2072,
-    0xCAF1: 2073,
-    0xCAF3: 2074,
-    0xCAF7: 2075,
-    0xCB41: 2076,
-    0xCB45: 2077,
-    0xCB49: 2078,
-    0xCB51: 2079,
-    0xCB57: 2080,
-    0xCB61: 2081,
-    0xCB62: 2082,
-    0xCB65: 2083,
-    0xCB68: 2084,
-    0xCB69: 2085,
-    0xCB6B: 2086,
-    0xCB71: 2087,
-    0xCB73: 2088,
-    0xCB75: 2089,
-    0xCB81: 2090,
-    0xCB85: 2091,
-    0xCB89: 2092,
-    0xCB91: 2093,
-    0xCB93: 2094,
-    0xCBA1: 2095,
-    0xCBA2: 2096,
-    0xCBA5: 2097,
-    0xCBA9: 2098,
-    0xCBB1: 2099,
-    0xCBB3: 2100,
-    0xCBB5: 2101,
-    0xCBB7: 2102,
-    0xCC61: 2103,
-    0xCC62: 2104,
-    0xCC63: 2105,
-    0xCC65: 2106,
-    0xCC69: 2107,
-    0xCC6B: 2108,
-    0xCC71: 2109,
-    0xCC73: 2110,
-    0xCC75: 2111,
-    0xCC76: 2112,
-    0xCC77: 2113,
-    0xCC7B: 2114,
-    0xCC81: 2115,
-    0xCC82: 2116,
-    0xCC85: 2117,
-    0xCC89: 2118,
-    0xCC91: 2119,
-    0xCC93: 2120,
-    0xCC95: 2121,
-    0xCC96: 2122,
-    0xCC97: 2123,
-    0xCCA1: 2124,
-    0xCCA2: 2125,
-    0xCCE1: 2126,
-    0xCCE2: 2127,
-    0xCCE5: 2128,
-    0xCCE9: 2129,
-    0xCCF1: 2130,
-    0xCCF3: 2131,
-    0xCCF5: 2132,
-    0xCCF6: 2133,
-    0xCCF7: 2134,
-    0xCD41: 2135,
-    0xCD42: 2136,
-    0xCD45: 2137,
-    0xCD49: 2138,
-    0xCD51: 2139,
-    0xCD53: 2140,
-    0xCD55: 2141,
-    0xCD57: 2142,
-    0xCD61: 2143,
-    0xCD65: 2144,
-    0xCD69: 2145,
-    0xCD71: 2146,
-    0xCD73: 2147,
-    0xCD76: 2148,
-    0xCD77: 2149,
-    0xCD81: 2150,
-    0xCD89: 2151,
-    0xCD93: 2152,
-    0xCD95: 2153,
-    0xCDA1: 2154,
-    0xCDA2: 2155,
-    0xCDA5: 2156,
-    0xCDA9: 2157,
-    0xCDB1: 2158,
-    0xCDB3: 2159,
-    0xCDB5: 2160,
-    0xCDB7: 2161,
-    0xCDC1: 2162,
-    0xCDD7: 2163,
-    0xCE41: 2164,
-    0xCE45: 2165,
-    0xCE61: 2166,
-    0xCE65: 2167,
-    0xCE69: 2168,
-    0xCE73: 2169,
-    0xCE75: 2170,
-    0xCE81: 2171,
-    0xCE82: 2172,
-    0xCE85: 2173,
-    0xCE88: 2174,
-    0xCE89: 2175,
-    0xCE8B: 2176,
-    0xCE91: 2177,
-    0xCE93: 2178,
-    0xCE95: 2179,
-    0xCE97: 2180,
-    0xCEA1: 2181,
-    0xCEB7: 2182,
-    0xCEE1: 2183,
-    0xCEE5: 2184,
-    0xCEE9: 2185,
-    0xCEF1: 2186,
-    0xCEF5: 2187,
-    0xCF41: 2188,
-    0xCF45: 2189,
-    0xCF49: 2190,
-    0xCF51: 2191,
-    0xCF55: 2192,
-    0xCF57: 2193,
-    0xCF61: 2194,
-    0xCF65: 2195,
-    0xCF69: 2196,
-    0xCF71: 2197,
-    0xCF73: 2198,
-    0xCF75: 2199,
-    0xCFA1: 2200,
-    0xCFA2: 2201,
-    0xCFA5: 2202,
-    0xCFA9: 2203,
-    0xCFB1: 2204,
-    0xCFB3: 2205,
-    0xCFB5: 2206,
-    0xCFB7: 2207,
-    0xD061: 2208,
-    0xD062: 2209,
-    0xD065: 2210,
-    0xD069: 2211,
-    0xD06E: 2212,
-    0xD071: 2213,
-    0xD073: 2214,
-    0xD075: 2215,
-    0xD077: 2216,
-    0xD081: 2217,
-    0xD082: 2218,
-    0xD085: 2219,
-    0xD089: 2220,
-    0xD091: 2221,
-    0xD093: 2222,
-    0xD095: 2223,
-    0xD096: 2224,
-    0xD097: 2225,
-    0xD0A1: 2226,
-    0xD0B7: 2227,
-    0xD0E1: 2228,
-    0xD0E2: 2229,
-    0xD0E5: 2230,
-    0xD0E9: 2231,
-    0xD0EB: 2232,
-    0xD0F1: 2233,
-    0xD0F3: 2234,
-    0xD0F5: 2235,
-    0xD0F7: 2236,
-    0xD141: 2237,
-    0xD142: 2238,
-    0xD145: 2239,
-    0xD149: 2240,
-    0xD151: 2241,
-    0xD153: 2242,
-    0xD155: 2243,
-    0xD157: 2244,
-    0xD161: 2245,
-    0xD162: 2246,
-    0xD165: 2247,
-    0xD169: 2248,
-    0xD171: 2249,
-    0xD173: 2250,
-    0xD175: 2251,
-    0xD176: 2252,
-    0xD177: 2253,
-    0xD181: 2254,
-    0xD185: 2255,
-    0xD189: 2256,
-    0xD193: 2257,
-    0xD1A1: 2258,
-    0xD1A2: 2259,
-    0xD1A5: 2260,
-    0xD1A9: 2261,
-    0xD1AE: 2262,
-    0xD1B1: 2263,
-    0xD1B3: 2264,
-    0xD1B5: 2265,
-    0xD1B7: 2266,
-    0xD1BB: 2267,
-    0xD1C1: 2268,
-    0xD1C2: 2269,
-    0xD1C5: 2270,
-    0xD1C9: 2271,
-    0xD1D5: 2272,
-    0xD1D7: 2273,
-    0xD1E1: 2274,
-    0xD1E2: 2275,
-    0xD1E5: 2276,
-    0xD1F5: 2277,
-    0xD1F7: 2278,
-    0xD241: 2279,
-    0xD242: 2280,
-    0xD245: 2281,
-    0xD249: 2282,
-    0xD253: 2283,
-    0xD255: 2284,
-    0xD257: 2285,
-    0xD261: 2286,
-    0xD265: 2287,
-    0xD269: 2288,
-    0xD273: 2289,
-    0xD275: 2290,
-    0xD281: 2291,
-    0xD282: 2292,
-    0xD285: 2293,
-    0xD289: 2294,
-    0xD28E: 2295,
-    0xD291: 2296,
-    0xD295: 2297,
-    0xD297: 2298,
-    0xD2A1: 2299,
-    0xD2A5: 2300,
-    0xD2A9: 2301,
-    0xD2B1: 2302,
-    0xD2B7: 2303,
-    0xD2C1: 2304,
-    0xD2C2: 2305,
-    0xD2C5: 2306,
-    0xD2C9: 2307,
-    0xD2D7: 2308,
-    0xD2E1: 2309,
-    0xD2E2: 2310,
-    0xD2E5: 2311,
-    0xD2E9: 2312,
-    0xD2F1: 2313,
-    0xD2F3: 2314,
-    0xD2F5: 2315,
-    0xD2F7: 2316,
-    0xD341: 2317,
-    0xD342: 2318,
-    0xD345: 2319,
-    0xD349: 2320,
-    0xD351: 2321,
-    0xD355: 2322,
-    0xD357: 2323,
-    0xD361: 2324,
-    0xD362: 2325,
-    0xD365: 2326,
-    0xD367: 2327,
-    0xD368: 2328,
-    0xD369: 2329,
-    0xD36A: 2330,
-    0xD371: 2331,
-    0xD373: 2332,
-    0xD375: 2333,
-    0xD377: 2334,
-    0xD37B: 2335,
-    0xD381: 2336,
-    0xD385: 2337,
-    0xD389: 2338,
-    0xD391: 2339,
-    0xD393: 2340,
-    0xD397: 2341,
-    0xD3A1: 2342,
-    0xD3A2: 2343,
-    0xD3A5: 2344,
-    0xD3A9: 2345,
-    0xD3B1: 2346,
-    0xD3B3: 2347,
-    0xD3B5: 2348,
-    0xD3B7: 2349,
-}
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/johabprober.py b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/johabprober.py
deleted file mode 100644
index 6f359d1..0000000
--- a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/johabprober.py
+++ /dev/null
@@ -1,47 +0,0 @@
-######################## BEGIN LICENSE BLOCK ########################
-# The Original Code is mozilla.org code.
-#
-# The Initial Developer of the Original Code is
-# Netscape Communications Corporation.
-# Portions created by the Initial Developer are Copyright (C) 1998
-# the Initial Developer. All Rights Reserved.
-#
-# Contributor(s):
-#   Mark Pilgrim - port to Python
-#
-# This library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License, or (at your option) any later version.
-#
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-# Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
-# 02110-1301  USA
-######################### END LICENSE BLOCK #########################
-
-from .chardistribution import JOHABDistributionAnalysis
-from .codingstatemachine import CodingStateMachine
-from .mbcharsetprober import MultiByteCharSetProber
-from .mbcssm import JOHAB_SM_MODEL
-
-
-class JOHABProber(MultiByteCharSetProber):
-    def __init__(self):
-        super().__init__()
-        self.coding_sm = CodingStateMachine(JOHAB_SM_MODEL)
-        self.distribution_analyzer = JOHABDistributionAnalysis()
-        self.reset()
-
-    @property
-    def charset_name(self):
-        return "Johab"
-
-    @property
-    def language(self):
-        return "Korean"
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/jpcntx.py b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/jpcntx.py
index 7a8e5be..20044e4 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/jpcntx.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/jpcntx.py
@@ -27,96 +27,93 @@
 
 
 # This is the hiragana 2-char sequence table; the number in each cell represents its frequency category
-# fmt: off
-jp2_char_context = (
-    (0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1),
-    (2, 4, 0, 4, 0, 3, 0, 4, 0, 3, 4, 4, 4, 2, 4, 3, 3, 4, 3, 2, 3, 3, 4, 2, 3, 3, 3, 2, 4, 1, 4, 3, 3, 1, 5, 4, 3, 4, 3, 4, 3, 5, 3, 0, 3, 5, 4, 2, 0, 3, 1, 0, 3, 3, 0, 3, 3, 0, 1, 1, 0, 4, 3, 0, 3, 3, 0, 4, 0, 2, 0, 3, 5, 5, 5, 5, 4, 0, 4, 1, 0, 3, 4),
-    (0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2),
-    (0, 4, 0, 5, 0, 5, 0, 4, 0, 4, 5, 4, 4, 3, 5, 3, 5, 1, 5, 3, 4, 3, 4, 4, 3, 4, 3, 3, 4, 3, 5, 4, 4, 3, 5, 5, 3, 5, 5, 5, 3, 5, 5, 3, 4, 5, 5, 3, 1, 3, 2, 0, 3, 4, 0, 4, 2, 0, 4, 2, 1, 5, 3, 2, 3, 5, 0, 4, 0, 2, 0, 5, 4, 4, 5, 4, 5, 0, 4, 0, 0, 4, 4),
-    (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
-    (0, 3, 0, 4, 0, 3, 0, 3, 0, 4, 5, 4, 3, 3, 3, 3, 4, 3, 5, 4, 4, 3, 5, 4, 4, 3, 4, 3, 4, 4, 4, 4, 5, 3, 4, 4, 3, 4, 5, 5, 4, 5, 5, 1, 4, 5, 4, 3, 0, 3, 3, 1, 3, 3, 0, 4, 4, 0, 3, 3, 1, 5, 3, 3, 3, 5, 0, 4, 0, 3, 0, 4, 4, 3, 4, 3, 3, 0, 4, 1, 1, 3, 4),
-    (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
-    (0, 4, 0, 3, 0, 3, 0, 4, 0, 3, 4, 4, 3, 2, 2, 1, 2, 1, 3, 1, 3, 3, 3, 3, 3, 4, 3, 1, 3, 3, 5, 3, 3, 0, 4, 3, 0, 5, 4, 3, 3, 5, 4, 4, 3, 4, 4, 5, 0, 1, 2, 0, 1, 2, 0, 2, 2, 0, 1, 0, 0, 5, 2, 2, 1, 4, 0, 3, 0, 1, 0, 4, 4, 3, 5, 4, 3, 0, 2, 1, 0, 4, 3),
-    (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
-    (0, 3, 0, 5, 0, 4, 0, 2, 1, 4, 4, 2, 4, 1, 4, 2, 4, 2, 4, 3, 3, 3, 4, 3, 3, 3, 3, 1, 4, 2, 3, 3, 3, 1, 4, 4, 1, 1, 1, 4, 3, 3, 2, 0, 2, 4, 3, 2, 0, 3, 3, 0, 3, 1, 1, 0, 0, 0, 3, 3, 0, 4, 2, 2, 3, 4, 0, 4, 0, 3, 0, 4, 4, 5, 3, 4, 4, 0, 3, 0, 0, 1, 4),
-    (1, 4, 0, 4, 0, 4, 0, 4, 0, 3, 5, 4, 4, 3, 4, 3, 5, 4, 3, 3, 4, 3, 5, 4, 4, 4, 4, 3, 4, 2, 4, 3, 3, 1, 5, 4, 3, 2, 4, 5, 4, 5, 5, 4, 4, 5, 4, 4, 0, 3, 2, 2, 3, 3, 0, 4, 3, 1, 3, 2, 1, 4, 3, 3, 4, 5, 0, 3, 0, 2, 0, 4, 5, 5, 4, 5, 4, 0, 4, 0, 0, 5, 4),
-    (0, 5, 0, 5, 0, 4, 0, 3, 0, 4, 4, 3, 4, 3, 3, 3, 4, 0, 4, 4, 4, 3, 4, 3, 4, 3, 3, 1, 4, 2, 4, 3, 4, 0, 5, 4, 1, 4, 5, 4, 4, 5, 3, 2, 4, 3, 4, 3, 2, 4, 1, 3, 3, 3, 2, 3, 2, 0, 4, 3, 3, 4, 3, 3, 3, 4, 0, 4, 0, 3, 0, 4, 5, 4, 4, 4, 3, 0, 4, 1, 0, 1, 3),
-    (0, 3, 1, 4, 0, 3, 0, 2, 0, 3, 4, 4, 3, 1, 4, 2, 3, 3, 4, 3, 4, 3, 4, 3, 4, 4, 3, 2, 3, 1, 5, 4, 4, 1, 4, 4, 3, 5, 4, 4, 3, 5, 5, 4, 3, 4, 4, 3, 1, 2, 3, 1, 2, 2, 0, 3, 2, 0, 3, 1, 0, 5, 3, 3, 3, 4, 3, 3, 3, 3, 4, 4, 4, 4, 5, 4, 2, 0, 3, 3, 2, 4, 3),
-    (0, 2, 0, 3, 0, 1, 0, 1, 0, 0, 3, 2, 0, 0, 2, 0, 1, 0, 2, 1, 3, 3, 3, 1, 2, 3, 1, 0, 1, 0, 4, 2, 1, 1, 3, 3, 0, 4, 3, 3, 1, 4, 3, 3, 0, 3, 3, 2, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 4, 1, 0, 2, 3, 2, 2, 2, 1, 3, 3, 3, 4, 4, 3, 2, 0, 3, 1, 0, 3, 3),
-    (0, 4, 0, 4, 0, 3, 0, 3, 0, 4, 4, 4, 3, 3, 3, 3, 3, 3, 4, 3, 4, 2, 4, 3, 4, 3, 3, 2, 4, 3, 4, 5, 4, 1, 4, 5, 3, 5, 4, 5, 3, 5, 4, 0, 3, 5, 5, 3, 1, 3, 3, 2, 2, 3, 0, 3, 4, 1, 3, 3, 2, 4, 3, 3, 3, 4, 0, 4, 0, 3, 0, 4, 5, 4, 4, 5, 3, 0, 4, 1, 0, 3, 4),
-    (0, 2, 0, 3, 0, 3, 0, 0, 0, 2, 2, 2, 1, 0, 1, 0, 0, 0, 3, 0, 3, 0, 3, 0, 1, 3, 1, 0, 3, 1, 3, 3, 3, 1, 3, 3, 3, 0, 1, 3, 1, 3, 4, 0, 0, 3, 1, 1, 0, 3, 2, 0, 0, 0, 0, 1, 3, 0, 1, 0, 0, 3, 3, 2, 0, 3, 0, 0, 0, 0, 0, 3, 4, 3, 4, 3, 3, 0, 3, 0, 0, 2, 3),
-    (2, 3, 0, 3, 0, 2, 0, 1, 0, 3, 3, 4, 3, 1, 3, 1, 1, 1, 3, 1, 4, 3, 4, 3, 3, 3, 0, 0, 3, 1, 5, 4, 3, 1, 4, 3, 2, 5, 5, 4, 4, 4, 4, 3, 3, 4, 4, 4, 0, 2, 1, 1, 3, 2, 0, 1, 2, 0, 0, 1, 0, 4, 1, 3, 3, 3, 0, 3, 0, 1, 0, 4, 4, 4, 5, 5, 3, 0, 2, 0, 0, 4, 4),
-    (0, 2, 0, 1, 0, 3, 1, 3, 0, 2, 3, 3, 3, 0, 3, 1, 0, 0, 3, 0, 3, 2, 3, 1, 3, 2, 1, 1, 0, 0, 4, 2, 1, 0, 2, 3, 1, 4, 3, 2, 0, 4, 4, 3, 1, 3, 1, 3, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 4, 1, 1, 1, 2, 0, 3, 0, 0, 0, 3, 4, 2, 4, 3, 2, 0, 1, 0, 0, 3, 3),
-    (0, 1, 0, 4, 0, 5, 0, 4, 0, 2, 4, 4, 2, 3, 3, 2, 3, 3, 5, 3, 3, 3, 4, 3, 4, 2, 3, 0, 4, 3, 3, 3, 4, 1, 4, 3, 2, 1, 5, 5, 3, 4, 5, 1, 3, 5, 4, 2, 0, 3, 3, 0, 1, 3, 0, 4, 2, 0, 1, 3, 1, 4, 3, 3, 3, 3, 0, 3, 0, 1, 0, 3, 4, 4, 4, 5, 5, 0, 3, 0, 1, 4, 5),
-    (0, 2, 0, 3, 0, 3, 0, 0, 0, 2, 3, 1, 3, 0, 4, 0, 1, 1, 3, 0, 3, 4, 3, 2, 3, 1, 0, 3, 3, 2, 3, 1, 3, 0, 2, 3, 0, 2, 1, 4, 1, 2, 2, 0, 0, 3, 3, 0, 0, 2, 0, 0, 0, 1, 0, 0, 0, 0, 2, 2, 0, 3, 2, 1, 3, 3, 0, 2, 0, 2, 0, 0, 3, 3, 1, 2, 4, 0, 3, 0, 2, 2, 3),
-    (2, 4, 0, 5, 0, 4, 0, 4, 0, 2, 4, 4, 4, 3, 4, 3, 3, 3, 1, 2, 4, 3, 4, 3, 4, 4, 5, 0, 3, 3, 3, 3, 2, 0, 4, 3, 1, 4, 3, 4, 1, 4, 4, 3, 3, 4, 4, 3, 1, 2, 3, 0, 4, 2, 0, 4, 1, 0, 3, 3, 0, 4, 3, 3, 3, 4, 0, 4, 0, 2, 0, 3, 5, 3, 4, 5, 2, 0, 3, 0, 0, 4, 5),
-    (0, 3, 0, 4, 0, 1, 0, 1, 0, 1, 3, 2, 2, 1, 3, 0, 3, 0, 2, 0, 2, 0, 3, 0, 2, 0, 0, 0, 1, 0, 1, 1, 0, 0, 3, 1, 0, 0, 0, 4, 0, 3, 1, 0, 2, 1, 3, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 2, 2, 3, 1, 0, 3, 0, 0, 0, 1, 4, 4, 4, 3, 0, 0, 4, 0, 0, 1, 4),
-    (1, 4, 1, 5, 0, 3, 0, 3, 0, 4, 5, 4, 4, 3, 5, 3, 3, 4, 4, 3, 4, 1, 3, 3, 3, 3, 2, 1, 4, 1, 5, 4, 3, 1, 4, 4, 3, 5, 4, 4, 3, 5, 4, 3, 3, 4, 4, 4, 0, 3, 3, 1, 2, 3, 0, 3, 1, 0, 3, 3, 0, 5, 4, 4, 4, 4, 4, 4, 3, 3, 5, 4, 4, 3, 3, 5, 4, 0, 3, 2, 0, 4, 4),
-    (0, 2, 0, 3, 0, 1, 0, 0, 0, 1, 3, 3, 3, 2, 4, 1, 3, 0, 3, 1, 3, 0, 2, 2, 1, 1, 0, 0, 2, 0, 4, 3, 1, 0, 4, 3, 0, 4, 4, 4, 1, 4, 3, 1, 1, 3, 3, 1, 0, 2, 0, 0, 1, 3, 0, 0, 0, 0, 2, 0, 0, 4, 3, 2, 4, 3, 5, 4, 3, 3, 3, 4, 3, 3, 4, 3, 3, 0, 2, 1, 0, 3, 3),
-    (0, 2, 0, 4, 0, 3, 0, 2, 0, 2, 5, 5, 3, 4, 4, 4, 4, 1, 4, 3, 3, 0, 4, 3, 4, 3, 1, 3, 3, 2, 4, 3, 0, 3, 4, 3, 0, 3, 4, 4, 2, 4, 4, 0, 4, 5, 3, 3, 2, 2, 1, 1, 1, 2, 0, 1, 5, 0, 3, 3, 2, 4, 3, 3, 3, 4, 0, 3, 0, 2, 0, 4, 4, 3, 5, 5, 0, 0, 3, 0, 2, 3, 3),
-    (0, 3, 0, 4, 0, 3, 0, 1, 0, 3, 4, 3, 3, 1, 3, 3, 3, 0, 3, 1, 3, 0, 4, 3, 3, 1, 1, 0, 3, 0, 3, 3, 0, 0, 4, 4, 0, 1, 5, 4, 3, 3, 5, 0, 3, 3, 4, 3, 0, 2, 0, 1, 1, 1, 0, 1, 3, 0, 1, 2, 1, 3, 3, 2, 3, 3, 0, 3, 0, 1, 0, 1, 3, 3, 4, 4, 1, 0, 1, 2, 2, 1, 3),
-    (0, 1, 0, 4, 0, 4, 0, 3, 0, 1, 3, 3, 3, 2, 3, 1, 1, 0, 3, 0, 3, 3, 4, 3, 2, 4, 2, 0, 1, 0, 4, 3, 2, 0, 4, 3, 0, 5, 3, 3, 2, 4, 4, 4, 3, 3, 3, 4, 0, 1, 3, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 4, 2, 3, 3, 3, 0, 3, 0, 0, 0, 4, 4, 4, 5, 3, 2, 0, 3, 3, 0, 3, 5),
-    (0, 2, 0, 3, 0, 0, 0, 3, 0, 1, 3, 0, 2, 0, 0, 0, 1, 0, 3, 1, 1, 3, 3, 0, 0, 3, 0, 0, 3, 0, 2, 3, 1, 0, 3, 1, 0, 3, 3, 2, 0, 4, 2, 2, 0, 2, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 2, 0, 1, 0, 1, 0, 0, 0, 1, 3, 1, 2, 0, 0, 0, 1, 0, 0, 1, 4),
-    (0, 3, 0, 3, 0, 5, 0, 1, 0, 2, 4, 3, 1, 3, 3, 2, 1, 1, 5, 2, 1, 0, 5, 1, 2, 0, 0, 0, 3, 3, 2, 2, 3, 2, 4, 3, 0, 0, 3, 3, 1, 3, 3, 0, 2, 5, 3, 4, 0, 3, 3, 0, 1, 2, 0, 2, 2, 0, 3, 2, 0, 2, 2, 3, 3, 3, 0, 2, 0, 1, 0, 3, 4, 4, 2, 5, 4, 0, 3, 0, 0, 3, 5),
-    (0, 3, 0, 3, 0, 3, 0, 1, 0, 3, 3, 3, 3, 0, 3, 0, 2, 0, 2, 1, 1, 0, 2, 0, 1, 0, 0, 0, 2, 1, 0, 0, 1, 0, 3, 2, 0, 0, 3, 3, 1, 2, 3, 1, 0, 3, 3, 0, 0, 1, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 2, 3, 1, 2, 3, 0, 3, 0, 1, 0, 3, 2, 1, 0, 4, 3, 0, 1, 1, 0, 3, 3),
-    (0, 4, 0, 5, 0, 3, 0, 3, 0, 4, 5, 5, 4, 3, 5, 3, 4, 3, 5, 3, 3, 2, 5, 3, 4, 4, 4, 3, 4, 3, 4, 5, 5, 3, 4, 4, 3, 4, 4, 5, 4, 4, 4, 3, 4, 5, 5, 4, 2, 3, 4, 2, 3, 4, 0, 3, 3, 1, 4, 3, 2, 4, 3, 3, 5, 5, 0, 3, 0, 3, 0, 5, 5, 5, 5, 4, 4, 0, 4, 0, 1, 4, 4),
-    (0, 4, 0, 4, 0, 3, 0, 3, 0, 3, 5, 4, 4, 2, 3, 2, 5, 1, 3, 2, 5, 1, 4, 2, 3, 2, 3, 3, 4, 3, 3, 3, 3, 2, 5, 4, 1, 3, 3, 5, 3, 4, 4, 0, 4, 4, 3, 1, 1, 3, 1, 0, 2, 3, 0, 2, 3, 0, 3, 0, 0, 4, 3, 1, 3, 4, 0, 3, 0, 2, 0, 4, 4, 4, 3, 4, 5, 0, 4, 0, 0, 3, 4),
-    (0, 3, 0, 3, 0, 3, 1, 2, 0, 3, 4, 4, 3, 3, 3, 0, 2, 2, 4, 3, 3, 1, 3, 3, 3, 1, 1, 0, 3, 1, 4, 3, 2, 3, 4, 4, 2, 4, 4, 4, 3, 4, 4, 3, 2, 4, 4, 3, 1, 3, 3, 1, 3, 3, 0, 4, 1, 0, 2, 2, 1, 4, 3, 2, 3, 3, 5, 4, 3, 3, 5, 4, 4, 3, 3, 0, 4, 0, 3, 2, 2, 4, 4),
-    (0, 2, 0, 1, 0, 0, 0, 0, 0, 1, 2, 1, 3, 0, 0, 0, 0, 0, 2, 0, 1, 2, 1, 0, 0, 1, 0, 0, 0, 0, 3, 0, 0, 1, 0, 1, 1, 3, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 0, 3, 4, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1),
-    (0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 4, 0, 4, 1, 4, 0, 3, 0, 4, 0, 3, 0, 4, 0, 3, 0, 3, 0, 4, 1, 5, 1, 4, 0, 0, 3, 0, 5, 0, 5, 2, 0, 1, 0, 0, 0, 2, 1, 4, 0, 1, 3, 0, 0, 3, 0, 0, 3, 1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0),
-    (1, 4, 0, 5, 0, 3, 0, 2, 0, 3, 5, 4, 4, 3, 4, 3, 5, 3, 4, 3, 3, 0, 4, 3, 3, 3, 3, 3, 3, 2, 4, 4, 3, 1, 3, 4, 4, 5, 4, 4, 3, 4, 4, 1, 3, 5, 4, 3, 3, 3, 1, 2, 2, 3, 3, 1, 3, 1, 3, 3, 3, 5, 3, 3, 4, 5, 0, 3, 0, 3, 0, 3, 4, 3, 4, 4, 3, 0, 3, 0, 2, 4, 3),
-    (0, 1, 0, 4, 0, 0, 0, 0, 0, 1, 4, 0, 4, 1, 4, 2, 4, 0, 3, 0, 1, 0, 1, 0, 0, 0, 0, 0, 2, 0, 3, 1, 1, 1, 0, 3, 0, 0, 0, 1, 2, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 3, 0, 0, 0, 0, 3, 2, 0, 2, 2, 0, 1, 0, 0, 0, 2, 3, 2, 3, 3, 0, 0, 0, 0, 2, 1, 0),
-    (0, 5, 1, 5, 0, 3, 0, 3, 0, 5, 4, 4, 5, 1, 5, 3, 3, 0, 4, 3, 4, 3, 5, 3, 4, 3, 3, 2, 4, 3, 4, 3, 3, 0, 3, 3, 1, 4, 4, 3, 4, 4, 4, 3, 4, 5, 5, 3, 2, 3, 1, 1, 3, 3, 1, 3, 1, 1, 3, 3, 2, 4, 5, 3, 3, 5, 0, 4, 0, 3, 0, 4, 4, 3, 5, 3, 3, 0, 3, 4, 0, 4, 3),
-    (0, 5, 0, 5, 0, 3, 0, 2, 0, 4, 4, 3, 5, 2, 4, 3, 3, 3, 4, 4, 4, 3, 5, 3, 5, 3, 3, 1, 4, 0, 4, 3, 3, 0, 3, 3, 0, 4, 4, 4, 4, 5, 4, 3, 3, 5, 5, 3, 2, 3, 1, 2, 3, 2, 0, 1, 0, 0, 3, 2, 2, 4, 4, 3, 1, 5, 0, 4, 0, 3, 0, 4, 3, 1, 3, 2, 1, 0, 3, 3, 0, 3, 3),
-    (0, 4, 0, 5, 0, 5, 0, 4, 0, 4, 5, 5, 5, 3, 4, 3, 3, 2, 5, 4, 4, 3, 5, 3, 5, 3, 4, 0, 4, 3, 4, 4, 3, 2, 4, 4, 3, 4, 5, 4, 4, 5, 5, 0, 3, 5, 5, 4, 1, 3, 3, 2, 3, 3, 1, 3, 1, 0, 4, 3, 1, 4, 4, 3, 4, 5, 0, 4, 0, 2, 0, 4, 3, 4, 4, 3, 3, 0, 4, 0, 0, 5, 5),
-    (0, 4, 0, 4, 0, 5, 0, 1, 1, 3, 3, 4, 4, 3, 4, 1, 3, 0, 5, 1, 3, 0, 3, 1, 3, 1, 1, 0, 3, 0, 3, 3, 4, 0, 4, 3, 0, 4, 4, 4, 3, 4, 4, 0, 3, 5, 4, 1, 0, 3, 0, 0, 2, 3, 0, 3, 1, 0, 3, 1, 0, 3, 2, 1, 3, 5, 0, 3, 0, 1, 0, 3, 2, 3, 3, 4, 4, 0, 2, 2, 0, 4, 4),
-    (2, 4, 0, 5, 0, 4, 0, 3, 0, 4, 5, 5, 4, 3, 5, 3, 5, 3, 5, 3, 5, 2, 5, 3, 4, 3, 3, 4, 3, 4, 5, 3, 2, 1, 5, 4, 3, 2, 3, 4, 5, 3, 4, 1, 2, 5, 4, 3, 0, 3, 3, 0, 3, 2, 0, 2, 3, 0, 4, 1, 0, 3, 4, 3, 3, 5, 0, 3, 0, 1, 0, 4, 5, 5, 5, 4, 3, 0, 4, 2, 0, 3, 5),
-    (0, 5, 0, 4, 0, 4, 0, 2, 0, 5, 4, 3, 4, 3, 4, 3, 3, 3, 4, 3, 4, 2, 5, 3, 5, 3, 4, 1, 4, 3, 4, 4, 4, 0, 3, 5, 0, 4, 4, 4, 4, 5, 3, 1, 3, 4, 5, 3, 3, 3, 3, 3, 3, 3, 0, 2, 2, 0, 3, 3, 2, 4, 3, 3, 3, 5, 3, 4, 1, 3, 3, 5, 3, 2, 0, 0, 0, 0, 4, 3, 1, 3, 3),
-    (0, 1, 0, 3, 0, 3, 0, 1, 0, 1, 3, 3, 3, 2, 3, 3, 3, 0, 3, 0, 0, 0, 3, 1, 3, 0, 0, 0, 2, 2, 2, 3, 0, 0, 3, 2, 0, 1, 2, 4, 1, 3, 3, 0, 0, 3, 3, 3, 0, 1, 0, 0, 2, 1, 0, 0, 3, 0, 3, 1, 0, 3, 0, 0, 1, 3, 0, 2, 0, 1, 0, 3, 3, 1, 3, 3, 0, 0, 1, 1, 0, 3, 3),
-    (0, 2, 0, 3, 0, 2, 1, 4, 0, 2, 2, 3, 1, 1, 3, 1, 1, 0, 2, 0, 3, 1, 2, 3, 1, 3, 0, 0, 1, 0, 4, 3, 2, 3, 3, 3, 1, 4, 2, 3, 3, 3, 3, 1, 0, 3, 1, 4, 0, 1, 1, 0, 1, 2, 0, 1, 1, 0, 1, 1, 0, 3, 1, 3, 2, 2, 0, 1, 0, 0, 0, 2, 3, 3, 3, 1, 0, 0, 0, 0, 0, 2, 3),
-    (0, 5, 0, 4, 0, 5, 0, 2, 0, 4, 5, 5, 3, 3, 4, 3, 3, 1, 5, 4, 4, 2, 4, 4, 4, 3, 4, 2, 4, 3, 5, 5, 4, 3, 3, 4, 3, 3, 5, 5, 4, 5, 5, 1, 3, 4, 5, 3, 1, 4, 3, 1, 3, 3, 0, 3, 3, 1, 4, 3, 1, 4, 5, 3, 3, 5, 0, 4, 0, 3, 0, 5, 3, 3, 1, 4, 3, 0, 4, 0, 1, 5, 3),
-    (0, 5, 0, 5, 0, 4, 0, 2, 0, 4, 4, 3, 4, 3, 3, 3, 3, 3, 5, 4, 4, 4, 4, 4, 4, 5, 3, 3, 5, 2, 4, 4, 4, 3, 4, 4, 3, 3, 4, 4, 5, 5, 3, 3, 4, 3, 4, 3, 3, 4, 3, 3, 3, 3, 1, 2, 2, 1, 4, 3, 3, 5, 4, 4, 3, 4, 0, 4, 0, 3, 0, 4, 4, 4, 4, 4, 1, 0, 4, 2, 0, 2, 4),
-    (0, 4, 0, 4, 0, 3, 0, 1, 0, 3, 5, 2, 3, 0, 3, 0, 2, 1, 4, 2, 3, 3, 4, 1, 4, 3, 3, 2, 4, 1, 3, 3, 3, 0, 3, 3, 0, 0, 3, 3, 3, 5, 3, 3, 3, 3, 3, 2, 0, 2, 0, 0, 2, 0, 0, 2, 0, 0, 1, 0, 0, 3, 1, 2, 2, 3, 0, 3, 0, 2, 0, 4, 4, 3, 3, 4, 1, 0, 3, 0, 0, 2, 4),
-    (0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 2, 0, 0, 0, 0, 0, 1, 0, 2, 0, 1, 0, 0, 0, 0, 0, 3, 1, 3, 0, 3, 2, 0, 0, 0, 1, 0, 3, 2, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 4, 0, 2, 0, 0, 0, 0, 0, 0, 2),
-    (0, 2, 1, 3, 0, 2, 0, 2, 0, 3, 3, 3, 3, 1, 3, 1, 3, 3, 3, 3, 3, 3, 4, 2, 2, 1, 2, 1, 4, 0, 4, 3, 1, 3, 3, 3, 2, 4, 3, 5, 4, 3, 3, 3, 3, 3, 3, 3, 0, 1, 3, 0, 2, 0, 0, 1, 0, 0, 1, 0, 0, 4, 2, 0, 2, 3, 0, 3, 3, 0, 3, 3, 4, 2, 3, 1, 4, 0, 1, 2, 0, 2, 3),
-    (0, 3, 0, 3, 0, 1, 0, 3, 0, 2, 3, 3, 3, 0, 3, 1, 2, 0, 3, 3, 2, 3, 3, 2, 3, 2, 3, 1, 3, 0, 4, 3, 2, 0, 3, 3, 1, 4, 3, 3, 2, 3, 4, 3, 1, 3, 3, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 4, 1, 1, 0, 3, 0, 3, 1, 0, 2, 3, 3, 3, 3, 3, 1, 0, 0, 2, 0, 3, 3),
-    (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 2, 0, 3, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 3, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 2, 0, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3),
-    (0, 2, 0, 3, 1, 3, 0, 3, 0, 2, 3, 3, 3, 1, 3, 1, 3, 1, 3, 1, 3, 3, 3, 1, 3, 0, 2, 3, 1, 1, 4, 3, 3, 2, 3, 3, 1, 2, 2, 4, 1, 3, 3, 0, 1, 4, 2, 3, 0, 1, 3, 0, 3, 0, 0, 1, 3, 0, 2, 0, 0, 3, 3, 2, 1, 3, 0, 3, 0, 2, 0, 3, 4, 4, 4, 3, 1, 0, 3, 0, 0, 3, 3),
-    (0, 2, 0, 1, 0, 2, 0, 0, 0, 1, 3, 2, 2, 1, 3, 0, 1, 1, 3, 0, 3, 2, 3, 1, 2, 0, 2, 0, 1, 1, 3, 3, 3, 0, 3, 3, 1, 1, 2, 3, 2, 3, 3, 1, 2, 3, 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 1, 0, 0, 2, 1, 2, 1, 3, 0, 3, 0, 0, 0, 3, 4, 4, 4, 3, 2, 0, 2, 0, 0, 2, 4),
-    (0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 3, 1, 0, 0, 0, 0, 0, 0, 0, 3),
-    (0, 3, 0, 3, 0, 2, 0, 3, 0, 3, 3, 3, 2, 3, 2, 2, 2, 0, 3, 1, 3, 3, 3, 2, 3, 3, 0, 0, 3, 0, 3, 2, 2, 0, 2, 3, 1, 4, 3, 4, 3, 3, 2, 3, 1, 5, 4, 4, 0, 3, 1, 2, 1, 3, 0, 3, 1, 1, 2, 0, 2, 3, 1, 3, 1, 3, 0, 3, 0, 1, 0, 3, 3, 4, 4, 2, 1, 0, 2, 1, 0, 2, 4),
-    (0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 4, 2, 5, 1, 4, 0, 2, 0, 2, 1, 3, 1, 4, 0, 2, 1, 0, 0, 2, 1, 4, 1, 1, 0, 3, 3, 0, 5, 1, 3, 2, 3, 3, 1, 0, 3, 2, 3, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 4, 0, 1, 0, 3, 0, 2, 0, 1, 0, 3, 3, 3, 4, 3, 3, 0, 0, 0, 0, 2, 3),
-    (0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 2, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 0, 0, 1, 0, 0, 0, 0, 0, 3),
-    (0, 1, 0, 3, 0, 4, 0, 3, 0, 2, 4, 3, 1, 0, 3, 2, 2, 1, 3, 1, 2, 2, 3, 1, 1, 1, 2, 1, 3, 0, 1, 2, 0, 1, 3, 2, 1, 3, 0, 5, 5, 1, 0, 0, 1, 3, 2, 1, 0, 3, 0, 0, 1, 0, 0, 0, 0, 0, 3, 4, 0, 1, 1, 1, 3, 2, 0, 2, 0, 1, 0, 2, 3, 3, 1, 2, 3, 0, 1, 0, 1, 0, 4),
-    (0, 0, 0, 1, 0, 3, 0, 3, 0, 2, 2, 1, 0, 0, 4, 0, 3, 0, 3, 1, 3, 0, 3, 0, 3, 0, 1, 0, 3, 0, 3, 1, 3, 0, 3, 3, 0, 0, 1, 2, 1, 1, 1, 0, 1, 2, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 1, 2, 0, 0, 2, 0, 0, 0, 0, 2, 3, 3, 3, 3, 0, 0, 0, 0, 1, 4),
-    (0, 0, 0, 3, 0, 3, 0, 0, 0, 0, 3, 1, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 3, 0, 2, 0, 2, 3, 0, 0, 2, 2, 3, 1, 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 2, 0, 0, 0, 0, 2, 3),
-    (2, 4, 0, 5, 0, 5, 0, 4, 0, 3, 4, 3, 3, 3, 4, 3, 3, 3, 4, 3, 4, 4, 5, 4, 5, 5, 5, 2, 3, 0, 5, 5, 4, 1, 5, 4, 3, 1, 5, 4, 3, 4, 4, 3, 3, 4, 3, 3, 0, 3, 2, 0, 2, 3, 0, 3, 0, 0, 3, 3, 0, 5, 3, 2, 3, 3, 0, 3, 0, 3, 0, 3, 4, 5, 4, 5, 3, 0, 4, 3, 0, 3, 4),
-    (0, 3, 0, 3, 0, 3, 0, 3, 0, 3, 3, 4, 3, 2, 3, 2, 3, 0, 4, 3, 3, 3, 3, 3, 3, 3, 3, 0, 3, 2, 4, 3, 3, 1, 3, 4, 3, 4, 4, 4, 3, 4, 4, 3, 2, 4, 4, 1, 0, 2, 0, 0, 1, 1, 0, 2, 0, 0, 3, 1, 0, 5, 3, 2, 1, 3, 0, 3, 0, 1, 2, 4, 3, 2, 4, 3, 3, 0, 3, 2, 0, 4, 4),
-    (0, 3, 0, 3, 0, 1, 0, 0, 0, 1, 4, 3, 3, 2, 3, 1, 3, 1, 4, 2, 3, 2, 4, 2, 3, 4, 3, 0, 2, 2, 3, 3, 3, 0, 3, 3, 3, 0, 3, 4, 1, 3, 3, 0, 3, 4, 3, 3, 0, 1, 1, 0, 1, 0, 0, 0, 4, 0, 3, 0, 0, 3, 1, 2, 1, 3, 0, 4, 0, 1, 0, 4, 3, 3, 4, 3, 3, 0, 2, 0, 0, 3, 3),
-    (0, 3, 0, 4, 0, 1, 0, 3, 0, 3, 4, 3, 3, 0, 3, 3, 3, 1, 3, 1, 3, 3, 4, 3, 3, 3, 0, 0, 3, 1, 5, 3, 3, 1, 3, 3, 2, 5, 4, 3, 3, 4, 5, 3, 2, 5, 3, 4, 0, 1, 0, 0, 0, 0, 0, 2, 0, 0, 1, 1, 0, 4, 2, 2, 1, 3, 0, 3, 0, 2, 0, 4, 4, 3, 5, 3, 2, 0, 1, 1, 0, 3, 4),
-    (0, 5, 0, 4, 0, 5, 0, 2, 0, 4, 4, 3, 3, 2, 3, 3, 3, 1, 4, 3, 4, 1, 5, 3, 4, 3, 4, 0, 4, 2, 4, 3, 4, 1, 5, 4, 0, 4, 4, 4, 4, 5, 4, 1, 3, 5, 4, 2, 1, 4, 1, 1, 3, 2, 0, 3, 1, 0, 3, 2, 1, 4, 3, 3, 3, 4, 0, 4, 0, 3, 0, 4, 4, 4, 3, 3, 3, 0, 4, 2, 0, 3, 4),
-    (1, 4, 0, 4, 0, 3, 0, 1, 0, 3, 3, 3, 1, 1, 3, 3, 2, 2, 3, 3, 1, 0, 3, 2, 2, 1, 2, 0, 3, 1, 2, 1, 2, 0, 3, 2, 0, 2, 2, 3, 3, 4, 3, 0, 3, 3, 1, 2, 0, 1, 1, 3, 1, 2, 0, 0, 3, 0, 1, 1, 0, 3, 2, 2, 3, 3, 0, 3, 0, 0, 0, 2, 3, 3, 4, 3, 3, 0, 1, 0, 0, 1, 4),
-    (0, 4, 0, 4, 0, 4, 0, 0, 0, 3, 4, 4, 3, 1, 4, 2, 3, 2, 3, 3, 3, 1, 4, 3, 4, 0, 3, 0, 4, 2, 3, 3, 2, 2, 5, 4, 2, 1, 3, 4, 3, 4, 3, 1, 3, 3, 4, 2, 0, 2, 1, 0, 3, 3, 0, 0, 2, 0, 3, 1, 0, 4, 4, 3, 4, 3, 0, 4, 0, 1, 0, 2, 4, 4, 4, 4, 4, 0, 3, 2, 0, 3, 3),
-    (0, 0, 0, 1, 0, 4, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 3, 2, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 2),
-    (0, 2, 0, 3, 0, 4, 0, 4, 0, 1, 3, 3, 3, 0, 4, 0, 2, 1, 2, 1, 1, 1, 2, 0, 3, 1, 1, 0, 1, 0, 3, 1, 0, 0, 3, 3, 2, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 2, 0, 2, 2, 0, 3, 1, 0, 0, 1, 0, 1, 1, 0, 1, 2, 0, 3, 0, 0, 0, 0, 1, 0, 0, 3, 3, 4, 3, 1, 0, 1, 0, 3, 0, 2),
-    (0, 0, 0, 3, 0, 5, 0, 0, 0, 0, 1, 0, 2, 0, 3, 1, 0, 1, 3, 0, 0, 0, 2, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 4, 0, 0, 0, 2, 3, 0, 1, 4, 1, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 3, 0, 0, 0, 0, 0, 3),
-    (0, 2, 0, 5, 0, 5, 0, 1, 0, 2, 4, 3, 3, 2, 5, 1, 3, 2, 3, 3, 3, 0, 4, 1, 2, 0, 3, 0, 4, 0, 2, 2, 1, 1, 5, 3, 0, 0, 1, 4, 2, 3, 2, 0, 3, 3, 3, 2, 0, 2, 4, 1, 1, 2, 0, 1, 1, 0, 3, 1, 0, 1, 3, 1, 2, 3, 0, 2, 0, 0, 0, 1, 3, 5, 4, 4, 4, 0, 3, 0, 0, 1, 3),
-    (0, 4, 0, 5, 0, 4, 0, 4, 0, 4, 5, 4, 3, 3, 4, 3, 3, 3, 4, 3, 4, 4, 5, 3, 4, 5, 4, 2, 4, 2, 3, 4, 3, 1, 4, 4, 1, 3, 5, 4, 4, 5, 5, 4, 4, 5, 5, 5, 2, 3, 3, 1, 4, 3, 1, 3, 3, 0, 3, 3, 1, 4, 3, 4, 4, 4, 0, 3, 0, 4, 0, 3, 3, 4, 4, 5, 0, 0, 4, 3, 0, 4, 5),
-    (0, 4, 0, 4, 0, 3, 0, 3, 0, 3, 4, 4, 4, 3, 3, 2, 4, 3, 4, 3, 4, 3, 5, 3, 4, 3, 2, 1, 4, 2, 4, 4, 3, 1, 3, 4, 2, 4, 5, 5, 3, 4, 5, 4, 1, 5, 4, 3, 0, 3, 2, 2, 3, 2, 1, 3, 1, 0, 3, 3, 3, 5, 3, 3, 3, 5, 4, 4, 2, 3, 3, 4, 3, 3, 3, 2, 1, 0, 3, 2, 1, 4, 3),
-    (0, 4, 0, 5, 0, 4, 0, 3, 0, 3, 5, 5, 3, 2, 4, 3, 4, 0, 5, 4, 4, 1, 4, 4, 4, 3, 3, 3, 4, 3, 5, 5, 2, 3, 3, 4, 1, 2, 5, 5, 3, 5, 5, 2, 3, 5, 5, 4, 0, 3, 2, 0, 3, 3, 1, 1, 5, 1, 4, 1, 0, 4, 3, 2, 3, 5, 0, 4, 0, 3, 0, 5, 4, 3, 4, 3, 0, 0, 4, 1, 0, 4, 4),
-    (1, 3, 0, 4, 0, 2, 0, 2, 0, 2, 5, 5, 3, 3, 3, 3, 3, 0, 4, 2, 3, 4, 4, 4, 3, 4, 0, 0, 3, 4, 5, 4, 3, 3, 3, 3, 2, 5, 5, 4, 5, 5, 5, 4, 3, 5, 5, 5, 1, 3, 1, 0, 1, 0, 0, 3, 2, 0, 4, 2, 0, 5, 2, 3, 2, 4, 1, 3, 0, 3, 0, 4, 5, 4, 5, 4, 3, 0, 4, 2, 0, 5, 4),
-    (0, 3, 0, 4, 0, 5, 0, 3, 0, 3, 4, 4, 3, 2, 3, 2, 3, 3, 3, 3, 3, 2, 4, 3, 3, 2, 2, 0, 3, 3, 3, 3, 3, 1, 3, 3, 3, 0, 4, 4, 3, 4, 4, 1, 1, 4, 4, 2, 0, 3, 1, 0, 1, 1, 0, 4, 1, 0, 2, 3, 1, 3, 3, 1, 3, 4, 0, 3, 0, 1, 0, 3, 1, 3, 0, 0, 1, 0, 2, 0, 0, 4, 4),
-    (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
-    (0, 3, 0, 3, 0, 2, 0, 3, 0, 1, 5, 4, 3, 3, 3, 1, 4, 2, 1, 2, 3, 4, 4, 2, 4, 4, 5, 0, 3, 1, 4, 3, 4, 0, 4, 3, 3, 3, 2, 3, 2, 5, 3, 4, 3, 2, 2, 3, 0, 0, 3, 0, 2, 1, 0, 1, 2, 0, 0, 0, 0, 2, 1, 1, 3, 1, 0, 2, 0, 4, 0, 3, 4, 4, 4, 5, 2, 0, 2, 0, 0, 1, 3),
-    (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 4, 2, 1, 1, 0, 1, 0, 3, 2, 0, 0, 3, 1, 1, 1, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 1, 0, 0, 0, 2, 0, 0, 0, 1, 4, 0, 4, 2, 1, 0, 0, 0, 0, 0, 1),
-    (0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 3, 1, 0, 0, 0, 2, 0, 2, 1, 0, 0, 1, 2, 1, 0, 1, 1, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 3, 1, 0, 0, 0, 0, 0, 1, 0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 2),
-    (0, 4, 0, 4, 0, 4, 0, 3, 0, 4, 4, 3, 4, 2, 4, 3, 2, 0, 4, 4, 4, 3, 5, 3, 5, 3, 3, 2, 4, 2, 4, 3, 4, 3, 1, 4, 0, 2, 3, 4, 4, 4, 3, 3, 3, 4, 4, 4, 3, 4, 1, 3, 4, 3, 2, 1, 2, 1, 3, 3, 3, 4, 4, 3, 3, 5, 0, 4, 0, 3, 0, 4, 3, 3, 3, 2, 1, 0, 3, 0, 0, 3, 3),
-    (0, 4, 0, 3, 0, 3, 0, 3, 0, 3, 5, 5, 3, 3, 3, 3, 4, 3, 4, 3, 3, 3, 4, 4, 4, 3, 3, 3, 3, 4, 3, 5, 3, 3, 1, 3, 2, 4, 5, 5, 5, 5, 4, 3, 4, 5, 5, 3, 2, 2, 3, 3, 3, 3, 2, 3, 3, 1, 2, 3, 2, 4, 3, 3, 3, 4, 0, 4, 0, 2, 0, 4, 3, 2, 2, 1, 2, 0, 3, 0, 0, 4, 1),
+jp2CharContext = (
+(0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1),
+(2,4,0,4,0,3,0,4,0,3,4,4,4,2,4,3,3,4,3,2,3,3,4,2,3,3,3,2,4,1,4,3,3,1,5,4,3,4,3,4,3,5,3,0,3,5,4,2,0,3,1,0,3,3,0,3,3,0,1,1,0,4,3,0,3,3,0,4,0,2,0,3,5,5,5,5,4,0,4,1,0,3,4),
+(0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2),
+(0,4,0,5,0,5,0,4,0,4,5,4,4,3,5,3,5,1,5,3,4,3,4,4,3,4,3,3,4,3,5,4,4,3,5,5,3,5,5,5,3,5,5,3,4,5,5,3,1,3,2,0,3,4,0,4,2,0,4,2,1,5,3,2,3,5,0,4,0,2,0,5,4,4,5,4,5,0,4,0,0,4,4),
+(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
+(0,3,0,4,0,3,0,3,0,4,5,4,3,3,3,3,4,3,5,4,4,3,5,4,4,3,4,3,4,4,4,4,5,3,4,4,3,4,5,5,4,5,5,1,4,5,4,3,0,3,3,1,3,3,0,4,4,0,3,3,1,5,3,3,3,5,0,4,0,3,0,4,4,3,4,3,3,0,4,1,1,3,4),
+(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
+(0,4,0,3,0,3,0,4,0,3,4,4,3,2,2,1,2,1,3,1,3,3,3,3,3,4,3,1,3,3,5,3,3,0,4,3,0,5,4,3,3,5,4,4,3,4,4,5,0,1,2,0,1,2,0,2,2,0,1,0,0,5,2,2,1,4,0,3,0,1,0,4,4,3,5,4,3,0,2,1,0,4,3),
+(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
+(0,3,0,5,0,4,0,2,1,4,4,2,4,1,4,2,4,2,4,3,3,3,4,3,3,3,3,1,4,2,3,3,3,1,4,4,1,1,1,4,3,3,2,0,2,4,3,2,0,3,3,0,3,1,1,0,0,0,3,3,0,4,2,2,3,4,0,4,0,3,0,4,4,5,3,4,4,0,3,0,0,1,4),
+(1,4,0,4,0,4,0,4,0,3,5,4,4,3,4,3,5,4,3,3,4,3,5,4,4,4,4,3,4,2,4,3,3,1,5,4,3,2,4,5,4,5,5,4,4,5,4,4,0,3,2,2,3,3,0,4,3,1,3,2,1,4,3,3,4,5,0,3,0,2,0,4,5,5,4,5,4,0,4,0,0,5,4),
+(0,5,0,5,0,4,0,3,0,4,4,3,4,3,3,3,4,0,4,4,4,3,4,3,4,3,3,1,4,2,4,3,4,0,5,4,1,4,5,4,4,5,3,2,4,3,4,3,2,4,1,3,3,3,2,3,2,0,4,3,3,4,3,3,3,4,0,4,0,3,0,4,5,4,4,4,3,0,4,1,0,1,3),
+(0,3,1,4,0,3,0,2,0,3,4,4,3,1,4,2,3,3,4,3,4,3,4,3,4,4,3,2,3,1,5,4,4,1,4,4,3,5,4,4,3,5,5,4,3,4,4,3,1,2,3,1,2,2,0,3,2,0,3,1,0,5,3,3,3,4,3,3,3,3,4,4,4,4,5,4,2,0,3,3,2,4,3),
+(0,2,0,3,0,1,0,1,0,0,3,2,0,0,2,0,1,0,2,1,3,3,3,1,2,3,1,0,1,0,4,2,1,1,3,3,0,4,3,3,1,4,3,3,0,3,3,2,0,0,0,0,1,0,0,2,0,0,0,0,0,4,1,0,2,3,2,2,2,1,3,3,3,4,4,3,2,0,3,1,0,3,3),
+(0,4,0,4,0,3,0,3,0,4,4,4,3,3,3,3,3,3,4,3,4,2,4,3,4,3,3,2,4,3,4,5,4,1,4,5,3,5,4,5,3,5,4,0,3,5,5,3,1,3,3,2,2,3,0,3,4,1,3,3,2,4,3,3,3,4,0,4,0,3,0,4,5,4,4,5,3,0,4,1,0,3,4),
+(0,2,0,3,0,3,0,0,0,2,2,2,1,0,1,0,0,0,3,0,3,0,3,0,1,3,1,0,3,1,3,3,3,1,3,3,3,0,1,3,1,3,4,0,0,3,1,1,0,3,2,0,0,0,0,1,3,0,1,0,0,3,3,2,0,3,0,0,0,0,0,3,4,3,4,3,3,0,3,0,0,2,3),
+(2,3,0,3,0,2,0,1,0,3,3,4,3,1,3,1,1,1,3,1,4,3,4,3,3,3,0,0,3,1,5,4,3,1,4,3,2,5,5,4,4,4,4,3,3,4,4,4,0,2,1,1,3,2,0,1,2,0,0,1,0,4,1,3,3,3,0,3,0,1,0,4,4,4,5,5,3,0,2,0,0,4,4),
+(0,2,0,1,0,3,1,3,0,2,3,3,3,0,3,1,0,0,3,0,3,2,3,1,3,2,1,1,0,0,4,2,1,0,2,3,1,4,3,2,0,4,4,3,1,3,1,3,0,1,0,0,1,0,0,0,1,0,0,0,0,4,1,1,1,2,0,3,0,0,0,3,4,2,4,3,2,0,1,0,0,3,3),
+(0,1,0,4,0,5,0,4,0,2,4,4,2,3,3,2,3,3,5,3,3,3,4,3,4,2,3,0,4,3,3,3,4,1,4,3,2,1,5,5,3,4,5,1,3,5,4,2,0,3,3,0,1,3,0,4,2,0,1,3,1,4,3,3,3,3,0,3,0,1,0,3,4,4,4,5,5,0,3,0,1,4,5),
+(0,2,0,3,0,3,0,0,0,2,3,1,3,0,4,0,1,1,3,0,3,4,3,2,3,1,0,3,3,2,3,1,3,0,2,3,0,2,1,4,1,2,2,0,0,3,3,0,0,2,0,0,0,1,0,0,0,0,2,2,0,3,2,1,3,3,0,2,0,2,0,0,3,3,1,2,4,0,3,0,2,2,3),
+(2,4,0,5,0,4,0,4,0,2,4,4,4,3,4,3,3,3,1,2,4,3,4,3,4,4,5,0,3,3,3,3,2,0,4,3,1,4,3,4,1,4,4,3,3,4,4,3,1,2,3,0,4,2,0,4,1,0,3,3,0,4,3,3,3,4,0,4,0,2,0,3,5,3,4,5,2,0,3,0,0,4,5),
+(0,3,0,4,0,1,0,1,0,1,3,2,2,1,3,0,3,0,2,0,2,0,3,0,2,0,0,0,1,0,1,1,0,0,3,1,0,0,0,4,0,3,1,0,2,1,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,4,2,2,3,1,0,3,0,0,0,1,4,4,4,3,0,0,4,0,0,1,4),
+(1,4,1,5,0,3,0,3,0,4,5,4,4,3,5,3,3,4,4,3,4,1,3,3,3,3,2,1,4,1,5,4,3,1,4,4,3,5,4,4,3,5,4,3,3,4,4,4,0,3,3,1,2,3,0,3,1,0,3,3,0,5,4,4,4,4,4,4,3,3,5,4,4,3,3,5,4,0,3,2,0,4,4),
+(0,2,0,3,0,1,0,0,0,1,3,3,3,2,4,1,3,0,3,1,3,0,2,2,1,1,0,0,2,0,4,3,1,0,4,3,0,4,4,4,1,4,3,1,1,3,3,1,0,2,0,0,1,3,0,0,0,0,2,0,0,4,3,2,4,3,5,4,3,3,3,4,3,3,4,3,3,0,2,1,0,3,3),
+(0,2,0,4,0,3,0,2,0,2,5,5,3,4,4,4,4,1,4,3,3,0,4,3,4,3,1,3,3,2,4,3,0,3,4,3,0,3,4,4,2,4,4,0,4,5,3,3,2,2,1,1,1,2,0,1,5,0,3,3,2,4,3,3,3,4,0,3,0,2,0,4,4,3,5,5,0,0,3,0,2,3,3),
+(0,3,0,4,0,3,0,1,0,3,4,3,3,1,3,3,3,0,3,1,3,0,4,3,3,1,1,0,3,0,3,3,0,0,4,4,0,1,5,4,3,3,5,0,3,3,4,3,0,2,0,1,1,1,0,1,3,0,1,2,1,3,3,2,3,3,0,3,0,1,0,1,3,3,4,4,1,0,1,2,2,1,3),
+(0,1,0,4,0,4,0,3,0,1,3,3,3,2,3,1,1,0,3,0,3,3,4,3,2,4,2,0,1,0,4,3,2,0,4,3,0,5,3,3,2,4,4,4,3,3,3,4,0,1,3,0,0,1,0,0,1,0,0,0,0,4,2,3,3,3,0,3,0,0,0,4,4,4,5,3,2,0,3,3,0,3,5),
+(0,2,0,3,0,0,0,3,0,1,3,0,2,0,0,0,1,0,3,1,1,3,3,0,0,3,0,0,3,0,2,3,1,0,3,1,0,3,3,2,0,4,2,2,0,2,0,0,0,4,0,0,0,0,0,0,0,0,0,0,0,2,1,2,0,1,0,1,0,0,0,1,3,1,2,0,0,0,1,0,0,1,4),
+(0,3,0,3,0,5,0,1,0,2,4,3,1,3,3,2,1,1,5,2,1,0,5,1,2,0,0,0,3,3,2,2,3,2,4,3,0,0,3,3,1,3,3,0,2,5,3,4,0,3,3,0,1,2,0,2,2,0,3,2,0,2,2,3,3,3,0,2,0,1,0,3,4,4,2,5,4,0,3,0,0,3,5),
+(0,3,0,3,0,3,0,1,0,3,3,3,3,0,3,0,2,0,2,1,1,0,2,0,1,0,0,0,2,1,0,0,1,0,3,2,0,0,3,3,1,2,3,1,0,3,3,0,0,1,0,0,0,0,0,2,0,0,0,0,0,2,3,1,2,3,0,3,0,1,0,3,2,1,0,4,3,0,1,1,0,3,3),
+(0,4,0,5,0,3,0,3,0,4,5,5,4,3,5,3,4,3,5,3,3,2,5,3,4,4,4,3,4,3,4,5,5,3,4,4,3,4,4,5,4,4,4,3,4,5,5,4,2,3,4,2,3,4,0,3,3,1,4,3,2,4,3,3,5,5,0,3,0,3,0,5,5,5,5,4,4,0,4,0,1,4,4),
+(0,4,0,4,0,3,0,3,0,3,5,4,4,2,3,2,5,1,3,2,5,1,4,2,3,2,3,3,4,3,3,3,3,2,5,4,1,3,3,5,3,4,4,0,4,4,3,1,1,3,1,0,2,3,0,2,3,0,3,0,0,4,3,1,3,4,0,3,0,2,0,4,4,4,3,4,5,0,4,0,0,3,4),
+(0,3,0,3,0,3,1,2,0,3,4,4,3,3,3,0,2,2,4,3,3,1,3,3,3,1,1,0,3,1,4,3,2,3,4,4,2,4,4,4,3,4,4,3,2,4,4,3,1,3,3,1,3,3,0,4,1,0,2,2,1,4,3,2,3,3,5,4,3,3,5,4,4,3,3,0,4,0,3,2,2,4,4),
+(0,2,0,1,0,0,0,0,0,1,2,1,3,0,0,0,0,0,2,0,1,2,1,0,0,1,0,0,0,0,3,0,0,1,0,1,1,3,1,0,0,0,1,1,0,1,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,1,2,2,0,3,4,0,0,0,1,1,0,0,1,0,0,0,0,0,1,1),
+(0,1,0,0,0,1,0,0,0,0,4,0,4,1,4,0,3,0,4,0,3,0,4,0,3,0,3,0,4,1,5,1,4,0,0,3,0,5,0,5,2,0,1,0,0,0,2,1,4,0,1,3,0,0,3,0,0,3,1,1,4,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0),
+(1,4,0,5,0,3,0,2,0,3,5,4,4,3,4,3,5,3,4,3,3,0,4,3,3,3,3,3,3,2,4,4,3,1,3,4,4,5,4,4,3,4,4,1,3,5,4,3,3,3,1,2,2,3,3,1,3,1,3,3,3,5,3,3,4,5,0,3,0,3,0,3,4,3,4,4,3,0,3,0,2,4,3),
+(0,1,0,4,0,0,0,0,0,1,4,0,4,1,4,2,4,0,3,0,1,0,1,0,0,0,0,0,2,0,3,1,1,1,0,3,0,0,0,1,2,1,0,0,1,1,1,1,0,1,0,0,0,1,0,0,3,0,0,0,0,3,2,0,2,2,0,1,0,0,0,2,3,2,3,3,0,0,0,0,2,1,0),
+(0,5,1,5,0,3,0,3,0,5,4,4,5,1,5,3,3,0,4,3,4,3,5,3,4,3,3,2,4,3,4,3,3,0,3,3,1,4,4,3,4,4,4,3,4,5,5,3,2,3,1,1,3,3,1,3,1,1,3,3,2,4,5,3,3,5,0,4,0,3,0,4,4,3,5,3,3,0,3,4,0,4,3),
+(0,5,0,5,0,3,0,2,0,4,4,3,5,2,4,3,3,3,4,4,4,3,5,3,5,3,3,1,4,0,4,3,3,0,3,3,0,4,4,4,4,5,4,3,3,5,5,3,2,3,1,2,3,2,0,1,0,0,3,2,2,4,4,3,1,5,0,4,0,3,0,4,3,1,3,2,1,0,3,3,0,3,3),
+(0,4,0,5,0,5,0,4,0,4,5,5,5,3,4,3,3,2,5,4,4,3,5,3,5,3,4,0,4,3,4,4,3,2,4,4,3,4,5,4,4,5,5,0,3,5,5,4,1,3,3,2,3,3,1,3,1,0,4,3,1,4,4,3,4,5,0,4,0,2,0,4,3,4,4,3,3,0,4,0,0,5,5),
+(0,4,0,4,0,5,0,1,1,3,3,4,4,3,4,1,3,0,5,1,3,0,3,1,3,1,1,0,3,0,3,3,4,0,4,3,0,4,4,4,3,4,4,0,3,5,4,1,0,3,0,0,2,3,0,3,1,0,3,1,0,3,2,1,3,5,0,3,0,1,0,3,2,3,3,4,4,0,2,2,0,4,4),
+(2,4,0,5,0,4,0,3,0,4,5,5,4,3,5,3,5,3,5,3,5,2,5,3,4,3,3,4,3,4,5,3,2,1,5,4,3,2,3,4,5,3,4,1,2,5,4,3,0,3,3,0,3,2,0,2,3,0,4,1,0,3,4,3,3,5,0,3,0,1,0,4,5,5,5,4,3,0,4,2,0,3,5),
+(0,5,0,4,0,4,0,2,0,5,4,3,4,3,4,3,3,3,4,3,4,2,5,3,5,3,4,1,4,3,4,4,4,0,3,5,0,4,4,4,4,5,3,1,3,4,5,3,3,3,3,3,3,3,0,2,2,0,3,3,2,4,3,3,3,5,3,4,1,3,3,5,3,2,0,0,0,0,4,3,1,3,3),
+(0,1,0,3,0,3,0,1,0,1,3,3,3,2,3,3,3,0,3,0,0,0,3,1,3,0,0,0,2,2,2,3,0,0,3,2,0,1,2,4,1,3,3,0,0,3,3,3,0,1,0,0,2,1,0,0,3,0,3,1,0,3,0,0,1,3,0,2,0,1,0,3,3,1,3,3,0,0,1,1,0,3,3),
+(0,2,0,3,0,2,1,4,0,2,2,3,1,1,3,1,1,0,2,0,3,1,2,3,1,3,0,0,1,0,4,3,2,3,3,3,1,4,2,3,3,3,3,1,0,3,1,4,0,1,1,0,1,2,0,1,1,0,1,1,0,3,1,3,2,2,0,1,0,0,0,2,3,3,3,1,0,0,0,0,0,2,3),
+(0,5,0,4,0,5,0,2,0,4,5,5,3,3,4,3,3,1,5,4,4,2,4,4,4,3,4,2,4,3,5,5,4,3,3,4,3,3,5,5,4,5,5,1,3,4,5,3,1,4,3,1,3,3,0,3,3,1,4,3,1,4,5,3,3,5,0,4,0,3,0,5,3,3,1,4,3,0,4,0,1,5,3),
+(0,5,0,5,0,4,0,2,0,4,4,3,4,3,3,3,3,3,5,4,4,4,4,4,4,5,3,3,5,2,4,4,4,3,4,4,3,3,4,4,5,5,3,3,4,3,4,3,3,4,3,3,3,3,1,2,2,1,4,3,3,5,4,4,3,4,0,4,0,3,0,4,4,4,4,4,1,0,4,2,0,2,4),
+(0,4,0,4,0,3,0,1,0,3,5,2,3,0,3,0,2,1,4,2,3,3,4,1,4,3,3,2,4,1,3,3,3,0,3,3,0,0,3,3,3,5,3,3,3,3,3,2,0,2,0,0,2,0,0,2,0,0,1,0,0,3,1,2,2,3,0,3,0,2,0,4,4,3,3,4,1,0,3,0,0,2,4),
+(0,0,0,4,0,0,0,0,0,0,1,0,1,0,2,0,0,0,0,0,1,0,2,0,1,0,0,0,0,0,3,1,3,0,3,2,0,0,0,1,0,3,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,4,0,2,0,0,0,0,0,0,2),
+(0,2,1,3,0,2,0,2,0,3,3,3,3,1,3,1,3,3,3,3,3,3,4,2,2,1,2,1,4,0,4,3,1,3,3,3,2,4,3,5,4,3,3,3,3,3,3,3,0,1,3,0,2,0,0,1,0,0,1,0,0,4,2,0,2,3,0,3,3,0,3,3,4,2,3,1,4,0,1,2,0,2,3),
+(0,3,0,3,0,1,0,3,0,2,3,3,3,0,3,1,2,0,3,3,2,3,3,2,3,2,3,1,3,0,4,3,2,0,3,3,1,4,3,3,2,3,4,3,1,3,3,1,1,0,1,1,0,1,0,1,0,1,0,0,0,4,1,1,0,3,0,3,1,0,2,3,3,3,3,3,1,0,0,2,0,3,3),
+(0,0,0,0,0,0,0,0,0,0,3,0,2,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,3,0,3,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,2,0,2,3,0,0,0,0,0,0,0,0,3),
+(0,2,0,3,1,3,0,3,0,2,3,3,3,1,3,1,3,1,3,1,3,3,3,1,3,0,2,3,1,1,4,3,3,2,3,3,1,2,2,4,1,3,3,0,1,4,2,3,0,1,3,0,3,0,0,1,3,0,2,0,0,3,3,2,1,3,0,3,0,2,0,3,4,4,4,3,1,0,3,0,0,3,3),
+(0,2,0,1,0,2,0,0,0,1,3,2,2,1,3,0,1,1,3,0,3,2,3,1,2,0,2,0,1,1,3,3,3,0,3,3,1,1,2,3,2,3,3,1,2,3,2,0,0,1,0,0,0,0,0,0,3,0,1,0,0,2,1,2,1,3,0,3,0,0,0,3,4,4,4,3,2,0,2,0,0,2,4),
+(0,0,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,3,1,0,0,0,0,0,0,0,3),
+(0,3,0,3,0,2,0,3,0,3,3,3,2,3,2,2,2,0,3,1,3,3,3,2,3,3,0,0,3,0,3,2,2,0,2,3,1,4,3,4,3,3,2,3,1,5,4,4,0,3,1,2,1,3,0,3,1,1,2,0,2,3,1,3,1,3,0,3,0,1,0,3,3,4,4,2,1,0,2,1,0,2,4),
+(0,1,0,3,0,1,0,2,0,1,4,2,5,1,4,0,2,0,2,1,3,1,4,0,2,1,0,0,2,1,4,1,1,0,3,3,0,5,1,3,2,3,3,1,0,3,2,3,0,1,0,0,0,0,0,0,1,0,0,0,0,4,0,1,0,3,0,2,0,1,0,3,3,3,4,3,3,0,0,0,0,2,3),
+(0,0,0,1,0,0,0,0,0,0,2,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,1,0,0,1,0,0,0,0,0,3),
+(0,1,0,3,0,4,0,3,0,2,4,3,1,0,3,2,2,1,3,1,2,2,3,1,1,1,2,1,3,0,1,2,0,1,3,2,1,3,0,5,5,1,0,0,1,3,2,1,0,3,0,0,1,0,0,0,0,0,3,4,0,1,1,1,3,2,0,2,0,1,0,2,3,3,1,2,3,0,1,0,1,0,4),
+(0,0,0,1,0,3,0,3,0,2,2,1,0,0,4,0,3,0,3,1,3,0,3,0,3,0,1,0,3,0,3,1,3,0,3,3,0,0,1,2,1,1,1,0,1,2,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,2,2,1,2,0,0,2,0,0,0,0,2,3,3,3,3,0,0,0,0,1,4),
+(0,0,0,3,0,3,0,0,0,0,3,1,1,0,3,0,1,0,2,0,1,0,0,0,0,0,0,0,1,0,3,0,2,0,2,3,0,0,2,2,3,1,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,2,3),
+(2,4,0,5,0,5,0,4,0,3,4,3,3,3,4,3,3,3,4,3,4,4,5,4,5,5,5,2,3,0,5,5,4,1,5,4,3,1,5,4,3,4,4,3,3,4,3,3,0,3,2,0,2,3,0,3,0,0,3,3,0,5,3,2,3,3,0,3,0,3,0,3,4,5,4,5,3,0,4,3,0,3,4),
+(0,3,0,3,0,3,0,3,0,3,3,4,3,2,3,2,3,0,4,3,3,3,3,3,3,3,3,0,3,2,4,3,3,1,3,4,3,4,4,4,3,4,4,3,2,4,4,1,0,2,0,0,1,1,0,2,0,0,3,1,0,5,3,2,1,3,0,3,0,1,2,4,3,2,4,3,3,0,3,2,0,4,4),
+(0,3,0,3,0,1,0,0,0,1,4,3,3,2,3,1,3,1,4,2,3,2,4,2,3,4,3,0,2,2,3,3,3,0,3,3,3,0,3,4,1,3,3,0,3,4,3,3,0,1,1,0,1,0,0,0,4,0,3,0,0,3,1,2,1,3,0,4,0,1,0,4,3,3,4,3,3,0,2,0,0,3,3),
+(0,3,0,4,0,1,0,3,0,3,4,3,3,0,3,3,3,1,3,1,3,3,4,3,3,3,0,0,3,1,5,3,3,1,3,3,2,5,4,3,3,4,5,3,2,5,3,4,0,1,0,0,0,0,0,2,0,0,1,1,0,4,2,2,1,3,0,3,0,2,0,4,4,3,5,3,2,0,1,1,0,3,4),
+(0,5,0,4,0,5,0,2,0,4,4,3,3,2,3,3,3,1,4,3,4,1,5,3,4,3,4,0,4,2,4,3,4,1,5,4,0,4,4,4,4,5,4,1,3,5,4,2,1,4,1,1,3,2,0,3,1,0,3,2,1,4,3,3,3,4,0,4,0,3,0,4,4,4,3,3,3,0,4,2,0,3,4),
+(1,4,0,4,0,3,0,1,0,3,3,3,1,1,3,3,2,2,3,3,1,0,3,2,2,1,2,0,3,1,2,1,2,0,3,2,0,2,2,3,3,4,3,0,3,3,1,2,0,1,1,3,1,2,0,0,3,0,1,1,0,3,2,2,3,3,0,3,0,0,0,2,3,3,4,3,3,0,1,0,0,1,4),
+(0,4,0,4,0,4,0,0,0,3,4,4,3,1,4,2,3,2,3,3,3,1,4,3,4,0,3,0,4,2,3,3,2,2,5,4,2,1,3,4,3,4,3,1,3,3,4,2,0,2,1,0,3,3,0,0,2,0,3,1,0,4,4,3,4,3,0,4,0,1,0,2,4,4,4,4,4,0,3,2,0,3,3),
+(0,0,0,1,0,4,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,3,2,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,2),
+(0,2,0,3,0,4,0,4,0,1,3,3,3,0,4,0,2,1,2,1,1,1,2,0,3,1,1,0,1,0,3,1,0,0,3,3,2,0,1,1,0,0,0,0,0,1,0,2,0,2,2,0,3,1,0,0,1,0,1,1,0,1,2,0,3,0,0,0,0,1,0,0,3,3,4,3,1,0,1,0,3,0,2),
+(0,0,0,3,0,5,0,0,0,0,1,0,2,0,3,1,0,1,3,0,0,0,2,0,0,0,1,0,0,0,1,1,0,0,4,0,0,0,2,3,0,1,4,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,1,0,0,0,0,0,0,0,2,0,0,3,0,0,0,0,0,3),
+(0,2,0,5,0,5,0,1,0,2,4,3,3,2,5,1,3,2,3,3,3,0,4,1,2,0,3,0,4,0,2,2,1,1,5,3,0,0,1,4,2,3,2,0,3,3,3,2,0,2,4,1,1,2,0,1,1,0,3,1,0,1,3,1,2,3,0,2,0,0,0,1,3,5,4,4,4,0,3,0,0,1,3),
+(0,4,0,5,0,4,0,4,0,4,5,4,3,3,4,3,3,3,4,3,4,4,5,3,4,5,4,2,4,2,3,4,3,1,4,4,1,3,5,4,4,5,5,4,4,5,5,5,2,3,3,1,4,3,1,3,3,0,3,3,1,4,3,4,4,4,0,3,0,4,0,3,3,4,4,5,0,0,4,3,0,4,5),
+(0,4,0,4,0,3,0,3,0,3,4,4,4,3,3,2,4,3,4,3,4,3,5,3,4,3,2,1,4,2,4,4,3,1,3,4,2,4,5,5,3,4,5,4,1,5,4,3,0,3,2,2,3,2,1,3,1,0,3,3,3,5,3,3,3,5,4,4,2,3,3,4,3,3,3,2,1,0,3,2,1,4,3),
+(0,4,0,5,0,4,0,3,0,3,5,5,3,2,4,3,4,0,5,4,4,1,4,4,4,3,3,3,4,3,5,5,2,3,3,4,1,2,5,5,3,5,5,2,3,5,5,4,0,3,2,0,3,3,1,1,5,1,4,1,0,4,3,2,3,5,0,4,0,3,0,5,4,3,4,3,0,0,4,1,0,4,4),
+(1,3,0,4,0,2,0,2,0,2,5,5,3,3,3,3,3,0,4,2,3,4,4,4,3,4,0,0,3,4,5,4,3,3,3,3,2,5,5,4,5,5,5,4,3,5,5,5,1,3,1,0,1,0,0,3,2,0,4,2,0,5,2,3,2,4,1,3,0,3,0,4,5,4,5,4,3,0,4,2,0,5,4),
+(0,3,0,4,0,5,0,3,0,3,4,4,3,2,3,2,3,3,3,3,3,2,4,3,3,2,2,0,3,3,3,3,3,1,3,3,3,0,4,4,3,4,4,1,1,4,4,2,0,3,1,0,1,1,0,4,1,0,2,3,1,3,3,1,3,4,0,3,0,1,0,3,1,3,0,0,1,0,2,0,0,4,4),
+(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
+(0,3,0,3,0,2,0,3,0,1,5,4,3,3,3,1,4,2,1,2,3,4,4,2,4,4,5,0,3,1,4,3,4,0,4,3,3,3,2,3,2,5,3,4,3,2,2,3,0,0,3,0,2,1,0,1,2,0,0,0,0,2,1,1,3,1,0,2,0,4,0,3,4,4,4,5,2,0,2,0,0,1,3),
+(0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,1,0,0,1,1,0,0,0,4,2,1,1,0,1,0,3,2,0,0,3,1,1,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,1,0,0,0,2,0,0,0,1,4,0,4,2,1,0,0,0,0,0,1),
+(0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,1,0,1,0,0,0,0,3,1,0,0,0,2,0,2,1,0,0,1,2,1,0,1,1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,1,3,1,0,0,0,0,0,1,0,0,2,1,0,0,0,0,0,0,0,0,2),
+(0,4,0,4,0,4,0,3,0,4,4,3,4,2,4,3,2,0,4,4,4,3,5,3,5,3,3,2,4,2,4,3,4,3,1,4,0,2,3,4,4,4,3,3,3,4,4,4,3,4,1,3,4,3,2,1,2,1,3,3,3,4,4,3,3,5,0,4,0,3,0,4,3,3,3,2,1,0,3,0,0,3,3),
+(0,4,0,3,0,3,0,3,0,3,5,5,3,3,3,3,4,3,4,3,3,3,4,4,4,3,3,3,3,4,3,5,3,3,1,3,2,4,5,5,5,5,4,3,4,5,5,3,2,2,3,3,3,3,2,3,3,1,2,3,2,4,3,3,3,4,0,4,0,2,0,4,3,2,2,1,2,0,3,0,0,4,1),
 )
-# fmt: on
 
-
-class JapaneseContextAnalysis:
+class JapaneseContextAnalysis(object):
     NUM_OF_CATEGORY = 6
     DONT_KNOW = -1
     ENOUGH_REL_THRESHOLD = 100
@@ -156,7 +153,7 @@ def feed(self, byte_str, num_bytes):
         # this character will simplify our logic and improve performance.
         i = self._need_to_skip_char_num
         while i < num_bytes:
-            order, char_len = self.get_order(byte_str[i : i + 2])
+            order, char_len = self.get_order(byte_str[i:i + 2])
             i += char_len
             if i > num_bytes:
                 self._need_to_skip_char_num = i - num_bytes
@@ -167,9 +164,7 @@ def feed(self, byte_str, num_bytes):
                     if self._total_rel > self.MAX_REL_THRESHOLD:
                         self._done = True
                         break
-                    self._rel_sample[
-                        jp2_char_context[self._last_char_order][order]
-                    ] += 1
+                    self._rel_sample[jp2CharContext[self._last_char_order][order]] += 1
                 self._last_char_order = order
 
     def got_enough_data(self):
@@ -179,15 +174,15 @@ def get_confidence(self):
         # This is just one way to calculate confidence. It works well for me.
         if self._total_rel > self.MINIMUM_DATA_THRESHOLD:
             return (self._total_rel - self._rel_sample[0]) / self._total_rel
-        return self.DONT_KNOW
+        else:
+            return self.DONT_KNOW
 
-    def get_order(self, _):
+    def get_order(self, byte_str):
         return -1, 1
 
-
 class SJISContextAnalysis(JapaneseContextAnalysis):
     def __init__(self):
-        super().__init__()
+        super(SJISContextAnalysis, self).__init__()
         self._charset_name = "SHIFT_JIS"
 
     @property
@@ -214,7 +209,6 @@ def get_order(self, byte_str):
 
         return -1, char_len
 
-
 class EUCJPContextAnalysis(JapaneseContextAnalysis):
     def get_order(self, byte_str):
         if not byte_str:
@@ -235,3 +229,5 @@ def get_order(self, byte_str):
                 return second_char - 0xA1, char_len
 
         return -1, char_len
+
+
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/langbulgarianmodel.py b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/langbulgarianmodel.py
index 9946682..e963a50 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/langbulgarianmodel.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/langbulgarianmodel.py
@@ -1,5 +1,9 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
 from pip._vendor.chardet.sbcharsetprober import SingleByteCharSetModel
 
+
 # 3: Positive
 # 2: Likely
 # 1: Unlikely
@@ -4111,539 +4115,536 @@
 
 # Character Mapping Table(s):
 ISO_8859_5_BULGARIAN_CHAR_TO_ORDER = {
-    0: 255,  # '\x00'
-    1: 255,  # '\x01'
-    2: 255,  # '\x02'
-    3: 255,  # '\x03'
-    4: 255,  # '\x04'
-    5: 255,  # '\x05'
-    6: 255,  # '\x06'
-    7: 255,  # '\x07'
-    8: 255,  # '\x08'
-    9: 255,  # '\t'
-    10: 254,  # '\n'
-    11: 255,  # '\x0b'
-    12: 255,  # '\x0c'
-    13: 254,  # '\r'
-    14: 255,  # '\x0e'
-    15: 255,  # '\x0f'
-    16: 255,  # '\x10'
-    17: 255,  # '\x11'
-    18: 255,  # '\x12'
-    19: 255,  # '\x13'
-    20: 255,  # '\x14'
-    21: 255,  # '\x15'
-    22: 255,  # '\x16'
-    23: 255,  # '\x17'
-    24: 255,  # '\x18'
-    25: 255,  # '\x19'
-    26: 255,  # '\x1a'
-    27: 255,  # '\x1b'
-    28: 255,  # '\x1c'
-    29: 255,  # '\x1d'
-    30: 255,  # '\x1e'
-    31: 255,  # '\x1f'
-    32: 253,  # ' '
-    33: 253,  # '!'
-    34: 253,  # '"'
-    35: 253,  # '#'
-    36: 253,  # '$'
-    37: 253,  # '%'
-    38: 253,  # '&'
-    39: 253,  # "'"
-    40: 253,  # '('
-    41: 253,  # ')'
-    42: 253,  # '*'
-    43: 253,  # '+'
-    44: 253,  # ','
-    45: 253,  # '-'
-    46: 253,  # '.'
-    47: 253,  # '/'
-    48: 252,  # '0'
-    49: 252,  # '1'
-    50: 252,  # '2'
-    51: 252,  # '3'
-    52: 252,  # '4'
-    53: 252,  # '5'
-    54: 252,  # '6'
-    55: 252,  # '7'
-    56: 252,  # '8'
-    57: 252,  # '9'
-    58: 253,  # ':'
-    59: 253,  # ';'
-    60: 253,  # '<'
-    61: 253,  # '='
-    62: 253,  # '>'
-    63: 253,  # '?'
-    64: 253,  # '@'
-    65: 77,  # 'A'
-    66: 90,  # 'B'
-    67: 99,  # 'C'
-    68: 100,  # 'D'
-    69: 72,  # 'E'
-    70: 109,  # 'F'
-    71: 107,  # 'G'
-    72: 101,  # 'H'
-    73: 79,  # 'I'
-    74: 185,  # 'J'
-    75: 81,  # 'K'
-    76: 102,  # 'L'
-    77: 76,  # 'M'
-    78: 94,  # 'N'
-    79: 82,  # 'O'
-    80: 110,  # 'P'
-    81: 186,  # 'Q'
-    82: 108,  # 'R'
-    83: 91,  # 'S'
-    84: 74,  # 'T'
-    85: 119,  # 'U'
-    86: 84,  # 'V'
-    87: 96,  # 'W'
-    88: 111,  # 'X'
-    89: 187,  # 'Y'
-    90: 115,  # 'Z'
-    91: 253,  # '['
-    92: 253,  # '\\'
-    93: 253,  # ']'
-    94: 253,  # '^'
-    95: 253,  # '_'
-    96: 253,  # '`'
-    97: 65,  # 'a'
-    98: 69,  # 'b'
-    99: 70,  # 'c'
-    100: 66,  # 'd'
-    101: 63,  # 'e'
-    102: 68,  # 'f'
-    103: 112,  # 'g'
-    104: 103,  # 'h'
-    105: 92,  # 'i'
-    106: 194,  # 'j'
-    107: 104,  # 'k'
-    108: 95,  # 'l'
-    109: 86,  # 'm'
-    110: 87,  # 'n'
-    111: 71,  # 'o'
-    112: 116,  # 'p'
-    113: 195,  # 'q'
-    114: 85,  # 'r'
-    115: 93,  # 's'
-    116: 97,  # 't'
-    117: 113,  # 'u'
-    118: 196,  # 'v'
-    119: 197,  # 'w'
-    120: 198,  # 'x'
-    121: 199,  # 'y'
-    122: 200,  # 'z'
-    123: 253,  # '{'
-    124: 253,  # '|'
-    125: 253,  # '}'
-    126: 253,  # '~'
-    127: 253,  # '\x7f'
-    128: 194,  # '\x80'
-    129: 195,  # '\x81'
-    130: 196,  # '\x82'
-    131: 197,  # '\x83'
-    132: 198,  # '\x84'
-    133: 199,  # '\x85'
-    134: 200,  # '\x86'
-    135: 201,  # '\x87'
-    136: 202,  # '\x88'
-    137: 203,  # '\x89'
-    138: 204,  # '\x8a'
-    139: 205,  # '\x8b'
-    140: 206,  # '\x8c'
-    141: 207,  # '\x8d'
-    142: 208,  # '\x8e'
-    143: 209,  # '\x8f'
-    144: 210,  # '\x90'
-    145: 211,  # '\x91'
-    146: 212,  # '\x92'
-    147: 213,  # '\x93'
-    148: 214,  # '\x94'
-    149: 215,  # '\x95'
-    150: 216,  # '\x96'
-    151: 217,  # '\x97'
-    152: 218,  # '\x98'
-    153: 219,  # '\x99'
-    154: 220,  # '\x9a'
-    155: 221,  # '\x9b'
-    156: 222,  # '\x9c'
-    157: 223,  # '\x9d'
-    158: 224,  # '\x9e'
-    159: 225,  # '\x9f'
-    160: 81,  # '\xa0'
-    161: 226,  # 'Ё'
-    162: 227,  # 'Ђ'
-    163: 228,  # 'Ѓ'
-    164: 229,  # 'Є'
-    165: 230,  # 'Ѕ'
-    166: 105,  # 'І'
-    167: 231,  # 'Ї'
-    168: 232,  # 'Ј'
-    169: 233,  # 'Љ'
-    170: 234,  # 'Њ'
-    171: 235,  # 'Ћ'
-    172: 236,  # 'Ќ'
-    173: 45,  # '\xad'
-    174: 237,  # 'Ў'
-    175: 238,  # 'Џ'
-    176: 31,  # 'А'
-    177: 32,  # 'Б'
-    178: 35,  # 'В'
-    179: 43,  # 'Г'
-    180: 37,  # 'Д'
-    181: 44,  # 'Е'
-    182: 55,  # 'Ж'
-    183: 47,  # 'З'
-    184: 40,  # 'И'
-    185: 59,  # 'Й'
-    186: 33,  # 'К'
-    187: 46,  # 'Л'
-    188: 38,  # 'М'
-    189: 36,  # 'Н'
-    190: 41,  # 'О'
-    191: 30,  # 'П'
-    192: 39,  # 'Р'
-    193: 28,  # 'С'
-    194: 34,  # 'Т'
-    195: 51,  # 'У'
-    196: 48,  # 'Ф'
-    197: 49,  # 'Х'
-    198: 53,  # 'Ц'
-    199: 50,  # 'Ч'
-    200: 54,  # 'Ш'
-    201: 57,  # 'Щ'
-    202: 61,  # 'Ъ'
-    203: 239,  # 'Ы'
-    204: 67,  # 'Ь'
-    205: 240,  # 'Э'
-    206: 60,  # 'Ю'
-    207: 56,  # 'Я'
-    208: 1,  # 'а'
-    209: 18,  # 'б'
-    210: 9,  # 'в'
-    211: 20,  # 'г'
-    212: 11,  # 'д'
-    213: 3,  # 'е'
-    214: 23,  # 'ж'
-    215: 15,  # 'з'
-    216: 2,  # 'и'
-    217: 26,  # 'й'
-    218: 12,  # 'к'
-    219: 10,  # 'л'
-    220: 14,  # 'м'
-    221: 6,  # 'н'
-    222: 4,  # 'о'
-    223: 13,  # 'п'
-    224: 7,  # 'р'
-    225: 8,  # 'с'
-    226: 5,  # 'т'
-    227: 19,  # 'у'
-    228: 29,  # 'ф'
-    229: 25,  # 'х'
-    230: 22,  # 'ц'
-    231: 21,  # 'ч'
-    232: 27,  # 'ш'
-    233: 24,  # 'щ'
-    234: 17,  # 'ъ'
-    235: 75,  # 'ы'
-    236: 52,  # 'ь'
-    237: 241,  # 'э'
-    238: 42,  # 'ю'
-    239: 16,  # 'я'
-    240: 62,  # '№'
-    241: 242,  # 'ё'
-    242: 243,  # 'ђ'
-    243: 244,  # 'ѓ'
-    244: 58,  # 'є'
-    245: 245,  # 'ѕ'
-    246: 98,  # 'і'
-    247: 246,  # 'ї'
-    248: 247,  # 'ј'
-    249: 248,  # 'љ'
-    250: 249,  # 'њ'
-    251: 250,  # 'ћ'
-    252: 251,  # 'ќ'
-    253: 91,  # '§'
-    254: 252,  # 'ў'
-    255: 253,  # 'џ'
+     0: 255,  # '\x00'
+     1: 255,  # '\x01'
+     2: 255,  # '\x02'
+     3: 255,  # '\x03'
+     4: 255,  # '\x04'
+     5: 255,  # '\x05'
+     6: 255,  # '\x06'
+     7: 255,  # '\x07'
+     8: 255,  # '\x08'
+     9: 255,  # '\t'
+     10: 254,  # '\n'
+     11: 255,  # '\x0b'
+     12: 255,  # '\x0c'
+     13: 254,  # '\r'
+     14: 255,  # '\x0e'
+     15: 255,  # '\x0f'
+     16: 255,  # '\x10'
+     17: 255,  # '\x11'
+     18: 255,  # '\x12'
+     19: 255,  # '\x13'
+     20: 255,  # '\x14'
+     21: 255,  # '\x15'
+     22: 255,  # '\x16'
+     23: 255,  # '\x17'
+     24: 255,  # '\x18'
+     25: 255,  # '\x19'
+     26: 255,  # '\x1a'
+     27: 255,  # '\x1b'
+     28: 255,  # '\x1c'
+     29: 255,  # '\x1d'
+     30: 255,  # '\x1e'
+     31: 255,  # '\x1f'
+     32: 253,  # ' '
+     33: 253,  # '!'
+     34: 253,  # '"'
+     35: 253,  # '#'
+     36: 253,  # '$'
+     37: 253,  # '%'
+     38: 253,  # '&'
+     39: 253,  # "'"
+     40: 253,  # '('
+     41: 253,  # ')'
+     42: 253,  # '*'
+     43: 253,  # '+'
+     44: 253,  # ','
+     45: 253,  # '-'
+     46: 253,  # '.'
+     47: 253,  # '/'
+     48: 252,  # '0'
+     49: 252,  # '1'
+     50: 252,  # '2'
+     51: 252,  # '3'
+     52: 252,  # '4'
+     53: 252,  # '5'
+     54: 252,  # '6'
+     55: 252,  # '7'
+     56: 252,  # '8'
+     57: 252,  # '9'
+     58: 253,  # ':'
+     59: 253,  # ';'
+     60: 253,  # '<'
+     61: 253,  # '='
+     62: 253,  # '>'
+     63: 253,  # '?'
+     64: 253,  # '@'
+     65: 77,  # 'A'
+     66: 90,  # 'B'
+     67: 99,  # 'C'
+     68: 100,  # 'D'
+     69: 72,  # 'E'
+     70: 109,  # 'F'
+     71: 107,  # 'G'
+     72: 101,  # 'H'
+     73: 79,  # 'I'
+     74: 185,  # 'J'
+     75: 81,  # 'K'
+     76: 102,  # 'L'
+     77: 76,  # 'M'
+     78: 94,  # 'N'
+     79: 82,  # 'O'
+     80: 110,  # 'P'
+     81: 186,  # 'Q'
+     82: 108,  # 'R'
+     83: 91,  # 'S'
+     84: 74,  # 'T'
+     85: 119,  # 'U'
+     86: 84,  # 'V'
+     87: 96,  # 'W'
+     88: 111,  # 'X'
+     89: 187,  # 'Y'
+     90: 115,  # 'Z'
+     91: 253,  # '['
+     92: 253,  # '\\'
+     93: 253,  # ']'
+     94: 253,  # '^'
+     95: 253,  # '_'
+     96: 253,  # '`'
+     97: 65,  # 'a'
+     98: 69,  # 'b'
+     99: 70,  # 'c'
+     100: 66,  # 'd'
+     101: 63,  # 'e'
+     102: 68,  # 'f'
+     103: 112,  # 'g'
+     104: 103,  # 'h'
+     105: 92,  # 'i'
+     106: 194,  # 'j'
+     107: 104,  # 'k'
+     108: 95,  # 'l'
+     109: 86,  # 'm'
+     110: 87,  # 'n'
+     111: 71,  # 'o'
+     112: 116,  # 'p'
+     113: 195,  # 'q'
+     114: 85,  # 'r'
+     115: 93,  # 's'
+     116: 97,  # 't'
+     117: 113,  # 'u'
+     118: 196,  # 'v'
+     119: 197,  # 'w'
+     120: 198,  # 'x'
+     121: 199,  # 'y'
+     122: 200,  # 'z'
+     123: 253,  # '{'
+     124: 253,  # '|'
+     125: 253,  # '}'
+     126: 253,  # '~'
+     127: 253,  # '\x7f'
+     128: 194,  # '\x80'
+     129: 195,  # '\x81'
+     130: 196,  # '\x82'
+     131: 197,  # '\x83'
+     132: 198,  # '\x84'
+     133: 199,  # '\x85'
+     134: 200,  # '\x86'
+     135: 201,  # '\x87'
+     136: 202,  # '\x88'
+     137: 203,  # '\x89'
+     138: 204,  # '\x8a'
+     139: 205,  # '\x8b'
+     140: 206,  # '\x8c'
+     141: 207,  # '\x8d'
+     142: 208,  # '\x8e'
+     143: 209,  # '\x8f'
+     144: 210,  # '\x90'
+     145: 211,  # '\x91'
+     146: 212,  # '\x92'
+     147: 213,  # '\x93'
+     148: 214,  # '\x94'
+     149: 215,  # '\x95'
+     150: 216,  # '\x96'
+     151: 217,  # '\x97'
+     152: 218,  # '\x98'
+     153: 219,  # '\x99'
+     154: 220,  # '\x9a'
+     155: 221,  # '\x9b'
+     156: 222,  # '\x9c'
+     157: 223,  # '\x9d'
+     158: 224,  # '\x9e'
+     159: 225,  # '\x9f'
+     160: 81,  # '\xa0'
+     161: 226,  # 'Ё'
+     162: 227,  # 'Ђ'
+     163: 228,  # 'Ѓ'
+     164: 229,  # 'Є'
+     165: 230,  # 'Ѕ'
+     166: 105,  # 'І'
+     167: 231,  # 'Ї'
+     168: 232,  # 'Ј'
+     169: 233,  # 'Љ'
+     170: 234,  # 'Њ'
+     171: 235,  # 'Ћ'
+     172: 236,  # 'Ќ'
+     173: 45,  # '\xad'
+     174: 237,  # 'Ў'
+     175: 238,  # 'Џ'
+     176: 31,  # 'А'
+     177: 32,  # 'Б'
+     178: 35,  # 'В'
+     179: 43,  # 'Г'
+     180: 37,  # 'Д'
+     181: 44,  # 'Е'
+     182: 55,  # 'Ж'
+     183: 47,  # 'З'
+     184: 40,  # 'И'
+     185: 59,  # 'Й'
+     186: 33,  # 'К'
+     187: 46,  # 'Л'
+     188: 38,  # 'М'
+     189: 36,  # 'Н'
+     190: 41,  # 'О'
+     191: 30,  # 'П'
+     192: 39,  # 'Р'
+     193: 28,  # 'С'
+     194: 34,  # 'Т'
+     195: 51,  # 'У'
+     196: 48,  # 'Ф'
+     197: 49,  # 'Х'
+     198: 53,  # 'Ц'
+     199: 50,  # 'Ч'
+     200: 54,  # 'Ш'
+     201: 57,  # 'Щ'
+     202: 61,  # 'Ъ'
+     203: 239,  # 'Ы'
+     204: 67,  # 'Ь'
+     205: 240,  # 'Э'
+     206: 60,  # 'Ю'
+     207: 56,  # 'Я'
+     208: 1,  # 'а'
+     209: 18,  # 'б'
+     210: 9,  # 'в'
+     211: 20,  # 'г'
+     212: 11,  # 'д'
+     213: 3,  # 'е'
+     214: 23,  # 'ж'
+     215: 15,  # 'з'
+     216: 2,  # 'и'
+     217: 26,  # 'й'
+     218: 12,  # 'к'
+     219: 10,  # 'л'
+     220: 14,  # 'м'
+     221: 6,  # 'н'
+     222: 4,  # 'о'
+     223: 13,  # 'п'
+     224: 7,  # 'р'
+     225: 8,  # 'с'
+     226: 5,  # 'т'
+     227: 19,  # 'у'
+     228: 29,  # 'ф'
+     229: 25,  # 'х'
+     230: 22,  # 'ц'
+     231: 21,  # 'ч'
+     232: 27,  # 'ш'
+     233: 24,  # 'щ'
+     234: 17,  # 'ъ'
+     235: 75,  # 'ы'
+     236: 52,  # 'ь'
+     237: 241,  # 'э'
+     238: 42,  # 'ю'
+     239: 16,  # 'я'
+     240: 62,  # '№'
+     241: 242,  # 'ё'
+     242: 243,  # 'ђ'
+     243: 244,  # 'ѓ'
+     244: 58,  # 'є'
+     245: 245,  # 'ѕ'
+     246: 98,  # 'і'
+     247: 246,  # 'ї'
+     248: 247,  # 'ј'
+     249: 248,  # 'љ'
+     250: 249,  # 'њ'
+     251: 250,  # 'ћ'
+     252: 251,  # 'ќ'
+     253: 91,  # '§'
+     254: 252,  # 'ў'
+     255: 253,  # 'џ'
 }
 
-ISO_8859_5_BULGARIAN_MODEL = SingleByteCharSetModel(
-    charset_name="ISO-8859-5",
-    language="Bulgarian",
-    char_to_order_map=ISO_8859_5_BULGARIAN_CHAR_TO_ORDER,
-    language_model=BULGARIAN_LANG_MODEL,
-    typical_positive_ratio=0.969392,
-    keep_ascii_letters=False,
-    alphabet="АБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЬЮЯабвгдежзийклмнопрстуфхцчшщъьюя",
-)
+ISO_8859_5_BULGARIAN_MODEL = SingleByteCharSetModel(charset_name='ISO-8859-5',
+                                                    language='Bulgarian',
+                                                    char_to_order_map=ISO_8859_5_BULGARIAN_CHAR_TO_ORDER,
+                                                    language_model=BULGARIAN_LANG_MODEL,
+                                                    typical_positive_ratio=0.969392,
+                                                    keep_ascii_letters=False,
+                                                    alphabet='АБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЬЮЯабвгдежзийклмнопрстуфхцчшщъьюя')
 
 WINDOWS_1251_BULGARIAN_CHAR_TO_ORDER = {
-    0: 255,  # '\x00'
-    1: 255,  # '\x01'
-    2: 255,  # '\x02'
-    3: 255,  # '\x03'
-    4: 255,  # '\x04'
-    5: 255,  # '\x05'
-    6: 255,  # '\x06'
-    7: 255,  # '\x07'
-    8: 255,  # '\x08'
-    9: 255,  # '\t'
-    10: 254,  # '\n'
-    11: 255,  # '\x0b'
-    12: 255,  # '\x0c'
-    13: 254,  # '\r'
-    14: 255,  # '\x0e'
-    15: 255,  # '\x0f'
-    16: 255,  # '\x10'
-    17: 255,  # '\x11'
-    18: 255,  # '\x12'
-    19: 255,  # '\x13'
-    20: 255,  # '\x14'
-    21: 255,  # '\x15'
-    22: 255,  # '\x16'
-    23: 255,  # '\x17'
-    24: 255,  # '\x18'
-    25: 255,  # '\x19'
-    26: 255,  # '\x1a'
-    27: 255,  # '\x1b'
-    28: 255,  # '\x1c'
-    29: 255,  # '\x1d'
-    30: 255,  # '\x1e'
-    31: 255,  # '\x1f'
-    32: 253,  # ' '
-    33: 253,  # '!'
-    34: 253,  # '"'
-    35: 253,  # '#'
-    36: 253,  # '$'
-    37: 253,  # '%'
-    38: 253,  # '&'
-    39: 253,  # "'"
-    40: 253,  # '('
-    41: 253,  # ')'
-    42: 253,  # '*'
-    43: 253,  # '+'
-    44: 253,  # ','
-    45: 253,  # '-'
-    46: 253,  # '.'
-    47: 253,  # '/'
-    48: 252,  # '0'
-    49: 252,  # '1'
-    50: 252,  # '2'
-    51: 252,  # '3'
-    52: 252,  # '4'
-    53: 252,  # '5'
-    54: 252,  # '6'
-    55: 252,  # '7'
-    56: 252,  # '8'
-    57: 252,  # '9'
-    58: 253,  # ':'
-    59: 253,  # ';'
-    60: 253,  # '<'
-    61: 253,  # '='
-    62: 253,  # '>'
-    63: 253,  # '?'
-    64: 253,  # '@'
-    65: 77,  # 'A'
-    66: 90,  # 'B'
-    67: 99,  # 'C'
-    68: 100,  # 'D'
-    69: 72,  # 'E'
-    70: 109,  # 'F'
-    71: 107,  # 'G'
-    72: 101,  # 'H'
-    73: 79,  # 'I'
-    74: 185,  # 'J'
-    75: 81,  # 'K'
-    76: 102,  # 'L'
-    77: 76,  # 'M'
-    78: 94,  # 'N'
-    79: 82,  # 'O'
-    80: 110,  # 'P'
-    81: 186,  # 'Q'
-    82: 108,  # 'R'
-    83: 91,  # 'S'
-    84: 74,  # 'T'
-    85: 119,  # 'U'
-    86: 84,  # 'V'
-    87: 96,  # 'W'
-    88: 111,  # 'X'
-    89: 187,  # 'Y'
-    90: 115,  # 'Z'
-    91: 253,  # '['
-    92: 253,  # '\\'
-    93: 253,  # ']'
-    94: 253,  # '^'
-    95: 253,  # '_'
-    96: 253,  # '`'
-    97: 65,  # 'a'
-    98: 69,  # 'b'
-    99: 70,  # 'c'
-    100: 66,  # 'd'
-    101: 63,  # 'e'
-    102: 68,  # 'f'
-    103: 112,  # 'g'
-    104: 103,  # 'h'
-    105: 92,  # 'i'
-    106: 194,  # 'j'
-    107: 104,  # 'k'
-    108: 95,  # 'l'
-    109: 86,  # 'm'
-    110: 87,  # 'n'
-    111: 71,  # 'o'
-    112: 116,  # 'p'
-    113: 195,  # 'q'
-    114: 85,  # 'r'
-    115: 93,  # 's'
-    116: 97,  # 't'
-    117: 113,  # 'u'
-    118: 196,  # 'v'
-    119: 197,  # 'w'
-    120: 198,  # 'x'
-    121: 199,  # 'y'
-    122: 200,  # 'z'
-    123: 253,  # '{'
-    124: 253,  # '|'
-    125: 253,  # '}'
-    126: 253,  # '~'
-    127: 253,  # '\x7f'
-    128: 206,  # 'Ђ'
-    129: 207,  # 'Ѓ'
-    130: 208,  # '‚'
-    131: 209,  # 'ѓ'
-    132: 210,  # '„'
-    133: 211,  # '…'
-    134: 212,  # '†'
-    135: 213,  # '‡'
-    136: 120,  # '€'
-    137: 214,  # '‰'
-    138: 215,  # 'Љ'
-    139: 216,  # '‹'
-    140: 217,  # 'Њ'
-    141: 218,  # 'Ќ'
-    142: 219,  # 'Ћ'
-    143: 220,  # 'Џ'
-    144: 221,  # 'ђ'
-    145: 78,  # '‘'
-    146: 64,  # '’'
-    147: 83,  # '“'
-    148: 121,  # '”'
-    149: 98,  # '•'
-    150: 117,  # '–'
-    151: 105,  # '—'
-    152: 222,  # None
-    153: 223,  # '™'
-    154: 224,  # 'љ'
-    155: 225,  # '›'
-    156: 226,  # 'њ'
-    157: 227,  # 'ќ'
-    158: 228,  # 'ћ'
-    159: 229,  # 'џ'
-    160: 88,  # '\xa0'
-    161: 230,  # 'Ў'
-    162: 231,  # 'ў'
-    163: 232,  # 'Ј'
-    164: 233,  # '¤'
-    165: 122,  # 'Ґ'
-    166: 89,  # '¦'
-    167: 106,  # '§'
-    168: 234,  # 'Ё'
-    169: 235,  # '©'
-    170: 236,  # 'Є'
-    171: 237,  # '«'
-    172: 238,  # '¬'
-    173: 45,  # '\xad'
-    174: 239,  # '®'
-    175: 240,  # 'Ї'
-    176: 73,  # '°'
-    177: 80,  # '±'
-    178: 118,  # 'І'
-    179: 114,  # 'і'
-    180: 241,  # 'ґ'
-    181: 242,  # 'µ'
-    182: 243,  # '¶'
-    183: 244,  # '·'
-    184: 245,  # 'ё'
-    185: 62,  # '№'
-    186: 58,  # 'є'
-    187: 246,  # '»'
-    188: 247,  # 'ј'
-    189: 248,  # 'Ѕ'
-    190: 249,  # 'ѕ'
-    191: 250,  # 'ї'
-    192: 31,  # 'А'
-    193: 32,  # 'Б'
-    194: 35,  # 'В'
-    195: 43,  # 'Г'
-    196: 37,  # 'Д'
-    197: 44,  # 'Е'
-    198: 55,  # 'Ж'
-    199: 47,  # 'З'
-    200: 40,  # 'И'
-    201: 59,  # 'Й'
-    202: 33,  # 'К'
-    203: 46,  # 'Л'
-    204: 38,  # 'М'
-    205: 36,  # 'Н'
-    206: 41,  # 'О'
-    207: 30,  # 'П'
-    208: 39,  # 'Р'
-    209: 28,  # 'С'
-    210: 34,  # 'Т'
-    211: 51,  # 'У'
-    212: 48,  # 'Ф'
-    213: 49,  # 'Х'
-    214: 53,  # 'Ц'
-    215: 50,  # 'Ч'
-    216: 54,  # 'Ш'
-    217: 57,  # 'Щ'
-    218: 61,  # 'Ъ'
-    219: 251,  # 'Ы'
-    220: 67,  # 'Ь'
-    221: 252,  # 'Э'
-    222: 60,  # 'Ю'
-    223: 56,  # 'Я'
-    224: 1,  # 'а'
-    225: 18,  # 'б'
-    226: 9,  # 'в'
-    227: 20,  # 'г'
-    228: 11,  # 'д'
-    229: 3,  # 'е'
-    230: 23,  # 'ж'
-    231: 15,  # 'з'
-    232: 2,  # 'и'
-    233: 26,  # 'й'
-    234: 12,  # 'к'
-    235: 10,  # 'л'
-    236: 14,  # 'м'
-    237: 6,  # 'н'
-    238: 4,  # 'о'
-    239: 13,  # 'п'
-    240: 7,  # 'р'
-    241: 8,  # 'с'
-    242: 5,  # 'т'
-    243: 19,  # 'у'
-    244: 29,  # 'ф'
-    245: 25,  # 'х'
-    246: 22,  # 'ц'
-    247: 21,  # 'ч'
-    248: 27,  # 'ш'
-    249: 24,  # 'щ'
-    250: 17,  # 'ъ'
-    251: 75,  # 'ы'
-    252: 52,  # 'ь'
-    253: 253,  # 'э'
-    254: 42,  # 'ю'
-    255: 16,  # 'я'
+     0: 255,  # '\x00'
+     1: 255,  # '\x01'
+     2: 255,  # '\x02'
+     3: 255,  # '\x03'
+     4: 255,  # '\x04'
+     5: 255,  # '\x05'
+     6: 255,  # '\x06'
+     7: 255,  # '\x07'
+     8: 255,  # '\x08'
+     9: 255,  # '\t'
+     10: 254,  # '\n'
+     11: 255,  # '\x0b'
+     12: 255,  # '\x0c'
+     13: 254,  # '\r'
+     14: 255,  # '\x0e'
+     15: 255,  # '\x0f'
+     16: 255,  # '\x10'
+     17: 255,  # '\x11'
+     18: 255,  # '\x12'
+     19: 255,  # '\x13'
+     20: 255,  # '\x14'
+     21: 255,  # '\x15'
+     22: 255,  # '\x16'
+     23: 255,  # '\x17'
+     24: 255,  # '\x18'
+     25: 255,  # '\x19'
+     26: 255,  # '\x1a'
+     27: 255,  # '\x1b'
+     28: 255,  # '\x1c'
+     29: 255,  # '\x1d'
+     30: 255,  # '\x1e'
+     31: 255,  # '\x1f'
+     32: 253,  # ' '
+     33: 253,  # '!'
+     34: 253,  # '"'
+     35: 253,  # '#'
+     36: 253,  # '$'
+     37: 253,  # '%'
+     38: 253,  # '&'
+     39: 253,  # "'"
+     40: 253,  # '('
+     41: 253,  # ')'
+     42: 253,  # '*'
+     43: 253,  # '+'
+     44: 253,  # ','
+     45: 253,  # '-'
+     46: 253,  # '.'
+     47: 253,  # '/'
+     48: 252,  # '0'
+     49: 252,  # '1'
+     50: 252,  # '2'
+     51: 252,  # '3'
+     52: 252,  # '4'
+     53: 252,  # '5'
+     54: 252,  # '6'
+     55: 252,  # '7'
+     56: 252,  # '8'
+     57: 252,  # '9'
+     58: 253,  # ':'
+     59: 253,  # ';'
+     60: 253,  # '<'
+     61: 253,  # '='
+     62: 253,  # '>'
+     63: 253,  # '?'
+     64: 253,  # '@'
+     65: 77,  # 'A'
+     66: 90,  # 'B'
+     67: 99,  # 'C'
+     68: 100,  # 'D'
+     69: 72,  # 'E'
+     70: 109,  # 'F'
+     71: 107,  # 'G'
+     72: 101,  # 'H'
+     73: 79,  # 'I'
+     74: 185,  # 'J'
+     75: 81,  # 'K'
+     76: 102,  # 'L'
+     77: 76,  # 'M'
+     78: 94,  # 'N'
+     79: 82,  # 'O'
+     80: 110,  # 'P'
+     81: 186,  # 'Q'
+     82: 108,  # 'R'
+     83: 91,  # 'S'
+     84: 74,  # 'T'
+     85: 119,  # 'U'
+     86: 84,  # 'V'
+     87: 96,  # 'W'
+     88: 111,  # 'X'
+     89: 187,  # 'Y'
+     90: 115,  # 'Z'
+     91: 253,  # '['
+     92: 253,  # '\\'
+     93: 253,  # ']'
+     94: 253,  # '^'
+     95: 253,  # '_'
+     96: 253,  # '`'
+     97: 65,  # 'a'
+     98: 69,  # 'b'
+     99: 70,  # 'c'
+     100: 66,  # 'd'
+     101: 63,  # 'e'
+     102: 68,  # 'f'
+     103: 112,  # 'g'
+     104: 103,  # 'h'
+     105: 92,  # 'i'
+     106: 194,  # 'j'
+     107: 104,  # 'k'
+     108: 95,  # 'l'
+     109: 86,  # 'm'
+     110: 87,  # 'n'
+     111: 71,  # 'o'
+     112: 116,  # 'p'
+     113: 195,  # 'q'
+     114: 85,  # 'r'
+     115: 93,  # 's'
+     116: 97,  # 't'
+     117: 113,  # 'u'
+     118: 196,  # 'v'
+     119: 197,  # 'w'
+     120: 198,  # 'x'
+     121: 199,  # 'y'
+     122: 200,  # 'z'
+     123: 253,  # '{'
+     124: 253,  # '|'
+     125: 253,  # '}'
+     126: 253,  # '~'
+     127: 253,  # '\x7f'
+     128: 206,  # 'Ђ'
+     129: 207,  # 'Ѓ'
+     130: 208,  # '‚'
+     131: 209,  # 'ѓ'
+     132: 210,  # '„'
+     133: 211,  # '…'
+     134: 212,  # '†'
+     135: 213,  # '‡'
+     136: 120,  # '€'
+     137: 214,  # '‰'
+     138: 215,  # 'Љ'
+     139: 216,  # '‹'
+     140: 217,  # 'Њ'
+     141: 218,  # 'Ќ'
+     142: 219,  # 'Ћ'
+     143: 220,  # 'Џ'
+     144: 221,  # 'ђ'
+     145: 78,  # '‘'
+     146: 64,  # '’'
+     147: 83,  # '“'
+     148: 121,  # '”'
+     149: 98,  # '•'
+     150: 117,  # '–'
+     151: 105,  # '—'
+     152: 222,  # None
+     153: 223,  # '™'
+     154: 224,  # 'љ'
+     155: 225,  # '›'
+     156: 226,  # 'њ'
+     157: 227,  # 'ќ'
+     158: 228,  # 'ћ'
+     159: 229,  # 'џ'
+     160: 88,  # '\xa0'
+     161: 230,  # 'Ў'
+     162: 231,  # 'ў'
+     163: 232,  # 'Ј'
+     164: 233,  # '¤'
+     165: 122,  # 'Ґ'
+     166: 89,  # '¦'
+     167: 106,  # '§'
+     168: 234,  # 'Ё'
+     169: 235,  # '©'
+     170: 236,  # 'Є'
+     171: 237,  # '«'
+     172: 238,  # '¬'
+     173: 45,  # '\xad'
+     174: 239,  # '®'
+     175: 240,  # 'Ї'
+     176: 73,  # '°'
+     177: 80,  # '±'
+     178: 118,  # 'І'
+     179: 114,  # 'і'
+     180: 241,  # 'ґ'
+     181: 242,  # 'µ'
+     182: 243,  # '¶'
+     183: 244,  # '·'
+     184: 245,  # 'ё'
+     185: 62,  # '№'
+     186: 58,  # 'є'
+     187: 246,  # '»'
+     188: 247,  # 'ј'
+     189: 248,  # 'Ѕ'
+     190: 249,  # 'ѕ'
+     191: 250,  # 'ї'
+     192: 31,  # 'А'
+     193: 32,  # 'Б'
+     194: 35,  # 'В'
+     195: 43,  # 'Г'
+     196: 37,  # 'Д'
+     197: 44,  # 'Е'
+     198: 55,  # 'Ж'
+     199: 47,  # 'З'
+     200: 40,  # 'И'
+     201: 59,  # 'Й'
+     202: 33,  # 'К'
+     203: 46,  # 'Л'
+     204: 38,  # 'М'
+     205: 36,  # 'Н'
+     206: 41,  # 'О'
+     207: 30,  # 'П'
+     208: 39,  # 'Р'
+     209: 28,  # 'С'
+     210: 34,  # 'Т'
+     211: 51,  # 'У'
+     212: 48,  # 'Ф'
+     213: 49,  # 'Х'
+     214: 53,  # 'Ц'
+     215: 50,  # 'Ч'
+     216: 54,  # 'Ш'
+     217: 57,  # 'Щ'
+     218: 61,  # 'Ъ'
+     219: 251,  # 'Ы'
+     220: 67,  # 'Ь'
+     221: 252,  # 'Э'
+     222: 60,  # 'Ю'
+     223: 56,  # 'Я'
+     224: 1,  # 'а'
+     225: 18,  # 'б'
+     226: 9,  # 'в'
+     227: 20,  # 'г'
+     228: 11,  # 'д'
+     229: 3,  # 'е'
+     230: 23,  # 'ж'
+     231: 15,  # 'з'
+     232: 2,  # 'и'
+     233: 26,  # 'й'
+     234: 12,  # 'к'
+     235: 10,  # 'л'
+     236: 14,  # 'м'
+     237: 6,  # 'н'
+     238: 4,  # 'о'
+     239: 13,  # 'п'
+     240: 7,  # 'р'
+     241: 8,  # 'с'
+     242: 5,  # 'т'
+     243: 19,  # 'у'
+     244: 29,  # 'ф'
+     245: 25,  # 'х'
+     246: 22,  # 'ц'
+     247: 21,  # 'ч'
+     248: 27,  # 'ш'
+     249: 24,  # 'щ'
+     250: 17,  # 'ъ'
+     251: 75,  # 'ы'
+     252: 52,  # 'ь'
+     253: 253,  # 'э'
+     254: 42,  # 'ю'
+     255: 16,  # 'я'
 }
 
-WINDOWS_1251_BULGARIAN_MODEL = SingleByteCharSetModel(
-    charset_name="windows-1251",
-    language="Bulgarian",
-    char_to_order_map=WINDOWS_1251_BULGARIAN_CHAR_TO_ORDER,
-    language_model=BULGARIAN_LANG_MODEL,
-    typical_positive_ratio=0.969392,
-    keep_ascii_letters=False,
-    alphabet="АБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЬЮЯабвгдежзийклмнопрстуфхцчшщъьюя",
-)
+WINDOWS_1251_BULGARIAN_MODEL = SingleByteCharSetModel(charset_name='windows-1251',
+                                                      language='Bulgarian',
+                                                      char_to_order_map=WINDOWS_1251_BULGARIAN_CHAR_TO_ORDER,
+                                                      language_model=BULGARIAN_LANG_MODEL,
+                                                      typical_positive_ratio=0.969392,
+                                                      keep_ascii_letters=False,
+                                                      alphabet='АБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЬЮЯабвгдежзийклмнопрстуфхцчшщъьюя')
+
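[Reviewer note, not part of the patch: the hunks in these vendored chardet files only revert formatting (double quotes to single quotes, black-style call layout to hanging indent) and add shebang/encoding header lines; every key/value in the char_to_order_map tables and every SingleByteCharSetModel field is unchanged on both sides. For context, a minimal sketch of how one of these model constants is consumed, assuming the standalone chardet package (the same code pip vendors here) is installed; the sample byte string is illustrative only:

    from chardet.sbcharsetprober import SingleByteCharSetProber
    from chardet.langbulgarianmodel import WINDOWS_1251_BULGARIAN_MODEL

    # The prober maps each input byte through char_to_order_map, then scores
    # consecutive order pairs against the language model to rate the guess.
    prober = SingleByteCharSetProber(WINDOWS_1251_BULGARIAN_MODEL)
    prober.feed("Здравей, свят".encode("windows-1251"))  # illustrative input
    print(prober.charset_name, prober.get_confidence())
]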
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/langgreekmodel.py b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/langgreekmodel.py
index cfb8639..d99528e 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/langgreekmodel.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/langgreekmodel.py
@@ -1,5 +1,9 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
 from pip._vendor.chardet.sbcharsetprober import SingleByteCharSetModel
 
+
 # 3: Positive
 # 2: Likely
 # 1: Unlikely
@@ -3859,539 +3863,536 @@
 
 # Character Mapping Table(s):
 WINDOWS_1253_GREEK_CHAR_TO_ORDER = {
-    0: 255,  # '\x00'
-    1: 255,  # '\x01'
-    2: 255,  # '\x02'
-    3: 255,  # '\x03'
-    4: 255,  # '\x04'
-    5: 255,  # '\x05'
-    6: 255,  # '\x06'
-    7: 255,  # '\x07'
-    8: 255,  # '\x08'
-    9: 255,  # '\t'
-    10: 254,  # '\n'
-    11: 255,  # '\x0b'
-    12: 255,  # '\x0c'
-    13: 254,  # '\r'
-    14: 255,  # '\x0e'
-    15: 255,  # '\x0f'
-    16: 255,  # '\x10'
-    17: 255,  # '\x11'
-    18: 255,  # '\x12'
-    19: 255,  # '\x13'
-    20: 255,  # '\x14'
-    21: 255,  # '\x15'
-    22: 255,  # '\x16'
-    23: 255,  # '\x17'
-    24: 255,  # '\x18'
-    25: 255,  # '\x19'
-    26: 255,  # '\x1a'
-    27: 255,  # '\x1b'
-    28: 255,  # '\x1c'
-    29: 255,  # '\x1d'
-    30: 255,  # '\x1e'
-    31: 255,  # '\x1f'
-    32: 253,  # ' '
-    33: 253,  # '!'
-    34: 253,  # '"'
-    35: 253,  # '#'
-    36: 253,  # '$'
-    37: 253,  # '%'
-    38: 253,  # '&'
-    39: 253,  # "'"
-    40: 253,  # '('
-    41: 253,  # ')'
-    42: 253,  # '*'
-    43: 253,  # '+'
-    44: 253,  # ','
-    45: 253,  # '-'
-    46: 253,  # '.'
-    47: 253,  # '/'
-    48: 252,  # '0'
-    49: 252,  # '1'
-    50: 252,  # '2'
-    51: 252,  # '3'
-    52: 252,  # '4'
-    53: 252,  # '5'
-    54: 252,  # '6'
-    55: 252,  # '7'
-    56: 252,  # '8'
-    57: 252,  # '9'
-    58: 253,  # ':'
-    59: 253,  # ';'
-    60: 253,  # '<'
-    61: 253,  # '='
-    62: 253,  # '>'
-    63: 253,  # '?'
-    64: 253,  # '@'
-    65: 82,  # 'A'
-    66: 100,  # 'B'
-    67: 104,  # 'C'
-    68: 94,  # 'D'
-    69: 98,  # 'E'
-    70: 101,  # 'F'
-    71: 116,  # 'G'
-    72: 102,  # 'H'
-    73: 111,  # 'I'
-    74: 187,  # 'J'
-    75: 117,  # 'K'
-    76: 92,  # 'L'
-    77: 88,  # 'M'
-    78: 113,  # 'N'
-    79: 85,  # 'O'
-    80: 79,  # 'P'
-    81: 118,  # 'Q'
-    82: 105,  # 'R'
-    83: 83,  # 'S'
-    84: 67,  # 'T'
-    85: 114,  # 'U'
-    86: 119,  # 'V'
-    87: 95,  # 'W'
-    88: 99,  # 'X'
-    89: 109,  # 'Y'
-    90: 188,  # 'Z'
-    91: 253,  # '['
-    92: 253,  # '\\'
-    93: 253,  # ']'
-    94: 253,  # '^'
-    95: 253,  # '_'
-    96: 253,  # '`'
-    97: 72,  # 'a'
-    98: 70,  # 'b'
-    99: 80,  # 'c'
-    100: 81,  # 'd'
-    101: 60,  # 'e'
-    102: 96,  # 'f'
-    103: 93,  # 'g'
-    104: 89,  # 'h'
-    105: 68,  # 'i'
-    106: 120,  # 'j'
-    107: 97,  # 'k'
-    108: 77,  # 'l'
-    109: 86,  # 'm'
-    110: 69,  # 'n'
-    111: 55,  # 'o'
-    112: 78,  # 'p'
-    113: 115,  # 'q'
-    114: 65,  # 'r'
-    115: 66,  # 's'
-    116: 58,  # 't'
-    117: 76,  # 'u'
-    118: 106,  # 'v'
-    119: 103,  # 'w'
-    120: 87,  # 'x'
-    121: 107,  # 'y'
-    122: 112,  # 'z'
-    123: 253,  # '{'
-    124: 253,  # '|'
-    125: 253,  # '}'
-    126: 253,  # '~'
-    127: 253,  # '\x7f'
-    128: 255,  # '€'
-    129: 255,  # None
-    130: 255,  # '‚'
-    131: 255,  # 'ƒ'
-    132: 255,  # '„'
-    133: 255,  # '…'
-    134: 255,  # '†'
-    135: 255,  # '‡'
-    136: 255,  # None
-    137: 255,  # '‰'
-    138: 255,  # None
-    139: 255,  # '‹'
-    140: 255,  # None
-    141: 255,  # None
-    142: 255,  # None
-    143: 255,  # None
-    144: 255,  # None
-    145: 255,  # '‘'
-    146: 255,  # '’'
-    147: 255,  # '“'
-    148: 255,  # '”'
-    149: 255,  # '•'
-    150: 255,  # '–'
-    151: 255,  # '—'
-    152: 255,  # None
-    153: 255,  # '™'
-    154: 255,  # None
-    155: 255,  # '›'
-    156: 255,  # None
-    157: 255,  # None
-    158: 255,  # None
-    159: 255,  # None
-    160: 253,  # '\xa0'
-    161: 233,  # '΅'
-    162: 61,  # 'Ά'
-    163: 253,  # '£'
-    164: 253,  # '¤'
-    165: 253,  # '¥'
-    166: 253,  # '¦'
-    167: 253,  # '§'
-    168: 253,  # '¨'
-    169: 253,  # '©'
-    170: 253,  # None
-    171: 253,  # '«'
-    172: 253,  # '¬'
-    173: 74,  # '\xad'
-    174: 253,  # '®'
-    175: 253,  # '―'
-    176: 253,  # '°'
-    177: 253,  # '±'
-    178: 253,  # '²'
-    179: 253,  # '³'
-    180: 247,  # '΄'
-    181: 253,  # 'µ'
-    182: 253,  # '¶'
-    183: 36,  # '·'
-    184: 46,  # 'Έ'
-    185: 71,  # 'Ή'
-    186: 73,  # 'Ί'
-    187: 253,  # '»'
-    188: 54,  # 'Ό'
-    189: 253,  # '½'
-    190: 108,  # 'Ύ'
-    191: 123,  # 'Ώ'
-    192: 110,  # 'ΐ'
-    193: 31,  # 'Α'
-    194: 51,  # 'Β'
-    195: 43,  # 'Γ'
-    196: 41,  # 'Δ'
-    197: 34,  # 'Ε'
-    198: 91,  # 'Ζ'
-    199: 40,  # 'Η'
-    200: 52,  # 'Θ'
-    201: 47,  # 'Ι'
-    202: 44,  # 'Κ'
-    203: 53,  # 'Λ'
-    204: 38,  # 'Μ'
-    205: 49,  # 'Ν'
-    206: 59,  # 'Ξ'
-    207: 39,  # 'Ο'
-    208: 35,  # 'Π'
-    209: 48,  # 'Ρ'
-    210: 250,  # None
-    211: 37,  # 'Σ'
-    212: 33,  # 'Τ'
-    213: 45,  # 'Υ'
-    214: 56,  # 'Φ'
-    215: 50,  # 'Χ'
-    216: 84,  # 'Ψ'
-    217: 57,  # 'Ω'
-    218: 120,  # 'Ϊ'
-    219: 121,  # 'Ϋ'
-    220: 17,  # 'ά'
-    221: 18,  # 'έ'
-    222: 22,  # 'ή'
-    223: 15,  # 'ί'
-    224: 124,  # 'ΰ'
-    225: 1,  # 'α'
-    226: 29,  # 'β'
-    227: 20,  # 'γ'
-    228: 21,  # 'δ'
-    229: 3,  # 'ε'
-    230: 32,  # 'ζ'
-    231: 13,  # 'η'
-    232: 25,  # 'θ'
-    233: 5,  # 'ι'
-    234: 11,  # 'κ'
-    235: 16,  # 'λ'
-    236: 10,  # 'μ'
-    237: 6,  # 'ν'
-    238: 30,  # 'ξ'
-    239: 4,  # 'ο'
-    240: 9,  # 'π'
-    241: 8,  # 'ρ'
-    242: 14,  # 'ς'
-    243: 7,  # 'σ'
-    244: 2,  # 'τ'
-    245: 12,  # 'υ'
-    246: 28,  # 'φ'
-    247: 23,  # 'χ'
-    248: 42,  # 'ψ'
-    249: 24,  # 'ω'
-    250: 64,  # 'ϊ'
-    251: 75,  # 'ϋ'
-    252: 19,  # 'ό'
-    253: 26,  # 'ύ'
-    254: 27,  # 'ώ'
-    255: 253,  # None
+     0: 255,  # '\x00'
+     1: 255,  # '\x01'
+     2: 255,  # '\x02'
+     3: 255,  # '\x03'
+     4: 255,  # '\x04'
+     5: 255,  # '\x05'
+     6: 255,  # '\x06'
+     7: 255,  # '\x07'
+     8: 255,  # '\x08'
+     9: 255,  # '\t'
+     10: 254,  # '\n'
+     11: 255,  # '\x0b'
+     12: 255,  # '\x0c'
+     13: 254,  # '\r'
+     14: 255,  # '\x0e'
+     15: 255,  # '\x0f'
+     16: 255,  # '\x10'
+     17: 255,  # '\x11'
+     18: 255,  # '\x12'
+     19: 255,  # '\x13'
+     20: 255,  # '\x14'
+     21: 255,  # '\x15'
+     22: 255,  # '\x16'
+     23: 255,  # '\x17'
+     24: 255,  # '\x18'
+     25: 255,  # '\x19'
+     26: 255,  # '\x1a'
+     27: 255,  # '\x1b'
+     28: 255,  # '\x1c'
+     29: 255,  # '\x1d'
+     30: 255,  # '\x1e'
+     31: 255,  # '\x1f'
+     32: 253,  # ' '
+     33: 253,  # '!'
+     34: 253,  # '"'
+     35: 253,  # '#'
+     36: 253,  # '$'
+     37: 253,  # '%'
+     38: 253,  # '&'
+     39: 253,  # "'"
+     40: 253,  # '('
+     41: 253,  # ')'
+     42: 253,  # '*'
+     43: 253,  # '+'
+     44: 253,  # ','
+     45: 253,  # '-'
+     46: 253,  # '.'
+     47: 253,  # '/'
+     48: 252,  # '0'
+     49: 252,  # '1'
+     50: 252,  # '2'
+     51: 252,  # '3'
+     52: 252,  # '4'
+     53: 252,  # '5'
+     54: 252,  # '6'
+     55: 252,  # '7'
+     56: 252,  # '8'
+     57: 252,  # '9'
+     58: 253,  # ':'
+     59: 253,  # ';'
+     60: 253,  # '<'
+     61: 253,  # '='
+     62: 253,  # '>'
+     63: 253,  # '?'
+     64: 253,  # '@'
+     65: 82,  # 'A'
+     66: 100,  # 'B'
+     67: 104,  # 'C'
+     68: 94,  # 'D'
+     69: 98,  # 'E'
+     70: 101,  # 'F'
+     71: 116,  # 'G'
+     72: 102,  # 'H'
+     73: 111,  # 'I'
+     74: 187,  # 'J'
+     75: 117,  # 'K'
+     76: 92,  # 'L'
+     77: 88,  # 'M'
+     78: 113,  # 'N'
+     79: 85,  # 'O'
+     80: 79,  # 'P'
+     81: 118,  # 'Q'
+     82: 105,  # 'R'
+     83: 83,  # 'S'
+     84: 67,  # 'T'
+     85: 114,  # 'U'
+     86: 119,  # 'V'
+     87: 95,  # 'W'
+     88: 99,  # 'X'
+     89: 109,  # 'Y'
+     90: 188,  # 'Z'
+     91: 253,  # '['
+     92: 253,  # '\\'
+     93: 253,  # ']'
+     94: 253,  # '^'
+     95: 253,  # '_'
+     96: 253,  # '`'
+     97: 72,  # 'a'
+     98: 70,  # 'b'
+     99: 80,  # 'c'
+     100: 81,  # 'd'
+     101: 60,  # 'e'
+     102: 96,  # 'f'
+     103: 93,  # 'g'
+     104: 89,  # 'h'
+     105: 68,  # 'i'
+     106: 120,  # 'j'
+     107: 97,  # 'k'
+     108: 77,  # 'l'
+     109: 86,  # 'm'
+     110: 69,  # 'n'
+     111: 55,  # 'o'
+     112: 78,  # 'p'
+     113: 115,  # 'q'
+     114: 65,  # 'r'
+     115: 66,  # 's'
+     116: 58,  # 't'
+     117: 76,  # 'u'
+     118: 106,  # 'v'
+     119: 103,  # 'w'
+     120: 87,  # 'x'
+     121: 107,  # 'y'
+     122: 112,  # 'z'
+     123: 253,  # '{'
+     124: 253,  # '|'
+     125: 253,  # '}'
+     126: 253,  # '~'
+     127: 253,  # '\x7f'
+     128: 255,  # '€'
+     129: 255,  # None
+     130: 255,  # '‚'
+     131: 255,  # 'ƒ'
+     132: 255,  # '„'
+     133: 255,  # '…'
+     134: 255,  # '†'
+     135: 255,  # '‡'
+     136: 255,  # None
+     137: 255,  # '‰'
+     138: 255,  # None
+     139: 255,  # '‹'
+     140: 255,  # None
+     141: 255,  # None
+     142: 255,  # None
+     143: 255,  # None
+     144: 255,  # None
+     145: 255,  # '‘'
+     146: 255,  # '’'
+     147: 255,  # '“'
+     148: 255,  # '”'
+     149: 255,  # '•'
+     150: 255,  # '–'
+     151: 255,  # '—'
+     152: 255,  # None
+     153: 255,  # '™'
+     154: 255,  # None
+     155: 255,  # '›'
+     156: 255,  # None
+     157: 255,  # None
+     158: 255,  # None
+     159: 255,  # None
+     160: 253,  # '\xa0'
+     161: 233,  # '΅'
+     162: 61,  # 'Ά'
+     163: 253,  # '£'
+     164: 253,  # '¤'
+     165: 253,  # '¥'
+     166: 253,  # '¦'
+     167: 253,  # '§'
+     168: 253,  # '¨'
+     169: 253,  # '©'
+     170: 253,  # None
+     171: 253,  # '«'
+     172: 253,  # '¬'
+     173: 74,  # '\xad'
+     174: 253,  # '®'
+     175: 253,  # '―'
+     176: 253,  # '°'
+     177: 253,  # '±'
+     178: 253,  # '²'
+     179: 253,  # '³'
+     180: 247,  # '΄'
+     181: 253,  # 'µ'
+     182: 253,  # '¶'
+     183: 36,  # '·'
+     184: 46,  # 'Έ'
+     185: 71,  # 'Ή'
+     186: 73,  # 'Ί'
+     187: 253,  # '»'
+     188: 54,  # 'Ό'
+     189: 253,  # '½'
+     190: 108,  # 'Ύ'
+     191: 123,  # 'Ώ'
+     192: 110,  # 'ΐ'
+     193: 31,  # 'Α'
+     194: 51,  # 'Β'
+     195: 43,  # 'Γ'
+     196: 41,  # 'Δ'
+     197: 34,  # 'Ε'
+     198: 91,  # 'Ζ'
+     199: 40,  # 'Η'
+     200: 52,  # 'Θ'
+     201: 47,  # 'Ι'
+     202: 44,  # 'Κ'
+     203: 53,  # 'Λ'
+     204: 38,  # 'Μ'
+     205: 49,  # 'Ν'
+     206: 59,  # 'Ξ'
+     207: 39,  # 'Ο'
+     208: 35,  # 'Π'
+     209: 48,  # 'Ρ'
+     210: 250,  # None
+     211: 37,  # 'Σ'
+     212: 33,  # 'Τ'
+     213: 45,  # 'Υ'
+     214: 56,  # 'Φ'
+     215: 50,  # 'Χ'
+     216: 84,  # 'Ψ'
+     217: 57,  # 'Ω'
+     218: 120,  # 'Ϊ'
+     219: 121,  # 'Ϋ'
+     220: 17,  # 'ά'
+     221: 18,  # 'έ'
+     222: 22,  # 'ή'
+     223: 15,  # 'ί'
+     224: 124,  # 'ΰ'
+     225: 1,  # 'α'
+     226: 29,  # 'β'
+     227: 20,  # 'γ'
+     228: 21,  # 'δ'
+     229: 3,  # 'ε'
+     230: 32,  # 'ζ'
+     231: 13,  # 'η'
+     232: 25,  # 'θ'
+     233: 5,  # 'ι'
+     234: 11,  # 'κ'
+     235: 16,  # 'λ'
+     236: 10,  # 'μ'
+     237: 6,  # 'ν'
+     238: 30,  # 'ξ'
+     239: 4,  # 'ο'
+     240: 9,  # 'π'
+     241: 8,  # 'ρ'
+     242: 14,  # 'ς'
+     243: 7,  # 'σ'
+     244: 2,  # 'τ'
+     245: 12,  # 'υ'
+     246: 28,  # 'φ'
+     247: 23,  # 'χ'
+     248: 42,  # 'ψ'
+     249: 24,  # 'ω'
+     250: 64,  # 'ϊ'
+     251: 75,  # 'ϋ'
+     252: 19,  # 'ό'
+     253: 26,  # 'ύ'
+     254: 27,  # 'ώ'
+     255: 253,  # None
 }
 
-WINDOWS_1253_GREEK_MODEL = SingleByteCharSetModel(
-    charset_name="windows-1253",
-    language="Greek",
-    char_to_order_map=WINDOWS_1253_GREEK_CHAR_TO_ORDER,
-    language_model=GREEK_LANG_MODEL,
-    typical_positive_ratio=0.982851,
-    keep_ascii_letters=False,
-    alphabet="ΆΈΉΊΌΎΏΑΒΓΔΕΖΗΘΙΚΛΜΝΞΟΠΡΣΤΥΦΧΨΩάέήίαβγδεζηθικλμνξοπρςστυφχψωόύώ",
-)
+WINDOWS_1253_GREEK_MODEL = SingleByteCharSetModel(charset_name='windows-1253',
+                                                  language='Greek',
+                                                  char_to_order_map=WINDOWS_1253_GREEK_CHAR_TO_ORDER,
+                                                  language_model=GREEK_LANG_MODEL,
+                                                  typical_positive_ratio=0.982851,
+                                                  keep_ascii_letters=False,
+                                                  alphabet='ΆΈΉΊΌΎΏΑΒΓΔΕΖΗΘΙΚΛΜΝΞΟΠΡΣΤΥΦΧΨΩάέήίαβγδεζηθικλμνξοπρςστυφχψωόύώ')
 
 ISO_8859_7_GREEK_CHAR_TO_ORDER = {
-    0: 255,  # '\x00'
-    1: 255,  # '\x01'
-    2: 255,  # '\x02'
-    3: 255,  # '\x03'
-    4: 255,  # '\x04'
-    5: 255,  # '\x05'
-    6: 255,  # '\x06'
-    7: 255,  # '\x07'
-    8: 255,  # '\x08'
-    9: 255,  # '\t'
-    10: 254,  # '\n'
-    11: 255,  # '\x0b'
-    12: 255,  # '\x0c'
-    13: 254,  # '\r'
-    14: 255,  # '\x0e'
-    15: 255,  # '\x0f'
-    16: 255,  # '\x10'
-    17: 255,  # '\x11'
-    18: 255,  # '\x12'
-    19: 255,  # '\x13'
-    20: 255,  # '\x14'
-    21: 255,  # '\x15'
-    22: 255,  # '\x16'
-    23: 255,  # '\x17'
-    24: 255,  # '\x18'
-    25: 255,  # '\x19'
-    26: 255,  # '\x1a'
-    27: 255,  # '\x1b'
-    28: 255,  # '\x1c'
-    29: 255,  # '\x1d'
-    30: 255,  # '\x1e'
-    31: 255,  # '\x1f'
-    32: 253,  # ' '
-    33: 253,  # '!'
-    34: 253,  # '"'
-    35: 253,  # '#'
-    36: 253,  # '$'
-    37: 253,  # '%'
-    38: 253,  # '&'
-    39: 253,  # "'"
-    40: 253,  # '('
-    41: 253,  # ')'
-    42: 253,  # '*'
-    43: 253,  # '+'
-    44: 253,  # ','
-    45: 253,  # '-'
-    46: 253,  # '.'
-    47: 253,  # '/'
-    48: 252,  # '0'
-    49: 252,  # '1'
-    50: 252,  # '2'
-    51: 252,  # '3'
-    52: 252,  # '4'
-    53: 252,  # '5'
-    54: 252,  # '6'
-    55: 252,  # '7'
-    56: 252,  # '8'
-    57: 252,  # '9'
-    58: 253,  # ':'
-    59: 253,  # ';'
-    60: 253,  # '<'
-    61: 253,  # '='
-    62: 253,  # '>'
-    63: 253,  # '?'
-    64: 253,  # '@'
-    65: 82,  # 'A'
-    66: 100,  # 'B'
-    67: 104,  # 'C'
-    68: 94,  # 'D'
-    69: 98,  # 'E'
-    70: 101,  # 'F'
-    71: 116,  # 'G'
-    72: 102,  # 'H'
-    73: 111,  # 'I'
-    74: 187,  # 'J'
-    75: 117,  # 'K'
-    76: 92,  # 'L'
-    77: 88,  # 'M'
-    78: 113,  # 'N'
-    79: 85,  # 'O'
-    80: 79,  # 'P'
-    81: 118,  # 'Q'
-    82: 105,  # 'R'
-    83: 83,  # 'S'
-    84: 67,  # 'T'
-    85: 114,  # 'U'
-    86: 119,  # 'V'
-    87: 95,  # 'W'
-    88: 99,  # 'X'
-    89: 109,  # 'Y'
-    90: 188,  # 'Z'
-    91: 253,  # '['
-    92: 253,  # '\\'
-    93: 253,  # ']'
-    94: 253,  # '^'
-    95: 253,  # '_'
-    96: 253,  # '`'
-    97: 72,  # 'a'
-    98: 70,  # 'b'
-    99: 80,  # 'c'
-    100: 81,  # 'd'
-    101: 60,  # 'e'
-    102: 96,  # 'f'
-    103: 93,  # 'g'
-    104: 89,  # 'h'
-    105: 68,  # 'i'
-    106: 120,  # 'j'
-    107: 97,  # 'k'
-    108: 77,  # 'l'
-    109: 86,  # 'm'
-    110: 69,  # 'n'
-    111: 55,  # 'o'
-    112: 78,  # 'p'
-    113: 115,  # 'q'
-    114: 65,  # 'r'
-    115: 66,  # 's'
-    116: 58,  # 't'
-    117: 76,  # 'u'
-    118: 106,  # 'v'
-    119: 103,  # 'w'
-    120: 87,  # 'x'
-    121: 107,  # 'y'
-    122: 112,  # 'z'
-    123: 253,  # '{'
-    124: 253,  # '|'
-    125: 253,  # '}'
-    126: 253,  # '~'
-    127: 253,  # '\x7f'
-    128: 255,  # '\x80'
-    129: 255,  # '\x81'
-    130: 255,  # '\x82'
-    131: 255,  # '\x83'
-    132: 255,  # '\x84'
-    133: 255,  # '\x85'
-    134: 255,  # '\x86'
-    135: 255,  # '\x87'
-    136: 255,  # '\x88'
-    137: 255,  # '\x89'
-    138: 255,  # '\x8a'
-    139: 255,  # '\x8b'
-    140: 255,  # '\x8c'
-    141: 255,  # '\x8d'
-    142: 255,  # '\x8e'
-    143: 255,  # '\x8f'
-    144: 255,  # '\x90'
-    145: 255,  # '\x91'
-    146: 255,  # '\x92'
-    147: 255,  # '\x93'
-    148: 255,  # '\x94'
-    149: 255,  # '\x95'
-    150: 255,  # '\x96'
-    151: 255,  # '\x97'
-    152: 255,  # '\x98'
-    153: 255,  # '\x99'
-    154: 255,  # '\x9a'
-    155: 255,  # '\x9b'
-    156: 255,  # '\x9c'
-    157: 255,  # '\x9d'
-    158: 255,  # '\x9e'
-    159: 255,  # '\x9f'
-    160: 253,  # '\xa0'
-    161: 233,  # '‘'
-    162: 90,  # '’'
-    163: 253,  # '£'
-    164: 253,  # '€'
-    165: 253,  # '₯'
-    166: 253,  # '¦'
-    167: 253,  # '§'
-    168: 253,  # '¨'
-    169: 253,  # '©'
-    170: 253,  # 'ͺ'
-    171: 253,  # '«'
-    172: 253,  # '¬'
-    173: 74,  # '\xad'
-    174: 253,  # None
-    175: 253,  # '―'
-    176: 253,  # '°'
-    177: 253,  # '±'
-    178: 253,  # '²'
-    179: 253,  # '³'
-    180: 247,  # '΄'
-    181: 248,  # '΅'
-    182: 61,  # 'Ά'
-    183: 36,  # '·'
-    184: 46,  # 'Έ'
-    185: 71,  # 'Ή'
-    186: 73,  # 'Ί'
-    187: 253,  # '»'
-    188: 54,  # 'Ό'
-    189: 253,  # '½'
-    190: 108,  # 'Ύ'
-    191: 123,  # 'Ώ'
-    192: 110,  # 'ΐ'
-    193: 31,  # 'Α'
-    194: 51,  # 'Β'
-    195: 43,  # 'Γ'
-    196: 41,  # 'Δ'
-    197: 34,  # 'Ε'
-    198: 91,  # 'Ζ'
-    199: 40,  # 'Η'
-    200: 52,  # 'Θ'
-    201: 47,  # 'Ι'
-    202: 44,  # 'Κ'
-    203: 53,  # 'Λ'
-    204: 38,  # 'Μ'
-    205: 49,  # 'Ν'
-    206: 59,  # 'Ξ'
-    207: 39,  # 'Ο'
-    208: 35,  # 'Π'
-    209: 48,  # 'Ρ'
-    210: 250,  # None
-    211: 37,  # 'Σ'
-    212: 33,  # 'Τ'
-    213: 45,  # 'Υ'
-    214: 56,  # 'Φ'
-    215: 50,  # 'Χ'
-    216: 84,  # 'Ψ'
-    217: 57,  # 'Ω'
-    218: 120,  # 'Ϊ'
-    219: 121,  # 'Ϋ'
-    220: 17,  # 'ά'
-    221: 18,  # 'έ'
-    222: 22,  # 'ή'
-    223: 15,  # 'ί'
-    224: 124,  # 'ΰ'
-    225: 1,  # 'α'
-    226: 29,  # 'β'
-    227: 20,  # 'γ'
-    228: 21,  # 'δ'
-    229: 3,  # 'ε'
-    230: 32,  # 'ζ'
-    231: 13,  # 'η'
-    232: 25,  # 'θ'
-    233: 5,  # 'ι'
-    234: 11,  # 'κ'
-    235: 16,  # 'λ'
-    236: 10,  # 'μ'
-    237: 6,  # 'ν'
-    238: 30,  # 'ξ'
-    239: 4,  # 'ο'
-    240: 9,  # 'π'
-    241: 8,  # 'ρ'
-    242: 14,  # 'ς'
-    243: 7,  # 'σ'
-    244: 2,  # 'τ'
-    245: 12,  # 'υ'
-    246: 28,  # 'φ'
-    247: 23,  # 'χ'
-    248: 42,  # 'ψ'
-    249: 24,  # 'ω'
-    250: 64,  # 'ϊ'
-    251: 75,  # 'ϋ'
-    252: 19,  # 'ό'
-    253: 26,  # 'ύ'
-    254: 27,  # 'ώ'
-    255: 253,  # None
+     0: 255,  # '\x00'
+     1: 255,  # '\x01'
+     2: 255,  # '\x02'
+     3: 255,  # '\x03'
+     4: 255,  # '\x04'
+     5: 255,  # '\x05'
+     6: 255,  # '\x06'
+     7: 255,  # '\x07'
+     8: 255,  # '\x08'
+     9: 255,  # '\t'
+     10: 254,  # '\n'
+     11: 255,  # '\x0b'
+     12: 255,  # '\x0c'
+     13: 254,  # '\r'
+     14: 255,  # '\x0e'
+     15: 255,  # '\x0f'
+     16: 255,  # '\x10'
+     17: 255,  # '\x11'
+     18: 255,  # '\x12'
+     19: 255,  # '\x13'
+     20: 255,  # '\x14'
+     21: 255,  # '\x15'
+     22: 255,  # '\x16'
+     23: 255,  # '\x17'
+     24: 255,  # '\x18'
+     25: 255,  # '\x19'
+     26: 255,  # '\x1a'
+     27: 255,  # '\x1b'
+     28: 255,  # '\x1c'
+     29: 255,  # '\x1d'
+     30: 255,  # '\x1e'
+     31: 255,  # '\x1f'
+     32: 253,  # ' '
+     33: 253,  # '!'
+     34: 253,  # '"'
+     35: 253,  # '#'
+     36: 253,  # '$'
+     37: 253,  # '%'
+     38: 253,  # '&'
+     39: 253,  # "'"
+     40: 253,  # '('
+     41: 253,  # ')'
+     42: 253,  # '*'
+     43: 253,  # '+'
+     44: 253,  # ','
+     45: 253,  # '-'
+     46: 253,  # '.'
+     47: 253,  # '/'
+     48: 252,  # '0'
+     49: 252,  # '1'
+     50: 252,  # '2'
+     51: 252,  # '3'
+     52: 252,  # '4'
+     53: 252,  # '5'
+     54: 252,  # '6'
+     55: 252,  # '7'
+     56: 252,  # '8'
+     57: 252,  # '9'
+     58: 253,  # ':'
+     59: 253,  # ';'
+     60: 253,  # '<'
+     61: 253,  # '='
+     62: 253,  # '>'
+     63: 253,  # '?'
+     64: 253,  # '@'
+     65: 82,  # 'A'
+     66: 100,  # 'B'
+     67: 104,  # 'C'
+     68: 94,  # 'D'
+     69: 98,  # 'E'
+     70: 101,  # 'F'
+     71: 116,  # 'G'
+     72: 102,  # 'H'
+     73: 111,  # 'I'
+     74: 187,  # 'J'
+     75: 117,  # 'K'
+     76: 92,  # 'L'
+     77: 88,  # 'M'
+     78: 113,  # 'N'
+     79: 85,  # 'O'
+     80: 79,  # 'P'
+     81: 118,  # 'Q'
+     82: 105,  # 'R'
+     83: 83,  # 'S'
+     84: 67,  # 'T'
+     85: 114,  # 'U'
+     86: 119,  # 'V'
+     87: 95,  # 'W'
+     88: 99,  # 'X'
+     89: 109,  # 'Y'
+     90: 188,  # 'Z'
+     91: 253,  # '['
+     92: 253,  # '\\'
+     93: 253,  # ']'
+     94: 253,  # '^'
+     95: 253,  # '_'
+     96: 253,  # '`'
+     97: 72,  # 'a'
+     98: 70,  # 'b'
+     99: 80,  # 'c'
+     100: 81,  # 'd'
+     101: 60,  # 'e'
+     102: 96,  # 'f'
+     103: 93,  # 'g'
+     104: 89,  # 'h'
+     105: 68,  # 'i'
+     106: 120,  # 'j'
+     107: 97,  # 'k'
+     108: 77,  # 'l'
+     109: 86,  # 'm'
+     110: 69,  # 'n'
+     111: 55,  # 'o'
+     112: 78,  # 'p'
+     113: 115,  # 'q'
+     114: 65,  # 'r'
+     115: 66,  # 's'
+     116: 58,  # 't'
+     117: 76,  # 'u'
+     118: 106,  # 'v'
+     119: 103,  # 'w'
+     120: 87,  # 'x'
+     121: 107,  # 'y'
+     122: 112,  # 'z'
+     123: 253,  # '{'
+     124: 253,  # '|'
+     125: 253,  # '}'
+     126: 253,  # '~'
+     127: 253,  # '\x7f'
+     128: 255,  # '\x80'
+     129: 255,  # '\x81'
+     130: 255,  # '\x82'
+     131: 255,  # '\x83'
+     132: 255,  # '\x84'
+     133: 255,  # '\x85'
+     134: 255,  # '\x86'
+     135: 255,  # '\x87'
+     136: 255,  # '\x88'
+     137: 255,  # '\x89'
+     138: 255,  # '\x8a'
+     139: 255,  # '\x8b'
+     140: 255,  # '\x8c'
+     141: 255,  # '\x8d'
+     142: 255,  # '\x8e'
+     143: 255,  # '\x8f'
+     144: 255,  # '\x90'
+     145: 255,  # '\x91'
+     146: 255,  # '\x92'
+     147: 255,  # '\x93'
+     148: 255,  # '\x94'
+     149: 255,  # '\x95'
+     150: 255,  # '\x96'
+     151: 255,  # '\x97'
+     152: 255,  # '\x98'
+     153: 255,  # '\x99'
+     154: 255,  # '\x9a'
+     155: 255,  # '\x9b'
+     156: 255,  # '\x9c'
+     157: 255,  # '\x9d'
+     158: 255,  # '\x9e'
+     159: 255,  # '\x9f'
+     160: 253,  # '\xa0'
+     161: 233,  # '‘'
+     162: 90,  # '’'
+     163: 253,  # '£'
+     164: 253,  # '€'
+     165: 253,  # '₯'
+     166: 253,  # '¦'
+     167: 253,  # '§'
+     168: 253,  # '¨'
+     169: 253,  # '©'
+     170: 253,  # 'ͺ'
+     171: 253,  # '«'
+     172: 253,  # '¬'
+     173: 74,  # '\xad'
+     174: 253,  # None
+     175: 253,  # '―'
+     176: 253,  # '°'
+     177: 253,  # '±'
+     178: 253,  # '²'
+     179: 253,  # '³'
+     180: 247,  # '΄'
+     181: 248,  # '΅'
+     182: 61,  # 'Ά'
+     183: 36,  # '·'
+     184: 46,  # 'Έ'
+     185: 71,  # 'Ή'
+     186: 73,  # 'Ί'
+     187: 253,  # '»'
+     188: 54,  # 'Ό'
+     189: 253,  # '½'
+     190: 108,  # 'Ύ'
+     191: 123,  # 'Ώ'
+     192: 110,  # 'ΐ'
+     193: 31,  # 'Α'
+     194: 51,  # 'Β'
+     195: 43,  # 'Γ'
+     196: 41,  # 'Δ'
+     197: 34,  # 'Ε'
+     198: 91,  # 'Ζ'
+     199: 40,  # 'Η'
+     200: 52,  # 'Θ'
+     201: 47,  # 'Ι'
+     202: 44,  # 'Κ'
+     203: 53,  # 'Λ'
+     204: 38,  # 'Μ'
+     205: 49,  # 'Ν'
+     206: 59,  # 'Ξ'
+     207: 39,  # 'Ο'
+     208: 35,  # 'Π'
+     209: 48,  # 'Ρ'
+     210: 250,  # None
+     211: 37,  # 'Σ'
+     212: 33,  # 'Τ'
+     213: 45,  # 'Υ'
+     214: 56,  # 'Φ'
+     215: 50,  # 'Χ'
+     216: 84,  # 'Ψ'
+     217: 57,  # 'Ω'
+     218: 120,  # 'Ϊ'
+     219: 121,  # 'Ϋ'
+     220: 17,  # 'ά'
+     221: 18,  # 'έ'
+     222: 22,  # 'ή'
+     223: 15,  # 'ί'
+     224: 124,  # 'ΰ'
+     225: 1,  # 'α'
+     226: 29,  # 'β'
+     227: 20,  # 'γ'
+     228: 21,  # 'δ'
+     229: 3,  # 'ε'
+     230: 32,  # 'ζ'
+     231: 13,  # 'η'
+     232: 25,  # 'θ'
+     233: 5,  # 'ι'
+     234: 11,  # 'κ'
+     235: 16,  # 'λ'
+     236: 10,  # 'μ'
+     237: 6,  # 'ν'
+     238: 30,  # 'ξ'
+     239: 4,  # 'ο'
+     240: 9,  # 'π'
+     241: 8,  # 'ρ'
+     242: 14,  # 'ς'
+     243: 7,  # 'σ'
+     244: 2,  # 'τ'
+     245: 12,  # 'υ'
+     246: 28,  # 'φ'
+     247: 23,  # 'χ'
+     248: 42,  # 'ψ'
+     249: 24,  # 'ω'
+     250: 64,  # 'ϊ'
+     251: 75,  # 'ϋ'
+     252: 19,  # 'ό'
+     253: 26,  # 'ύ'
+     254: 27,  # 'ώ'
+     255: 253,  # None
 }
 
-ISO_8859_7_GREEK_MODEL = SingleByteCharSetModel(
-    charset_name="ISO-8859-7",
-    language="Greek",
-    char_to_order_map=ISO_8859_7_GREEK_CHAR_TO_ORDER,
-    language_model=GREEK_LANG_MODEL,
-    typical_positive_ratio=0.982851,
-    keep_ascii_letters=False,
-    alphabet="ΆΈΉΊΌΎΏΑΒΓΔΕΖΗΘΙΚΛΜΝΞΟΠΡΣΤΥΦΧΨΩάέήίαβγδεζηθικλμνξοπρςστυφχψωόύώ",
-)
+ISO_8859_7_GREEK_MODEL = SingleByteCharSetModel(charset_name='ISO-8859-7',
+                                                language='Greek',
+                                                char_to_order_map=ISO_8859_7_GREEK_CHAR_TO_ORDER,
+                                                language_model=GREEK_LANG_MODEL,
+                                                typical_positive_ratio=0.982851,
+                                                keep_ascii_letters=False,
+                                                alphabet='ΆΈΉΊΌΎΏΑΒΓΔΕΖΗΘΙΚΛΜΝΞΟΠΡΣΤΥΦΧΨΩάέήίαβγδεζηθικλμνξοπρςστυφχψωόύώ')
+
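[Reviewer note, not part of the patch: the two Greek hunks above are likewise formatting-only. The constants being reflowed are plain data; SingleByteCharSetModel is a namedtuple, so the fields can be inspected directly. A small sketch, again assuming the standalone chardet package:

    from chardet.langgreekmodel import ISO_8859_7_GREEK_MODEL

    # The fields patched above are inert data consumed by the prober.
    m = ISO_8859_7_GREEK_MODEL
    print(m.charset_name, m.language, m.typical_positive_ratio)
    # In char_to_order_map, values 253/254/255 mark symbols, line breaks and
    # control bytes rather than letters; low values are frequent letters.
]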
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/langhebrewmodel.py b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/langhebrewmodel.py
index 56d2975..484c652 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/langhebrewmodel.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/langhebrewmodel.py
@@ -1,5 +1,9 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
 from pip._vendor.chardet.sbcharsetprober import SingleByteCharSetModel
 
+
 # 3: Positive
 # 2: Likely
 # 1: Unlikely
@@ -4111,270 +4115,269 @@
 
 # Character Mapping Table(s):
 WINDOWS_1255_HEBREW_CHAR_TO_ORDER = {
-    0: 255,  # '\x00'
-    1: 255,  # '\x01'
-    2: 255,  # '\x02'
-    3: 255,  # '\x03'
-    4: 255,  # '\x04'
-    5: 255,  # '\x05'
-    6: 255,  # '\x06'
-    7: 255,  # '\x07'
-    8: 255,  # '\x08'
-    9: 255,  # '\t'
-    10: 254,  # '\n'
-    11: 255,  # '\x0b'
-    12: 255,  # '\x0c'
-    13: 254,  # '\r'
-    14: 255,  # '\x0e'
-    15: 255,  # '\x0f'
-    16: 255,  # '\x10'
-    17: 255,  # '\x11'
-    18: 255,  # '\x12'
-    19: 255,  # '\x13'
-    20: 255,  # '\x14'
-    21: 255,  # '\x15'
-    22: 255,  # '\x16'
-    23: 255,  # '\x17'
-    24: 255,  # '\x18'
-    25: 255,  # '\x19'
-    26: 255,  # '\x1a'
-    27: 255,  # '\x1b'
-    28: 255,  # '\x1c'
-    29: 255,  # '\x1d'
-    30: 255,  # '\x1e'
-    31: 255,  # '\x1f'
-    32: 253,  # ' '
-    33: 253,  # '!'
-    34: 253,  # '"'
-    35: 253,  # '#'
-    36: 253,  # '$'
-    37: 253,  # '%'
-    38: 253,  # '&'
-    39: 253,  # "'"
-    40: 253,  # '('
-    41: 253,  # ')'
-    42: 253,  # '*'
-    43: 253,  # '+'
-    44: 253,  # ','
-    45: 253,  # '-'
-    46: 253,  # '.'
-    47: 253,  # '/'
-    48: 252,  # '0'
-    49: 252,  # '1'
-    50: 252,  # '2'
-    51: 252,  # '3'
-    52: 252,  # '4'
-    53: 252,  # '5'
-    54: 252,  # '6'
-    55: 252,  # '7'
-    56: 252,  # '8'
-    57: 252,  # '9'
-    58: 253,  # ':'
-    59: 253,  # ';'
-    60: 253,  # '<'
-    61: 253,  # '='
-    62: 253,  # '>'
-    63: 253,  # '?'
-    64: 253,  # '@'
-    65: 69,  # 'A'
-    66: 91,  # 'B'
-    67: 79,  # 'C'
-    68: 80,  # 'D'
-    69: 92,  # 'E'
-    70: 89,  # 'F'
-    71: 97,  # 'G'
-    72: 90,  # 'H'
-    73: 68,  # 'I'
-    74: 111,  # 'J'
-    75: 112,  # 'K'
-    76: 82,  # 'L'
-    77: 73,  # 'M'
-    78: 95,  # 'N'
-    79: 85,  # 'O'
-    80: 78,  # 'P'
-    81: 121,  # 'Q'
-    82: 86,  # 'R'
-    83: 71,  # 'S'
-    84: 67,  # 'T'
-    85: 102,  # 'U'
-    86: 107,  # 'V'
-    87: 84,  # 'W'
-    88: 114,  # 'X'
-    89: 103,  # 'Y'
-    90: 115,  # 'Z'
-    91: 253,  # '['
-    92: 253,  # '\\'
-    93: 253,  # ']'
-    94: 253,  # '^'
-    95: 253,  # '_'
-    96: 253,  # '`'
-    97: 50,  # 'a'
-    98: 74,  # 'b'
-    99: 60,  # 'c'
-    100: 61,  # 'd'
-    101: 42,  # 'e'
-    102: 76,  # 'f'
-    103: 70,  # 'g'
-    104: 64,  # 'h'
-    105: 53,  # 'i'
-    106: 105,  # 'j'
-    107: 93,  # 'k'
-    108: 56,  # 'l'
-    109: 65,  # 'm'
-    110: 54,  # 'n'
-    111: 49,  # 'o'
-    112: 66,  # 'p'
-    113: 110,  # 'q'
-    114: 51,  # 'r'
-    115: 43,  # 's'
-    116: 44,  # 't'
-    117: 63,  # 'u'
-    118: 81,  # 'v'
-    119: 77,  # 'w'
-    120: 98,  # 'x'
-    121: 75,  # 'y'
-    122: 108,  # 'z'
-    123: 253,  # '{'
-    124: 253,  # '|'
-    125: 253,  # '}'
-    126: 253,  # '~'
-    127: 253,  # '\x7f'
-    128: 124,  # '€'
-    129: 202,  # None
-    130: 203,  # '‚'
-    131: 204,  # 'ƒ'
-    132: 205,  # '„'
-    133: 40,  # '…'
-    134: 58,  # '†'
-    135: 206,  # '‡'
-    136: 207,  # 'ˆ'
-    137: 208,  # '‰'
-    138: 209,  # None
-    139: 210,  # '‹'
-    140: 211,  # None
-    141: 212,  # None
-    142: 213,  # None
-    143: 214,  # None
-    144: 215,  # None
-    145: 83,  # '‘'
-    146: 52,  # '’'
-    147: 47,  # '“'
-    148: 46,  # '”'
-    149: 72,  # '•'
-    150: 32,  # '–'
-    151: 94,  # '—'
-    152: 216,  # '˜'
-    153: 113,  # '™'
-    154: 217,  # None
-    155: 109,  # '›'
-    156: 218,  # None
-    157: 219,  # None
-    158: 220,  # None
-    159: 221,  # None
-    160: 34,  # '\xa0'
-    161: 116,  # '¡'
-    162: 222,  # '¢'
-    163: 118,  # '£'
-    164: 100,  # '₪'
-    165: 223,  # '¥'
-    166: 224,  # '¦'
-    167: 117,  # '§'
-    168: 119,  # '¨'
-    169: 104,  # '©'
-    170: 125,  # '×'
-    171: 225,  # '«'
-    172: 226,  # '¬'
-    173: 87,  # '\xad'
-    174: 99,  # '®'
-    175: 227,  # '¯'
-    176: 106,  # '°'
-    177: 122,  # '±'
-    178: 123,  # '²'
-    179: 228,  # '³'
-    180: 55,  # '´'
-    181: 229,  # 'µ'
-    182: 230,  # '¶'
-    183: 101,  # '·'
-    184: 231,  # '¸'
-    185: 232,  # '¹'
-    186: 120,  # '÷'
-    187: 233,  # '»'
-    188: 48,  # '¼'
-    189: 39,  # '½'
-    190: 57,  # '¾'
-    191: 234,  # '¿'
-    192: 30,  # 'ְ'
-    193: 59,  # 'ֱ'
-    194: 41,  # 'ֲ'
-    195: 88,  # 'ֳ'
-    196: 33,  # 'ִ'
-    197: 37,  # 'ֵ'
-    198: 36,  # 'ֶ'
-    199: 31,  # 'ַ'
-    200: 29,  # 'ָ'
-    201: 35,  # 'ֹ'
-    202: 235,  # None
-    203: 62,  # 'ֻ'
-    204: 28,  # 'ּ'
-    205: 236,  # 'ֽ'
-    206: 126,  # '־'
-    207: 237,  # 'ֿ'
-    208: 238,  # '׀'
-    209: 38,  # 'ׁ'
-    210: 45,  # 'ׂ'
-    211: 239,  # '׃'
-    212: 240,  # 'װ'
-    213: 241,  # 'ױ'
-    214: 242,  # 'ײ'
-    215: 243,  # '׳'
-    216: 127,  # '״'
-    217: 244,  # None
-    218: 245,  # None
-    219: 246,  # None
-    220: 247,  # None
-    221: 248,  # None
-    222: 249,  # None
-    223: 250,  # None
-    224: 9,  # 'א'
-    225: 8,  # 'ב'
-    226: 20,  # 'ג'
-    227: 16,  # 'ד'
-    228: 3,  # 'ה'
-    229: 2,  # 'ו'
-    230: 24,  # 'ז'
-    231: 14,  # 'ח'
-    232: 22,  # 'ט'
-    233: 1,  # 'י'
-    234: 25,  # 'ך'
-    235: 15,  # 'כ'
-    236: 4,  # 'ל'
-    237: 11,  # 'ם'
-    238: 6,  # 'מ'
-    239: 23,  # 'ן'
-    240: 12,  # 'נ'
-    241: 19,  # 'ס'
-    242: 13,  # 'ע'
-    243: 26,  # 'ף'
-    244: 18,  # 'פ'
-    245: 27,  # 'ץ'
-    246: 21,  # 'צ'
-    247: 17,  # 'ק'
-    248: 7,  # 'ר'
-    249: 10,  # 'ש'
-    250: 5,  # 'ת'
-    251: 251,  # None
-    252: 252,  # None
-    253: 128,  # '\u200e'
-    254: 96,  # '\u200f'
-    255: 253,  # None
+     0: 255,  # '\x00'
+     1: 255,  # '\x01'
+     2: 255,  # '\x02'
+     3: 255,  # '\x03'
+     4: 255,  # '\x04'
+     5: 255,  # '\x05'
+     6: 255,  # '\x06'
+     7: 255,  # '\x07'
+     8: 255,  # '\x08'
+     9: 255,  # '\t'
+     10: 254,  # '\n'
+     11: 255,  # '\x0b'
+     12: 255,  # '\x0c'
+     13: 254,  # '\r'
+     14: 255,  # '\x0e'
+     15: 255,  # '\x0f'
+     16: 255,  # '\x10'
+     17: 255,  # '\x11'
+     18: 255,  # '\x12'
+     19: 255,  # '\x13'
+     20: 255,  # '\x14'
+     21: 255,  # '\x15'
+     22: 255,  # '\x16'
+     23: 255,  # '\x17'
+     24: 255,  # '\x18'
+     25: 255,  # '\x19'
+     26: 255,  # '\x1a'
+     27: 255,  # '\x1b'
+     28: 255,  # '\x1c'
+     29: 255,  # '\x1d'
+     30: 255,  # '\x1e'
+     31: 255,  # '\x1f'
+     32: 253,  # ' '
+     33: 253,  # '!'
+     34: 253,  # '"'
+     35: 253,  # '#'
+     36: 253,  # '$'
+     37: 253,  # '%'
+     38: 253,  # '&'
+     39: 253,  # "'"
+     40: 253,  # '('
+     41: 253,  # ')'
+     42: 253,  # '*'
+     43: 253,  # '+'
+     44: 253,  # ','
+     45: 253,  # '-'
+     46: 253,  # '.'
+     47: 253,  # '/'
+     48: 252,  # '0'
+     49: 252,  # '1'
+     50: 252,  # '2'
+     51: 252,  # '3'
+     52: 252,  # '4'
+     53: 252,  # '5'
+     54: 252,  # '6'
+     55: 252,  # '7'
+     56: 252,  # '8'
+     57: 252,  # '9'
+     58: 253,  # ':'
+     59: 253,  # ';'
+     60: 253,  # '<'
+     61: 253,  # '='
+     62: 253,  # '>'
+     63: 253,  # '?'
+     64: 253,  # '@'
+     65: 69,  # 'A'
+     66: 91,  # 'B'
+     67: 79,  # 'C'
+     68: 80,  # 'D'
+     69: 92,  # 'E'
+     70: 89,  # 'F'
+     71: 97,  # 'G'
+     72: 90,  # 'H'
+     73: 68,  # 'I'
+     74: 111,  # 'J'
+     75: 112,  # 'K'
+     76: 82,  # 'L'
+     77: 73,  # 'M'
+     78: 95,  # 'N'
+     79: 85,  # 'O'
+     80: 78,  # 'P'
+     81: 121,  # 'Q'
+     82: 86,  # 'R'
+     83: 71,  # 'S'
+     84: 67,  # 'T'
+     85: 102,  # 'U'
+     86: 107,  # 'V'
+     87: 84,  # 'W'
+     88: 114,  # 'X'
+     89: 103,  # 'Y'
+     90: 115,  # 'Z'
+     91: 253,  # '['
+     92: 253,  # '\\'
+     93: 253,  # ']'
+     94: 253,  # '^'
+     95: 253,  # '_'
+     96: 253,  # '`'
+     97: 50,  # 'a'
+     98: 74,  # 'b'
+     99: 60,  # 'c'
+     100: 61,  # 'd'
+     101: 42,  # 'e'
+     102: 76,  # 'f'
+     103: 70,  # 'g'
+     104: 64,  # 'h'
+     105: 53,  # 'i'
+     106: 105,  # 'j'
+     107: 93,  # 'k'
+     108: 56,  # 'l'
+     109: 65,  # 'm'
+     110: 54,  # 'n'
+     111: 49,  # 'o'
+     112: 66,  # 'p'
+     113: 110,  # 'q'
+     114: 51,  # 'r'
+     115: 43,  # 's'
+     116: 44,  # 't'
+     117: 63,  # 'u'
+     118: 81,  # 'v'
+     119: 77,  # 'w'
+     120: 98,  # 'x'
+     121: 75,  # 'y'
+     122: 108,  # 'z'
+     123: 253,  # '{'
+     124: 253,  # '|'
+     125: 253,  # '}'
+     126: 253,  # '~'
+     127: 253,  # '\x7f'
+     128: 124,  # '€'
+     129: 202,  # None
+     130: 203,  # '‚'
+     131: 204,  # 'ƒ'
+     132: 205,  # '„'
+     133: 40,  # '…'
+     134: 58,  # '†'
+     135: 206,  # '‡'
+     136: 207,  # 'ˆ'
+     137: 208,  # '‰'
+     138: 209,  # None
+     139: 210,  # '‹'
+     140: 211,  # None
+     141: 212,  # None
+     142: 213,  # None
+     143: 214,  # None
+     144: 215,  # None
+     145: 83,  # '‘'
+     146: 52,  # '’'
+     147: 47,  # '“'
+     148: 46,  # '”'
+     149: 72,  # '•'
+     150: 32,  # '–'
+     151: 94,  # '—'
+     152: 216,  # '˜'
+     153: 113,  # '™'
+     154: 217,  # None
+     155: 109,  # '›'
+     156: 218,  # None
+     157: 219,  # None
+     158: 220,  # None
+     159: 221,  # None
+     160: 34,  # '\xa0'
+     161: 116,  # '¡'
+     162: 222,  # '¢'
+     163: 118,  # '£'
+     164: 100,  # '₪'
+     165: 223,  # '¥'
+     166: 224,  # '¦'
+     167: 117,  # '§'
+     168: 119,  # '¨'
+     169: 104,  # '©'
+     170: 125,  # '×'
+     171: 225,  # '«'
+     172: 226,  # '¬'
+     173: 87,  # '\xad'
+     174: 99,  # '®'
+     175: 227,  # '¯'
+     176: 106,  # '°'
+     177: 122,  # '±'
+     178: 123,  # '²'
+     179: 228,  # '³'
+     180: 55,  # '´'
+     181: 229,  # 'µ'
+     182: 230,  # '¶'
+     183: 101,  # '·'
+     184: 231,  # '¸'
+     185: 232,  # '¹'
+     186: 120,  # '÷'
+     187: 233,  # '»'
+     188: 48,  # '¼'
+     189: 39,  # '½'
+     190: 57,  # '¾'
+     191: 234,  # '¿'
+     192: 30,  # 'ְ'
+     193: 59,  # 'ֱ'
+     194: 41,  # 'ֲ'
+     195: 88,  # 'ֳ'
+     196: 33,  # 'ִ'
+     197: 37,  # 'ֵ'
+     198: 36,  # 'ֶ'
+     199: 31,  # 'ַ'
+     200: 29,  # 'ָ'
+     201: 35,  # 'ֹ'
+     202: 235,  # None
+     203: 62,  # 'ֻ'
+     204: 28,  # 'ּ'
+     205: 236,  # 'ֽ'
+     206: 126,  # '־'
+     207: 237,  # 'ֿ'
+     208: 238,  # '׀'
+     209: 38,  # 'ׁ'
+     210: 45,  # 'ׂ'
+     211: 239,  # '׃'
+     212: 240,  # 'װ'
+     213: 241,  # 'ױ'
+     214: 242,  # 'ײ'
+     215: 243,  # '׳'
+     216: 127,  # '״'
+     217: 244,  # None
+     218: 245,  # None
+     219: 246,  # None
+     220: 247,  # None
+     221: 248,  # None
+     222: 249,  # None
+     223: 250,  # None
+     224: 9,  # 'א'
+     225: 8,  # 'ב'
+     226: 20,  # 'ג'
+     227: 16,  # 'ד'
+     228: 3,  # 'ה'
+     229: 2,  # 'ו'
+     230: 24,  # 'ז'
+     231: 14,  # 'ח'
+     232: 22,  # 'ט'
+     233: 1,  # 'י'
+     234: 25,  # 'ך'
+     235: 15,  # 'כ'
+     236: 4,  # 'ל'
+     237: 11,  # 'ם'
+     238: 6,  # 'מ'
+     239: 23,  # 'ן'
+     240: 12,  # 'נ'
+     241: 19,  # 'ס'
+     242: 13,  # 'ע'
+     243: 26,  # 'ף'
+     244: 18,  # 'פ'
+     245: 27,  # 'ץ'
+     246: 21,  # 'צ'
+     247: 17,  # 'ק'
+     248: 7,  # 'ר'
+     249: 10,  # 'ש'
+     250: 5,  # 'ת'
+     251: 251,  # None
+     252: 252,  # None
+     253: 128,  # '\u200e'
+     254: 96,  # '\u200f'
+     255: 253,  # None
 }
 
-WINDOWS_1255_HEBREW_MODEL = SingleByteCharSetModel(
-    charset_name="windows-1255",
-    language="Hebrew",
-    char_to_order_map=WINDOWS_1255_HEBREW_CHAR_TO_ORDER,
-    language_model=HEBREW_LANG_MODEL,
-    typical_positive_ratio=0.984004,
-    keep_ascii_letters=False,
-    alphabet="אבגדהוזחטיךכלםמןנסעףפץצקרשתװױײ",
-)
+WINDOWS_1255_HEBREW_MODEL = SingleByteCharSetModel(charset_name='windows-1255',
+                                                   language='Hebrew',
+                                                   char_to_order_map=WINDOWS_1255_HEBREW_CHAR_TO_ORDER,
+                                                   language_model=HEBREW_LANG_MODEL,
+                                                   typical_positive_ratio=0.984004,
+                                                   keep_ascii_letters=False,
+                                                   alphabet='אבגדהוזחטיךכלםמןנסעףפץצקרשתװױײ')
+
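[Reviewer note, not part of the patch: as with the Bulgarian and Greek tables, the Hebrew hunk above changes formatting only. In practice these model constants are reached through chardet's top-level API rather than imported directly; a minimal sketch, again assuming the standalone chardet package, with an illustrative sample:

    import chardet

    # detect() runs all registered probers, including the single-byte Greek
    # and Hebrew probers built from the models above, and reports the best.
    raw = "שלום עולם".encode("windows-1255")  # illustrative input
    print(chardet.detect(raw))  # {'encoding': ..., 'confidence': ..., 'language': ...}
]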
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/langhungarianmodel.py b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/langhungarianmodel.py
index 09a0d32..bbc5cda 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/langhungarianmodel.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/langhungarianmodel.py
@@ -1,5 +1,9 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
 from pip._vendor.chardet.sbcharsetprober import SingleByteCharSetModel
 
+
 # 3: Positive
 # 2: Likely
 # 1: Unlikely
@@ -4111,539 +4115,536 @@
 
 # Character Mapping Table(s):
 WINDOWS_1250_HUNGARIAN_CHAR_TO_ORDER = {
-    0: 255,  # '\x00'
-    1: 255,  # '\x01'
-    2: 255,  # '\x02'
-    3: 255,  # '\x03'
-    4: 255,  # '\x04'
-    5: 255,  # '\x05'
-    6: 255,  # '\x06'
-    7: 255,  # '\x07'
-    8: 255,  # '\x08'
-    9: 255,  # '\t'
-    10: 254,  # '\n'
-    11: 255,  # '\x0b'
-    12: 255,  # '\x0c'
-    13: 254,  # '\r'
-    14: 255,  # '\x0e'
-    15: 255,  # '\x0f'
-    16: 255,  # '\x10'
-    17: 255,  # '\x11'
-    18: 255,  # '\x12'
-    19: 255,  # '\x13'
-    20: 255,  # '\x14'
-    21: 255,  # '\x15'
-    22: 255,  # '\x16'
-    23: 255,  # '\x17'
-    24: 255,  # '\x18'
-    25: 255,  # '\x19'
-    26: 255,  # '\x1a'
-    27: 255,  # '\x1b'
-    28: 255,  # '\x1c'
-    29: 255,  # '\x1d'
-    30: 255,  # '\x1e'
-    31: 255,  # '\x1f'
-    32: 253,  # ' '
-    33: 253,  # '!'
-    34: 253,  # '"'
-    35: 253,  # '#'
-    36: 253,  # '$'
-    37: 253,  # '%'
-    38: 253,  # '&'
-    39: 253,  # "'"
-    40: 253,  # '('
-    41: 253,  # ')'
-    42: 253,  # '*'
-    43: 253,  # '+'
-    44: 253,  # ','
-    45: 253,  # '-'
-    46: 253,  # '.'
-    47: 253,  # '/'
-    48: 252,  # '0'
-    49: 252,  # '1'
-    50: 252,  # '2'
-    51: 252,  # '3'
-    52: 252,  # '4'
-    53: 252,  # '5'
-    54: 252,  # '6'
-    55: 252,  # '7'
-    56: 252,  # '8'
-    57: 252,  # '9'
-    58: 253,  # ':'
-    59: 253,  # ';'
-    60: 253,  # '<'
-    61: 253,  # '='
-    62: 253,  # '>'
-    63: 253,  # '?'
-    64: 253,  # '@'
-    65: 28,  # 'A'
-    66: 40,  # 'B'
-    67: 54,  # 'C'
-    68: 45,  # 'D'
-    69: 32,  # 'E'
-    70: 50,  # 'F'
-    71: 49,  # 'G'
-    72: 38,  # 'H'
-    73: 39,  # 'I'
-    74: 53,  # 'J'
-    75: 36,  # 'K'
-    76: 41,  # 'L'
-    77: 34,  # 'M'
-    78: 35,  # 'N'
-    79: 47,  # 'O'
-    80: 46,  # 'P'
-    81: 72,  # 'Q'
-    82: 43,  # 'R'
-    83: 33,  # 'S'
-    84: 37,  # 'T'
-    85: 57,  # 'U'
-    86: 48,  # 'V'
-    87: 64,  # 'W'
-    88: 68,  # 'X'
-    89: 55,  # 'Y'
-    90: 52,  # 'Z'
-    91: 253,  # '['
-    92: 253,  # '\\'
-    93: 253,  # ']'
-    94: 253,  # '^'
-    95: 253,  # '_'
-    96: 253,  # '`'
-    97: 2,  # 'a'
-    98: 18,  # 'b'
-    99: 26,  # 'c'
-    100: 17,  # 'd'
-    101: 1,  # 'e'
-    102: 27,  # 'f'
-    103: 12,  # 'g'
-    104: 20,  # 'h'
-    105: 9,  # 'i'
-    106: 22,  # 'j'
-    107: 7,  # 'k'
-    108: 6,  # 'l'
-    109: 13,  # 'm'
-    110: 4,  # 'n'
-    111: 8,  # 'o'
-    112: 23,  # 'p'
-    113: 67,  # 'q'
-    114: 10,  # 'r'
-    115: 5,  # 's'
-    116: 3,  # 't'
-    117: 21,  # 'u'
-    118: 19,  # 'v'
-    119: 65,  # 'w'
-    120: 62,  # 'x'
-    121: 16,  # 'y'
-    122: 11,  # 'z'
-    123: 253,  # '{'
-    124: 253,  # '|'
-    125: 253,  # '}'
-    126: 253,  # '~'
-    127: 253,  # '\x7f'
-    128: 161,  # '€'
-    129: 162,  # None
-    130: 163,  # '‚'
-    131: 164,  # None
-    132: 165,  # '„'
-    133: 166,  # '…'
-    134: 167,  # '†'
-    135: 168,  # '‡'
-    136: 169,  # None
-    137: 170,  # '‰'
-    138: 171,  # 'Š'
-    139: 172,  # '‹'
-    140: 173,  # 'Ś'
-    141: 174,  # 'Ť'
-    142: 175,  # 'Ž'
-    143: 176,  # 'Ź'
-    144: 177,  # None
-    145: 178,  # '‘'
-    146: 179,  # '’'
-    147: 180,  # '“'
-    148: 78,  # '”'
-    149: 181,  # '•'
-    150: 69,  # '–'
-    151: 182,  # '—'
-    152: 183,  # None
-    153: 184,  # '™'
-    154: 185,  # 'š'
-    155: 186,  # '›'
-    156: 187,  # 'ś'
-    157: 188,  # 'ť'
-    158: 189,  # 'ž'
-    159: 190,  # 'ź'
-    160: 191,  # '\xa0'
-    161: 192,  # 'ˇ'
-    162: 193,  # '˘'
-    163: 194,  # 'Ł'
-    164: 195,  # '¤'
-    165: 196,  # 'Ą'
-    166: 197,  # '¦'
-    167: 76,  # '§'
-    168: 198,  # '¨'
-    169: 199,  # '©'
-    170: 200,  # 'Ş'
-    171: 201,  # '«'
-    172: 202,  # '¬'
-    173: 203,  # '\xad'
-    174: 204,  # '®'
-    175: 205,  # 'Ż'
-    176: 81,  # '°'
-    177: 206,  # '±'
-    178: 207,  # '˛'
-    179: 208,  # 'ł'
-    180: 209,  # '´'
-    181: 210,  # 'µ'
-    182: 211,  # '¶'
-    183: 212,  # '·'
-    184: 213,  # '¸'
-    185: 214,  # 'ą'
-    186: 215,  # 'ş'
-    187: 216,  # '»'
-    188: 217,  # 'Ľ'
-    189: 218,  # '˝'
-    190: 219,  # 'ľ'
-    191: 220,  # 'ż'
-    192: 221,  # 'Ŕ'
-    193: 51,  # 'Á'
-    194: 83,  # 'Â'
-    195: 222,  # 'Ă'
-    196: 80,  # 'Ä'
-    197: 223,  # 'Ĺ'
-    198: 224,  # 'Ć'
-    199: 225,  # 'Ç'
-    200: 226,  # 'Č'
-    201: 44,  # 'É'
-    202: 227,  # 'Ę'
-    203: 228,  # 'Ë'
-    204: 229,  # 'Ě'
-    205: 61,  # 'Í'
-    206: 230,  # 'Î'
-    207: 231,  # 'Ď'
-    208: 232,  # 'Đ'
-    209: 233,  # 'Ń'
-    210: 234,  # 'Ň'
-    211: 58,  # 'Ó'
-    212: 235,  # 'Ô'
-    213: 66,  # 'Ő'
-    214: 59,  # 'Ö'
-    215: 236,  # '×'
-    216: 237,  # 'Ř'
-    217: 238,  # 'Ů'
-    218: 60,  # 'Ú'
-    219: 70,  # 'Ű'
-    220: 63,  # 'Ü'
-    221: 239,  # 'Ý'
-    222: 240,  # 'Ţ'
-    223: 241,  # 'ß'
-    224: 84,  # 'ŕ'
-    225: 14,  # 'á'
-    226: 75,  # 'â'
-    227: 242,  # 'ă'
-    228: 71,  # 'ä'
-    229: 82,  # 'ĺ'
-    230: 243,  # 'ć'
-    231: 73,  # 'ç'
-    232: 244,  # 'č'
-    233: 15,  # 'é'
-    234: 85,  # 'ę'
-    235: 79,  # 'ë'
-    236: 86,  # 'ě'
-    237: 30,  # 'í'
-    238: 77,  # 'î'
-    239: 87,  # 'ď'
-    240: 245,  # 'đ'
-    241: 246,  # 'ń'
-    242: 247,  # 'ň'
-    243: 25,  # 'ó'
-    244: 74,  # 'ô'
-    245: 42,  # 'ő'
-    246: 24,  # 'ö'
-    247: 248,  # '÷'
-    248: 249,  # 'ř'
-    249: 250,  # 'ů'
-    250: 31,  # 'ú'
-    251: 56,  # 'ű'
-    252: 29,  # 'ü'
-    253: 251,  # 'ý'
-    254: 252,  # 'ţ'
-    255: 253,  # '˙'
+     0: 255,  # '\x00'
+     1: 255,  # '\x01'
+     2: 255,  # '\x02'
+     3: 255,  # '\x03'
+     4: 255,  # '\x04'
+     5: 255,  # '\x05'
+     6: 255,  # '\x06'
+     7: 255,  # '\x07'
+     8: 255,  # '\x08'
+     9: 255,  # '\t'
+     10: 254,  # '\n'
+     11: 255,  # '\x0b'
+     12: 255,  # '\x0c'
+     13: 254,  # '\r'
+     14: 255,  # '\x0e'
+     15: 255,  # '\x0f'
+     16: 255,  # '\x10'
+     17: 255,  # '\x11'
+     18: 255,  # '\x12'
+     19: 255,  # '\x13'
+     20: 255,  # '\x14'
+     21: 255,  # '\x15'
+     22: 255,  # '\x16'
+     23: 255,  # '\x17'
+     24: 255,  # '\x18'
+     25: 255,  # '\x19'
+     26: 255,  # '\x1a'
+     27: 255,  # '\x1b'
+     28: 255,  # '\x1c'
+     29: 255,  # '\x1d'
+     30: 255,  # '\x1e'
+     31: 255,  # '\x1f'
+     32: 253,  # ' '
+     33: 253,  # '!'
+     34: 253,  # '"'
+     35: 253,  # '#'
+     36: 253,  # '$'
+     37: 253,  # '%'
+     38: 253,  # '&'
+     39: 253,  # "'"
+     40: 253,  # '('
+     41: 253,  # ')'
+     42: 253,  # '*'
+     43: 253,  # '+'
+     44: 253,  # ','
+     45: 253,  # '-'
+     46: 253,  # '.'
+     47: 253,  # '/'
+     48: 252,  # '0'
+     49: 252,  # '1'
+     50: 252,  # '2'
+     51: 252,  # '3'
+     52: 252,  # '4'
+     53: 252,  # '5'
+     54: 252,  # '6'
+     55: 252,  # '7'
+     56: 252,  # '8'
+     57: 252,  # '9'
+     58: 253,  # ':'
+     59: 253,  # ';'
+     60: 253,  # '<'
+     61: 253,  # '='
+     62: 253,  # '>'
+     63: 253,  # '?'
+     64: 253,  # '@'
+     65: 28,  # 'A'
+     66: 40,  # 'B'
+     67: 54,  # 'C'
+     68: 45,  # 'D'
+     69: 32,  # 'E'
+     70: 50,  # 'F'
+     71: 49,  # 'G'
+     72: 38,  # 'H'
+     73: 39,  # 'I'
+     74: 53,  # 'J'
+     75: 36,  # 'K'
+     76: 41,  # 'L'
+     77: 34,  # 'M'
+     78: 35,  # 'N'
+     79: 47,  # 'O'
+     80: 46,  # 'P'
+     81: 72,  # 'Q'
+     82: 43,  # 'R'
+     83: 33,  # 'S'
+     84: 37,  # 'T'
+     85: 57,  # 'U'
+     86: 48,  # 'V'
+     87: 64,  # 'W'
+     88: 68,  # 'X'
+     89: 55,  # 'Y'
+     90: 52,  # 'Z'
+     91: 253,  # '['
+     92: 253,  # '\\'
+     93: 253,  # ']'
+     94: 253,  # '^'
+     95: 253,  # '_'
+     96: 253,  # '`'
+     97: 2,  # 'a'
+     98: 18,  # 'b'
+     99: 26,  # 'c'
+     100: 17,  # 'd'
+     101: 1,  # 'e'
+     102: 27,  # 'f'
+     103: 12,  # 'g'
+     104: 20,  # 'h'
+     105: 9,  # 'i'
+     106: 22,  # 'j'
+     107: 7,  # 'k'
+     108: 6,  # 'l'
+     109: 13,  # 'm'
+     110: 4,  # 'n'
+     111: 8,  # 'o'
+     112: 23,  # 'p'
+     113: 67,  # 'q'
+     114: 10,  # 'r'
+     115: 5,  # 's'
+     116: 3,  # 't'
+     117: 21,  # 'u'
+     118: 19,  # 'v'
+     119: 65,  # 'w'
+     120: 62,  # 'x'
+     121: 16,  # 'y'
+     122: 11,  # 'z'
+     123: 253,  # '{'
+     124: 253,  # '|'
+     125: 253,  # '}'
+     126: 253,  # '~'
+     127: 253,  # '\x7f'
+     128: 161,  # '€'
+     129: 162,  # None
+     130: 163,  # '‚'
+     131: 164,  # None
+     132: 165,  # '„'
+     133: 166,  # '…'
+     134: 167,  # '†'
+     135: 168,  # '‡'
+     136: 169,  # None
+     137: 170,  # '‰'
+     138: 171,  # 'Š'
+     139: 172,  # '‹'
+     140: 173,  # 'Ś'
+     141: 174,  # 'Ť'
+     142: 175,  # 'Ž'
+     143: 176,  # 'Ź'
+     144: 177,  # None
+     145: 178,  # '‘'
+     146: 179,  # '’'
+     147: 180,  # '“'
+     148: 78,  # '”'
+     149: 181,  # '•'
+     150: 69,  # '–'
+     151: 182,  # '—'
+     152: 183,  # None
+     153: 184,  # '™'
+     154: 185,  # 'š'
+     155: 186,  # '›'
+     156: 187,  # 'ś'
+     157: 188,  # 'ť'
+     158: 189,  # 'ž'
+     159: 190,  # 'ź'
+     160: 191,  # '\xa0'
+     161: 192,  # 'ˇ'
+     162: 193,  # '˘'
+     163: 194,  # 'Ł'
+     164: 195,  # '¤'
+     165: 196,  # 'Ą'
+     166: 197,  # '¦'
+     167: 76,  # '§'
+     168: 198,  # '¨'
+     169: 199,  # '©'
+     170: 200,  # 'Ş'
+     171: 201,  # '«'
+     172: 202,  # '¬'
+     173: 203,  # '\xad'
+     174: 204,  # '®'
+     175: 205,  # 'Ż'
+     176: 81,  # '°'
+     177: 206,  # '±'
+     178: 207,  # '˛'
+     179: 208,  # 'ł'
+     180: 209,  # '´'
+     181: 210,  # 'µ'
+     182: 211,  # '¶'
+     183: 212,  # '·'
+     184: 213,  # '¸'
+     185: 214,  # 'ą'
+     186: 215,  # 'ş'
+     187: 216,  # '»'
+     188: 217,  # 'Ľ'
+     189: 218,  # '˝'
+     190: 219,  # 'ľ'
+     191: 220,  # 'ż'
+     192: 221,  # 'Ŕ'
+     193: 51,  # 'Á'
+     194: 83,  # 'Â'
+     195: 222,  # 'Ă'
+     196: 80,  # 'Ä'
+     197: 223,  # 'Ĺ'
+     198: 224,  # 'Ć'
+     199: 225,  # 'Ç'
+     200: 226,  # 'Č'
+     201: 44,  # 'É'
+     202: 227,  # 'Ę'
+     203: 228,  # 'Ë'
+     204: 229,  # 'Ě'
+     205: 61,  # 'Í'
+     206: 230,  # 'Î'
+     207: 231,  # 'Ď'
+     208: 232,  # 'Đ'
+     209: 233,  # 'Ń'
+     210: 234,  # 'Ň'
+     211: 58,  # 'Ó'
+     212: 235,  # 'Ô'
+     213: 66,  # 'Ő'
+     214: 59,  # 'Ö'
+     215: 236,  # '×'
+     216: 237,  # 'Ř'
+     217: 238,  # 'Ů'
+     218: 60,  # 'Ú'
+     219: 70,  # 'Ű'
+     220: 63,  # 'Ü'
+     221: 239,  # 'Ý'
+     222: 240,  # 'Ţ'
+     223: 241,  # 'ß'
+     224: 84,  # 'ŕ'
+     225: 14,  # 'á'
+     226: 75,  # 'â'
+     227: 242,  # 'ă'
+     228: 71,  # 'ä'
+     229: 82,  # 'ĺ'
+     230: 243,  # 'ć'
+     231: 73,  # 'ç'
+     232: 244,  # 'č'
+     233: 15,  # 'é'
+     234: 85,  # 'ę'
+     235: 79,  # 'ë'
+     236: 86,  # 'ě'
+     237: 30,  # 'í'
+     238: 77,  # 'î'
+     239: 87,  # 'ď'
+     240: 245,  # 'đ'
+     241: 246,  # 'ń'
+     242: 247,  # 'ň'
+     243: 25,  # 'ó'
+     244: 74,  # 'ô'
+     245: 42,  # 'ő'
+     246: 24,  # 'ö'
+     247: 248,  # '÷'
+     248: 249,  # 'ř'
+     249: 250,  # 'ů'
+     250: 31,  # 'ú'
+     251: 56,  # 'ű'
+     252: 29,  # 'ü'
+     253: 251,  # 'ý'
+     254: 252,  # 'ţ'
+     255: 253,  # '˙'
 }
 
-WINDOWS_1250_HUNGARIAN_MODEL = SingleByteCharSetModel(
-    charset_name="windows-1250",
-    language="Hungarian",
-    char_to_order_map=WINDOWS_1250_HUNGARIAN_CHAR_TO_ORDER,
-    language_model=HUNGARIAN_LANG_MODEL,
-    typical_positive_ratio=0.947368,
-    keep_ascii_letters=True,
-    alphabet="ABCDEFGHIJKLMNOPRSTUVZabcdefghijklmnoprstuvzÁÉÍÓÖÚÜáéíóöúüŐőŰű",
-)
+WINDOWS_1250_HUNGARIAN_MODEL = SingleByteCharSetModel(charset_name='windows-1250',
+                                                      language='Hungarian',
+                                                      char_to_order_map=WINDOWS_1250_HUNGARIAN_CHAR_TO_ORDER,
+                                                      language_model=HUNGARIAN_LANG_MODEL,
+                                                      typical_positive_ratio=0.947368,
+                                                      keep_ascii_letters=True,
+                                                      alphabet='ABCDEFGHIJKLMNOPRSTUVZabcdefghijklmnoprstuvzÁÉÍÓÖÚÜáéíóöúüŐőŰű')
 
 ISO_8859_2_HUNGARIAN_CHAR_TO_ORDER = {
-    0: 255,  # '\x00'
-    1: 255,  # '\x01'
-    2: 255,  # '\x02'
-    3: 255,  # '\x03'
-    4: 255,  # '\x04'
-    5: 255,  # '\x05'
-    6: 255,  # '\x06'
-    7: 255,  # '\x07'
-    8: 255,  # '\x08'
-    9: 255,  # '\t'
-    10: 254,  # '\n'
-    11: 255,  # '\x0b'
-    12: 255,  # '\x0c'
-    13: 254,  # '\r'
-    14: 255,  # '\x0e'
-    15: 255,  # '\x0f'
-    16: 255,  # '\x10'
-    17: 255,  # '\x11'
-    18: 255,  # '\x12'
-    19: 255,  # '\x13'
-    20: 255,  # '\x14'
-    21: 255,  # '\x15'
-    22: 255,  # '\x16'
-    23: 255,  # '\x17'
-    24: 255,  # '\x18'
-    25: 255,  # '\x19'
-    26: 255,  # '\x1a'
-    27: 255,  # '\x1b'
-    28: 255,  # '\x1c'
-    29: 255,  # '\x1d'
-    30: 255,  # '\x1e'
-    31: 255,  # '\x1f'
-    32: 253,  # ' '
-    33: 253,  # '!'
-    34: 253,  # '"'
-    35: 253,  # '#'
-    36: 253,  # '$'
-    37: 253,  # '%'
-    38: 253,  # '&'
-    39: 253,  # "'"
-    40: 253,  # '('
-    41: 253,  # ')'
-    42: 253,  # '*'
-    43: 253,  # '+'
-    44: 253,  # ','
-    45: 253,  # '-'
-    46: 253,  # '.'
-    47: 253,  # '/'
-    48: 252,  # '0'
-    49: 252,  # '1'
-    50: 252,  # '2'
-    51: 252,  # '3'
-    52: 252,  # '4'
-    53: 252,  # '5'
-    54: 252,  # '6'
-    55: 252,  # '7'
-    56: 252,  # '8'
-    57: 252,  # '9'
-    58: 253,  # ':'
-    59: 253,  # ';'
-    60: 253,  # '<'
-    61: 253,  # '='
-    62: 253,  # '>'
-    63: 253,  # '?'
-    64: 253,  # '@'
-    65: 28,  # 'A'
-    66: 40,  # 'B'
-    67: 54,  # 'C'
-    68: 45,  # 'D'
-    69: 32,  # 'E'
-    70: 50,  # 'F'
-    71: 49,  # 'G'
-    72: 38,  # 'H'
-    73: 39,  # 'I'
-    74: 53,  # 'J'
-    75: 36,  # 'K'
-    76: 41,  # 'L'
-    77: 34,  # 'M'
-    78: 35,  # 'N'
-    79: 47,  # 'O'
-    80: 46,  # 'P'
-    81: 71,  # 'Q'
-    82: 43,  # 'R'
-    83: 33,  # 'S'
-    84: 37,  # 'T'
-    85: 57,  # 'U'
-    86: 48,  # 'V'
-    87: 64,  # 'W'
-    88: 68,  # 'X'
-    89: 55,  # 'Y'
-    90: 52,  # 'Z'
-    91: 253,  # '['
-    92: 253,  # '\\'
-    93: 253,  # ']'
-    94: 253,  # '^'
-    95: 253,  # '_'
-    96: 253,  # '`'
-    97: 2,  # 'a'
-    98: 18,  # 'b'
-    99: 26,  # 'c'
-    100: 17,  # 'd'
-    101: 1,  # 'e'
-    102: 27,  # 'f'
-    103: 12,  # 'g'
-    104: 20,  # 'h'
-    105: 9,  # 'i'
-    106: 22,  # 'j'
-    107: 7,  # 'k'
-    108: 6,  # 'l'
-    109: 13,  # 'm'
-    110: 4,  # 'n'
-    111: 8,  # 'o'
-    112: 23,  # 'p'
-    113: 67,  # 'q'
-    114: 10,  # 'r'
-    115: 5,  # 's'
-    116: 3,  # 't'
-    117: 21,  # 'u'
-    118: 19,  # 'v'
-    119: 65,  # 'w'
-    120: 62,  # 'x'
-    121: 16,  # 'y'
-    122: 11,  # 'z'
-    123: 253,  # '{'
-    124: 253,  # '|'
-    125: 253,  # '}'
-    126: 253,  # '~'
-    127: 253,  # '\x7f'
-    128: 159,  # '\x80'
-    129: 160,  # '\x81'
-    130: 161,  # '\x82'
-    131: 162,  # '\x83'
-    132: 163,  # '\x84'
-    133: 164,  # '\x85'
-    134: 165,  # '\x86'
-    135: 166,  # '\x87'
-    136: 167,  # '\x88'
-    137: 168,  # '\x89'
-    138: 169,  # '\x8a'
-    139: 170,  # '\x8b'
-    140: 171,  # '\x8c'
-    141: 172,  # '\x8d'
-    142: 173,  # '\x8e'
-    143: 174,  # '\x8f'
-    144: 175,  # '\x90'
-    145: 176,  # '\x91'
-    146: 177,  # '\x92'
-    147: 178,  # '\x93'
-    148: 179,  # '\x94'
-    149: 180,  # '\x95'
-    150: 181,  # '\x96'
-    151: 182,  # '\x97'
-    152: 183,  # '\x98'
-    153: 184,  # '\x99'
-    154: 185,  # '\x9a'
-    155: 186,  # '\x9b'
-    156: 187,  # '\x9c'
-    157: 188,  # '\x9d'
-    158: 189,  # '\x9e'
-    159: 190,  # '\x9f'
-    160: 191,  # '\xa0'
-    161: 192,  # 'Ą'
-    162: 193,  # '˘'
-    163: 194,  # 'Ł'
-    164: 195,  # '¤'
-    165: 196,  # 'Ľ'
-    166: 197,  # 'Ś'
-    167: 75,  # '§'
-    168: 198,  # '¨'
-    169: 199,  # 'Š'
-    170: 200,  # 'Ş'
-    171: 201,  # 'Ť'
-    172: 202,  # 'Ź'
-    173: 203,  # '\xad'
-    174: 204,  # 'Ž'
-    175: 205,  # 'Ż'
-    176: 79,  # '°'
-    177: 206,  # 'ą'
-    178: 207,  # '˛'
-    179: 208,  # 'ł'
-    180: 209,  # '´'
-    181: 210,  # 'ľ'
-    182: 211,  # 'ś'
-    183: 212,  # 'ˇ'
-    184: 213,  # '¸'
-    185: 214,  # 'š'
-    186: 215,  # 'ş'
-    187: 216,  # 'ť'
-    188: 217,  # 'ź'
-    189: 218,  # '˝'
-    190: 219,  # 'ž'
-    191: 220,  # 'ż'
-    192: 221,  # 'Ŕ'
-    193: 51,  # 'Á'
-    194: 81,  # 'Â'
-    195: 222,  # 'Ă'
-    196: 78,  # 'Ä'
-    197: 223,  # 'Ĺ'
-    198: 224,  # 'Ć'
-    199: 225,  # 'Ç'
-    200: 226,  # 'Č'
-    201: 44,  # 'É'
-    202: 227,  # 'Ę'
-    203: 228,  # 'Ë'
-    204: 229,  # 'Ě'
-    205: 61,  # 'Í'
-    206: 230,  # 'Î'
-    207: 231,  # 'Ď'
-    208: 232,  # 'Đ'
-    209: 233,  # 'Ń'
-    210: 234,  # 'Ň'
-    211: 58,  # 'Ó'
-    212: 235,  # 'Ô'
-    213: 66,  # 'Ő'
-    214: 59,  # 'Ö'
-    215: 236,  # '×'
-    216: 237,  # 'Ř'
-    217: 238,  # 'Ů'
-    218: 60,  # 'Ú'
-    219: 69,  # 'Ű'
-    220: 63,  # 'Ü'
-    221: 239,  # 'Ý'
-    222: 240,  # 'Ţ'
-    223: 241,  # 'ß'
-    224: 82,  # 'ŕ'
-    225: 14,  # 'á'
-    226: 74,  # 'â'
-    227: 242,  # 'ă'
-    228: 70,  # 'ä'
-    229: 80,  # 'ĺ'
-    230: 243,  # 'ć'
-    231: 72,  # 'ç'
-    232: 244,  # 'č'
-    233: 15,  # 'é'
-    234: 83,  # 'ę'
-    235: 77,  # 'ë'
-    236: 84,  # 'ě'
-    237: 30,  # 'í'
-    238: 76,  # 'î'
-    239: 85,  # 'ď'
-    240: 245,  # 'đ'
-    241: 246,  # 'ń'
-    242: 247,  # 'ň'
-    243: 25,  # 'ó'
-    244: 73,  # 'ô'
-    245: 42,  # 'ő'
-    246: 24,  # 'ö'
-    247: 248,  # '÷'
-    248: 249,  # 'ř'
-    249: 250,  # 'ů'
-    250: 31,  # 'ú'
-    251: 56,  # 'ű'
-    252: 29,  # 'ü'
-    253: 251,  # 'ý'
-    254: 252,  # 'ţ'
-    255: 253,  # '˙'
+     0: 255,  # '\x00'
+     1: 255,  # '\x01'
+     2: 255,  # '\x02'
+     3: 255,  # '\x03'
+     4: 255,  # '\x04'
+     5: 255,  # '\x05'
+     6: 255,  # '\x06'
+     7: 255,  # '\x07'
+     8: 255,  # '\x08'
+     9: 255,  # '\t'
+     10: 254,  # '\n'
+     11: 255,  # '\x0b'
+     12: 255,  # '\x0c'
+     13: 254,  # '\r'
+     14: 255,  # '\x0e'
+     15: 255,  # '\x0f'
+     16: 255,  # '\x10'
+     17: 255,  # '\x11'
+     18: 255,  # '\x12'
+     19: 255,  # '\x13'
+     20: 255,  # '\x14'
+     21: 255,  # '\x15'
+     22: 255,  # '\x16'
+     23: 255,  # '\x17'
+     24: 255,  # '\x18'
+     25: 255,  # '\x19'
+     26: 255,  # '\x1a'
+     27: 255,  # '\x1b'
+     28: 255,  # '\x1c'
+     29: 255,  # '\x1d'
+     30: 255,  # '\x1e'
+     31: 255,  # '\x1f'
+     32: 253,  # ' '
+     33: 253,  # '!'
+     34: 253,  # '"'
+     35: 253,  # '#'
+     36: 253,  # '$'
+     37: 253,  # '%'
+     38: 253,  # '&'
+     39: 253,  # "'"
+     40: 253,  # '('
+     41: 253,  # ')'
+     42: 253,  # '*'
+     43: 253,  # '+'
+     44: 253,  # ','
+     45: 253,  # '-'
+     46: 253,  # '.'
+     47: 253,  # '/'
+     48: 252,  # '0'
+     49: 252,  # '1'
+     50: 252,  # '2'
+     51: 252,  # '3'
+     52: 252,  # '4'
+     53: 252,  # '5'
+     54: 252,  # '6'
+     55: 252,  # '7'
+     56: 252,  # '8'
+     57: 252,  # '9'
+     58: 253,  # ':'
+     59: 253,  # ';'
+     60: 253,  # '<'
+     61: 253,  # '='
+     62: 253,  # '>'
+     63: 253,  # '?'
+     64: 253,  # '@'
+     65: 28,  # 'A'
+     66: 40,  # 'B'
+     67: 54,  # 'C'
+     68: 45,  # 'D'
+     69: 32,  # 'E'
+     70: 50,  # 'F'
+     71: 49,  # 'G'
+     72: 38,  # 'H'
+     73: 39,  # 'I'
+     74: 53,  # 'J'
+     75: 36,  # 'K'
+     76: 41,  # 'L'
+     77: 34,  # 'M'
+     78: 35,  # 'N'
+     79: 47,  # 'O'
+     80: 46,  # 'P'
+     81: 71,  # 'Q'
+     82: 43,  # 'R'
+     83: 33,  # 'S'
+     84: 37,  # 'T'
+     85: 57,  # 'U'
+     86: 48,  # 'V'
+     87: 64,  # 'W'
+     88: 68,  # 'X'
+     89: 55,  # 'Y'
+     90: 52,  # 'Z'
+     91: 253,  # '['
+     92: 253,  # '\\'
+     93: 253,  # ']'
+     94: 253,  # '^'
+     95: 253,  # '_'
+     96: 253,  # '`'
+     97: 2,  # 'a'
+     98: 18,  # 'b'
+     99: 26,  # 'c'
+     100: 17,  # 'd'
+     101: 1,  # 'e'
+     102: 27,  # 'f'
+     103: 12,  # 'g'
+     104: 20,  # 'h'
+     105: 9,  # 'i'
+     106: 22,  # 'j'
+     107: 7,  # 'k'
+     108: 6,  # 'l'
+     109: 13,  # 'm'
+     110: 4,  # 'n'
+     111: 8,  # 'o'
+     112: 23,  # 'p'
+     113: 67,  # 'q'
+     114: 10,  # 'r'
+     115: 5,  # 's'
+     116: 3,  # 't'
+     117: 21,  # 'u'
+     118: 19,  # 'v'
+     119: 65,  # 'w'
+     120: 62,  # 'x'
+     121: 16,  # 'y'
+     122: 11,  # 'z'
+     123: 253,  # '{'
+     124: 253,  # '|'
+     125: 253,  # '}'
+     126: 253,  # '~'
+     127: 253,  # '\x7f'
+     128: 159,  # '\x80'
+     129: 160,  # '\x81'
+     130: 161,  # '\x82'
+     131: 162,  # '\x83'
+     132: 163,  # '\x84'
+     133: 164,  # '\x85'
+     134: 165,  # '\x86'
+     135: 166,  # '\x87'
+     136: 167,  # '\x88'
+     137: 168,  # '\x89'
+     138: 169,  # '\x8a'
+     139: 170,  # '\x8b'
+     140: 171,  # '\x8c'
+     141: 172,  # '\x8d'
+     142: 173,  # '\x8e'
+     143: 174,  # '\x8f'
+     144: 175,  # '\x90'
+     145: 176,  # '\x91'
+     146: 177,  # '\x92'
+     147: 178,  # '\x93'
+     148: 179,  # '\x94'
+     149: 180,  # '\x95'
+     150: 181,  # '\x96'
+     151: 182,  # '\x97'
+     152: 183,  # '\x98'
+     153: 184,  # '\x99'
+     154: 185,  # '\x9a'
+     155: 186,  # '\x9b'
+     156: 187,  # '\x9c'
+     157: 188,  # '\x9d'
+     158: 189,  # '\x9e'
+     159: 190,  # '\x9f'
+     160: 191,  # '\xa0'
+     161: 192,  # 'Ą'
+     162: 193,  # '˘'
+     163: 194,  # 'Ł'
+     164: 195,  # '¤'
+     165: 196,  # 'Ľ'
+     166: 197,  # 'Ś'
+     167: 75,  # '§'
+     168: 198,  # '¨'
+     169: 199,  # 'Š'
+     170: 200,  # 'Ş'
+     171: 201,  # 'Ť'
+     172: 202,  # 'Ź'
+     173: 203,  # '\xad'
+     174: 204,  # 'Ž'
+     175: 205,  # 'Ż'
+     176: 79,  # '°'
+     177: 206,  # 'ą'
+     178: 207,  # '˛'
+     179: 208,  # 'ł'
+     180: 209,  # '´'
+     181: 210,  # 'ľ'
+     182: 211,  # 'ś'
+     183: 212,  # 'ˇ'
+     184: 213,  # '¸'
+     185: 214,  # 'š'
+     186: 215,  # 'ş'
+     187: 216,  # 'ť'
+     188: 217,  # 'ź'
+     189: 218,  # '˝'
+     190: 219,  # 'ž'
+     191: 220,  # 'ż'
+     192: 221,  # 'Ŕ'
+     193: 51,  # 'Á'
+     194: 81,  # 'Â'
+     195: 222,  # 'Ă'
+     196: 78,  # 'Ä'
+     197: 223,  # 'Ĺ'
+     198: 224,  # 'Ć'
+     199: 225,  # 'Ç'
+     200: 226,  # 'Č'
+     201: 44,  # 'É'
+     202: 227,  # 'Ę'
+     203: 228,  # 'Ë'
+     204: 229,  # 'Ě'
+     205: 61,  # 'Í'
+     206: 230,  # 'Î'
+     207: 231,  # 'Ď'
+     208: 232,  # 'Đ'
+     209: 233,  # 'Ń'
+     210: 234,  # 'Ň'
+     211: 58,  # 'Ó'
+     212: 235,  # 'Ô'
+     213: 66,  # 'Ő'
+     214: 59,  # 'Ö'
+     215: 236,  # '×'
+     216: 237,  # 'Ř'
+     217: 238,  # 'Ů'
+     218: 60,  # 'Ú'
+     219: 69,  # 'Ű'
+     220: 63,  # 'Ü'
+     221: 239,  # 'Ý'
+     222: 240,  # 'Ţ'
+     223: 241,  # 'ß'
+     224: 82,  # 'ŕ'
+     225: 14,  # 'á'
+     226: 74,  # 'â'
+     227: 242,  # 'ă'
+     228: 70,  # 'ä'
+     229: 80,  # 'ĺ'
+     230: 243,  # 'ć'
+     231: 72,  # 'ç'
+     232: 244,  # 'č'
+     233: 15,  # 'é'
+     234: 83,  # 'ę'
+     235: 77,  # 'ë'
+     236: 84,  # 'ě'
+     237: 30,  # 'í'
+     238: 76,  # 'î'
+     239: 85,  # 'ď'
+     240: 245,  # 'đ'
+     241: 246,  # 'ń'
+     242: 247,  # 'ň'
+     243: 25,  # 'ó'
+     244: 73,  # 'ô'
+     245: 42,  # 'ő'
+     246: 24,  # 'ö'
+     247: 248,  # '÷'
+     248: 249,  # 'ř'
+     249: 250,  # 'ů'
+     250: 31,  # 'ú'
+     251: 56,  # 'ű'
+     252: 29,  # 'ü'
+     253: 251,  # 'ý'
+     254: 252,  # 'ţ'
+     255: 253,  # '˙'
 }
 
-ISO_8859_2_HUNGARIAN_MODEL = SingleByteCharSetModel(
-    charset_name="ISO-8859-2",
-    language="Hungarian",
-    char_to_order_map=ISO_8859_2_HUNGARIAN_CHAR_TO_ORDER,
-    language_model=HUNGARIAN_LANG_MODEL,
-    typical_positive_ratio=0.947368,
-    keep_ascii_letters=True,
-    alphabet="ABCDEFGHIJKLMNOPRSTUVZabcdefghijklmnoprstuvzÁÉÍÓÖÚÜáéíóöúüŐőŰű",
-)
+ISO_8859_2_HUNGARIAN_MODEL = SingleByteCharSetModel(charset_name='ISO-8859-2',
+                                                    language='Hungarian',
+                                                    char_to_order_map=ISO_8859_2_HUNGARIAN_CHAR_TO_ORDER,
+                                                    language_model=HUNGARIAN_LANG_MODEL,
+                                                    typical_positive_ratio=0.947368,
+                                                    keep_ascii_letters=True,
+                                                    alphabet='ABCDEFGHIJKLMNOPRSTUVZabcdefghijklmnoprstuvzÁÉÍÓÖÚÜáéíóöúüŐőŰű')
+
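
A note on reading these vendored chardet tables: the CHAR_TO_ORDER values follow a fixed convention visible in the inline comments. 255 marks control bytes, 254 marks CR and LF, 253 marks symbols and punctuation, 252 marks digits, and small values are frequency ranks of letters in the target language (for example, 'о' maps to 1 in the Russian tables below). chardet's SingleByteCharSetProber runs each candidate model by translating input bytes through char_to_order_map and scoring adjacent-letter pairs against language_model (3 = Positive down to 0, per the comments above); typical_positive_ratio is used to normalize the resulting confidence. A minimal sketch of exercising these models through chardet's public API — the sample string and variable names are illustrative, not taken from this diff:

    import chardet  # the same library vendored under pip/_vendor in this diff

    # A reasonably long sample helps the single-byte probers converge;
    # very short inputs tend to produce low-confidence guesses.
    text = "Съешь же ещё этих мягких французских булок, да выпей чаю."
    result = chardet.detect(text.encode("windows-1251"))
    print(result)  # expected shape: {'encoding': ..., 'confidence': ..., 'language': ...}

detect() returns a best guess rather than a certainty, so the confidence field should be checked before trusting the reported encoding on short or mixed-language input.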
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/langrussianmodel.py b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/langrussianmodel.py
index 39a5388..5594452 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/langrussianmodel.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/langrussianmodel.py
@@ -1,5 +1,9 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
 from pip._vendor.chardet.sbcharsetprober import SingleByteCharSetModel
 
+
 # 3: Positive
 # 2: Likely
 # 1: Unlikely
@@ -4111,1615 +4115,1604 @@
 
 # Character Mapping Table(s):
 IBM866_RUSSIAN_CHAR_TO_ORDER = {
-    0: 255,  # '\x00'
-    1: 255,  # '\x01'
-    2: 255,  # '\x02'
-    3: 255,  # '\x03'
-    4: 255,  # '\x04'
-    5: 255,  # '\x05'
-    6: 255,  # '\x06'
-    7: 255,  # '\x07'
-    8: 255,  # '\x08'
-    9: 255,  # '\t'
-    10: 254,  # '\n'
-    11: 255,  # '\x0b'
-    12: 255,  # '\x0c'
-    13: 254,  # '\r'
-    14: 255,  # '\x0e'
-    15: 255,  # '\x0f'
-    16: 255,  # '\x10'
-    17: 255,  # '\x11'
-    18: 255,  # '\x12'
-    19: 255,  # '\x13'
-    20: 255,  # '\x14'
-    21: 255,  # '\x15'
-    22: 255,  # '\x16'
-    23: 255,  # '\x17'
-    24: 255,  # '\x18'
-    25: 255,  # '\x19'
-    26: 255,  # '\x1a'
-    27: 255,  # '\x1b'
-    28: 255,  # '\x1c'
-    29: 255,  # '\x1d'
-    30: 255,  # '\x1e'
-    31: 255,  # '\x1f'
-    32: 253,  # ' '
-    33: 253,  # '!'
-    34: 253,  # '"'
-    35: 253,  # '#'
-    36: 253,  # '$'
-    37: 253,  # '%'
-    38: 253,  # '&'
-    39: 253,  # "'"
-    40: 253,  # '('
-    41: 253,  # ')'
-    42: 253,  # '*'
-    43: 253,  # '+'
-    44: 253,  # ','
-    45: 253,  # '-'
-    46: 253,  # '.'
-    47: 253,  # '/'
-    48: 252,  # '0'
-    49: 252,  # '1'
-    50: 252,  # '2'
-    51: 252,  # '3'
-    52: 252,  # '4'
-    53: 252,  # '5'
-    54: 252,  # '6'
-    55: 252,  # '7'
-    56: 252,  # '8'
-    57: 252,  # '9'
-    58: 253,  # ':'
-    59: 253,  # ';'
-    60: 253,  # '<'
-    61: 253,  # '='
-    62: 253,  # '>'
-    63: 253,  # '?'
-    64: 253,  # '@'
-    65: 142,  # 'A'
-    66: 143,  # 'B'
-    67: 144,  # 'C'
-    68: 145,  # 'D'
-    69: 146,  # 'E'
-    70: 147,  # 'F'
-    71: 148,  # 'G'
-    72: 149,  # 'H'
-    73: 150,  # 'I'
-    74: 151,  # 'J'
-    75: 152,  # 'K'
-    76: 74,  # 'L'
-    77: 153,  # 'M'
-    78: 75,  # 'N'
-    79: 154,  # 'O'
-    80: 155,  # 'P'
-    81: 156,  # 'Q'
-    82: 157,  # 'R'
-    83: 158,  # 'S'
-    84: 159,  # 'T'
-    85: 160,  # 'U'
-    86: 161,  # 'V'
-    87: 162,  # 'W'
-    88: 163,  # 'X'
-    89: 164,  # 'Y'
-    90: 165,  # 'Z'
-    91: 253,  # '['
-    92: 253,  # '\\'
-    93: 253,  # ']'
-    94: 253,  # '^'
-    95: 253,  # '_'
-    96: 253,  # '`'
-    97: 71,  # 'a'
-    98: 172,  # 'b'
-    99: 66,  # 'c'
-    100: 173,  # 'd'
-    101: 65,  # 'e'
-    102: 174,  # 'f'
-    103: 76,  # 'g'
-    104: 175,  # 'h'
-    105: 64,  # 'i'
-    106: 176,  # 'j'
-    107: 177,  # 'k'
-    108: 77,  # 'l'
-    109: 72,  # 'm'
-    110: 178,  # 'n'
-    111: 69,  # 'o'
-    112: 67,  # 'p'
-    113: 179,  # 'q'
-    114: 78,  # 'r'
-    115: 73,  # 's'
-    116: 180,  # 't'
-    117: 181,  # 'u'
-    118: 79,  # 'v'
-    119: 182,  # 'w'
-    120: 183,  # 'x'
-    121: 184,  # 'y'
-    122: 185,  # 'z'
-    123: 253,  # '{'
-    124: 253,  # '|'
-    125: 253,  # '}'
-    126: 253,  # '~'
-    127: 253,  # '\x7f'
-    128: 37,  # 'А'
-    129: 44,  # 'Б'
-    130: 33,  # 'В'
-    131: 46,  # 'Г'
-    132: 41,  # 'Д'
-    133: 48,  # 'Е'
-    134: 56,  # 'Ж'
-    135: 51,  # 'З'
-    136: 42,  # 'И'
-    137: 60,  # 'Й'
-    138: 36,  # 'К'
-    139: 49,  # 'Л'
-    140: 38,  # 'М'
-    141: 31,  # 'Н'
-    142: 34,  # 'О'
-    143: 35,  # 'П'
-    144: 45,  # 'Р'
-    145: 32,  # 'С'
-    146: 40,  # 'Т'
-    147: 52,  # 'У'
-    148: 53,  # 'Ф'
-    149: 55,  # 'Х'
-    150: 58,  # 'Ц'
-    151: 50,  # 'Ч'
-    152: 57,  # 'Ш'
-    153: 63,  # 'Щ'
-    154: 70,  # 'Ъ'
-    155: 62,  # 'Ы'
-    156: 61,  # 'Ь'
-    157: 47,  # 'Э'
-    158: 59,  # 'Ю'
-    159: 43,  # 'Я'
-    160: 3,  # 'а'
-    161: 21,  # 'б'
-    162: 10,  # 'в'
-    163: 19,  # 'г'
-    164: 13,  # 'д'
-    165: 2,  # 'е'
-    166: 24,  # 'ж'
-    167: 20,  # 'з'
-    168: 4,  # 'и'
-    169: 23,  # 'й'
-    170: 11,  # 'к'
-    171: 8,  # 'л'
-    172: 12,  # 'м'
-    173: 5,  # 'н'
-    174: 1,  # 'о'
-    175: 15,  # 'п'
-    176: 191,  # '░'
-    177: 192,  # '▒'
-    178: 193,  # '▓'
-    179: 194,  # '│'
-    180: 195,  # '┤'
-    181: 196,  # '╡'
-    182: 197,  # '╢'
-    183: 198,  # '╖'
-    184: 199,  # '╕'
-    185: 200,  # '╣'
-    186: 201,  # '║'
-    187: 202,  # '╗'
-    188: 203,  # '╝'
-    189: 204,  # '╜'
-    190: 205,  # '╛'
-    191: 206,  # '┐'
-    192: 207,  # '└'
-    193: 208,  # '┴'
-    194: 209,  # '┬'
-    195: 210,  # '├'
-    196: 211,  # '─'
-    197: 212,  # '┼'
-    198: 213,  # '╞'
-    199: 214,  # '╟'
-    200: 215,  # '╚'
-    201: 216,  # '╔'
-    202: 217,  # '╩'
-    203: 218,  # '╦'
-    204: 219,  # '╠'
-    205: 220,  # '═'
-    206: 221,  # '╬'
-    207: 222,  # '╧'
-    208: 223,  # '╨'
-    209: 224,  # '╤'
-    210: 225,  # '╥'
-    211: 226,  # '╙'
-    212: 227,  # '╘'
-    213: 228,  # '╒'
-    214: 229,  # '╓'
-    215: 230,  # '╫'
-    216: 231,  # '╪'
-    217: 232,  # '┘'
-    218: 233,  # '┌'
-    219: 234,  # '█'
-    220: 235,  # '▄'
-    221: 236,  # '▌'
-    222: 237,  # '▐'
-    223: 238,  # '▀'
-    224: 9,  # 'р'
-    225: 7,  # 'с'
-    226: 6,  # 'т'
-    227: 14,  # 'у'
-    228: 39,  # 'ф'
-    229: 26,  # 'х'
-    230: 28,  # 'ц'
-    231: 22,  # 'ч'
-    232: 25,  # 'ш'
-    233: 29,  # 'щ'
-    234: 54,  # 'ъ'
-    235: 18,  # 'ы'
-    236: 17,  # 'ь'
-    237: 30,  # 'э'
-    238: 27,  # 'ю'
-    239: 16,  # 'я'
-    240: 239,  # 'Ё'
-    241: 68,  # 'ё'
-    242: 240,  # 'Є'
-    243: 241,  # 'є'
-    244: 242,  # 'Ї'
-    245: 243,  # 'ї'
-    246: 244,  # 'Ў'
-    247: 245,  # 'ў'
-    248: 246,  # '°'
-    249: 247,  # '∙'
-    250: 248,  # '·'
-    251: 249,  # '√'
-    252: 250,  # '№'
-    253: 251,  # '¤'
-    254: 252,  # '■'
-    255: 255,  # '\xa0'
+     0: 255,  # '\x00'
+     1: 255,  # '\x01'
+     2: 255,  # '\x02'
+     3: 255,  # '\x03'
+     4: 255,  # '\x04'
+     5: 255,  # '\x05'
+     6: 255,  # '\x06'
+     7: 255,  # '\x07'
+     8: 255,  # '\x08'
+     9: 255,  # '\t'
+     10: 254,  # '\n'
+     11: 255,  # '\x0b'
+     12: 255,  # '\x0c'
+     13: 254,  # '\r'
+     14: 255,  # '\x0e'
+     15: 255,  # '\x0f'
+     16: 255,  # '\x10'
+     17: 255,  # '\x11'
+     18: 255,  # '\x12'
+     19: 255,  # '\x13'
+     20: 255,  # '\x14'
+     21: 255,  # '\x15'
+     22: 255,  # '\x16'
+     23: 255,  # '\x17'
+     24: 255,  # '\x18'
+     25: 255,  # '\x19'
+     26: 255,  # '\x1a'
+     27: 255,  # '\x1b'
+     28: 255,  # '\x1c'
+     29: 255,  # '\x1d'
+     30: 255,  # '\x1e'
+     31: 255,  # '\x1f'
+     32: 253,  # ' '
+     33: 253,  # '!'
+     34: 253,  # '"'
+     35: 253,  # '#'
+     36: 253,  # '$'
+     37: 253,  # '%'
+     38: 253,  # '&'
+     39: 253,  # "'"
+     40: 253,  # '('
+     41: 253,  # ')'
+     42: 253,  # '*'
+     43: 253,  # '+'
+     44: 253,  # ','
+     45: 253,  # '-'
+     46: 253,  # '.'
+     47: 253,  # '/'
+     48: 252,  # '0'
+     49: 252,  # '1'
+     50: 252,  # '2'
+     51: 252,  # '3'
+     52: 252,  # '4'
+     53: 252,  # '5'
+     54: 252,  # '6'
+     55: 252,  # '7'
+     56: 252,  # '8'
+     57: 252,  # '9'
+     58: 253,  # ':'
+     59: 253,  # ';'
+     60: 253,  # '<'
+     61: 253,  # '='
+     62: 253,  # '>'
+     63: 253,  # '?'
+     64: 253,  # '@'
+     65: 142,  # 'A'
+     66: 143,  # 'B'
+     67: 144,  # 'C'
+     68: 145,  # 'D'
+     69: 146,  # 'E'
+     70: 147,  # 'F'
+     71: 148,  # 'G'
+     72: 149,  # 'H'
+     73: 150,  # 'I'
+     74: 151,  # 'J'
+     75: 152,  # 'K'
+     76: 74,  # 'L'
+     77: 153,  # 'M'
+     78: 75,  # 'N'
+     79: 154,  # 'O'
+     80: 155,  # 'P'
+     81: 156,  # 'Q'
+     82: 157,  # 'R'
+     83: 158,  # 'S'
+     84: 159,  # 'T'
+     85: 160,  # 'U'
+     86: 161,  # 'V'
+     87: 162,  # 'W'
+     88: 163,  # 'X'
+     89: 164,  # 'Y'
+     90: 165,  # 'Z'
+     91: 253,  # '['
+     92: 253,  # '\\'
+     93: 253,  # ']'
+     94: 253,  # '^'
+     95: 253,  # '_'
+     96: 253,  # '`'
+     97: 71,  # 'a'
+     98: 172,  # 'b'
+     99: 66,  # 'c'
+     100: 173,  # 'd'
+     101: 65,  # 'e'
+     102: 174,  # 'f'
+     103: 76,  # 'g'
+     104: 175,  # 'h'
+     105: 64,  # 'i'
+     106: 176,  # 'j'
+     107: 177,  # 'k'
+     108: 77,  # 'l'
+     109: 72,  # 'm'
+     110: 178,  # 'n'
+     111: 69,  # 'o'
+     112: 67,  # 'p'
+     113: 179,  # 'q'
+     114: 78,  # 'r'
+     115: 73,  # 's'
+     116: 180,  # 't'
+     117: 181,  # 'u'
+     118: 79,  # 'v'
+     119: 182,  # 'w'
+     120: 183,  # 'x'
+     121: 184,  # 'y'
+     122: 185,  # 'z'
+     123: 253,  # '{'
+     124: 253,  # '|'
+     125: 253,  # '}'
+     126: 253,  # '~'
+     127: 253,  # '\x7f'
+     128: 37,  # 'А'
+     129: 44,  # 'Б'
+     130: 33,  # 'В'
+     131: 46,  # 'Г'
+     132: 41,  # 'Д'
+     133: 48,  # 'Е'
+     134: 56,  # 'Ж'
+     135: 51,  # 'З'
+     136: 42,  # 'И'
+     137: 60,  # 'Й'
+     138: 36,  # 'К'
+     139: 49,  # 'Л'
+     140: 38,  # 'М'
+     141: 31,  # 'Н'
+     142: 34,  # 'О'
+     143: 35,  # 'П'
+     144: 45,  # 'Р'
+     145: 32,  # 'С'
+     146: 40,  # 'Т'
+     147: 52,  # 'У'
+     148: 53,  # 'Ф'
+     149: 55,  # 'Х'
+     150: 58,  # 'Ц'
+     151: 50,  # 'Ч'
+     152: 57,  # 'Ш'
+     153: 63,  # 'Щ'
+     154: 70,  # 'Ъ'
+     155: 62,  # 'Ы'
+     156: 61,  # 'Ь'
+     157: 47,  # 'Э'
+     158: 59,  # 'Ю'
+     159: 43,  # 'Я'
+     160: 3,  # 'а'
+     161: 21,  # 'б'
+     162: 10,  # 'в'
+     163: 19,  # 'г'
+     164: 13,  # 'д'
+     165: 2,  # 'е'
+     166: 24,  # 'ж'
+     167: 20,  # 'з'
+     168: 4,  # 'и'
+     169: 23,  # 'й'
+     170: 11,  # 'к'
+     171: 8,  # 'л'
+     172: 12,  # 'м'
+     173: 5,  # 'н'
+     174: 1,  # 'о'
+     175: 15,  # 'п'
+     176: 191,  # '░'
+     177: 192,  # '▒'
+     178: 193,  # '▓'
+     179: 194,  # '│'
+     180: 195,  # '┤'
+     181: 196,  # '╡'
+     182: 197,  # '╢'
+     183: 198,  # '╖'
+     184: 199,  # '╕'
+     185: 200,  # '╣'
+     186: 201,  # '║'
+     187: 202,  # '╗'
+     188: 203,  # '╝'
+     189: 204,  # '╜'
+     190: 205,  # '╛'
+     191: 206,  # '┐'
+     192: 207,  # '└'
+     193: 208,  # '┴'
+     194: 209,  # '┬'
+     195: 210,  # '├'
+     196: 211,  # '─'
+     197: 212,  # '┼'
+     198: 213,  # '╞'
+     199: 214,  # '╟'
+     200: 215,  # '╚'
+     201: 216,  # '╔'
+     202: 217,  # '╩'
+     203: 218,  # '╦'
+     204: 219,  # '╠'
+     205: 220,  # '═'
+     206: 221,  # '╬'
+     207: 222,  # '╧'
+     208: 223,  # '╨'
+     209: 224,  # '╤'
+     210: 225,  # '╥'
+     211: 226,  # '╙'
+     212: 227,  # '╘'
+     213: 228,  # '╒'
+     214: 229,  # '╓'
+     215: 230,  # '╫'
+     216: 231,  # '╪'
+     217: 232,  # '┘'
+     218: 233,  # '┌'
+     219: 234,  # '█'
+     220: 235,  # '▄'
+     221: 236,  # '▌'
+     222: 237,  # '▐'
+     223: 238,  # '▀'
+     224: 9,  # 'р'
+     225: 7,  # 'с'
+     226: 6,  # 'т'
+     227: 14,  # 'у'
+     228: 39,  # 'ф'
+     229: 26,  # 'х'
+     230: 28,  # 'ц'
+     231: 22,  # 'ч'
+     232: 25,  # 'ш'
+     233: 29,  # 'щ'
+     234: 54,  # 'ъ'
+     235: 18,  # 'ы'
+     236: 17,  # 'ь'
+     237: 30,  # 'э'
+     238: 27,  # 'ю'
+     239: 16,  # 'я'
+     240: 239,  # 'Ё'
+     241: 68,  # 'ё'
+     242: 240,  # 'Є'
+     243: 241,  # 'є'
+     244: 242,  # 'Ї'
+     245: 243,  # 'ї'
+     246: 244,  # 'Ў'
+     247: 245,  # 'ў'
+     248: 246,  # '°'
+     249: 247,  # '∙'
+     250: 248,  # '·'
+     251: 249,  # '√'
+     252: 250,  # '№'
+     253: 251,  # '¤'
+     254: 252,  # '■'
+     255: 255,  # '\xa0'
 }
 
-IBM866_RUSSIAN_MODEL = SingleByteCharSetModel(
-    charset_name="IBM866",
-    language="Russian",
-    char_to_order_map=IBM866_RUSSIAN_CHAR_TO_ORDER,
-    language_model=RUSSIAN_LANG_MODEL,
-    typical_positive_ratio=0.976601,
-    keep_ascii_letters=False,
-    alphabet="ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё",
-)
+IBM866_RUSSIAN_MODEL = SingleByteCharSetModel(charset_name='IBM866',
+                                              language='Russian',
+                                              char_to_order_map=IBM866_RUSSIAN_CHAR_TO_ORDER,
+                                              language_model=RUSSIAN_LANG_MODEL,
+                                              typical_positive_ratio=0.976601,
+                                              keep_ascii_letters=False,
+                                              alphabet='ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё')
 
 WINDOWS_1251_RUSSIAN_CHAR_TO_ORDER = {
-    0: 255,  # '\x00'
-    1: 255,  # '\x01'
-    2: 255,  # '\x02'
-    3: 255,  # '\x03'
-    4: 255,  # '\x04'
-    5: 255,  # '\x05'
-    6: 255,  # '\x06'
-    7: 255,  # '\x07'
-    8: 255,  # '\x08'
-    9: 255,  # '\t'
-    10: 254,  # '\n'
-    11: 255,  # '\x0b'
-    12: 255,  # '\x0c'
-    13: 254,  # '\r'
-    14: 255,  # '\x0e'
-    15: 255,  # '\x0f'
-    16: 255,  # '\x10'
-    17: 255,  # '\x11'
-    18: 255,  # '\x12'
-    19: 255,  # '\x13'
-    20: 255,  # '\x14'
-    21: 255,  # '\x15'
-    22: 255,  # '\x16'
-    23: 255,  # '\x17'
-    24: 255,  # '\x18'
-    25: 255,  # '\x19'
-    26: 255,  # '\x1a'
-    27: 255,  # '\x1b'
-    28: 255,  # '\x1c'
-    29: 255,  # '\x1d'
-    30: 255,  # '\x1e'
-    31: 255,  # '\x1f'
-    32: 253,  # ' '
-    33: 253,  # '!'
-    34: 253,  # '"'
-    35: 253,  # '#'
-    36: 253,  # '$'
-    37: 253,  # '%'
-    38: 253,  # '&'
-    39: 253,  # "'"
-    40: 253,  # '('
-    41: 253,  # ')'
-    42: 253,  # '*'
-    43: 253,  # '+'
-    44: 253,  # ','
-    45: 253,  # '-'
-    46: 253,  # '.'
-    47: 253,  # '/'
-    48: 252,  # '0'
-    49: 252,  # '1'
-    50: 252,  # '2'
-    51: 252,  # '3'
-    52: 252,  # '4'
-    53: 252,  # '5'
-    54: 252,  # '6'
-    55: 252,  # '7'
-    56: 252,  # '8'
-    57: 252,  # '9'
-    58: 253,  # ':'
-    59: 253,  # ';'
-    60: 253,  # '<'
-    61: 253,  # '='
-    62: 253,  # '>'
-    63: 253,  # '?'
-    64: 253,  # '@'
-    65: 142,  # 'A'
-    66: 143,  # 'B'
-    67: 144,  # 'C'
-    68: 145,  # 'D'
-    69: 146,  # 'E'
-    70: 147,  # 'F'
-    71: 148,  # 'G'
-    72: 149,  # 'H'
-    73: 150,  # 'I'
-    74: 151,  # 'J'
-    75: 152,  # 'K'
-    76: 74,  # 'L'
-    77: 153,  # 'M'
-    78: 75,  # 'N'
-    79: 154,  # 'O'
-    80: 155,  # 'P'
-    81: 156,  # 'Q'
-    82: 157,  # 'R'
-    83: 158,  # 'S'
-    84: 159,  # 'T'
-    85: 160,  # 'U'
-    86: 161,  # 'V'
-    87: 162,  # 'W'
-    88: 163,  # 'X'
-    89: 164,  # 'Y'
-    90: 165,  # 'Z'
-    91: 253,  # '['
-    92: 253,  # '\\'
-    93: 253,  # ']'
-    94: 253,  # '^'
-    95: 253,  # '_'
-    96: 253,  # '`'
-    97: 71,  # 'a'
-    98: 172,  # 'b'
-    99: 66,  # 'c'
-    100: 173,  # 'd'
-    101: 65,  # 'e'
-    102: 174,  # 'f'
-    103: 76,  # 'g'
-    104: 175,  # 'h'
-    105: 64,  # 'i'
-    106: 176,  # 'j'
-    107: 177,  # 'k'
-    108: 77,  # 'l'
-    109: 72,  # 'm'
-    110: 178,  # 'n'
-    111: 69,  # 'o'
-    112: 67,  # 'p'
-    113: 179,  # 'q'
-    114: 78,  # 'r'
-    115: 73,  # 's'
-    116: 180,  # 't'
-    117: 181,  # 'u'
-    118: 79,  # 'v'
-    119: 182,  # 'w'
-    120: 183,  # 'x'
-    121: 184,  # 'y'
-    122: 185,  # 'z'
-    123: 253,  # '{'
-    124: 253,  # '|'
-    125: 253,  # '}'
-    126: 253,  # '~'
-    127: 253,  # '\x7f'
-    128: 191,  # 'Ђ'
-    129: 192,  # 'Ѓ'
-    130: 193,  # '‚'
-    131: 194,  # 'ѓ'
-    132: 195,  # '„'
-    133: 196,  # '…'
-    134: 197,  # '†'
-    135: 198,  # '‡'
-    136: 199,  # '€'
-    137: 200,  # '‰'
-    138: 201,  # 'Љ'
-    139: 202,  # '‹'
-    140: 203,  # 'Њ'
-    141: 204,  # 'Ќ'
-    142: 205,  # 'Ћ'
-    143: 206,  # 'Џ'
-    144: 207,  # 'ђ'
-    145: 208,  # '‘'
-    146: 209,  # '’'
-    147: 210,  # '“'
-    148: 211,  # '”'
-    149: 212,  # '•'
-    150: 213,  # '–'
-    151: 214,  # '—'
-    152: 215,  # None
-    153: 216,  # '™'
-    154: 217,  # 'љ'
-    155: 218,  # '›'
-    156: 219,  # 'њ'
-    157: 220,  # 'ќ'
-    158: 221,  # 'ћ'
-    159: 222,  # 'џ'
-    160: 223,  # '\xa0'
-    161: 224,  # 'Ў'
-    162: 225,  # 'ў'
-    163: 226,  # 'Ј'
-    164: 227,  # '¤'
-    165: 228,  # 'Ґ'
-    166: 229,  # '¦'
-    167: 230,  # '§'
-    168: 231,  # 'Ё'
-    169: 232,  # '©'
-    170: 233,  # 'Є'
-    171: 234,  # '«'
-    172: 235,  # '¬'
-    173: 236,  # '\xad'
-    174: 237,  # '®'
-    175: 238,  # 'Ї'
-    176: 239,  # '°'
-    177: 240,  # '±'
-    178: 241,  # 'І'
-    179: 242,  # 'і'
-    180: 243,  # 'ґ'
-    181: 244,  # 'µ'
-    182: 245,  # '¶'
-    183: 246,  # '·'
-    184: 68,  # 'ё'
-    185: 247,  # '№'
-    186: 248,  # 'є'
-    187: 249,  # '»'
-    188: 250,  # 'ј'
-    189: 251,  # 'Ѕ'
-    190: 252,  # 'ѕ'
-    191: 253,  # 'ї'
-    192: 37,  # 'А'
-    193: 44,  # 'Б'
-    194: 33,  # 'В'
-    195: 46,  # 'Г'
-    196: 41,  # 'Д'
-    197: 48,  # 'Е'
-    198: 56,  # 'Ж'
-    199: 51,  # 'З'
-    200: 42,  # 'И'
-    201: 60,  # 'Й'
-    202: 36,  # 'К'
-    203: 49,  # 'Л'
-    204: 38,  # 'М'
-    205: 31,  # 'Н'
-    206: 34,  # 'О'
-    207: 35,  # 'П'
-    208: 45,  # 'Р'
-    209: 32,  # 'С'
-    210: 40,  # 'Т'
-    211: 52,  # 'У'
-    212: 53,  # 'Ф'
-    213: 55,  # 'Х'
-    214: 58,  # 'Ц'
-    215: 50,  # 'Ч'
-    216: 57,  # 'Ш'
-    217: 63,  # 'Щ'
-    218: 70,  # 'Ъ'
-    219: 62,  # 'Ы'
-    220: 61,  # 'Ь'
-    221: 47,  # 'Э'
-    222: 59,  # 'Ю'
-    223: 43,  # 'Я'
-    224: 3,  # 'а'
-    225: 21,  # 'б'
-    226: 10,  # 'в'
-    227: 19,  # 'г'
-    228: 13,  # 'д'
-    229: 2,  # 'е'
-    230: 24,  # 'ж'
-    231: 20,  # 'з'
-    232: 4,  # 'и'
-    233: 23,  # 'й'
-    234: 11,  # 'к'
-    235: 8,  # 'л'
-    236: 12,  # 'м'
-    237: 5,  # 'н'
-    238: 1,  # 'о'
-    239: 15,  # 'п'
-    240: 9,  # 'р'
-    241: 7,  # 'с'
-    242: 6,  # 'т'
-    243: 14,  # 'у'
-    244: 39,  # 'ф'
-    245: 26,  # 'х'
-    246: 28,  # 'ц'
-    247: 22,  # 'ч'
-    248: 25,  # 'ш'
-    249: 29,  # 'щ'
-    250: 54,  # 'ъ'
-    251: 18,  # 'ы'
-    252: 17,  # 'ь'
-    253: 30,  # 'э'
-    254: 27,  # 'ю'
-    255: 16,  # 'я'
+     0: 255,  # '\x00'
+     1: 255,  # '\x01'
+     2: 255,  # '\x02'
+     3: 255,  # '\x03'
+     4: 255,  # '\x04'
+     5: 255,  # '\x05'
+     6: 255,  # '\x06'
+     7: 255,  # '\x07'
+     8: 255,  # '\x08'
+     9: 255,  # '\t'
+     10: 254,  # '\n'
+     11: 255,  # '\x0b'
+     12: 255,  # '\x0c'
+     13: 254,  # '\r'
+     14: 255,  # '\x0e'
+     15: 255,  # '\x0f'
+     16: 255,  # '\x10'
+     17: 255,  # '\x11'
+     18: 255,  # '\x12'
+     19: 255,  # '\x13'
+     20: 255,  # '\x14'
+     21: 255,  # '\x15'
+     22: 255,  # '\x16'
+     23: 255,  # '\x17'
+     24: 255,  # '\x18'
+     25: 255,  # '\x19'
+     26: 255,  # '\x1a'
+     27: 255,  # '\x1b'
+     28: 255,  # '\x1c'
+     29: 255,  # '\x1d'
+     30: 255,  # '\x1e'
+     31: 255,  # '\x1f'
+     32: 253,  # ' '
+     33: 253,  # '!'
+     34: 253,  # '"'
+     35: 253,  # '#'
+     36: 253,  # '$'
+     37: 253,  # '%'
+     38: 253,  # '&'
+     39: 253,  # "'"
+     40: 253,  # '('
+     41: 253,  # ')'
+     42: 253,  # '*'
+     43: 253,  # '+'
+     44: 253,  # ','
+     45: 253,  # '-'
+     46: 253,  # '.'
+     47: 253,  # '/'
+     48: 252,  # '0'
+     49: 252,  # '1'
+     50: 252,  # '2'
+     51: 252,  # '3'
+     52: 252,  # '4'
+     53: 252,  # '5'
+     54: 252,  # '6'
+     55: 252,  # '7'
+     56: 252,  # '8'
+     57: 252,  # '9'
+     58: 253,  # ':'
+     59: 253,  # ';'
+     60: 253,  # '<'
+     61: 253,  # '='
+     62: 253,  # '>'
+     63: 253,  # '?'
+     64: 253,  # '@'
+     65: 142,  # 'A'
+     66: 143,  # 'B'
+     67: 144,  # 'C'
+     68: 145,  # 'D'
+     69: 146,  # 'E'
+     70: 147,  # 'F'
+     71: 148,  # 'G'
+     72: 149,  # 'H'
+     73: 150,  # 'I'
+     74: 151,  # 'J'
+     75: 152,  # 'K'
+     76: 74,  # 'L'
+     77: 153,  # 'M'
+     78: 75,  # 'N'
+     79: 154,  # 'O'
+     80: 155,  # 'P'
+     81: 156,  # 'Q'
+     82: 157,  # 'R'
+     83: 158,  # 'S'
+     84: 159,  # 'T'
+     85: 160,  # 'U'
+     86: 161,  # 'V'
+     87: 162,  # 'W'
+     88: 163,  # 'X'
+     89: 164,  # 'Y'
+     90: 165,  # 'Z'
+     91: 253,  # '['
+     92: 253,  # '\\'
+     93: 253,  # ']'
+     94: 253,  # '^'
+     95: 253,  # '_'
+     96: 253,  # '`'
+     97: 71,  # 'a'
+     98: 172,  # 'b'
+     99: 66,  # 'c'
+     100: 173,  # 'd'
+     101: 65,  # 'e'
+     102: 174,  # 'f'
+     103: 76,  # 'g'
+     104: 175,  # 'h'
+     105: 64,  # 'i'
+     106: 176,  # 'j'
+     107: 177,  # 'k'
+     108: 77,  # 'l'
+     109: 72,  # 'm'
+     110: 178,  # 'n'
+     111: 69,  # 'o'
+     112: 67,  # 'p'
+     113: 179,  # 'q'
+     114: 78,  # 'r'
+     115: 73,  # 's'
+     116: 180,  # 't'
+     117: 181,  # 'u'
+     118: 79,  # 'v'
+     119: 182,  # 'w'
+     120: 183,  # 'x'
+     121: 184,  # 'y'
+     122: 185,  # 'z'
+     123: 253,  # '{'
+     124: 253,  # '|'
+     125: 253,  # '}'
+     126: 253,  # '~'
+     127: 253,  # '\x7f'
+     128: 191,  # 'Ђ'
+     129: 192,  # 'Ѓ'
+     130: 193,  # '‚'
+     131: 194,  # 'ѓ'
+     132: 195,  # '„'
+     133: 196,  # '…'
+     134: 197,  # '†'
+     135: 198,  # '‡'
+     136: 199,  # '€'
+     137: 200,  # '‰'
+     138: 201,  # 'Љ'
+     139: 202,  # '‹'
+     140: 203,  # 'Њ'
+     141: 204,  # 'Ќ'
+     142: 205,  # 'Ћ'
+     143: 206,  # 'Џ'
+     144: 207,  # 'ђ'
+     145: 208,  # '‘'
+     146: 209,  # '’'
+     147: 210,  # '“'
+     148: 211,  # '”'
+     149: 212,  # '•'
+     150: 213,  # '–'
+     151: 214,  # '—'
+     152: 215,  # None
+     153: 216,  # '™'
+     154: 217,  # 'љ'
+     155: 218,  # '›'
+     156: 219,  # 'њ'
+     157: 220,  # 'ќ'
+     158: 221,  # 'ћ'
+     159: 222,  # 'џ'
+     160: 223,  # '\xa0'
+     161: 224,  # 'Ў'
+     162: 225,  # 'ў'
+     163: 226,  # 'Ј'
+     164: 227,  # '¤'
+     165: 228,  # 'Ґ'
+     166: 229,  # '¦'
+     167: 230,  # '§'
+     168: 231,  # 'Ё'
+     169: 232,  # '©'
+     170: 233,  # 'Є'
+     171: 234,  # '«'
+     172: 235,  # '¬'
+     173: 236,  # '\xad'
+     174: 237,  # '®'
+     175: 238,  # 'Ї'
+     176: 239,  # '°'
+     177: 240,  # '±'
+     178: 241,  # 'І'
+     179: 242,  # 'і'
+     180: 243,  # 'ґ'
+     181: 244,  # 'µ'
+     182: 245,  # '¶'
+     183: 246,  # '·'
+     184: 68,  # 'ё'
+     185: 247,  # '№'
+     186: 248,  # 'є'
+     187: 249,  # '»'
+     188: 250,  # 'ј'
+     189: 251,  # 'Ѕ'
+     190: 252,  # 'ѕ'
+     191: 253,  # 'ї'
+     192: 37,  # 'А'
+     193: 44,  # 'Б'
+     194: 33,  # 'В'
+     195: 46,  # 'Г'
+     196: 41,  # 'Д'
+     197: 48,  # 'Е'
+     198: 56,  # 'Ж'
+     199: 51,  # 'З'
+     200: 42,  # 'И'
+     201: 60,  # 'Й'
+     202: 36,  # 'К'
+     203: 49,  # 'Л'
+     204: 38,  # 'М'
+     205: 31,  # 'Н'
+     206: 34,  # 'О'
+     207: 35,  # 'П'
+     208: 45,  # 'Р'
+     209: 32,  # 'С'
+     210: 40,  # 'Т'
+     211: 52,  # 'У'
+     212: 53,  # 'Ф'
+     213: 55,  # 'Х'
+     214: 58,  # 'Ц'
+     215: 50,  # 'Ч'
+     216: 57,  # 'Ш'
+     217: 63,  # 'Щ'
+     218: 70,  # 'Ъ'
+     219: 62,  # 'Ы'
+     220: 61,  # 'Ь'
+     221: 47,  # 'Э'
+     222: 59,  # 'Ю'
+     223: 43,  # 'Я'
+     224: 3,  # 'а'
+     225: 21,  # 'б'
+     226: 10,  # 'в'
+     227: 19,  # 'г'
+     228: 13,  # 'д'
+     229: 2,  # 'е'
+     230: 24,  # 'ж'
+     231: 20,  # 'з'
+     232: 4,  # 'и'
+     233: 23,  # 'й'
+     234: 11,  # 'к'
+     235: 8,  # 'л'
+     236: 12,  # 'м'
+     237: 5,  # 'н'
+     238: 1,  # 'о'
+     239: 15,  # 'п'
+     240: 9,  # 'р'
+     241: 7,  # 'с'
+     242: 6,  # 'т'
+     243: 14,  # 'у'
+     244: 39,  # 'ф'
+     245: 26,  # 'х'
+     246: 28,  # 'ц'
+     247: 22,  # 'ч'
+     248: 25,  # 'ш'
+     249: 29,  # 'щ'
+     250: 54,  # 'ъ'
+     251: 18,  # 'ы'
+     252: 17,  # 'ь'
+     253: 30,  # 'э'
+     254: 27,  # 'ю'
+     255: 16,  # 'я'
 }
 
-WINDOWS_1251_RUSSIAN_MODEL = SingleByteCharSetModel(
-    charset_name="windows-1251",
-    language="Russian",
-    char_to_order_map=WINDOWS_1251_RUSSIAN_CHAR_TO_ORDER,
-    language_model=RUSSIAN_LANG_MODEL,
-    typical_positive_ratio=0.976601,
-    keep_ascii_letters=False,
-    alphabet="ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё",
-)
+WINDOWS_1251_RUSSIAN_MODEL = SingleByteCharSetModel(charset_name='windows-1251',
+                                                    language='Russian',
+                                                    char_to_order_map=WINDOWS_1251_RUSSIAN_CHAR_TO_ORDER,
+                                                    language_model=RUSSIAN_LANG_MODEL,
+                                                    typical_positive_ratio=0.976601,
+                                                    keep_ascii_letters=False,
+                                                    alphabet='ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё')
 
 IBM855_RUSSIAN_CHAR_TO_ORDER = {
-    0: 255,  # '\x00'
-    1: 255,  # '\x01'
-    2: 255,  # '\x02'
-    3: 255,  # '\x03'
-    4: 255,  # '\x04'
-    5: 255,  # '\x05'
-    6: 255,  # '\x06'
-    7: 255,  # '\x07'
-    8: 255,  # '\x08'
-    9: 255,  # '\t'
-    10: 254,  # '\n'
-    11: 255,  # '\x0b'
-    12: 255,  # '\x0c'
-    13: 254,  # '\r'
-    14: 255,  # '\x0e'
-    15: 255,  # '\x0f'
-    16: 255,  # '\x10'
-    17: 255,  # '\x11'
-    18: 255,  # '\x12'
-    19: 255,  # '\x13'
-    20: 255,  # '\x14'
-    21: 255,  # '\x15'
-    22: 255,  # '\x16'
-    23: 255,  # '\x17'
-    24: 255,  # '\x18'
-    25: 255,  # '\x19'
-    26: 255,  # '\x1a'
-    27: 255,  # '\x1b'
-    28: 255,  # '\x1c'
-    29: 255,  # '\x1d'
-    30: 255,  # '\x1e'
-    31: 255,  # '\x1f'
-    32: 253,  # ' '
-    33: 253,  # '!'
-    34: 253,  # '"'
-    35: 253,  # '#'
-    36: 253,  # '$'
-    37: 253,  # '%'
-    38: 253,  # '&'
-    39: 253,  # "'"
-    40: 253,  # '('
-    41: 253,  # ')'
-    42: 253,  # '*'
-    43: 253,  # '+'
-    44: 253,  # ','
-    45: 253,  # '-'
-    46: 253,  # '.'
-    47: 253,  # '/'
-    48: 252,  # '0'
-    49: 252,  # '1'
-    50: 252,  # '2'
-    51: 252,  # '3'
-    52: 252,  # '4'
-    53: 252,  # '5'
-    54: 252,  # '6'
-    55: 252,  # '7'
-    56: 252,  # '8'
-    57: 252,  # '9'
-    58: 253,  # ':'
-    59: 253,  # ';'
-    60: 253,  # '<'
-    61: 253,  # '='
-    62: 253,  # '>'
-    63: 253,  # '?'
-    64: 253,  # '@'
-    65: 142,  # 'A'
-    66: 143,  # 'B'
-    67: 144,  # 'C'
-    68: 145,  # 'D'
-    69: 146,  # 'E'
-    70: 147,  # 'F'
-    71: 148,  # 'G'
-    72: 149,  # 'H'
-    73: 150,  # 'I'
-    74: 151,  # 'J'
-    75: 152,  # 'K'
-    76: 74,  # 'L'
-    77: 153,  # 'M'
-    78: 75,  # 'N'
-    79: 154,  # 'O'
-    80: 155,  # 'P'
-    81: 156,  # 'Q'
-    82: 157,  # 'R'
-    83: 158,  # 'S'
-    84: 159,  # 'T'
-    85: 160,  # 'U'
-    86: 161,  # 'V'
-    87: 162,  # 'W'
-    88: 163,  # 'X'
-    89: 164,  # 'Y'
-    90: 165,  # 'Z'
-    91: 253,  # '['
-    92: 253,  # '\\'
-    93: 253,  # ']'
-    94: 253,  # '^'
-    95: 253,  # '_'
-    96: 253,  # '`'
-    97: 71,  # 'a'
-    98: 172,  # 'b'
-    99: 66,  # 'c'
-    100: 173,  # 'd'
-    101: 65,  # 'e'
-    102: 174,  # 'f'
-    103: 76,  # 'g'
-    104: 175,  # 'h'
-    105: 64,  # 'i'
-    106: 176,  # 'j'
-    107: 177,  # 'k'
-    108: 77,  # 'l'
-    109: 72,  # 'm'
-    110: 178,  # 'n'
-    111: 69,  # 'o'
-    112: 67,  # 'p'
-    113: 179,  # 'q'
-    114: 78,  # 'r'
-    115: 73,  # 's'
-    116: 180,  # 't'
-    117: 181,  # 'u'
-    118: 79,  # 'v'
-    119: 182,  # 'w'
-    120: 183,  # 'x'
-    121: 184,  # 'y'
-    122: 185,  # 'z'
-    123: 253,  # '{'
-    124: 253,  # '|'
-    125: 253,  # '}'
-    126: 253,  # '~'
-    127: 253,  # '\x7f'
-    128: 191,  # 'ђ'
-    129: 192,  # 'Ђ'
-    130: 193,  # 'ѓ'
-    131: 194,  # 'Ѓ'
-    132: 68,  # 'ё'
-    133: 195,  # 'Ё'
-    134: 196,  # 'є'
-    135: 197,  # 'Є'
-    136: 198,  # 'ѕ'
-    137: 199,  # 'Ѕ'
-    138: 200,  # 'і'
-    139: 201,  # 'І'
-    140: 202,  # 'ї'
-    141: 203,  # 'Ї'
-    142: 204,  # 'ј'
-    143: 205,  # 'Ј'
-    144: 206,  # 'љ'
-    145: 207,  # 'Љ'
-    146: 208,  # 'њ'
-    147: 209,  # 'Њ'
-    148: 210,  # 'ћ'
-    149: 211,  # 'Ћ'
-    150: 212,  # 'ќ'
-    151: 213,  # 'Ќ'
-    152: 214,  # 'ў'
-    153: 215,  # 'Ў'
-    154: 216,  # 'џ'
-    155: 217,  # 'Џ'
-    156: 27,  # 'ю'
-    157: 59,  # 'Ю'
-    158: 54,  # 'ъ'
-    159: 70,  # 'Ъ'
-    160: 3,  # 'а'
-    161: 37,  # 'А'
-    162: 21,  # 'б'
-    163: 44,  # 'Б'
-    164: 28,  # 'ц'
-    165: 58,  # 'Ц'
-    166: 13,  # 'д'
-    167: 41,  # 'Д'
-    168: 2,  # 'е'
-    169: 48,  # 'Е'
-    170: 39,  # 'ф'
-    171: 53,  # 'Ф'
-    172: 19,  # 'г'
-    173: 46,  # 'Г'
-    174: 218,  # '«'
-    175: 219,  # '»'
-    176: 220,  # '░'
-    177: 221,  # '▒'
-    178: 222,  # '▓'
-    179: 223,  # '│'
-    180: 224,  # '┤'
-    181: 26,  # 'х'
-    182: 55,  # 'Х'
-    183: 4,  # 'и'
-    184: 42,  # 'И'
-    185: 225,  # '╣'
-    186: 226,  # '║'
-    187: 227,  # '╗'
-    188: 228,  # '╝'
-    189: 23,  # 'й'
-    190: 60,  # 'Й'
-    191: 229,  # '┐'
-    192: 230,  # '└'
-    193: 231,  # '┴'
-    194: 232,  # '┬'
-    195: 233,  # '├'
-    196: 234,  # '─'
-    197: 235,  # '┼'
-    198: 11,  # 'к'
-    199: 36,  # 'К'
-    200: 236,  # '╚'
-    201: 237,  # '╔'
-    202: 238,  # '╩'
-    203: 239,  # '╦'
-    204: 240,  # '╠'
-    205: 241,  # '═'
-    206: 242,  # '╬'
-    207: 243,  # '¤'
-    208: 8,  # 'л'
-    209: 49,  # 'Л'
-    210: 12,  # 'м'
-    211: 38,  # 'М'
-    212: 5,  # 'н'
-    213: 31,  # 'Н'
-    214: 1,  # 'о'
-    215: 34,  # 'О'
-    216: 15,  # 'п'
-    217: 244,  # '┘'
-    218: 245,  # '┌'
-    219: 246,  # '█'
-    220: 247,  # '▄'
-    221: 35,  # 'П'
-    222: 16,  # 'я'
-    223: 248,  # '▀'
-    224: 43,  # 'Я'
-    225: 9,  # 'р'
-    226: 45,  # 'Р'
-    227: 7,  # 'с'
-    228: 32,  # 'С'
-    229: 6,  # 'т'
-    230: 40,  # 'Т'
-    231: 14,  # 'у'
-    232: 52,  # 'У'
-    233: 24,  # 'ж'
-    234: 56,  # 'Ж'
-    235: 10,  # 'в'
-    236: 33,  # 'В'
-    237: 17,  # 'ь'
-    238: 61,  # 'Ь'
-    239: 249,  # '№'
-    240: 250,  # '\xad'
-    241: 18,  # 'ы'
-    242: 62,  # 'Ы'
-    243: 20,  # 'з'
-    244: 51,  # 'З'
-    245: 25,  # 'ш'
-    246: 57,  # 'Ш'
-    247: 30,  # 'э'
-    248: 47,  # 'Э'
-    249: 29,  # 'щ'
-    250: 63,  # 'Щ'
-    251: 22,  # 'ч'
-    252: 50,  # 'Ч'
-    253: 251,  # '§'
-    254: 252,  # '■'
-    255: 255,  # '\xa0'
+     0: 255,  # '\x00'
+     1: 255,  # '\x01'
+     2: 255,  # '\x02'
+     3: 255,  # '\x03'
+     4: 255,  # '\x04'
+     5: 255,  # '\x05'
+     6: 255,  # '\x06'
+     7: 255,  # '\x07'
+     8: 255,  # '\x08'
+     9: 255,  # '\t'
+     10: 254,  # '\n'
+     11: 255,  # '\x0b'
+     12: 255,  # '\x0c'
+     13: 254,  # '\r'
+     14: 255,  # '\x0e'
+     15: 255,  # '\x0f'
+     16: 255,  # '\x10'
+     17: 255,  # '\x11'
+     18: 255,  # '\x12'
+     19: 255,  # '\x13'
+     20: 255,  # '\x14'
+     21: 255,  # '\x15'
+     22: 255,  # '\x16'
+     23: 255,  # '\x17'
+     24: 255,  # '\x18'
+     25: 255,  # '\x19'
+     26: 255,  # '\x1a'
+     27: 255,  # '\x1b'
+     28: 255,  # '\x1c'
+     29: 255,  # '\x1d'
+     30: 255,  # '\x1e'
+     31: 255,  # '\x1f'
+     32: 253,  # ' '
+     33: 253,  # '!'
+     34: 253,  # '"'
+     35: 253,  # '#'
+     36: 253,  # '$'
+     37: 253,  # '%'
+     38: 253,  # '&'
+     39: 253,  # "'"
+     40: 253,  # '('
+     41: 253,  # ')'
+     42: 253,  # '*'
+     43: 253,  # '+'
+     44: 253,  # ','
+     45: 253,  # '-'
+     46: 253,  # '.'
+     47: 253,  # '/'
+     48: 252,  # '0'
+     49: 252,  # '1'
+     50: 252,  # '2'
+     51: 252,  # '3'
+     52: 252,  # '4'
+     53: 252,  # '5'
+     54: 252,  # '6'
+     55: 252,  # '7'
+     56: 252,  # '8'
+     57: 252,  # '9'
+     58: 253,  # ':'
+     59: 253,  # ';'
+     60: 253,  # '<'
+     61: 253,  # '='
+     62: 253,  # '>'
+     63: 253,  # '?'
+     64: 253,  # '@'
+     65: 142,  # 'A'
+     66: 143,  # 'B'
+     67: 144,  # 'C'
+     68: 145,  # 'D'
+     69: 146,  # 'E'
+     70: 147,  # 'F'
+     71: 148,  # 'G'
+     72: 149,  # 'H'
+     73: 150,  # 'I'
+     74: 151,  # 'J'
+     75: 152,  # 'K'
+     76: 74,  # 'L'
+     77: 153,  # 'M'
+     78: 75,  # 'N'
+     79: 154,  # 'O'
+     80: 155,  # 'P'
+     81: 156,  # 'Q'
+     82: 157,  # 'R'
+     83: 158,  # 'S'
+     84: 159,  # 'T'
+     85: 160,  # 'U'
+     86: 161,  # 'V'
+     87: 162,  # 'W'
+     88: 163,  # 'X'
+     89: 164,  # 'Y'
+     90: 165,  # 'Z'
+     91: 253,  # '['
+     92: 253,  # '\\'
+     93: 253,  # ']'
+     94: 253,  # '^'
+     95: 253,  # '_'
+     96: 253,  # '`'
+     97: 71,  # 'a'
+     98: 172,  # 'b'
+     99: 66,  # 'c'
+     100: 173,  # 'd'
+     101: 65,  # 'e'
+     102: 174,  # 'f'
+     103: 76,  # 'g'
+     104: 175,  # 'h'
+     105: 64,  # 'i'
+     106: 176,  # 'j'
+     107: 177,  # 'k'
+     108: 77,  # 'l'
+     109: 72,  # 'm'
+     110: 178,  # 'n'
+     111: 69,  # 'o'
+     112: 67,  # 'p'
+     113: 179,  # 'q'
+     114: 78,  # 'r'
+     115: 73,  # 's'
+     116: 180,  # 't'
+     117: 181,  # 'u'
+     118: 79,  # 'v'
+     119: 182,  # 'w'
+     120: 183,  # 'x'
+     121: 184,  # 'y'
+     122: 185,  # 'z'
+     123: 253,  # '{'
+     124: 253,  # '|'
+     125: 253,  # '}'
+     126: 253,  # '~'
+     127: 253,  # '\x7f'
+     128: 191,  # 'ђ'
+     129: 192,  # 'Ђ'
+     130: 193,  # 'ѓ'
+     131: 194,  # 'Ѓ'
+     132: 68,  # 'ё'
+     133: 195,  # 'Ё'
+     134: 196,  # 'є'
+     135: 197,  # 'Є'
+     136: 198,  # 'ѕ'
+     137: 199,  # 'Ѕ'
+     138: 200,  # 'і'
+     139: 201,  # 'І'
+     140: 202,  # 'ї'
+     141: 203,  # 'Ї'
+     142: 204,  # 'ј'
+     143: 205,  # 'Ј'
+     144: 206,  # 'љ'
+     145: 207,  # 'Љ'
+     146: 208,  # 'њ'
+     147: 209,  # 'Њ'
+     148: 210,  # 'ћ'
+     149: 211,  # 'Ћ'
+     150: 212,  # 'ќ'
+     151: 213,  # 'Ќ'
+     152: 214,  # 'ў'
+     153: 215,  # 'Ў'
+     154: 216,  # 'џ'
+     155: 217,  # 'Џ'
+     156: 27,  # 'ю'
+     157: 59,  # 'Ю'
+     158: 54,  # 'ъ'
+     159: 70,  # 'Ъ'
+     160: 3,  # 'а'
+     161: 37,  # 'А'
+     162: 21,  # 'б'
+     163: 44,  # 'Б'
+     164: 28,  # 'ц'
+     165: 58,  # 'Ц'
+     166: 13,  # 'д'
+     167: 41,  # 'Д'
+     168: 2,  # 'е'
+     169: 48,  # 'Е'
+     170: 39,  # 'ф'
+     171: 53,  # 'Ф'
+     172: 19,  # 'г'
+     173: 46,  # 'Г'
+     174: 218,  # '«'
+     175: 219,  # '»'
+     176: 220,  # '░'
+     177: 221,  # '▒'
+     178: 222,  # '▓'
+     179: 223,  # '│'
+     180: 224,  # '┤'
+     181: 26,  # 'х'
+     182: 55,  # 'Х'
+     183: 4,  # 'и'
+     184: 42,  # 'И'
+     185: 225,  # '╣'
+     186: 226,  # '║'
+     187: 227,  # '╗'
+     188: 228,  # '╝'
+     189: 23,  # 'й'
+     190: 60,  # 'Й'
+     191: 229,  # '┐'
+     192: 230,  # '└'
+     193: 231,  # '┴'
+     194: 232,  # '┬'
+     195: 233,  # '├'
+     196: 234,  # '─'
+     197: 235,  # '┼'
+     198: 11,  # 'к'
+     199: 36,  # 'К'
+     200: 236,  # '╚'
+     201: 237,  # '╔'
+     202: 238,  # '╩'
+     203: 239,  # '╦'
+     204: 240,  # '╠'
+     205: 241,  # '═'
+     206: 242,  # '╬'
+     207: 243,  # '¤'
+     208: 8,  # 'л'
+     209: 49,  # 'Л'
+     210: 12,  # 'м'
+     211: 38,  # 'М'
+     212: 5,  # 'н'
+     213: 31,  # 'Н'
+     214: 1,  # 'о'
+     215: 34,  # 'О'
+     216: 15,  # 'п'
+     217: 244,  # '┘'
+     218: 245,  # '┌'
+     219: 246,  # '█'
+     220: 247,  # '▄'
+     221: 35,  # 'П'
+     222: 16,  # 'я'
+     223: 248,  # '▀'
+     224: 43,  # 'Я'
+     225: 9,  # 'р'
+     226: 45,  # 'Р'
+     227: 7,  # 'с'
+     228: 32,  # 'С'
+     229: 6,  # 'т'
+     230: 40,  # 'Т'
+     231: 14,  # 'у'
+     232: 52,  # 'У'
+     233: 24,  # 'ж'
+     234: 56,  # 'Ж'
+     235: 10,  # 'в'
+     236: 33,  # 'В'
+     237: 17,  # 'ь'
+     238: 61,  # 'Ь'
+     239: 249,  # '№'
+     240: 250,  # '\xad'
+     241: 18,  # 'ы'
+     242: 62,  # 'Ы'
+     243: 20,  # 'з'
+     244: 51,  # 'З'
+     245: 25,  # 'ш'
+     246: 57,  # 'Ш'
+     247: 30,  # 'э'
+     248: 47,  # 'Э'
+     249: 29,  # 'щ'
+     250: 63,  # 'Щ'
+     251: 22,  # 'ч'
+     252: 50,  # 'Ч'
+     253: 251,  # '§'
+     254: 252,  # '■'
+     255: 255,  # '\xa0'
 }
 
-IBM855_RUSSIAN_MODEL = SingleByteCharSetModel(
-    charset_name="IBM855",
-    language="Russian",
-    char_to_order_map=IBM855_RUSSIAN_CHAR_TO_ORDER,
-    language_model=RUSSIAN_LANG_MODEL,
-    typical_positive_ratio=0.976601,
-    keep_ascii_letters=False,
-    alphabet="ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё",
-)
+IBM855_RUSSIAN_MODEL = SingleByteCharSetModel(charset_name='IBM855',
+                                              language='Russian',
+                                              char_to_order_map=IBM855_RUSSIAN_CHAR_TO_ORDER,
+                                              language_model=RUSSIAN_LANG_MODEL,
+                                              typical_positive_ratio=0.976601,
+                                              keep_ascii_letters=False,
+                                              alphabet='ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё')
 
 KOI8_R_RUSSIAN_CHAR_TO_ORDER = {
-    0: 255,  # '\x00'
-    1: 255,  # '\x01'
-    2: 255,  # '\x02'
-    3: 255,  # '\x03'
-    4: 255,  # '\x04'
-    5: 255,  # '\x05'
-    6: 255,  # '\x06'
-    7: 255,  # '\x07'
-    8: 255,  # '\x08'
-    9: 255,  # '\t'
-    10: 254,  # '\n'
-    11: 255,  # '\x0b'
-    12: 255,  # '\x0c'
-    13: 254,  # '\r'
-    14: 255,  # '\x0e'
-    15: 255,  # '\x0f'
-    16: 255,  # '\x10'
-    17: 255,  # '\x11'
-    18: 255,  # '\x12'
-    19: 255,  # '\x13'
-    20: 255,  # '\x14'
-    21: 255,  # '\x15'
-    22: 255,  # '\x16'
-    23: 255,  # '\x17'
-    24: 255,  # '\x18'
-    25: 255,  # '\x19'
-    26: 255,  # '\x1a'
-    27: 255,  # '\x1b'
-    28: 255,  # '\x1c'
-    29: 255,  # '\x1d'
-    30: 255,  # '\x1e'
-    31: 255,  # '\x1f'
-    32: 253,  # ' '
-    33: 253,  # '!'
-    34: 253,  # '"'
-    35: 253,  # '#'
-    36: 253,  # '$'
-    37: 253,  # '%'
-    38: 253,  # '&'
-    39: 253,  # "'"
-    40: 253,  # '('
-    41: 253,  # ')'
-    42: 253,  # '*'
-    43: 253,  # '+'
-    44: 253,  # ','
-    45: 253,  # '-'
-    46: 253,  # '.'
-    47: 253,  # '/'
-    48: 252,  # '0'
-    49: 252,  # '1'
-    50: 252,  # '2'
-    51: 252,  # '3'
-    52: 252,  # '4'
-    53: 252,  # '5'
-    54: 252,  # '6'
-    55: 252,  # '7'
-    56: 252,  # '8'
-    57: 252,  # '9'
-    58: 253,  # ':'
-    59: 253,  # ';'
-    60: 253,  # '<'
-    61: 253,  # '='
-    62: 253,  # '>'
-    63: 253,  # '?'
-    64: 253,  # '@'
-    65: 142,  # 'A'
-    66: 143,  # 'B'
-    67: 144,  # 'C'
-    68: 145,  # 'D'
-    69: 146,  # 'E'
-    70: 147,  # 'F'
-    71: 148,  # 'G'
-    72: 149,  # 'H'
-    73: 150,  # 'I'
-    74: 151,  # 'J'
-    75: 152,  # 'K'
-    76: 74,  # 'L'
-    77: 153,  # 'M'
-    78: 75,  # 'N'
-    79: 154,  # 'O'
-    80: 155,  # 'P'
-    81: 156,  # 'Q'
-    82: 157,  # 'R'
-    83: 158,  # 'S'
-    84: 159,  # 'T'
-    85: 160,  # 'U'
-    86: 161,  # 'V'
-    87: 162,  # 'W'
-    88: 163,  # 'X'
-    89: 164,  # 'Y'
-    90: 165,  # 'Z'
-    91: 253,  # '['
-    92: 253,  # '\\'
-    93: 253,  # ']'
-    94: 253,  # '^'
-    95: 253,  # '_'
-    96: 253,  # '`'
-    97: 71,  # 'a'
-    98: 172,  # 'b'
-    99: 66,  # 'c'
-    100: 173,  # 'd'
-    101: 65,  # 'e'
-    102: 174,  # 'f'
-    103: 76,  # 'g'
-    104: 175,  # 'h'
-    105: 64,  # 'i'
-    106: 176,  # 'j'
-    107: 177,  # 'k'
-    108: 77,  # 'l'
-    109: 72,  # 'm'
-    110: 178,  # 'n'
-    111: 69,  # 'o'
-    112: 67,  # 'p'
-    113: 179,  # 'q'
-    114: 78,  # 'r'
-    115: 73,  # 's'
-    116: 180,  # 't'
-    117: 181,  # 'u'
-    118: 79,  # 'v'
-    119: 182,  # 'w'
-    120: 183,  # 'x'
-    121: 184,  # 'y'
-    122: 185,  # 'z'
-    123: 253,  # '{'
-    124: 253,  # '|'
-    125: 253,  # '}'
-    126: 253,  # '~'
-    127: 253,  # '\x7f'
-    128: 191,  # '─'
-    129: 192,  # '│'
-    130: 193,  # '┌'
-    131: 194,  # '┐'
-    132: 195,  # '└'
-    133: 196,  # '┘'
-    134: 197,  # '├'
-    135: 198,  # '┤'
-    136: 199,  # '┬'
-    137: 200,  # '┴'
-    138: 201,  # '┼'
-    139: 202,  # '▀'
-    140: 203,  # '▄'
-    141: 204,  # '█'
-    142: 205,  # '▌'
-    143: 206,  # '▐'
-    144: 207,  # '░'
-    145: 208,  # '▒'
-    146: 209,  # '▓'
-    147: 210,  # '⌠'
-    148: 211,  # '■'
-    149: 212,  # '∙'
-    150: 213,  # '√'
-    151: 214,  # '≈'
-    152: 215,  # '≤'
-    153: 216,  # '≥'
-    154: 217,  # '\xa0'
-    155: 218,  # '⌡'
-    156: 219,  # '°'
-    157: 220,  # '²'
-    158: 221,  # '·'
-    159: 222,  # '÷'
-    160: 223,  # '═'
-    161: 224,  # '║'
-    162: 225,  # '╒'
-    163: 68,  # 'ё'
-    164: 226,  # '╓'
-    165: 227,  # '╔'
-    166: 228,  # '╕'
-    167: 229,  # '╖'
-    168: 230,  # '╗'
-    169: 231,  # '╘'
-    170: 232,  # '╙'
-    171: 233,  # '╚'
-    172: 234,  # '╛'
-    173: 235,  # '╜'
-    174: 236,  # '╝'
-    175: 237,  # '╞'
-    176: 238,  # '╟'
-    177: 239,  # '╠'
-    178: 240,  # '╡'
-    179: 241,  # 'Ё'
-    180: 242,  # '╢'
-    181: 243,  # '╣'
-    182: 244,  # '╤'
-    183: 245,  # '╥'
-    184: 246,  # '╦'
-    185: 247,  # '╧'
-    186: 248,  # '╨'
-    187: 249,  # '╩'
-    188: 250,  # '╪'
-    189: 251,  # '╫'
-    190: 252,  # '╬'
-    191: 253,  # '©'
-    192: 27,  # 'ю'
-    193: 3,  # 'а'
-    194: 21,  # 'б'
-    195: 28,  # 'ц'
-    196: 13,  # 'д'
-    197: 2,  # 'е'
-    198: 39,  # 'ф'
-    199: 19,  # 'г'
-    200: 26,  # 'х'
-    201: 4,  # 'и'
-    202: 23,  # 'й'
-    203: 11,  # 'к'
-    204: 8,  # 'л'
-    205: 12,  # 'м'
-    206: 5,  # 'н'
-    207: 1,  # 'о'
-    208: 15,  # 'п'
-    209: 16,  # 'я'
-    210: 9,  # 'р'
-    211: 7,  # 'с'
-    212: 6,  # 'т'
-    213: 14,  # 'у'
-    214: 24,  # 'ж'
-    215: 10,  # 'в'
-    216: 17,  # 'ь'
-    217: 18,  # 'ы'
-    218: 20,  # 'з'
-    219: 25,  # 'ш'
-    220: 30,  # 'э'
-    221: 29,  # 'щ'
-    222: 22,  # 'ч'
-    223: 54,  # 'ъ'
-    224: 59,  # 'Ю'
-    225: 37,  # 'А'
-    226: 44,  # 'Б'
-    227: 58,  # 'Ц'
-    228: 41,  # 'Д'
-    229: 48,  # 'Е'
-    230: 53,  # 'Ф'
-    231: 46,  # 'Г'
-    232: 55,  # 'Х'
-    233: 42,  # 'И'
-    234: 60,  # 'Й'
-    235: 36,  # 'К'
-    236: 49,  # 'Л'
-    237: 38,  # 'М'
-    238: 31,  # 'Н'
-    239: 34,  # 'О'
-    240: 35,  # 'П'
-    241: 43,  # 'Я'
-    242: 45,  # 'Р'
-    243: 32,  # 'С'
-    244: 40,  # 'Т'
-    245: 52,  # 'У'
-    246: 56,  # 'Ж'
-    247: 33,  # 'В'
-    248: 61,  # 'Ь'
-    249: 62,  # 'Ы'
-    250: 51,  # 'З'
-    251: 57,  # 'Ш'
-    252: 47,  # 'Э'
-    253: 63,  # 'Щ'
-    254: 50,  # 'Ч'
-    255: 70,  # 'Ъ'
+     0: 255,  # '\x00'
+     1: 255,  # '\x01'
+     2: 255,  # '\x02'
+     3: 255,  # '\x03'
+     4: 255,  # '\x04'
+     5: 255,  # '\x05'
+     6: 255,  # '\x06'
+     7: 255,  # '\x07'
+     8: 255,  # '\x08'
+     9: 255,  # '\t'
+     10: 254,  # '\n'
+     11: 255,  # '\x0b'
+     12: 255,  # '\x0c'
+     13: 254,  # '\r'
+     14: 255,  # '\x0e'
+     15: 255,  # '\x0f'
+     16: 255,  # '\x10'
+     17: 255,  # '\x11'
+     18: 255,  # '\x12'
+     19: 255,  # '\x13'
+     20: 255,  # '\x14'
+     21: 255,  # '\x15'
+     22: 255,  # '\x16'
+     23: 255,  # '\x17'
+     24: 255,  # '\x18'
+     25: 255,  # '\x19'
+     26: 255,  # '\x1a'
+     27: 255,  # '\x1b'
+     28: 255,  # '\x1c'
+     29: 255,  # '\x1d'
+     30: 255,  # '\x1e'
+     31: 255,  # '\x1f'
+     32: 253,  # ' '
+     33: 253,  # '!'
+     34: 253,  # '"'
+     35: 253,  # '#'
+     36: 253,  # '$'
+     37: 253,  # '%'
+     38: 253,  # '&'
+     39: 253,  # "'"
+     40: 253,  # '('
+     41: 253,  # ')'
+     42: 253,  # '*'
+     43: 253,  # '+'
+     44: 253,  # ','
+     45: 253,  # '-'
+     46: 253,  # '.'
+     47: 253,  # '/'
+     48: 252,  # '0'
+     49: 252,  # '1'
+     50: 252,  # '2'
+     51: 252,  # '3'
+     52: 252,  # '4'
+     53: 252,  # '5'
+     54: 252,  # '6'
+     55: 252,  # '7'
+     56: 252,  # '8'
+     57: 252,  # '9'
+     58: 253,  # ':'
+     59: 253,  # ';'
+     60: 253,  # '<'
+     61: 253,  # '='
+     62: 253,  # '>'
+     63: 253,  # '?'
+     64: 253,  # '@'
+     65: 142,  # 'A'
+     66: 143,  # 'B'
+     67: 144,  # 'C'
+     68: 145,  # 'D'
+     69: 146,  # 'E'
+     70: 147,  # 'F'
+     71: 148,  # 'G'
+     72: 149,  # 'H'
+     73: 150,  # 'I'
+     74: 151,  # 'J'
+     75: 152,  # 'K'
+     76: 74,  # 'L'
+     77: 153,  # 'M'
+     78: 75,  # 'N'
+     79: 154,  # 'O'
+     80: 155,  # 'P'
+     81: 156,  # 'Q'
+     82: 157,  # 'R'
+     83: 158,  # 'S'
+     84: 159,  # 'T'
+     85: 160,  # 'U'
+     86: 161,  # 'V'
+     87: 162,  # 'W'
+     88: 163,  # 'X'
+     89: 164,  # 'Y'
+     90: 165,  # 'Z'
+     91: 253,  # '['
+     92: 253,  # '\\'
+     93: 253,  # ']'
+     94: 253,  # '^'
+     95: 253,  # '_'
+     96: 253,  # '`'
+     97: 71,  # 'a'
+     98: 172,  # 'b'
+     99: 66,  # 'c'
+     100: 173,  # 'd'
+     101: 65,  # 'e'
+     102: 174,  # 'f'
+     103: 76,  # 'g'
+     104: 175,  # 'h'
+     105: 64,  # 'i'
+     106: 176,  # 'j'
+     107: 177,  # 'k'
+     108: 77,  # 'l'
+     109: 72,  # 'm'
+     110: 178,  # 'n'
+     111: 69,  # 'o'
+     112: 67,  # 'p'
+     113: 179,  # 'q'
+     114: 78,  # 'r'
+     115: 73,  # 's'
+     116: 180,  # 't'
+     117: 181,  # 'u'
+     118: 79,  # 'v'
+     119: 182,  # 'w'
+     120: 183,  # 'x'
+     121: 184,  # 'y'
+     122: 185,  # 'z'
+     123: 253,  # '{'
+     124: 253,  # '|'
+     125: 253,  # '}'
+     126: 253,  # '~'
+     127: 253,  # '\x7f'
+     128: 191,  # '─'
+     129: 192,  # '│'
+     130: 193,  # '┌'
+     131: 194,  # '┐'
+     132: 195,  # '└'
+     133: 196,  # '┘'
+     134: 197,  # '├'
+     135: 198,  # '┤'
+     136: 199,  # '┬'
+     137: 200,  # '┴'
+     138: 201,  # '┼'
+     139: 202,  # '▀'
+     140: 203,  # '▄'
+     141: 204,  # '█'
+     142: 205,  # '▌'
+     143: 206,  # '▐'
+     144: 207,  # '░'
+     145: 208,  # '▒'
+     146: 209,  # '▓'
+     147: 210,  # '⌠'
+     148: 211,  # '■'
+     149: 212,  # '∙'
+     150: 213,  # '√'
+     151: 214,  # '≈'
+     152: 215,  # '≤'
+     153: 216,  # '≥'
+     154: 217,  # '\xa0'
+     155: 218,  # '⌡'
+     156: 219,  # '°'
+     157: 220,  # '²'
+     158: 221,  # '·'
+     159: 222,  # '÷'
+     160: 223,  # '═'
+     161: 224,  # '║'
+     162: 225,  # '╒'
+     163: 68,  # 'ё'
+     164: 226,  # '╓'
+     165: 227,  # '╔'
+     166: 228,  # '╕'
+     167: 229,  # '╖'
+     168: 230,  # '╗'
+     169: 231,  # '╘'
+     170: 232,  # '╙'
+     171: 233,  # '╚'
+     172: 234,  # '╛'
+     173: 235,  # '╜'
+     174: 236,  # '╝'
+     175: 237,  # '╞'
+     176: 238,  # '╟'
+     177: 239,  # '╠'
+     178: 240,  # '╡'
+     179: 241,  # 'Ё'
+     180: 242,  # '╢'
+     181: 243,  # '╣'
+     182: 244,  # '╤'
+     183: 245,  # '╥'
+     184: 246,  # '╦'
+     185: 247,  # '╧'
+     186: 248,  # '╨'
+     187: 249,  # '╩'
+     188: 250,  # '╪'
+     189: 251,  # '╫'
+     190: 252,  # '╬'
+     191: 253,  # '©'
+     192: 27,  # 'ю'
+     193: 3,  # 'а'
+     194: 21,  # 'б'
+     195: 28,  # 'ц'
+     196: 13,  # 'д'
+     197: 2,  # 'е'
+     198: 39,  # 'ф'
+     199: 19,  # 'г'
+     200: 26,  # 'х'
+     201: 4,  # 'и'
+     202: 23,  # 'й'
+     203: 11,  # 'к'
+     204: 8,  # 'л'
+     205: 12,  # 'м'
+     206: 5,  # 'н'
+     207: 1,  # 'о'
+     208: 15,  # 'п'
+     209: 16,  # 'я'
+     210: 9,  # 'р'
+     211: 7,  # 'с'
+     212: 6,  # 'т'
+     213: 14,  # 'у'
+     214: 24,  # 'ж'
+     215: 10,  # 'в'
+     216: 17,  # 'ь'
+     217: 18,  # 'ы'
+     218: 20,  # 'з'
+     219: 25,  # 'ш'
+     220: 30,  # 'э'
+     221: 29,  # 'щ'
+     222: 22,  # 'ч'
+     223: 54,  # 'ъ'
+     224: 59,  # 'Ю'
+     225: 37,  # 'А'
+     226: 44,  # 'Б'
+     227: 58,  # 'Ц'
+     228: 41,  # 'Д'
+     229: 48,  # 'Е'
+     230: 53,  # 'Ф'
+     231: 46,  # 'Г'
+     232: 55,  # 'Х'
+     233: 42,  # 'И'
+     234: 60,  # 'Й'
+     235: 36,  # 'К'
+     236: 49,  # 'Л'
+     237: 38,  # 'М'
+     238: 31,  # 'Н'
+     239: 34,  # 'О'
+     240: 35,  # 'П'
+     241: 43,  # 'Я'
+     242: 45,  # 'Р'
+     243: 32,  # 'С'
+     244: 40,  # 'Т'
+     245: 52,  # 'У'
+     246: 56,  # 'Ж'
+     247: 33,  # 'В'
+     248: 61,  # 'Ь'
+     249: 62,  # 'Ы'
+     250: 51,  # 'З'
+     251: 57,  # 'Ш'
+     252: 47,  # 'Э'
+     253: 63,  # 'Щ'
+     254: 50,  # 'Ч'
+     255: 70,  # 'Ъ'
 }
 
-KOI8_R_RUSSIAN_MODEL = SingleByteCharSetModel(
-    charset_name="KOI8-R",
-    language="Russian",
-    char_to_order_map=KOI8_R_RUSSIAN_CHAR_TO_ORDER,
-    language_model=RUSSIAN_LANG_MODEL,
-    typical_positive_ratio=0.976601,
-    keep_ascii_letters=False,
-    alphabet="ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё",
-)
+KOI8_R_RUSSIAN_MODEL = SingleByteCharSetModel(charset_name='KOI8-R',
+                                              language='Russian',
+                                              char_to_order_map=KOI8_R_RUSSIAN_CHAR_TO_ORDER,
+                                              language_model=RUSSIAN_LANG_MODEL,
+                                              typical_positive_ratio=0.976601,
+                                              keep_ascii_letters=False,
+                                              alphabet='ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё')
 
 MACCYRILLIC_RUSSIAN_CHAR_TO_ORDER = {
-    0: 255,  # '\x00'
-    1: 255,  # '\x01'
-    2: 255,  # '\x02'
-    3: 255,  # '\x03'
-    4: 255,  # '\x04'
-    5: 255,  # '\x05'
-    6: 255,  # '\x06'
-    7: 255,  # '\x07'
-    8: 255,  # '\x08'
-    9: 255,  # '\t'
-    10: 254,  # '\n'
-    11: 255,  # '\x0b'
-    12: 255,  # '\x0c'
-    13: 254,  # '\r'
-    14: 255,  # '\x0e'
-    15: 255,  # '\x0f'
-    16: 255,  # '\x10'
-    17: 255,  # '\x11'
-    18: 255,  # '\x12'
-    19: 255,  # '\x13'
-    20: 255,  # '\x14'
-    21: 255,  # '\x15'
-    22: 255,  # '\x16'
-    23: 255,  # '\x17'
-    24: 255,  # '\x18'
-    25: 255,  # '\x19'
-    26: 255,  # '\x1a'
-    27: 255,  # '\x1b'
-    28: 255,  # '\x1c'
-    29: 255,  # '\x1d'
-    30: 255,  # '\x1e'
-    31: 255,  # '\x1f'
-    32: 253,  # ' '
-    33: 253,  # '!'
-    34: 253,  # '"'
-    35: 253,  # '#'
-    36: 253,  # '$'
-    37: 253,  # '%'
-    38: 253,  # '&'
-    39: 253,  # "'"
-    40: 253,  # '('
-    41: 253,  # ')'
-    42: 253,  # '*'
-    43: 253,  # '+'
-    44: 253,  # ','
-    45: 253,  # '-'
-    46: 253,  # '.'
-    47: 253,  # '/'
-    48: 252,  # '0'
-    49: 252,  # '1'
-    50: 252,  # '2'
-    51: 252,  # '3'
-    52: 252,  # '4'
-    53: 252,  # '5'
-    54: 252,  # '6'
-    55: 252,  # '7'
-    56: 252,  # '8'
-    57: 252,  # '9'
-    58: 253,  # ':'
-    59: 253,  # ';'
-    60: 253,  # '<'
-    61: 253,  # '='
-    62: 253,  # '>'
-    63: 253,  # '?'
-    64: 253,  # '@'
-    65: 142,  # 'A'
-    66: 143,  # 'B'
-    67: 144,  # 'C'
-    68: 145,  # 'D'
-    69: 146,  # 'E'
-    70: 147,  # 'F'
-    71: 148,  # 'G'
-    72: 149,  # 'H'
-    73: 150,  # 'I'
-    74: 151,  # 'J'
-    75: 152,  # 'K'
-    76: 74,  # 'L'
-    77: 153,  # 'M'
-    78: 75,  # 'N'
-    79: 154,  # 'O'
-    80: 155,  # 'P'
-    81: 156,  # 'Q'
-    82: 157,  # 'R'
-    83: 158,  # 'S'
-    84: 159,  # 'T'
-    85: 160,  # 'U'
-    86: 161,  # 'V'
-    87: 162,  # 'W'
-    88: 163,  # 'X'
-    89: 164,  # 'Y'
-    90: 165,  # 'Z'
-    91: 253,  # '['
-    92: 253,  # '\\'
-    93: 253,  # ']'
-    94: 253,  # '^'
-    95: 253,  # '_'
-    96: 253,  # '`'
-    97: 71,  # 'a'
-    98: 172,  # 'b'
-    99: 66,  # 'c'
-    100: 173,  # 'd'
-    101: 65,  # 'e'
-    102: 174,  # 'f'
-    103: 76,  # 'g'
-    104: 175,  # 'h'
-    105: 64,  # 'i'
-    106: 176,  # 'j'
-    107: 177,  # 'k'
-    108: 77,  # 'l'
-    109: 72,  # 'm'
-    110: 178,  # 'n'
-    111: 69,  # 'o'
-    112: 67,  # 'p'
-    113: 179,  # 'q'
-    114: 78,  # 'r'
-    115: 73,  # 's'
-    116: 180,  # 't'
-    117: 181,  # 'u'
-    118: 79,  # 'v'
-    119: 182,  # 'w'
-    120: 183,  # 'x'
-    121: 184,  # 'y'
-    122: 185,  # 'z'
-    123: 253,  # '{'
-    124: 253,  # '|'
-    125: 253,  # '}'
-    126: 253,  # '~'
-    127: 253,  # '\x7f'
-    128: 37,  # 'А'
-    129: 44,  # 'Б'
-    130: 33,  # 'В'
-    131: 46,  # 'Г'
-    132: 41,  # 'Д'
-    133: 48,  # 'Е'
-    134: 56,  # 'Ж'
-    135: 51,  # 'З'
-    136: 42,  # 'И'
-    137: 60,  # 'Й'
-    138: 36,  # 'К'
-    139: 49,  # 'Л'
-    140: 38,  # 'М'
-    141: 31,  # 'Н'
-    142: 34,  # 'О'
-    143: 35,  # 'П'
-    144: 45,  # 'Р'
-    145: 32,  # 'С'
-    146: 40,  # 'Т'
-    147: 52,  # 'У'
-    148: 53,  # 'Ф'
-    149: 55,  # 'Х'
-    150: 58,  # 'Ц'
-    151: 50,  # 'Ч'
-    152: 57,  # 'Ш'
-    153: 63,  # 'Щ'
-    154: 70,  # 'Ъ'
-    155: 62,  # 'Ы'
-    156: 61,  # 'Ь'
-    157: 47,  # 'Э'
-    158: 59,  # 'Ю'
-    159: 43,  # 'Я'
-    160: 191,  # '†'
-    161: 192,  # '°'
-    162: 193,  # 'Ґ'
-    163: 194,  # '£'
-    164: 195,  # '§'
-    165: 196,  # '•'
-    166: 197,  # '¶'
-    167: 198,  # 'І'
-    168: 199,  # '®'
-    169: 200,  # '©'
-    170: 201,  # '™'
-    171: 202,  # 'Ђ'
-    172: 203,  # 'ђ'
-    173: 204,  # '≠'
-    174: 205,  # 'Ѓ'
-    175: 206,  # 'ѓ'
-    176: 207,  # '∞'
-    177: 208,  # '±'
-    178: 209,  # '≤'
-    179: 210,  # '≥'
-    180: 211,  # 'і'
-    181: 212,  # 'µ'
-    182: 213,  # 'ґ'
-    183: 214,  # 'Ј'
-    184: 215,  # 'Є'
-    185: 216,  # 'є'
-    186: 217,  # 'Ї'
-    187: 218,  # 'ї'
-    188: 219,  # 'Љ'
-    189: 220,  # 'љ'
-    190: 221,  # 'Њ'
-    191: 222,  # 'њ'
-    192: 223,  # 'ј'
-    193: 224,  # 'Ѕ'
-    194: 225,  # '¬'
-    195: 226,  # '√'
-    196: 227,  # 'ƒ'
-    197: 228,  # '≈'
-    198: 229,  # '∆'
-    199: 230,  # '«'
-    200: 231,  # '»'
-    201: 232,  # '…'
-    202: 233,  # '\xa0'
-    203: 234,  # 'Ћ'
-    204: 235,  # 'ћ'
-    205: 236,  # 'Ќ'
-    206: 237,  # 'ќ'
-    207: 238,  # 'ѕ'
-    208: 239,  # '–'
-    209: 240,  # '—'
-    210: 241,  # '“'
-    211: 242,  # '”'
-    212: 243,  # '‘'
-    213: 244,  # '’'
-    214: 245,  # '÷'
-    215: 246,  # '„'
-    216: 247,  # 'Ў'
-    217: 248,  # 'ў'
-    218: 249,  # 'Џ'
-    219: 250,  # 'џ'
-    220: 251,  # '№'
-    221: 252,  # 'Ё'
-    222: 68,  # 'ё'
-    223: 16,  # 'я'
-    224: 3,  # 'а'
-    225: 21,  # 'б'
-    226: 10,  # 'в'
-    227: 19,  # 'г'
-    228: 13,  # 'д'
-    229: 2,  # 'е'
-    230: 24,  # 'ж'
-    231: 20,  # 'з'
-    232: 4,  # 'и'
-    233: 23,  # 'й'
-    234: 11,  # 'к'
-    235: 8,  # 'л'
-    236: 12,  # 'м'
-    237: 5,  # 'н'
-    238: 1,  # 'о'
-    239: 15,  # 'п'
-    240: 9,  # 'р'
-    241: 7,  # 'с'
-    242: 6,  # 'т'
-    243: 14,  # 'у'
-    244: 39,  # 'ф'
-    245: 26,  # 'х'
-    246: 28,  # 'ц'
-    247: 22,  # 'ч'
-    248: 25,  # 'ш'
-    249: 29,  # 'щ'
-    250: 54,  # 'ъ'
-    251: 18,  # 'ы'
-    252: 17,  # 'ь'
-    253: 30,  # 'э'
-    254: 27,  # 'ю'
-    255: 255,  # '€'
+     0: 255,  # '\x00'
+     1: 255,  # '\x01'
+     2: 255,  # '\x02'
+     3: 255,  # '\x03'
+     4: 255,  # '\x04'
+     5: 255,  # '\x05'
+     6: 255,  # '\x06'
+     7: 255,  # '\x07'
+     8: 255,  # '\x08'
+     9: 255,  # '\t'
+     10: 254,  # '\n'
+     11: 255,  # '\x0b'
+     12: 255,  # '\x0c'
+     13: 254,  # '\r'
+     14: 255,  # '\x0e'
+     15: 255,  # '\x0f'
+     16: 255,  # '\x10'
+     17: 255,  # '\x11'
+     18: 255,  # '\x12'
+     19: 255,  # '\x13'
+     20: 255,  # '\x14'
+     21: 255,  # '\x15'
+     22: 255,  # '\x16'
+     23: 255,  # '\x17'
+     24: 255,  # '\x18'
+     25: 255,  # '\x19'
+     26: 255,  # '\x1a'
+     27: 255,  # '\x1b'
+     28: 255,  # '\x1c'
+     29: 255,  # '\x1d'
+     30: 255,  # '\x1e'
+     31: 255,  # '\x1f'
+     32: 253,  # ' '
+     33: 253,  # '!'
+     34: 253,  # '"'
+     35: 253,  # '#'
+     36: 253,  # '$'
+     37: 253,  # '%'
+     38: 253,  # '&'
+     39: 253,  # "'"
+     40: 253,  # '('
+     41: 253,  # ')'
+     42: 253,  # '*'
+     43: 253,  # '+'
+     44: 253,  # ','
+     45: 253,  # '-'
+     46: 253,  # '.'
+     47: 253,  # '/'
+     48: 252,  # '0'
+     49: 252,  # '1'
+     50: 252,  # '2'
+     51: 252,  # '3'
+     52: 252,  # '4'
+     53: 252,  # '5'
+     54: 252,  # '6'
+     55: 252,  # '7'
+     56: 252,  # '8'
+     57: 252,  # '9'
+     58: 253,  # ':'
+     59: 253,  # ';'
+     60: 253,  # '<'
+     61: 253,  # '='
+     62: 253,  # '>'
+     63: 253,  # '?'
+     64: 253,  # '@'
+     65: 142,  # 'A'
+     66: 143,  # 'B'
+     67: 144,  # 'C'
+     68: 145,  # 'D'
+     69: 146,  # 'E'
+     70: 147,  # 'F'
+     71: 148,  # 'G'
+     72: 149,  # 'H'
+     73: 150,  # 'I'
+     74: 151,  # 'J'
+     75: 152,  # 'K'
+     76: 74,  # 'L'
+     77: 153,  # 'M'
+     78: 75,  # 'N'
+     79: 154,  # 'O'
+     80: 155,  # 'P'
+     81: 156,  # 'Q'
+     82: 157,  # 'R'
+     83: 158,  # 'S'
+     84: 159,  # 'T'
+     85: 160,  # 'U'
+     86: 161,  # 'V'
+     87: 162,  # 'W'
+     88: 163,  # 'X'
+     89: 164,  # 'Y'
+     90: 165,  # 'Z'
+     91: 253,  # '['
+     92: 253,  # '\\'
+     93: 253,  # ']'
+     94: 253,  # '^'
+     95: 253,  # '_'
+     96: 253,  # '`'
+     97: 71,  # 'a'
+     98: 172,  # 'b'
+     99: 66,  # 'c'
+     100: 173,  # 'd'
+     101: 65,  # 'e'
+     102: 174,  # 'f'
+     103: 76,  # 'g'
+     104: 175,  # 'h'
+     105: 64,  # 'i'
+     106: 176,  # 'j'
+     107: 177,  # 'k'
+     108: 77,  # 'l'
+     109: 72,  # 'm'
+     110: 178,  # 'n'
+     111: 69,  # 'o'
+     112: 67,  # 'p'
+     113: 179,  # 'q'
+     114: 78,  # 'r'
+     115: 73,  # 's'
+     116: 180,  # 't'
+     117: 181,  # 'u'
+     118: 79,  # 'v'
+     119: 182,  # 'w'
+     120: 183,  # 'x'
+     121: 184,  # 'y'
+     122: 185,  # 'z'
+     123: 253,  # '{'
+     124: 253,  # '|'
+     125: 253,  # '}'
+     126: 253,  # '~'
+     127: 253,  # '\x7f'
+     128: 37,  # 'А'
+     129: 44,  # 'Б'
+     130: 33,  # 'В'
+     131: 46,  # 'Г'
+     132: 41,  # 'Д'
+     133: 48,  # 'Е'
+     134: 56,  # 'Ж'
+     135: 51,  # 'З'
+     136: 42,  # 'И'
+     137: 60,  # 'Й'
+     138: 36,  # 'К'
+     139: 49,  # 'Л'
+     140: 38,  # 'М'
+     141: 31,  # 'Н'
+     142: 34,  # 'О'
+     143: 35,  # 'П'
+     144: 45,  # 'Р'
+     145: 32,  # 'С'
+     146: 40,  # 'Т'
+     147: 52,  # 'У'
+     148: 53,  # 'Ф'
+     149: 55,  # 'Х'
+     150: 58,  # 'Ц'
+     151: 50,  # 'Ч'
+     152: 57,  # 'Ш'
+     153: 63,  # 'Щ'
+     154: 70,  # 'Ъ'
+     155: 62,  # 'Ы'
+     156: 61,  # 'Ь'
+     157: 47,  # 'Э'
+     158: 59,  # 'Ю'
+     159: 43,  # 'Я'
+     160: 191,  # '†'
+     161: 192,  # '°'
+     162: 193,  # 'Ґ'
+     163: 194,  # '£'
+     164: 195,  # '§'
+     165: 196,  # '•'
+     166: 197,  # '¶'
+     167: 198,  # 'І'
+     168: 199,  # '®'
+     169: 200,  # '©'
+     170: 201,  # '™'
+     171: 202,  # 'Ђ'
+     172: 203,  # 'ђ'
+     173: 204,  # '≠'
+     174: 205,  # 'Ѓ'
+     175: 206,  # 'ѓ'
+     176: 207,  # '∞'
+     177: 208,  # '±'
+     178: 209,  # '≤'
+     179: 210,  # '≥'
+     180: 211,  # 'і'
+     181: 212,  # 'µ'
+     182: 213,  # 'ґ'
+     183: 214,  # 'Ј'
+     184: 215,  # 'Є'
+     185: 216,  # 'є'
+     186: 217,  # 'Ї'
+     187: 218,  # 'ї'
+     188: 219,  # 'Љ'
+     189: 220,  # 'љ'
+     190: 221,  # 'Њ'
+     191: 222,  # 'њ'
+     192: 223,  # 'ј'
+     193: 224,  # 'Ѕ'
+     194: 225,  # '¬'
+     195: 226,  # '√'
+     196: 227,  # 'ƒ'
+     197: 228,  # '≈'
+     198: 229,  # '∆'
+     199: 230,  # '«'
+     200: 231,  # '»'
+     201: 232,  # '…'
+     202: 233,  # '\xa0'
+     203: 234,  # 'Ћ'
+     204: 235,  # 'ћ'
+     205: 236,  # 'Ќ'
+     206: 237,  # 'ќ'
+     207: 238,  # 'ѕ'
+     208: 239,  # '–'
+     209: 240,  # '—'
+     210: 241,  # '“'
+     211: 242,  # '”'
+     212: 243,  # '‘'
+     213: 244,  # '’'
+     214: 245,  # '÷'
+     215: 246,  # '„'
+     216: 247,  # 'Ў'
+     217: 248,  # 'ў'
+     218: 249,  # 'Џ'
+     219: 250,  # 'џ'
+     220: 251,  # '№'
+     221: 252,  # 'Ё'
+     222: 68,  # 'ё'
+     223: 16,  # 'я'
+     224: 3,  # 'а'
+     225: 21,  # 'б'
+     226: 10,  # 'в'
+     227: 19,  # 'г'
+     228: 13,  # 'д'
+     229: 2,  # 'е'
+     230: 24,  # 'ж'
+     231: 20,  # 'з'
+     232: 4,  # 'и'
+     233: 23,  # 'й'
+     234: 11,  # 'к'
+     235: 8,  # 'л'
+     236: 12,  # 'м'
+     237: 5,  # 'н'
+     238: 1,  # 'о'
+     239: 15,  # 'п'
+     240: 9,  # 'р'
+     241: 7,  # 'с'
+     242: 6,  # 'т'
+     243: 14,  # 'у'
+     244: 39,  # 'ф'
+     245: 26,  # 'х'
+     246: 28,  # 'ц'
+     247: 22,  # 'ч'
+     248: 25,  # 'ш'
+     249: 29,  # 'щ'
+     250: 54,  # 'ъ'
+     251: 18,  # 'ы'
+     252: 17,  # 'ь'
+     253: 30,  # 'э'
+     254: 27,  # 'ю'
+     255: 255,  # '€'
 }
 
-MACCYRILLIC_RUSSIAN_MODEL = SingleByteCharSetModel(
-    charset_name="MacCyrillic",
-    language="Russian",
-    char_to_order_map=MACCYRILLIC_RUSSIAN_CHAR_TO_ORDER,
-    language_model=RUSSIAN_LANG_MODEL,
-    typical_positive_ratio=0.976601,
-    keep_ascii_letters=False,
-    alphabet="ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё",
-)
+MACCYRILLIC_RUSSIAN_MODEL = SingleByteCharSetModel(charset_name='MacCyrillic',
+                                                   language='Russian',
+                                                   char_to_order_map=MACCYRILLIC_RUSSIAN_CHAR_TO_ORDER,
+                                                   language_model=RUSSIAN_LANG_MODEL,
+                                                   typical_positive_ratio=0.976601,
+                                                   keep_ascii_letters=False,
+                                                   alphabet='ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё')
 
 ISO_8859_5_RUSSIAN_CHAR_TO_ORDER = {
-    0: 255,  # '\x00'
-    1: 255,  # '\x01'
-    2: 255,  # '\x02'
-    3: 255,  # '\x03'
-    4: 255,  # '\x04'
-    5: 255,  # '\x05'
-    6: 255,  # '\x06'
-    7: 255,  # '\x07'
-    8: 255,  # '\x08'
-    9: 255,  # '\t'
-    10: 254,  # '\n'
-    11: 255,  # '\x0b'
-    12: 255,  # '\x0c'
-    13: 254,  # '\r'
-    14: 255,  # '\x0e'
-    15: 255,  # '\x0f'
-    16: 255,  # '\x10'
-    17: 255,  # '\x11'
-    18: 255,  # '\x12'
-    19: 255,  # '\x13'
-    20: 255,  # '\x14'
-    21: 255,  # '\x15'
-    22: 255,  # '\x16'
-    23: 255,  # '\x17'
-    24: 255,  # '\x18'
-    25: 255,  # '\x19'
-    26: 255,  # '\x1a'
-    27: 255,  # '\x1b'
-    28: 255,  # '\x1c'
-    29: 255,  # '\x1d'
-    30: 255,  # '\x1e'
-    31: 255,  # '\x1f'
-    32: 253,  # ' '
-    33: 253,  # '!'
-    34: 253,  # '"'
-    35: 253,  # '#'
-    36: 253,  # '$'
-    37: 253,  # '%'
-    38: 253,  # '&'
-    39: 253,  # "'"
-    40: 253,  # '('
-    41: 253,  # ')'
-    42: 253,  # '*'
-    43: 253,  # '+'
-    44: 253,  # ','
-    45: 253,  # '-'
-    46: 253,  # '.'
-    47: 253,  # '/'
-    48: 252,  # '0'
-    49: 252,  # '1'
-    50: 252,  # '2'
-    51: 252,  # '3'
-    52: 252,  # '4'
-    53: 252,  # '5'
-    54: 252,  # '6'
-    55: 252,  # '7'
-    56: 252,  # '8'
-    57: 252,  # '9'
-    58: 253,  # ':'
-    59: 253,  # ';'
-    60: 253,  # '<'
-    61: 253,  # '='
-    62: 253,  # '>'
-    63: 253,  # '?'
-    64: 253,  # '@'
-    65: 142,  # 'A'
-    66: 143,  # 'B'
-    67: 144,  # 'C'
-    68: 145,  # 'D'
-    69: 146,  # 'E'
-    70: 147,  # 'F'
-    71: 148,  # 'G'
-    72: 149,  # 'H'
-    73: 150,  # 'I'
-    74: 151,  # 'J'
-    75: 152,  # 'K'
-    76: 74,  # 'L'
-    77: 153,  # 'M'
-    78: 75,  # 'N'
-    79: 154,  # 'O'
-    80: 155,  # 'P'
-    81: 156,  # 'Q'
-    82: 157,  # 'R'
-    83: 158,  # 'S'
-    84: 159,  # 'T'
-    85: 160,  # 'U'
-    86: 161,  # 'V'
-    87: 162,  # 'W'
-    88: 163,  # 'X'
-    89: 164,  # 'Y'
-    90: 165,  # 'Z'
-    91: 253,  # '['
-    92: 253,  # '\\'
-    93: 253,  # ']'
-    94: 253,  # '^'
-    95: 253,  # '_'
-    96: 253,  # '`'
-    97: 71,  # 'a'
-    98: 172,  # 'b'
-    99: 66,  # 'c'
-    100: 173,  # 'd'
-    101: 65,  # 'e'
-    102: 174,  # 'f'
-    103: 76,  # 'g'
-    104: 175,  # 'h'
-    105: 64,  # 'i'
-    106: 176,  # 'j'
-    107: 177,  # 'k'
-    108: 77,  # 'l'
-    109: 72,  # 'm'
-    110: 178,  # 'n'
-    111: 69,  # 'o'
-    112: 67,  # 'p'
-    113: 179,  # 'q'
-    114: 78,  # 'r'
-    115: 73,  # 's'
-    116: 180,  # 't'
-    117: 181,  # 'u'
-    118: 79,  # 'v'
-    119: 182,  # 'w'
-    120: 183,  # 'x'
-    121: 184,  # 'y'
-    122: 185,  # 'z'
-    123: 253,  # '{'
-    124: 253,  # '|'
-    125: 253,  # '}'
-    126: 253,  # '~'
-    127: 253,  # '\x7f'
-    128: 191,  # '\x80'
-    129: 192,  # '\x81'
-    130: 193,  # '\x82'
-    131: 194,  # '\x83'
-    132: 195,  # '\x84'
-    133: 196,  # '\x85'
-    134: 197,  # '\x86'
-    135: 198,  # '\x87'
-    136: 199,  # '\x88'
-    137: 200,  # '\x89'
-    138: 201,  # '\x8a'
-    139: 202,  # '\x8b'
-    140: 203,  # '\x8c'
-    141: 204,  # '\x8d'
-    142: 205,  # '\x8e'
-    143: 206,  # '\x8f'
-    144: 207,  # '\x90'
-    145: 208,  # '\x91'
-    146: 209,  # '\x92'
-    147: 210,  # '\x93'
-    148: 211,  # '\x94'
-    149: 212,  # '\x95'
-    150: 213,  # '\x96'
-    151: 214,  # '\x97'
-    152: 215,  # '\x98'
-    153: 216,  # '\x99'
-    154: 217,  # '\x9a'
-    155: 218,  # '\x9b'
-    156: 219,  # '\x9c'
-    157: 220,  # '\x9d'
-    158: 221,  # '\x9e'
-    159: 222,  # '\x9f'
-    160: 223,  # '\xa0'
-    161: 224,  # 'Ё'
-    162: 225,  # 'Ђ'
-    163: 226,  # 'Ѓ'
-    164: 227,  # 'Є'
-    165: 228,  # 'Ѕ'
-    166: 229,  # 'І'
-    167: 230,  # 'Ї'
-    168: 231,  # 'Ј'
-    169: 232,  # 'Љ'
-    170: 233,  # 'Њ'
-    171: 234,  # 'Ћ'
-    172: 235,  # 'Ќ'
-    173: 236,  # '\xad'
-    174: 237,  # 'Ў'
-    175: 238,  # 'Џ'
-    176: 37,  # 'А'
-    177: 44,  # 'Б'
-    178: 33,  # 'В'
-    179: 46,  # 'Г'
-    180: 41,  # 'Д'
-    181: 48,  # 'Е'
-    182: 56,  # 'Ж'
-    183: 51,  # 'З'
-    184: 42,  # 'И'
-    185: 60,  # 'Й'
-    186: 36,  # 'К'
-    187: 49,  # 'Л'
-    188: 38,  # 'М'
-    189: 31,  # 'Н'
-    190: 34,  # 'О'
-    191: 35,  # 'П'
-    192: 45,  # 'Р'
-    193: 32,  # 'С'
-    194: 40,  # 'Т'
-    195: 52,  # 'У'
-    196: 53,  # 'Ф'
-    197: 55,  # 'Х'
-    198: 58,  # 'Ц'
-    199: 50,  # 'Ч'
-    200: 57,  # 'Ш'
-    201: 63,  # 'Щ'
-    202: 70,  # 'Ъ'
-    203: 62,  # 'Ы'
-    204: 61,  # 'Ь'
-    205: 47,  # 'Э'
-    206: 59,  # 'Ю'
-    207: 43,  # 'Я'
-    208: 3,  # 'а'
-    209: 21,  # 'б'
-    210: 10,  # 'в'
-    211: 19,  # 'г'
-    212: 13,  # 'д'
-    213: 2,  # 'е'
-    214: 24,  # 'ж'
-    215: 20,  # 'з'
-    216: 4,  # 'и'
-    217: 23,  # 'й'
-    218: 11,  # 'к'
-    219: 8,  # 'л'
-    220: 12,  # 'м'
-    221: 5,  # 'н'
-    222: 1,  # 'о'
-    223: 15,  # 'п'
-    224: 9,  # 'р'
-    225: 7,  # 'с'
-    226: 6,  # 'т'
-    227: 14,  # 'у'
-    228: 39,  # 'ф'
-    229: 26,  # 'х'
-    230: 28,  # 'ц'
-    231: 22,  # 'ч'
-    232: 25,  # 'ш'
-    233: 29,  # 'щ'
-    234: 54,  # 'ъ'
-    235: 18,  # 'ы'
-    236: 17,  # 'ь'
-    237: 30,  # 'э'
-    238: 27,  # 'ю'
-    239: 16,  # 'я'
-    240: 239,  # '№'
-    241: 68,  # 'ё'
-    242: 240,  # 'ђ'
-    243: 241,  # 'ѓ'
-    244: 242,  # 'є'
-    245: 243,  # 'ѕ'
-    246: 244,  # 'і'
-    247: 245,  # 'ї'
-    248: 246,  # 'ј'
-    249: 247,  # 'љ'
-    250: 248,  # 'њ'
-    251: 249,  # 'ћ'
-    252: 250,  # 'ќ'
-    253: 251,  # '§'
-    254: 252,  # 'ў'
-    255: 255,  # 'џ'
+     0: 255,  # '\x00'
+     1: 255,  # '\x01'
+     2: 255,  # '\x02'
+     3: 255,  # '\x03'
+     4: 255,  # '\x04'
+     5: 255,  # '\x05'
+     6: 255,  # '\x06'
+     7: 255,  # '\x07'
+     8: 255,  # '\x08'
+     9: 255,  # '\t'
+     10: 254,  # '\n'
+     11: 255,  # '\x0b'
+     12: 255,  # '\x0c'
+     13: 254,  # '\r'
+     14: 255,  # '\x0e'
+     15: 255,  # '\x0f'
+     16: 255,  # '\x10'
+     17: 255,  # '\x11'
+     18: 255,  # '\x12'
+     19: 255,  # '\x13'
+     20: 255,  # '\x14'
+     21: 255,  # '\x15'
+     22: 255,  # '\x16'
+     23: 255,  # '\x17'
+     24: 255,  # '\x18'
+     25: 255,  # '\x19'
+     26: 255,  # '\x1a'
+     27: 255,  # '\x1b'
+     28: 255,  # '\x1c'
+     29: 255,  # '\x1d'
+     30: 255,  # '\x1e'
+     31: 255,  # '\x1f'
+     32: 253,  # ' '
+     33: 253,  # '!'
+     34: 253,  # '"'
+     35: 253,  # '#'
+     36: 253,  # '$'
+     37: 253,  # '%'
+     38: 253,  # '&'
+     39: 253,  # "'"
+     40: 253,  # '('
+     41: 253,  # ')'
+     42: 253,  # '*'
+     43: 253,  # '+'
+     44: 253,  # ','
+     45: 253,  # '-'
+     46: 253,  # '.'
+     47: 253,  # '/'
+     48: 252,  # '0'
+     49: 252,  # '1'
+     50: 252,  # '2'
+     51: 252,  # '3'
+     52: 252,  # '4'
+     53: 252,  # '5'
+     54: 252,  # '6'
+     55: 252,  # '7'
+     56: 252,  # '8'
+     57: 252,  # '9'
+     58: 253,  # ':'
+     59: 253,  # ';'
+     60: 253,  # '<'
+     61: 253,  # '='
+     62: 253,  # '>'
+     63: 253,  # '?'
+     64: 253,  # '@'
+     65: 142,  # 'A'
+     66: 143,  # 'B'
+     67: 144,  # 'C'
+     68: 145,  # 'D'
+     69: 146,  # 'E'
+     70: 147,  # 'F'
+     71: 148,  # 'G'
+     72: 149,  # 'H'
+     73: 150,  # 'I'
+     74: 151,  # 'J'
+     75: 152,  # 'K'
+     76: 74,  # 'L'
+     77: 153,  # 'M'
+     78: 75,  # 'N'
+     79: 154,  # 'O'
+     80: 155,  # 'P'
+     81: 156,  # 'Q'
+     82: 157,  # 'R'
+     83: 158,  # 'S'
+     84: 159,  # 'T'
+     85: 160,  # 'U'
+     86: 161,  # 'V'
+     87: 162,  # 'W'
+     88: 163,  # 'X'
+     89: 164,  # 'Y'
+     90: 165,  # 'Z'
+     91: 253,  # '['
+     92: 253,  # '\\'
+     93: 253,  # ']'
+     94: 253,  # '^'
+     95: 253,  # '_'
+     96: 253,  # '`'
+     97: 71,  # 'a'
+     98: 172,  # 'b'
+     99: 66,  # 'c'
+     100: 173,  # 'd'
+     101: 65,  # 'e'
+     102: 174,  # 'f'
+     103: 76,  # 'g'
+     104: 175,  # 'h'
+     105: 64,  # 'i'
+     106: 176,  # 'j'
+     107: 177,  # 'k'
+     108: 77,  # 'l'
+     109: 72,  # 'm'
+     110: 178,  # 'n'
+     111: 69,  # 'o'
+     112: 67,  # 'p'
+     113: 179,  # 'q'
+     114: 78,  # 'r'
+     115: 73,  # 's'
+     116: 180,  # 't'
+     117: 181,  # 'u'
+     118: 79,  # 'v'
+     119: 182,  # 'w'
+     120: 183,  # 'x'
+     121: 184,  # 'y'
+     122: 185,  # 'z'
+     123: 253,  # '{'
+     124: 253,  # '|'
+     125: 253,  # '}'
+     126: 253,  # '~'
+     127: 253,  # '\x7f'
+     128: 191,  # '\x80'
+     129: 192,  # '\x81'
+     130: 193,  # '\x82'
+     131: 194,  # '\x83'
+     132: 195,  # '\x84'
+     133: 196,  # '\x85'
+     134: 197,  # '\x86'
+     135: 198,  # '\x87'
+     136: 199,  # '\x88'
+     137: 200,  # '\x89'
+     138: 201,  # '\x8a'
+     139: 202,  # '\x8b'
+     140: 203,  # '\x8c'
+     141: 204,  # '\x8d'
+     142: 205,  # '\x8e'
+     143: 206,  # '\x8f'
+     144: 207,  # '\x90'
+     145: 208,  # '\x91'
+     146: 209,  # '\x92'
+     147: 210,  # '\x93'
+     148: 211,  # '\x94'
+     149: 212,  # '\x95'
+     150: 213,  # '\x96'
+     151: 214,  # '\x97'
+     152: 215,  # '\x98'
+     153: 216,  # '\x99'
+     154: 217,  # '\x9a'
+     155: 218,  # '\x9b'
+     156: 219,  # '\x9c'
+     157: 220,  # '\x9d'
+     158: 221,  # '\x9e'
+     159: 222,  # '\x9f'
+     160: 223,  # '\xa0'
+     161: 224,  # 'Ё'
+     162: 225,  # 'Ђ'
+     163: 226,  # 'Ѓ'
+     164: 227,  # 'Є'
+     165: 228,  # 'Ѕ'
+     166: 229,  # 'І'
+     167: 230,  # 'Ї'
+     168: 231,  # 'Ј'
+     169: 232,  # 'Љ'
+     170: 233,  # 'Њ'
+     171: 234,  # 'Ћ'
+     172: 235,  # 'Ќ'
+     173: 236,  # '\xad'
+     174: 237,  # 'Ў'
+     175: 238,  # 'Џ'
+     176: 37,  # 'А'
+     177: 44,  # 'Б'
+     178: 33,  # 'В'
+     179: 46,  # 'Г'
+     180: 41,  # 'Д'
+     181: 48,  # 'Е'
+     182: 56,  # 'Ж'
+     183: 51,  # 'З'
+     184: 42,  # 'И'
+     185: 60,  # 'Й'
+     186: 36,  # 'К'
+     187: 49,  # 'Л'
+     188: 38,  # 'М'
+     189: 31,  # 'Н'
+     190: 34,  # 'О'
+     191: 35,  # 'П'
+     192: 45,  # 'Р'
+     193: 32,  # 'С'
+     194: 40,  # 'Т'
+     195: 52,  # 'У'
+     196: 53,  # 'Ф'
+     197: 55,  # 'Х'
+     198: 58,  # 'Ц'
+     199: 50,  # 'Ч'
+     200: 57,  # 'Ш'
+     201: 63,  # 'Щ'
+     202: 70,  # 'Ъ'
+     203: 62,  # 'Ы'
+     204: 61,  # 'Ь'
+     205: 47,  # 'Э'
+     206: 59,  # 'Ю'
+     207: 43,  # 'Я'
+     208: 3,  # 'а'
+     209: 21,  # 'б'
+     210: 10,  # 'в'
+     211: 19,  # 'г'
+     212: 13,  # 'д'
+     213: 2,  # 'е'
+     214: 24,  # 'ж'
+     215: 20,  # 'з'
+     216: 4,  # 'и'
+     217: 23,  # 'й'
+     218: 11,  # 'к'
+     219: 8,  # 'л'
+     220: 12,  # 'м'
+     221: 5,  # 'н'
+     222: 1,  # 'о'
+     223: 15,  # 'п'
+     224: 9,  # 'р'
+     225: 7,  # 'с'
+     226: 6,  # 'т'
+     227: 14,  # 'у'
+     228: 39,  # 'ф'
+     229: 26,  # 'х'
+     230: 28,  # 'ц'
+     231: 22,  # 'ч'
+     232: 25,  # 'ш'
+     233: 29,  # 'щ'
+     234: 54,  # 'ъ'
+     235: 18,  # 'ы'
+     236: 17,  # 'ь'
+     237: 30,  # 'э'
+     238: 27,  # 'ю'
+     239: 16,  # 'я'
+     240: 239,  # '№'
+     241: 68,  # 'ё'
+     242: 240,  # 'ђ'
+     243: 241,  # 'ѓ'
+     244: 242,  # 'є'
+     245: 243,  # 'ѕ'
+     246: 244,  # 'і'
+     247: 245,  # 'ї'
+     248: 246,  # 'ј'
+     249: 247,  # 'љ'
+     250: 248,  # 'њ'
+     251: 249,  # 'ћ'
+     252: 250,  # 'ќ'
+     253: 251,  # '§'
+     254: 252,  # 'ў'
+     255: 255,  # 'џ'
 }
 
-ISO_8859_5_RUSSIAN_MODEL = SingleByteCharSetModel(
-    charset_name="ISO-8859-5",
-    language="Russian",
-    char_to_order_map=ISO_8859_5_RUSSIAN_CHAR_TO_ORDER,
-    language_model=RUSSIAN_LANG_MODEL,
-    typical_positive_ratio=0.976601,
-    keep_ascii_letters=False,
-    alphabet="ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё",
-)
+ISO_8859_5_RUSSIAN_MODEL = SingleByteCharSetModel(charset_name='ISO-8859-5',
+                                                  language='Russian',
+                                                  char_to_order_map=ISO_8859_5_RUSSIAN_CHAR_TO_ORDER,
+                                                  language_model=RUSSIAN_LANG_MODEL,
+                                                  typical_positive_ratio=0.976601,
+                                                  keep_ascii_letters=False,
+                                                  alphabet='ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё')
+
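
Each *_CHAR_TO_ORDER table in the hunks above maps a raw byte value (0–255) of the named legacy encoding to a frequency rank for the language's letters, with high sentinel ranks (252 digits, 253 symbols, 254 line breaks, 255 control/undefined) marking bytes the detector ignores; the four Russian tables differ only in where each Cyrillic letter sits in the byte range. As a rough illustration of how such a table can be consulted — a minimal sketch, not chardet's actual SingleByteCharSetProber logic; sample_ratio, the >= 250 sentinel cutoff, and the 64-rank window are illustrative assumptions:

# Minimal sketch (illustrative, not chardet's real prober): score how well a
# byte string fits one of the CHAR_TO_ORDER tables diffed above. Sentinel
# ranks (>= 250 here, an assumed cutoff) are skipped so only letters count.

def sample_ratio(data: bytes, char_to_order: dict, window: int = 64) -> float:
    """Fraction of letter bytes whose frequency rank falls inside `window`."""
    frequent = letters = 0
    for byte in data:
        order = char_to_order.get(byte, 255)
        if order >= 250:          # digit/symbol/control sentinels: ignore
            continue
        letters += 1
        if order < window:        # one of the language's most frequent letters
            frequent += 1
    return frequent / letters if letters else 0.0

# Usage: Russian text actually encoded as KOI8-R should score markedly higher
# with KOI8_R_RUSSIAN_CHAR_TO_ORDER than with the IBM855 or MacCyrillic tables.
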
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/langthaimodel.py b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/langthaimodel.py
index 489cad9..9a37db5 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/langthaimodel.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/langthaimodel.py
@@ -1,5 +1,9 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
 from pip._vendor.chardet.sbcharsetprober import SingleByteCharSetModel
 
+
 # 3: Positive
 # 2: Likely
 # 1: Unlikely
@@ -4111,270 +4115,269 @@
 
 # Character Mapping Table(s):
 TIS_620_THAI_CHAR_TO_ORDER = {
-    0: 255,  # '\x00'
-    1: 255,  # '\x01'
-    2: 255,  # '\x02'
-    3: 255,  # '\x03'
-    4: 255,  # '\x04'
-    5: 255,  # '\x05'
-    6: 255,  # '\x06'
-    7: 255,  # '\x07'
-    8: 255,  # '\x08'
-    9: 255,  # '\t'
-    10: 254,  # '\n'
-    11: 255,  # '\x0b'
-    12: 255,  # '\x0c'
-    13: 254,  # '\r'
-    14: 255,  # '\x0e'
-    15: 255,  # '\x0f'
-    16: 255,  # '\x10'
-    17: 255,  # '\x11'
-    18: 255,  # '\x12'
-    19: 255,  # '\x13'
-    20: 255,  # '\x14'
-    21: 255,  # '\x15'
-    22: 255,  # '\x16'
-    23: 255,  # '\x17'
-    24: 255,  # '\x18'
-    25: 255,  # '\x19'
-    26: 255,  # '\x1a'
-    27: 255,  # '\x1b'
-    28: 255,  # '\x1c'
-    29: 255,  # '\x1d'
-    30: 255,  # '\x1e'
-    31: 255,  # '\x1f'
-    32: 253,  # ' '
-    33: 253,  # '!'
-    34: 253,  # '"'
-    35: 253,  # '#'
-    36: 253,  # '$'
-    37: 253,  # '%'
-    38: 253,  # '&'
-    39: 253,  # "'"
-    40: 253,  # '('
-    41: 253,  # ')'
-    42: 253,  # '*'
-    43: 253,  # '+'
-    44: 253,  # ','
-    45: 253,  # '-'
-    46: 253,  # '.'
-    47: 253,  # '/'
-    48: 252,  # '0'
-    49: 252,  # '1'
-    50: 252,  # '2'
-    51: 252,  # '3'
-    52: 252,  # '4'
-    53: 252,  # '5'
-    54: 252,  # '6'
-    55: 252,  # '7'
-    56: 252,  # '8'
-    57: 252,  # '9'
-    58: 253,  # ':'
-    59: 253,  # ';'
-    60: 253,  # '<'
-    61: 253,  # '='
-    62: 253,  # '>'
-    63: 253,  # '?'
-    64: 253,  # '@'
-    65: 182,  # 'A'
-    66: 106,  # 'B'
-    67: 107,  # 'C'
-    68: 100,  # 'D'
-    69: 183,  # 'E'
-    70: 184,  # 'F'
-    71: 185,  # 'G'
-    72: 101,  # 'H'
-    73: 94,  # 'I'
-    74: 186,  # 'J'
-    75: 187,  # 'K'
-    76: 108,  # 'L'
-    77: 109,  # 'M'
-    78: 110,  # 'N'
-    79: 111,  # 'O'
-    80: 188,  # 'P'
-    81: 189,  # 'Q'
-    82: 190,  # 'R'
-    83: 89,  # 'S'
-    84: 95,  # 'T'
-    85: 112,  # 'U'
-    86: 113,  # 'V'
-    87: 191,  # 'W'
-    88: 192,  # 'X'
-    89: 193,  # 'Y'
-    90: 194,  # 'Z'
-    91: 253,  # '['
-    92: 253,  # '\\'
-    93: 253,  # ']'
-    94: 253,  # '^'
-    95: 253,  # '_'
-    96: 253,  # '`'
-    97: 64,  # 'a'
-    98: 72,  # 'b'
-    99: 73,  # 'c'
-    100: 114,  # 'd'
-    101: 74,  # 'e'
-    102: 115,  # 'f'
-    103: 116,  # 'g'
-    104: 102,  # 'h'
-    105: 81,  # 'i'
-    106: 201,  # 'j'
-    107: 117,  # 'k'
-    108: 90,  # 'l'
-    109: 103,  # 'm'
-    110: 78,  # 'n'
-    111: 82,  # 'o'
-    112: 96,  # 'p'
-    113: 202,  # 'q'
-    114: 91,  # 'r'
-    115: 79,  # 's'
-    116: 84,  # 't'
-    117: 104,  # 'u'
-    118: 105,  # 'v'
-    119: 97,  # 'w'
-    120: 98,  # 'x'
-    121: 92,  # 'y'
-    122: 203,  # 'z'
-    123: 253,  # '{'
-    124: 253,  # '|'
-    125: 253,  # '}'
-    126: 253,  # '~'
-    127: 253,  # '\x7f'
-    128: 209,  # '\x80'
-    129: 210,  # '\x81'
-    130: 211,  # '\x82'
-    131: 212,  # '\x83'
-    132: 213,  # '\x84'
-    133: 88,  # '\x85'
-    134: 214,  # '\x86'
-    135: 215,  # '\x87'
-    136: 216,  # '\x88'
-    137: 217,  # '\x89'
-    138: 218,  # '\x8a'
-    139: 219,  # '\x8b'
-    140: 220,  # '\x8c'
-    141: 118,  # '\x8d'
-    142: 221,  # '\x8e'
-    143: 222,  # '\x8f'
-    144: 223,  # '\x90'
-    145: 224,  # '\x91'
-    146: 99,  # '\x92'
-    147: 85,  # '\x93'
-    148: 83,  # '\x94'
-    149: 225,  # '\x95'
-    150: 226,  # '\x96'
-    151: 227,  # '\x97'
-    152: 228,  # '\x98'
-    153: 229,  # '\x99'
-    154: 230,  # '\x9a'
-    155: 231,  # '\x9b'
-    156: 232,  # '\x9c'
-    157: 233,  # '\x9d'
-    158: 234,  # '\x9e'
-    159: 235,  # '\x9f'
-    160: 236,  # None
-    161: 5,  # 'ก'
-    162: 30,  # 'ข'
-    163: 237,  # 'ฃ'
-    164: 24,  # 'ค'
-    165: 238,  # 'ฅ'
-    166: 75,  # 'ฆ'
-    167: 8,  # 'ง'
-    168: 26,  # 'จ'
-    169: 52,  # 'ฉ'
-    170: 34,  # 'ช'
-    171: 51,  # 'ซ'
-    172: 119,  # 'ฌ'
-    173: 47,  # 'ญ'
-    174: 58,  # 'ฎ'
-    175: 57,  # 'ฏ'
-    176: 49,  # 'ฐ'
-    177: 53,  # 'ฑ'
-    178: 55,  # 'ฒ'
-    179: 43,  # 'ณ'
-    180: 20,  # 'ด'
-    181: 19,  # 'ต'
-    182: 44,  # 'ถ'
-    183: 14,  # 'ท'
-    184: 48,  # 'ธ'
-    185: 3,  # 'น'
-    186: 17,  # 'บ'
-    187: 25,  # 'ป'
-    188: 39,  # 'ผ'
-    189: 62,  # 'ฝ'
-    190: 31,  # 'พ'
-    191: 54,  # 'ฟ'
-    192: 45,  # 'ภ'
-    193: 9,  # 'ม'
-    194: 16,  # 'ย'
-    195: 2,  # 'ร'
-    196: 61,  # 'ฤ'
-    197: 15,  # 'ล'
-    198: 239,  # 'ฦ'
-    199: 12,  # 'ว'
-    200: 42,  # 'ศ'
-    201: 46,  # 'ษ'
-    202: 18,  # 'ส'
-    203: 21,  # 'ห'
-    204: 76,  # 'ฬ'
-    205: 4,  # 'อ'
-    206: 66,  # 'ฮ'
-    207: 63,  # 'ฯ'
-    208: 22,  # 'ะ'
-    209: 10,  # 'ั'
-    210: 1,  # 'า'
-    211: 36,  # 'ำ'
-    212: 23,  # 'ิ'
-    213: 13,  # 'ี'
-    214: 40,  # 'ึ'
-    215: 27,  # 'ื'
-    216: 32,  # 'ุ'
-    217: 35,  # 'ู'
-    218: 86,  # 'ฺ'
-    219: 240,  # None
-    220: 241,  # None
-    221: 242,  # None
-    222: 243,  # None
-    223: 244,  # '฿'
-    224: 11,  # 'เ'
-    225: 28,  # 'แ'
-    226: 41,  # 'โ'
-    227: 29,  # 'ใ'
-    228: 33,  # 'ไ'
-    229: 245,  # 'ๅ'
-    230: 50,  # 'ๆ'
-    231: 37,  # '็'
-    232: 6,  # '่'
-    233: 7,  # '้'
-    234: 67,  # '๊'
-    235: 77,  # '๋'
-    236: 38,  # '์'
-    237: 93,  # 'ํ'
-    238: 246,  # '๎'
-    239: 247,  # '๏'
-    240: 68,  # '๐'
-    241: 56,  # '๑'
-    242: 59,  # '๒'
-    243: 65,  # '๓'
-    244: 69,  # '๔'
-    245: 60,  # '๕'
-    246: 70,  # '๖'
-    247: 80,  # '๗'
-    248: 71,  # '๘'
-    249: 87,  # '๙'
-    250: 248,  # '๚'
-    251: 249,  # '๛'
-    252: 250,  # None
-    253: 251,  # None
-    254: 252,  # None
-    255: 253,  # None
+     0: 255,  # '\x00'
+     1: 255,  # '\x01'
+     2: 255,  # '\x02'
+     3: 255,  # '\x03'
+     4: 255,  # '\x04'
+     5: 255,  # '\x05'
+     6: 255,  # '\x06'
+     7: 255,  # '\x07'
+     8: 255,  # '\x08'
+     9: 255,  # '\t'
+     10: 254,  # '\n'
+     11: 255,  # '\x0b'
+     12: 255,  # '\x0c'
+     13: 254,  # '\r'
+     14: 255,  # '\x0e'
+     15: 255,  # '\x0f'
+     16: 255,  # '\x10'
+     17: 255,  # '\x11'
+     18: 255,  # '\x12'
+     19: 255,  # '\x13'
+     20: 255,  # '\x14'
+     21: 255,  # '\x15'
+     22: 255,  # '\x16'
+     23: 255,  # '\x17'
+     24: 255,  # '\x18'
+     25: 255,  # '\x19'
+     26: 255,  # '\x1a'
+     27: 255,  # '\x1b'
+     28: 255,  # '\x1c'
+     29: 255,  # '\x1d'
+     30: 255,  # '\x1e'
+     31: 255,  # '\x1f'
+     32: 253,  # ' '
+     33: 253,  # '!'
+     34: 253,  # '"'
+     35: 253,  # '#'
+     36: 253,  # '$'
+     37: 253,  # '%'
+     38: 253,  # '&'
+     39: 253,  # "'"
+     40: 253,  # '('
+     41: 253,  # ')'
+     42: 253,  # '*'
+     43: 253,  # '+'
+     44: 253,  # ','
+     45: 253,  # '-'
+     46: 253,  # '.'
+     47: 253,  # '/'
+     48: 252,  # '0'
+     49: 252,  # '1'
+     50: 252,  # '2'
+     51: 252,  # '3'
+     52: 252,  # '4'
+     53: 252,  # '5'
+     54: 252,  # '6'
+     55: 252,  # '7'
+     56: 252,  # '8'
+     57: 252,  # '9'
+     58: 253,  # ':'
+     59: 253,  # ';'
+     60: 253,  # '<'
+     61: 253,  # '='
+     62: 253,  # '>'
+     63: 253,  # '?'
+     64: 253,  # '@'
+     65: 182,  # 'A'
+     66: 106,  # 'B'
+     67: 107,  # 'C'
+     68: 100,  # 'D'
+     69: 183,  # 'E'
+     70: 184,  # 'F'
+     71: 185,  # 'G'
+     72: 101,  # 'H'
+     73: 94,  # 'I'
+     74: 186,  # 'J'
+     75: 187,  # 'K'
+     76: 108,  # 'L'
+     77: 109,  # 'M'
+     78: 110,  # 'N'
+     79: 111,  # 'O'
+     80: 188,  # 'P'
+     81: 189,  # 'Q'
+     82: 190,  # 'R'
+     83: 89,  # 'S'
+     84: 95,  # 'T'
+     85: 112,  # 'U'
+     86: 113,  # 'V'
+     87: 191,  # 'W'
+     88: 192,  # 'X'
+     89: 193,  # 'Y'
+     90: 194,  # 'Z'
+     91: 253,  # '['
+     92: 253,  # '\\'
+     93: 253,  # ']'
+     94: 253,  # '^'
+     95: 253,  # '_'
+     96: 253,  # '`'
+     97: 64,  # 'a'
+     98: 72,  # 'b'
+     99: 73,  # 'c'
+     100: 114,  # 'd'
+     101: 74,  # 'e'
+     102: 115,  # 'f'
+     103: 116,  # 'g'
+     104: 102,  # 'h'
+     105: 81,  # 'i'
+     106: 201,  # 'j'
+     107: 117,  # 'k'
+     108: 90,  # 'l'
+     109: 103,  # 'm'
+     110: 78,  # 'n'
+     111: 82,  # 'o'
+     112: 96,  # 'p'
+     113: 202,  # 'q'
+     114: 91,  # 'r'
+     115: 79,  # 's'
+     116: 84,  # 't'
+     117: 104,  # 'u'
+     118: 105,  # 'v'
+     119: 97,  # 'w'
+     120: 98,  # 'x'
+     121: 92,  # 'y'
+     122: 203,  # 'z'
+     123: 253,  # '{'
+     124: 253,  # '|'
+     125: 253,  # '}'
+     126: 253,  # '~'
+     127: 253,  # '\x7f'
+     128: 209,  # '\x80'
+     129: 210,  # '\x81'
+     130: 211,  # '\x82'
+     131: 212,  # '\x83'
+     132: 213,  # '\x84'
+     133: 88,  # '\x85'
+     134: 214,  # '\x86'
+     135: 215,  # '\x87'
+     136: 216,  # '\x88'
+     137: 217,  # '\x89'
+     138: 218,  # '\x8a'
+     139: 219,  # '\x8b'
+     140: 220,  # '\x8c'
+     141: 118,  # '\x8d'
+     142: 221,  # '\x8e'
+     143: 222,  # '\x8f'
+     144: 223,  # '\x90'
+     145: 224,  # '\x91'
+     146: 99,  # '\x92'
+     147: 85,  # '\x93'
+     148: 83,  # '\x94'
+     149: 225,  # '\x95'
+     150: 226,  # '\x96'
+     151: 227,  # '\x97'
+     152: 228,  # '\x98'
+     153: 229,  # '\x99'
+     154: 230,  # '\x9a'
+     155: 231,  # '\x9b'
+     156: 232,  # '\x9c'
+     157: 233,  # '\x9d'
+     158: 234,  # '\x9e'
+     159: 235,  # '\x9f'
+     160: 236,  # None
+     161: 5,  # 'ก'
+     162: 30,  # 'ข'
+     163: 237,  # 'ฃ'
+     164: 24,  # 'ค'
+     165: 238,  # 'ฅ'
+     166: 75,  # 'ฆ'
+     167: 8,  # 'ง'
+     168: 26,  # 'จ'
+     169: 52,  # 'ฉ'
+     170: 34,  # 'ช'
+     171: 51,  # 'ซ'
+     172: 119,  # 'ฌ'
+     173: 47,  # 'ญ'
+     174: 58,  # 'ฎ'
+     175: 57,  # 'ฏ'
+     176: 49,  # 'ฐ'
+     177: 53,  # 'ฑ'
+     178: 55,  # 'ฒ'
+     179: 43,  # 'ณ'
+     180: 20,  # 'ด'
+     181: 19,  # 'ต'
+     182: 44,  # 'ถ'
+     183: 14,  # 'ท'
+     184: 48,  # 'ธ'
+     185: 3,  # 'น'
+     186: 17,  # 'บ'
+     187: 25,  # 'ป'
+     188: 39,  # 'ผ'
+     189: 62,  # 'ฝ'
+     190: 31,  # 'พ'
+     191: 54,  # 'ฟ'
+     192: 45,  # 'ภ'
+     193: 9,  # 'ม'
+     194: 16,  # 'ย'
+     195: 2,  # 'ร'
+     196: 61,  # 'ฤ'
+     197: 15,  # 'ล'
+     198: 239,  # 'ฦ'
+     199: 12,  # 'ว'
+     200: 42,  # 'ศ'
+     201: 46,  # 'ษ'
+     202: 18,  # 'ส'
+     203: 21,  # 'ห'
+     204: 76,  # 'ฬ'
+     205: 4,  # 'อ'
+     206: 66,  # 'ฮ'
+     207: 63,  # 'ฯ'
+     208: 22,  # 'ะ'
+     209: 10,  # 'ั'
+     210: 1,  # 'า'
+     211: 36,  # 'ำ'
+     212: 23,  # 'ิ'
+     213: 13,  # 'ี'
+     214: 40,  # 'ึ'
+     215: 27,  # 'ื'
+     216: 32,  # 'ุ'
+     217: 35,  # 'ู'
+     218: 86,  # 'ฺ'
+     219: 240,  # None
+     220: 241,  # None
+     221: 242,  # None
+     222: 243,  # None
+     223: 244,  # '฿'
+     224: 11,  # 'เ'
+     225: 28,  # 'แ'
+     226: 41,  # 'โ'
+     227: 29,  # 'ใ'
+     228: 33,  # 'ไ'
+     229: 245,  # 'ๅ'
+     230: 50,  # 'ๆ'
+     231: 37,  # '็'
+     232: 6,  # '่'
+     233: 7,  # '้'
+     234: 67,  # '๊'
+     235: 77,  # '๋'
+     236: 38,  # '์'
+     237: 93,  # 'ํ'
+     238: 246,  # '๎'
+     239: 247,  # '๏'
+     240: 68,  # '๐'
+     241: 56,  # '๑'
+     242: 59,  # '๒'
+     243: 65,  # '๓'
+     244: 69,  # '๔'
+     245: 60,  # '๕'
+     246: 70,  # '๖'
+     247: 80,  # '๗'
+     248: 71,  # '๘'
+     249: 87,  # '๙'
+     250: 248,  # '๚'
+     251: 249,  # '๛'
+     252: 250,  # None
+     253: 251,  # None
+     254: 252,  # None
+     255: 253,  # None
 }
 
-TIS_620_THAI_MODEL = SingleByteCharSetModel(
-    charset_name="TIS-620",
-    language="Thai",
-    char_to_order_map=TIS_620_THAI_CHAR_TO_ORDER,
-    language_model=THAI_LANG_MODEL,
-    typical_positive_ratio=0.926386,
-    keep_ascii_letters=False,
-    alphabet="กขฃคฅฆงจฉชซฌญฎฏฐฑฒณดตถทธนบปผฝพฟภมยรฤลฦวศษสหฬอฮฯะัาำิีึืฺุู฿เแโใไๅๆ็่้๊๋์ํ๎๏๐๑๒๓๔๕๖๗๘๙๚๛",
-)
+TIS_620_THAI_MODEL = SingleByteCharSetModel(charset_name='TIS-620',
+                                            language='Thai',
+                                            char_to_order_map=TIS_620_THAI_CHAR_TO_ORDER,
+                                            language_model=THAI_LANG_MODEL,
+                                            typical_positive_ratio=0.926386,
+                                            keep_ascii_letters=False,
+                                            alphabet='กขฃคฅฆงจฉชซฌญฎฏฐฑฒณดตถทธนบปผฝพฟภมยรฤลฦวศษสหฬอฮฯะัาำิีึืฺุู฿เแโใไๅๆ็่้๊๋์ํ๎๏๐๑๒๓๔๕๖๗๘๙๚๛')
+
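
The "# 3: Positive / # 2: Likely / # 1: Unlikely" scale in the context lines above is how the language models grade adjacent letter pairs: the model keys a pair of frequency ranks to one of those likelihood values, and typical_positive_ratio records the share of well-graded pairs expected in genuine text of the language. A minimal sketch of that comparison, assuming the nested-dict layout of the vendored models; positive_ratio, is_plausible, and the 0.85 slack factor are illustrative assumptions, not chardet's actual thresholds:

# Minimal sketch (illustrative, not chardet's actual scoring): grade adjacent
# letter pairs with a model's bigram table and compare the hit rate against
# its typical_positive_ratio.

def positive_ratio(data: bytes, model) -> float:
    orders = [model.char_to_order_map.get(b, 255) for b in data]
    letters = [o for o in orders if o < 250]      # drop sentinel ranks
    graded = hits = 0
    for first, second in zip(letters, letters[1:]):
        graded += 1
        # language_model maps (rank, rank) -> 0..3 likelihood of the pair
        if model.language_model.get(first, {}).get(second, 0) >= 2:
            hits += 1
    return hits / graded if graded else 0.0

def is_plausible(data: bytes, model) -> bool:
    # Accept when the sample reaches ~85% of the ratio typical for the language
    return positive_ratio(data, model) >= 0.85 * model.typical_positive_ratio

# Usage sketch: is_plausible(some_bytes, TIS_620_THAI_MODEL) would accept text
# whose pair statistics resemble genuine TIS-620-encoded Thai.
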
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/langturkishmodel.py b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/langturkishmodel.py
index 291857c..43f4230 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/langturkishmodel.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/langturkishmodel.py
@@ -1,5 +1,9 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
 from pip._vendor.chardet.sbcharsetprober import SingleByteCharSetModel
 
+
 # 3: Positive
 # 2: Likely
 # 1: Unlikely
@@ -4111,270 +4115,269 @@
 
 # Character Mapping Table(s):
 ISO_8859_9_TURKISH_CHAR_TO_ORDER = {
-    0: 255,  # '\x00'
-    1: 255,  # '\x01'
-    2: 255,  # '\x02'
-    3: 255,  # '\x03'
-    4: 255,  # '\x04'
-    5: 255,  # '\x05'
-    6: 255,  # '\x06'
-    7: 255,  # '\x07'
-    8: 255,  # '\x08'
-    9: 255,  # '\t'
-    10: 255,  # '\n'
-    11: 255,  # '\x0b'
-    12: 255,  # '\x0c'
-    13: 255,  # '\r'
-    14: 255,  # '\x0e'
-    15: 255,  # '\x0f'
-    16: 255,  # '\x10'
-    17: 255,  # '\x11'
-    18: 255,  # '\x12'
-    19: 255,  # '\x13'
-    20: 255,  # '\x14'
-    21: 255,  # '\x15'
-    22: 255,  # '\x16'
-    23: 255,  # '\x17'
-    24: 255,  # '\x18'
-    25: 255,  # '\x19'
-    26: 255,  # '\x1a'
-    27: 255,  # '\x1b'
-    28: 255,  # '\x1c'
-    29: 255,  # '\x1d'
-    30: 255,  # '\x1e'
-    31: 255,  # '\x1f'
-    32: 255,  # ' '
-    33: 255,  # '!'
-    34: 255,  # '"'
-    35: 255,  # '#'
-    36: 255,  # '$'
-    37: 255,  # '%'
-    38: 255,  # '&'
-    39: 255,  # "'"
-    40: 255,  # '('
-    41: 255,  # ')'
-    42: 255,  # '*'
-    43: 255,  # '+'
-    44: 255,  # ','
-    45: 255,  # '-'
-    46: 255,  # '.'
-    47: 255,  # '/'
-    48: 255,  # '0'
-    49: 255,  # '1'
-    50: 255,  # '2'
-    51: 255,  # '3'
-    52: 255,  # '4'
-    53: 255,  # '5'
-    54: 255,  # '6'
-    55: 255,  # '7'
-    56: 255,  # '8'
-    57: 255,  # '9'
-    58: 255,  # ':'
-    59: 255,  # ';'
-    60: 255,  # '<'
-    61: 255,  # '='
-    62: 255,  # '>'
-    63: 255,  # '?'
-    64: 255,  # '@'
-    65: 23,  # 'A'
-    66: 37,  # 'B'
-    67: 47,  # 'C'
-    68: 39,  # 'D'
-    69: 29,  # 'E'
-    70: 52,  # 'F'
-    71: 36,  # 'G'
-    72: 45,  # 'H'
-    73: 53,  # 'I'
-    74: 60,  # 'J'
-    75: 16,  # 'K'
-    76: 49,  # 'L'
-    77: 20,  # 'M'
-    78: 46,  # 'N'
-    79: 42,  # 'O'
-    80: 48,  # 'P'
-    81: 69,  # 'Q'
-    82: 44,  # 'R'
-    83: 35,  # 'S'
-    84: 31,  # 'T'
-    85: 51,  # 'U'
-    86: 38,  # 'V'
-    87: 62,  # 'W'
-    88: 65,  # 'X'
-    89: 43,  # 'Y'
-    90: 56,  # 'Z'
-    91: 255,  # '['
-    92: 255,  # '\\'
-    93: 255,  # ']'
-    94: 255,  # '^'
-    95: 255,  # '_'
-    96: 255,  # '`'
-    97: 1,  # 'a'
-    98: 21,  # 'b'
-    99: 28,  # 'c'
-    100: 12,  # 'd'
-    101: 2,  # 'e'
-    102: 18,  # 'f'
-    103: 27,  # 'g'
-    104: 25,  # 'h'
-    105: 3,  # 'i'
-    106: 24,  # 'j'
-    107: 10,  # 'k'
-    108: 5,  # 'l'
-    109: 13,  # 'm'
-    110: 4,  # 'n'
-    111: 15,  # 'o'
-    112: 26,  # 'p'
-    113: 64,  # 'q'
-    114: 7,  # 'r'
-    115: 8,  # 's'
-    116: 9,  # 't'
-    117: 14,  # 'u'
-    118: 32,  # 'v'
-    119: 57,  # 'w'
-    120: 58,  # 'x'
-    121: 11,  # 'y'
-    122: 22,  # 'z'
-    123: 255,  # '{'
-    124: 255,  # '|'
-    125: 255,  # '}'
-    126: 255,  # '~'
-    127: 255,  # '\x7f'
-    128: 180,  # '\x80'
-    129: 179,  # '\x81'
-    130: 178,  # '\x82'
-    131: 177,  # '\x83'
-    132: 176,  # '\x84'
-    133: 175,  # '\x85'
-    134: 174,  # '\x86'
-    135: 173,  # '\x87'
-    136: 172,  # '\x88'
-    137: 171,  # '\x89'
-    138: 170,  # '\x8a'
-    139: 169,  # '\x8b'
-    140: 168,  # '\x8c'
-    141: 167,  # '\x8d'
-    142: 166,  # '\x8e'
-    143: 165,  # '\x8f'
-    144: 164,  # '\x90'
-    145: 163,  # '\x91'
-    146: 162,  # '\x92'
-    147: 161,  # '\x93'
-    148: 160,  # '\x94'
-    149: 159,  # '\x95'
-    150: 101,  # '\x96'
-    151: 158,  # '\x97'
-    152: 157,  # '\x98'
-    153: 156,  # '\x99'
-    154: 155,  # '\x9a'
-    155: 154,  # '\x9b'
-    156: 153,  # '\x9c'
-    157: 152,  # '\x9d'
-    158: 151,  # '\x9e'
-    159: 106,  # '\x9f'
-    160: 150,  # '\xa0'
-    161: 149,  # '¡'
-    162: 148,  # '¢'
-    163: 147,  # '£'
-    164: 146,  # '¤'
-    165: 145,  # '¥'
-    166: 144,  # '¦'
-    167: 100,  # '§'
-    168: 143,  # '¨'
-    169: 142,  # '©'
-    170: 141,  # 'ª'
-    171: 140,  # '«'
-    172: 139,  # '¬'
-    173: 138,  # '\xad'
-    174: 137,  # '®'
-    175: 136,  # '¯'
-    176: 94,  # '°'
-    177: 80,  # '±'
-    178: 93,  # '²'
-    179: 135,  # '³'
-    180: 105,  # '´'
-    181: 134,  # 'µ'
-    182: 133,  # '¶'
-    183: 63,  # '·'
-    184: 132,  # '¸'
-    185: 131,  # '¹'
-    186: 130,  # 'º'
-    187: 129,  # '»'
-    188: 128,  # '¼'
-    189: 127,  # '½'
-    190: 126,  # '¾'
-    191: 125,  # '¿'
-    192: 124,  # 'À'
-    193: 104,  # 'Á'
-    194: 73,  # 'Â'
-    195: 99,  # 'Ã'
-    196: 79,  # 'Ä'
-    197: 85,  # 'Å'
-    198: 123,  # 'Æ'
-    199: 54,  # 'Ç'
-    200: 122,  # 'È'
-    201: 98,  # 'É'
-    202: 92,  # 'Ê'
-    203: 121,  # 'Ë'
-    204: 120,  # 'Ì'
-    205: 91,  # 'Í'
-    206: 103,  # 'Î'
-    207: 119,  # 'Ï'
-    208: 68,  # 'Ğ'
-    209: 118,  # 'Ñ'
-    210: 117,  # 'Ò'
-    211: 97,  # 'Ó'
-    212: 116,  # 'Ô'
-    213: 115,  # 'Õ'
-    214: 50,  # 'Ö'
-    215: 90,  # '×'
-    216: 114,  # 'Ø'
-    217: 113,  # 'Ù'
-    218: 112,  # 'Ú'
-    219: 111,  # 'Û'
-    220: 55,  # 'Ü'
-    221: 41,  # 'İ'
-    222: 40,  # 'Ş'
-    223: 86,  # 'ß'
-    224: 89,  # 'à'
-    225: 70,  # 'á'
-    226: 59,  # 'â'
-    227: 78,  # 'ã'
-    228: 71,  # 'ä'
-    229: 82,  # 'å'
-    230: 88,  # 'æ'
-    231: 33,  # 'ç'
-    232: 77,  # 'è'
-    233: 66,  # 'é'
-    234: 84,  # 'ê'
-    235: 83,  # 'ë'
-    236: 110,  # 'ì'
-    237: 75,  # 'í'
-    238: 61,  # 'î'
-    239: 96,  # 'ï'
-    240: 30,  # 'ğ'
-    241: 67,  # 'ñ'
-    242: 109,  # 'ò'
-    243: 74,  # 'ó'
-    244: 87,  # 'ô'
-    245: 102,  # 'õ'
-    246: 34,  # 'ö'
-    247: 95,  # '÷'
-    248: 81,  # 'ø'
-    249: 108,  # 'ù'
-    250: 76,  # 'ú'
-    251: 72,  # 'û'
-    252: 17,  # 'ü'
-    253: 6,  # 'ı'
-    254: 19,  # 'ş'
-    255: 107,  # 'ÿ'
+     0: 255,  # '\x00'
+     1: 255,  # '\x01'
+     2: 255,  # '\x02'
+     3: 255,  # '\x03'
+     4: 255,  # '\x04'
+     5: 255,  # '\x05'
+     6: 255,  # '\x06'
+     7: 255,  # '\x07'
+     8: 255,  # '\x08'
+     9: 255,  # '\t'
+     10: 255,  # '\n'
+     11: 255,  # '\x0b'
+     12: 255,  # '\x0c'
+     13: 255,  # '\r'
+     14: 255,  # '\x0e'
+     15: 255,  # '\x0f'
+     16: 255,  # '\x10'
+     17: 255,  # '\x11'
+     18: 255,  # '\x12'
+     19: 255,  # '\x13'
+     20: 255,  # '\x14'
+     21: 255,  # '\x15'
+     22: 255,  # '\x16'
+     23: 255,  # '\x17'
+     24: 255,  # '\x18'
+     25: 255,  # '\x19'
+     26: 255,  # '\x1a'
+     27: 255,  # '\x1b'
+     28: 255,  # '\x1c'
+     29: 255,  # '\x1d'
+     30: 255,  # '\x1e'
+     31: 255,  # '\x1f'
+     32: 255,  # ' '
+     33: 255,  # '!'
+     34: 255,  # '"'
+     35: 255,  # '#'
+     36: 255,  # '$'
+     37: 255,  # '%'
+     38: 255,  # '&'
+     39: 255,  # "'"
+     40: 255,  # '('
+     41: 255,  # ')'
+     42: 255,  # '*'
+     43: 255,  # '+'
+     44: 255,  # ','
+     45: 255,  # '-'
+     46: 255,  # '.'
+     47: 255,  # '/'
+     48: 255,  # '0'
+     49: 255,  # '1'
+     50: 255,  # '2'
+     51: 255,  # '3'
+     52: 255,  # '4'
+     53: 255,  # '5'
+     54: 255,  # '6'
+     55: 255,  # '7'
+     56: 255,  # '8'
+     57: 255,  # '9'
+     58: 255,  # ':'
+     59: 255,  # ';'
+     60: 255,  # '<'
+     61: 255,  # '='
+     62: 255,  # '>'
+     63: 255,  # '?'
+     64: 255,  # '@'
+     65: 23,  # 'A'
+     66: 37,  # 'B'
+     67: 47,  # 'C'
+     68: 39,  # 'D'
+     69: 29,  # 'E'
+     70: 52,  # 'F'
+     71: 36,  # 'G'
+     72: 45,  # 'H'
+     73: 53,  # 'I'
+     74: 60,  # 'J'
+     75: 16,  # 'K'
+     76: 49,  # 'L'
+     77: 20,  # 'M'
+     78: 46,  # 'N'
+     79: 42,  # 'O'
+     80: 48,  # 'P'
+     81: 69,  # 'Q'
+     82: 44,  # 'R'
+     83: 35,  # 'S'
+     84: 31,  # 'T'
+     85: 51,  # 'U'
+     86: 38,  # 'V'
+     87: 62,  # 'W'
+     88: 65,  # 'X'
+     89: 43,  # 'Y'
+     90: 56,  # 'Z'
+     91: 255,  # '['
+     92: 255,  # '\\'
+     93: 255,  # ']'
+     94: 255,  # '^'
+     95: 255,  # '_'
+     96: 255,  # '`'
+     97: 1,  # 'a'
+     98: 21,  # 'b'
+     99: 28,  # 'c'
+     100: 12,  # 'd'
+     101: 2,  # 'e'
+     102: 18,  # 'f'
+     103: 27,  # 'g'
+     104: 25,  # 'h'
+     105: 3,  # 'i'
+     106: 24,  # 'j'
+     107: 10,  # 'k'
+     108: 5,  # 'l'
+     109: 13,  # 'm'
+     110: 4,  # 'n'
+     111: 15,  # 'o'
+     112: 26,  # 'p'
+     113: 64,  # 'q'
+     114: 7,  # 'r'
+     115: 8,  # 's'
+     116: 9,  # 't'
+     117: 14,  # 'u'
+     118: 32,  # 'v'
+     119: 57,  # 'w'
+     120: 58,  # 'x'
+     121: 11,  # 'y'
+     122: 22,  # 'z'
+     123: 255,  # '{'
+     124: 255,  # '|'
+     125: 255,  # '}'
+     126: 255,  # '~'
+     127: 255,  # '\x7f'
+     128: 180,  # '\x80'
+     129: 179,  # '\x81'
+     130: 178,  # '\x82'
+     131: 177,  # '\x83'
+     132: 176,  # '\x84'
+     133: 175,  # '\x85'
+     134: 174,  # '\x86'
+     135: 173,  # '\x87'
+     136: 172,  # '\x88'
+     137: 171,  # '\x89'
+     138: 170,  # '\x8a'
+     139: 169,  # '\x8b'
+     140: 168,  # '\x8c'
+     141: 167,  # '\x8d'
+     142: 166,  # '\x8e'
+     143: 165,  # '\x8f'
+     144: 164,  # '\x90'
+     145: 163,  # '\x91'
+     146: 162,  # '\x92'
+     147: 161,  # '\x93'
+     148: 160,  # '\x94'
+     149: 159,  # '\x95'
+     150: 101,  # '\x96'
+     151: 158,  # '\x97'
+     152: 157,  # '\x98'
+     153: 156,  # '\x99'
+     154: 155,  # '\x9a'
+     155: 154,  # '\x9b'
+     156: 153,  # '\x9c'
+     157: 152,  # '\x9d'
+     158: 151,  # '\x9e'
+     159: 106,  # '\x9f'
+     160: 150,  # '\xa0'
+     161: 149,  # '¡'
+     162: 148,  # '¢'
+     163: 147,  # '£'
+     164: 146,  # '¤'
+     165: 145,  # '¥'
+     166: 144,  # '¦'
+     167: 100,  # '§'
+     168: 143,  # '¨'
+     169: 142,  # '©'
+     170: 141,  # 'ª'
+     171: 140,  # '«'
+     172: 139,  # '¬'
+     173: 138,  # '\xad'
+     174: 137,  # '®'
+     175: 136,  # '¯'
+     176: 94,  # '°'
+     177: 80,  # '±'
+     178: 93,  # '²'
+     179: 135,  # '³'
+     180: 105,  # '´'
+     181: 134,  # 'µ'
+     182: 133,  # '¶'
+     183: 63,  # '·'
+     184: 132,  # '¸'
+     185: 131,  # '¹'
+     186: 130,  # 'º'
+     187: 129,  # '»'
+     188: 128,  # '¼'
+     189: 127,  # '½'
+     190: 126,  # '¾'
+     191: 125,  # '¿'
+     192: 124,  # 'À'
+     193: 104,  # 'Á'
+     194: 73,  # 'Â'
+     195: 99,  # 'Ã'
+     196: 79,  # 'Ä'
+     197: 85,  # 'Å'
+     198: 123,  # 'Æ'
+     199: 54,  # 'Ç'
+     200: 122,  # 'È'
+     201: 98,  # 'É'
+     202: 92,  # 'Ê'
+     203: 121,  # 'Ë'
+     204: 120,  # 'Ì'
+     205: 91,  # 'Í'
+     206: 103,  # 'Î'
+     207: 119,  # 'Ï'
+     208: 68,  # 'Ğ'
+     209: 118,  # 'Ñ'
+     210: 117,  # 'Ò'
+     211: 97,  # 'Ó'
+     212: 116,  # 'Ô'
+     213: 115,  # 'Õ'
+     214: 50,  # 'Ö'
+     215: 90,  # '×'
+     216: 114,  # 'Ø'
+     217: 113,  # 'Ù'
+     218: 112,  # 'Ú'
+     219: 111,  # 'Û'
+     220: 55,  # 'Ü'
+     221: 41,  # 'İ'
+     222: 40,  # 'Ş'
+     223: 86,  # 'ß'
+     224: 89,  # 'à'
+     225: 70,  # 'á'
+     226: 59,  # 'â'
+     227: 78,  # 'ã'
+     228: 71,  # 'ä'
+     229: 82,  # 'å'
+     230: 88,  # 'æ'
+     231: 33,  # 'ç'
+     232: 77,  # 'è'
+     233: 66,  # 'é'
+     234: 84,  # 'ê'
+     235: 83,  # 'ë'
+     236: 110,  # 'ì'
+     237: 75,  # 'í'
+     238: 61,  # 'î'
+     239: 96,  # 'ï'
+     240: 30,  # 'ğ'
+     241: 67,  # 'ñ'
+     242: 109,  # 'ò'
+     243: 74,  # 'ó'
+     244: 87,  # 'ô'
+     245: 102,  # 'õ'
+     246: 34,  # 'ö'
+     247: 95,  # '÷'
+     248: 81,  # 'ø'
+     249: 108,  # 'ù'
+     250: 76,  # 'ú'
+     251: 72,  # 'û'
+     252: 17,  # 'ü'
+     253: 6,  # 'ı'
+     254: 19,  # 'ş'
+     255: 107,  # 'ÿ'
 }
 
-ISO_8859_9_TURKISH_MODEL = SingleByteCharSetModel(
-    charset_name="ISO-8859-9",
-    language="Turkish",
-    char_to_order_map=ISO_8859_9_TURKISH_CHAR_TO_ORDER,
-    language_model=TURKISH_LANG_MODEL,
-    typical_positive_ratio=0.97029,
-    keep_ascii_letters=True,
-    alphabet="ABCDEFGHIJKLMNOPRSTUVYZabcdefghijklmnoprstuvyzÂÇÎÖÛÜâçîöûüĞğİıŞş",
-)
+ISO_8859_9_TURKISH_MODEL = SingleByteCharSetModel(charset_name='ISO-8859-9',
+                                                  language='Turkish',
+                                                  char_to_order_map=ISO_8859_9_TURKISH_CHAR_TO_ORDER,
+                                                  language_model=TURKISH_LANG_MODEL,
+                                                  typical_positive_ratio=0.97029,
+                                                  keep_ascii_letters=True,
+                                                  alphabet='ABCDEFGHIJKLMNOPRSTUVYZabcdefghijklmnoprstuvyzÂÇÎÖÛÜâçîöûüĞğİıŞş')
+
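For orientation, here is a minimal sketch (not part of the diff) of how a single-byte
model such as the Turkish one above is exercised end to end. It assumes the standalone
chardet package is importable and that the installed release wires the Turkish model
into its detector, which has varied between chardet versions:

    import chardet

    # A Turkish pangram, encoded with the charset this model targets.
    sample = "Pijamalı hasta yağız şoföre çabucak güvendi".encode("iso-8859-9")
    print(chardet.detect(sample))
    # The result is a dict like {'encoding': ..., 'confidence': ..., 'language': ...};
    # the exact values depend on the chardet release.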
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/latin1prober.py b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/latin1prober.py
index 241f14a..7d1e8c2 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/latin1prober.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/latin1prober.py
@@ -41,7 +41,6 @@
 ASO = 7  # accent small other
 CLASS_NUM = 8  # total classes
 
-# fmt: off
 Latin1_CharToClass = (
     OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 00 - 07
     OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 08 - 0F
@@ -92,12 +91,11 @@
     0,  3,  1,  3,  1,  1,  1,  3,  # ASV
     0,  3,  1,  3,  1,  1,  3,  3,  # ASO
 )
-# fmt: on
 
 
 class Latin1Prober(CharSetProber):
     def __init__(self):
-        super().__init__()
+        super(Latin1Prober, self).__init__()
         self._last_char_class = None
         self._freq_counter = None
         self.reset()
@@ -105,7 +103,7 @@ def __init__(self):
     def reset(self):
         self._last_char_class = OTH
         self._freq_counter = [0] * FREQ_CAT_NUM
-        super().reset()
+        CharSetProber.reset(self)
 
     @property
     def charset_name(self):
@@ -116,10 +114,11 @@ def language(self):
         return ""
 
     def feed(self, byte_str):
-        byte_str = self.remove_xml_tags(byte_str)
+        byte_str = self.filter_with_english_letters(byte_str)
         for c in byte_str:
             char_class = Latin1_CharToClass[c]
-            freq = Latin1ClassModel[(self._last_char_class * CLASS_NUM) + char_class]
+            freq = Latin1ClassModel[(self._last_char_class * CLASS_NUM)
+                                    + char_class]
             if freq == 0:
                 self._state = ProbingState.NOT_ME
                 break
@@ -133,13 +132,14 @@ def get_confidence(self):
             return 0.01
 
         total = sum(self._freq_counter)
-        confidence = (
-            0.0
-            if total < 0.01
-            else (self._freq_counter[3] - self._freq_counter[1] * 20.0) / total
-        )
-        confidence = max(confidence, 0.0)
+        if total < 0.01:
+            confidence = 0.0
+        else:
+            confidence = ((self._freq_counter[3] - self._freq_counter[1] * 20.0)
+                          / total)
+        if confidence < 0.0:
+            confidence = 0.0
         # lower the confidence of latin1 so that other, more accurate
         # detectors can take priority.
-        confidence *= 0.73
+        confidence = confidence * 0.73
         return confidence
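The rewritten get_confidence above is behavior-preserving. As a standalone restatement
(an illustration, not code from the diff), the damping step is easy to check with toy
counter values:

    def latin1_confidence(freq_counter):
        # freq_counter tallies character-pair frequency categories
        # (index 3 = common pairs, index 1 = rare ones).
        total = sum(freq_counter)
        if total < 0.01:
            confidence = 0.0
        else:
            confidence = (freq_counter[3] - freq_counter[1] * 20.0) / total
        confidence = max(confidence, 0.0)
        # Damp Latin-1 so that more specific probers win ties.
        return confidence * 0.73

    print(latin1_confidence([0, 1, 10, 80]))  # (80 - 20) / 91 * 0.73 ≈ 0.481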
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/mbcharsetprober.py b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/mbcharsetprober.py
index bf96ad5..6256ecf 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/mbcharsetprober.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/mbcharsetprober.py
@@ -28,7 +28,7 @@
 ######################### END LICENSE BLOCK #########################
 
 from .charsetprober import CharSetProber
-from .enums import MachineState, ProbingState
+from .enums import ProbingState, MachineState
 
 
 class MultiByteCharSetProber(CharSetProber):
@@ -37,13 +37,13 @@ class MultiByteCharSetProber(CharSetProber):
     """
 
     def __init__(self, lang_filter=None):
-        super().__init__(lang_filter=lang_filter)
+        super(MultiByteCharSetProber, self).__init__(lang_filter=lang_filter)
         self.distribution_analyzer = None
         self.coding_sm = None
         self._last_char = [0, 0]
 
     def reset(self):
-        super().reset()
+        super(MultiByteCharSetProber, self).reset()
         if self.coding_sm:
             self.coding_sm.reset()
         if self.distribution_analyzer:
@@ -59,34 +59,30 @@ def language(self):
         raise NotImplementedError
 
     def feed(self, byte_str):
-        for i, byte in enumerate(byte_str):
-            coding_state = self.coding_sm.next_state(byte)
+        for i in range(len(byte_str)):
+            coding_state = self.coding_sm.next_state(byte_str[i])
             if coding_state == MachineState.ERROR:
-                self.logger.debug(
-                    "%s %s prober hit error at byte %s",
-                    self.charset_name,
-                    self.language,
-                    i,
-                )
+                self.logger.debug('%s %s prober hit error at byte %s',
+                                  self.charset_name, self.language, i)
                 self._state = ProbingState.NOT_ME
                 break
-            if coding_state == MachineState.ITS_ME:
+            elif coding_state == MachineState.ITS_ME:
                 self._state = ProbingState.FOUND_IT
                 break
-            if coding_state == MachineState.START:
+            elif coding_state == MachineState.START:
                 char_len = self.coding_sm.get_current_charlen()
                 if i == 0:
-                    self._last_char[1] = byte
+                    self._last_char[1] = byte_str[0]
                     self.distribution_analyzer.feed(self._last_char, char_len)
                 else:
-                    self.distribution_analyzer.feed(byte_str[i - 1 : i + 1], char_len)
+                    self.distribution_analyzer.feed(byte_str[i - 1:i + 1],
+                                                    char_len)
 
         self._last_char[0] = byte_str[-1]
 
         if self.state == ProbingState.DETECTING:
-            if self.distribution_analyzer.got_enough_data() and (
-                self.get_confidence() > self.SHORTCUT_THRESHOLD
-            ):
+            if (self.distribution_analyzer.got_enough_data() and
+                    (self.get_confidence() > self.SHORTCUT_THRESHOLD)):
                 self._state = ProbingState.FOUND_IT
 
         return self.state
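In normal use this feed loop is driven through chardet's UniversalDetector rather than
called directly; a brief usage sketch (not in the diff, assuming the standalone chardet
package is installed):

    from chardet.universaldetector import UniversalDetector

    detector = UniversalDetector()
    for chunk in (b"\xe4\xbd\xa0\xe5\xa5\xbd", b"\xe4\xb8\x96\xe7\x95\x8c"):  # UTF-8 bytes
        detector.feed(chunk)
        if detector.done:  # some prober reached FOUND_IT, so we can stop early
            break
    detector.close()
    print(detector.result)  # typically reports utf-8 here; confidence grows with input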
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/mbcsgroupprober.py b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/mbcsgroupprober.py
index 9448836..530abe7 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/mbcsgroupprober.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/mbcsgroupprober.py
@@ -27,21 +27,20 @@
 # 02110-1301  USA
 ######################### END LICENSE BLOCK #########################
 
-from .big5prober import Big5Prober
 from .charsetgroupprober import CharSetGroupProber
-from .cp949prober import CP949Prober
+from .utf8prober import UTF8Prober
+from .sjisprober import SJISProber
 from .eucjpprober import EUCJPProber
+from .gb2312prober import GB2312Prober
 from .euckrprober import EUCKRProber
+from .cp949prober import CP949Prober
+from .big5prober import Big5Prober
 from .euctwprober import EUCTWProber
-from .gb2312prober import GB2312Prober
-from .johabprober import JOHABProber
-from .sjisprober import SJISProber
-from .utf8prober import UTF8Prober
 
 
 class MBCSGroupProber(CharSetGroupProber):
     def __init__(self, lang_filter=None):
-        super().__init__(lang_filter=lang_filter)
+        super(MBCSGroupProber, self).__init__(lang_filter=lang_filter)
         self.probers = [
             UTF8Prober(),
             SJISProber(),
@@ -50,7 +49,6 @@ def __init__(self, lang_filter=None):
             EUCKRProber(),
             CP949Prober(),
             Big5Prober(),
-            EUCTWProber(),
-            JOHABProber(),
+            EUCTWProber()
         ]
         self.reset()
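As a rough smoke test of this prober group (a sketch, assuming a standalone chardet with
the same prober set; feeding a prober directly like this bypasses the usual
UniversalDetector front end):

    from chardet.mbcsgroupprober import MBCSGroupProber

    prober = MBCSGroupProber()
    prober.feed("日本語の文字コード判定のテストです".encode("shift_jis"))
    # SHIFT_JIS should lead for this input; confidence depends on sample length.
    print(prober.charset_name, prober.get_confidence())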
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/mbcssm.py b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/mbcssm.py
index d3b9c4b..8360d0f 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/mbcssm.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/mbcssm.py
@@ -29,40 +29,39 @@
 
 # BIG5
 
-# fmt: off
 BIG5_CLS = (
-    1, 1, 1, 1, 1, 1, 1, 1,  # 00 - 07    #allow 0x00 as legal value
-    1, 1, 1, 1, 1, 1, 0, 0,  # 08 - 0f
-    1, 1, 1, 1, 1, 1, 1, 1,  # 10 - 17
-    1, 1, 1, 0, 1, 1, 1, 1,  # 18 - 1f
-    1, 1, 1, 1, 1, 1, 1, 1,  # 20 - 27
-    1, 1, 1, 1, 1, 1, 1, 1,  # 28 - 2f
-    1, 1, 1, 1, 1, 1, 1, 1,  # 30 - 37
-    1, 1, 1, 1, 1, 1, 1, 1,  # 38 - 3f
-    2, 2, 2, 2, 2, 2, 2, 2,  # 40 - 47
-    2, 2, 2, 2, 2, 2, 2, 2,  # 48 - 4f
-    2, 2, 2, 2, 2, 2, 2, 2,  # 50 - 57
-    2, 2, 2, 2, 2, 2, 2, 2,  # 58 - 5f
-    2, 2, 2, 2, 2, 2, 2, 2,  # 60 - 67
-    2, 2, 2, 2, 2, 2, 2, 2,  # 68 - 6f
-    2, 2, 2, 2, 2, 2, 2, 2,  # 70 - 77
-    2, 2, 2, 2, 2, 2, 2, 1,  # 78 - 7f
-    4, 4, 4, 4, 4, 4, 4, 4,  # 80 - 87
-    4, 4, 4, 4, 4, 4, 4, 4,  # 88 - 8f
-    4, 4, 4, 4, 4, 4, 4, 4,  # 90 - 97
-    4, 4, 4, 4, 4, 4, 4, 4,  # 98 - 9f
-    4, 3, 3, 3, 3, 3, 3, 3,  # a0 - a7
-    3, 3, 3, 3, 3, 3, 3, 3,  # a8 - af
-    3, 3, 3, 3, 3, 3, 3, 3,  # b0 - b7
-    3, 3, 3, 3, 3, 3, 3, 3,  # b8 - bf
-    3, 3, 3, 3, 3, 3, 3, 3,  # c0 - c7
-    3, 3, 3, 3, 3, 3, 3, 3,  # c8 - cf
-    3, 3, 3, 3, 3, 3, 3, 3,  # d0 - d7
-    3, 3, 3, 3, 3, 3, 3, 3,  # d8 - df
-    3, 3, 3, 3, 3, 3, 3, 3,  # e0 - e7
-    3, 3, 3, 3, 3, 3, 3, 3,  # e8 - ef
-    3, 3, 3, 3, 3, 3, 3, 3,  # f0 - f7
-    3, 3, 3, 3, 3, 3, 3, 0  # f8 - ff
+    1,1,1,1,1,1,1,1,  # 00 - 07    #allow 0x00 as legal value
+    1,1,1,1,1,1,0,0,  # 08 - 0f
+    1,1,1,1,1,1,1,1,  # 10 - 17
+    1,1,1,0,1,1,1,1,  # 18 - 1f
+    1,1,1,1,1,1,1,1,  # 20 - 27
+    1,1,1,1,1,1,1,1,  # 28 - 2f
+    1,1,1,1,1,1,1,1,  # 30 - 37
+    1,1,1,1,1,1,1,1,  # 38 - 3f
+    2,2,2,2,2,2,2,2,  # 40 - 47
+    2,2,2,2,2,2,2,2,  # 48 - 4f
+    2,2,2,2,2,2,2,2,  # 50 - 57
+    2,2,2,2,2,2,2,2,  # 58 - 5f
+    2,2,2,2,2,2,2,2,  # 60 - 67
+    2,2,2,2,2,2,2,2,  # 68 - 6f
+    2,2,2,2,2,2,2,2,  # 70 - 77
+    2,2,2,2,2,2,2,1,  # 78 - 7f
+    4,4,4,4,4,4,4,4,  # 80 - 87
+    4,4,4,4,4,4,4,4,  # 88 - 8f
+    4,4,4,4,4,4,4,4,  # 90 - 97
+    4,4,4,4,4,4,4,4,  # 98 - 9f
+    4,3,3,3,3,3,3,3,  # a0 - a7
+    3,3,3,3,3,3,3,3,  # a8 - af
+    3,3,3,3,3,3,3,3,  # b0 - b7
+    3,3,3,3,3,3,3,3,  # b8 - bf
+    3,3,3,3,3,3,3,3,  # c0 - c7
+    3,3,3,3,3,3,3,3,  # c8 - cf
+    3,3,3,3,3,3,3,3,  # d0 - d7
+    3,3,3,3,3,3,3,3,  # d8 - df
+    3,3,3,3,3,3,3,3,  # e0 - e7
+    3,3,3,3,3,3,3,3,  # e8 - ef
+    3,3,3,3,3,3,3,3,  # f0 - f7
+    3,3,3,3,3,3,3,0  # f8 - ff
 )
 
 BIG5_ST = (
@@ -70,37 +69,34 @@
     MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,#08-0f
     MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START#10-17
 )
-# fmt: on
 
 BIG5_CHAR_LEN_TABLE = (0, 1, 1, 2, 0)
 
-BIG5_SM_MODEL = {
-    "class_table": BIG5_CLS,
-    "class_factor": 5,
-    "state_table": BIG5_ST,
-    "char_len_table": BIG5_CHAR_LEN_TABLE,
-    "name": "Big5",
-}
+BIG5_SM_MODEL = {'class_table': BIG5_CLS,
+                 'class_factor': 5,
+                 'state_table': BIG5_ST,
+                 'char_len_table': BIG5_CHAR_LEN_TABLE,
+                 'name': 'Big5'}
 
 # CP949
-# fmt: off
+
 CP949_CLS  = (
-    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0,  # 00 - 0f
-    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1,  # 10 - 1f
-    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,  # 20 - 2f
-    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,  # 30 - 3f
-    1, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,  # 40 - 4f
-    4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 1, 1, 1, 1, 1,  # 50 - 5f
-    1, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,  # 60 - 6f
-    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 1, 1, 1, 1, 1,  # 70 - 7f
-    0, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,  # 80 - 8f
-    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,  # 90 - 9f
-    6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8,  # a0 - af
-    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,  # b0 - bf
-    7, 7, 7, 7, 7, 7, 9, 2, 2, 3, 2, 2, 2, 2, 2, 2,  # c0 - cf
-    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,  # d0 - df
-    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,  # e0 - ef
-    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0,  # f0 - ff
+    1,1,1,1,1,1,1,1, 1,1,1,1,1,1,0,0,  # 00 - 0f
+    1,1,1,1,1,1,1,1, 1,1,1,0,1,1,1,1,  # 10 - 1f
+    1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,  # 20 - 2f
+    1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,  # 30 - 3f
+    1,4,4,4,4,4,4,4, 4,4,4,4,4,4,4,4,  # 40 - 4f
+    4,4,5,5,5,5,5,5, 5,5,5,1,1,1,1,1,  # 50 - 5f
+    1,5,5,5,5,5,5,5, 5,5,5,5,5,5,5,5,  # 60 - 6f
+    5,5,5,5,5,5,5,5, 5,5,5,1,1,1,1,1,  # 70 - 7f
+    0,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6,  # 80 - 8f
+    6,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6,  # 90 - 9f
+    6,7,7,7,7,7,7,7, 7,7,7,7,7,8,8,8,  # a0 - af
+    7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,  # b0 - bf
+    7,7,7,7,7,7,9,2, 2,3,2,2,2,2,2,2,  # c0 - cf
+    2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2,  # d0 - df
+    2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2,  # e0 - ef
+    2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,0,  # f0 - ff
 )
 
 CP949_ST = (
@@ -113,53 +109,50 @@
     MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START, # 5
     MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START, # 6
 )
-# fmt: on
 
 CP949_CHAR_LEN_TABLE = (0, 1, 2, 0, 1, 1, 2, 2, 0, 2)
 
-CP949_SM_MODEL = {
-    "class_table": CP949_CLS,
-    "class_factor": 10,
-    "state_table": CP949_ST,
-    "char_len_table": CP949_CHAR_LEN_TABLE,
-    "name": "CP949",
-}
+CP949_SM_MODEL = {'class_table': CP949_CLS,
+                  'class_factor': 10,
+                  'state_table': CP949_ST,
+                  'char_len_table': CP949_CHAR_LEN_TABLE,
+                  'name': 'CP949'}
 
 # EUC-JP
-# fmt: off
+
 EUCJP_CLS = (
-    4, 4, 4, 4, 4, 4, 4, 4,  # 00 - 07
-    4, 4, 4, 4, 4, 4, 5, 5,  # 08 - 0f
-    4, 4, 4, 4, 4, 4, 4, 4,  # 10 - 17
-    4, 4, 4, 5, 4, 4, 4, 4,  # 18 - 1f
-    4, 4, 4, 4, 4, 4, 4, 4,  # 20 - 27
-    4, 4, 4, 4, 4, 4, 4, 4,  # 28 - 2f
-    4, 4, 4, 4, 4, 4, 4, 4,  # 30 - 37
-    4, 4, 4, 4, 4, 4, 4, 4,  # 38 - 3f
-    4, 4, 4, 4, 4, 4, 4, 4,  # 40 - 47
-    4, 4, 4, 4, 4, 4, 4, 4,  # 48 - 4f
-    4, 4, 4, 4, 4, 4, 4, 4,  # 50 - 57
-    4, 4, 4, 4, 4, 4, 4, 4,  # 58 - 5f
-    4, 4, 4, 4, 4, 4, 4, 4,  # 60 - 67
-    4, 4, 4, 4, 4, 4, 4, 4,  # 68 - 6f
-    4, 4, 4, 4, 4, 4, 4, 4,  # 70 - 77
-    4, 4, 4, 4, 4, 4, 4, 4,  # 78 - 7f
-    5, 5, 5, 5, 5, 5, 5, 5,  # 80 - 87
-    5, 5, 5, 5, 5, 5, 1, 3,  # 88 - 8f
-    5, 5, 5, 5, 5, 5, 5, 5,  # 90 - 97
-    5, 5, 5, 5, 5, 5, 5, 5,  # 98 - 9f
-    5, 2, 2, 2, 2, 2, 2, 2,  # a0 - a7
-    2, 2, 2, 2, 2, 2, 2, 2,  # a8 - af
-    2, 2, 2, 2, 2, 2, 2, 2,  # b0 - b7
-    2, 2, 2, 2, 2, 2, 2, 2,  # b8 - bf
-    2, 2, 2, 2, 2, 2, 2, 2,  # c0 - c7
-    2, 2, 2, 2, 2, 2, 2, 2,  # c8 - cf
-    2, 2, 2, 2, 2, 2, 2, 2,  # d0 - d7
-    2, 2, 2, 2, 2, 2, 2, 2,  # d8 - df
-    0, 0, 0, 0, 0, 0, 0, 0,  # e0 - e7
-    0, 0, 0, 0, 0, 0, 0, 0,  # e8 - ef
-    0, 0, 0, 0, 0, 0, 0, 0,  # f0 - f7
-    0, 0, 0, 0, 0, 0, 0, 5  # f8 - ff
+    4,4,4,4,4,4,4,4,  # 00 - 07
+    4,4,4,4,4,4,5,5,  # 08 - 0f
+    4,4,4,4,4,4,4,4,  # 10 - 17
+    4,4,4,5,4,4,4,4,  # 18 - 1f
+    4,4,4,4,4,4,4,4,  # 20 - 27
+    4,4,4,4,4,4,4,4,  # 28 - 2f
+    4,4,4,4,4,4,4,4,  # 30 - 37
+    4,4,4,4,4,4,4,4,  # 38 - 3f
+    4,4,4,4,4,4,4,4,  # 40 - 47
+    4,4,4,4,4,4,4,4,  # 48 - 4f
+    4,4,4,4,4,4,4,4,  # 50 - 57
+    4,4,4,4,4,4,4,4,  # 58 - 5f
+    4,4,4,4,4,4,4,4,  # 60 - 67
+    4,4,4,4,4,4,4,4,  # 68 - 6f
+    4,4,4,4,4,4,4,4,  # 70 - 77
+    4,4,4,4,4,4,4,4,  # 78 - 7f
+    5,5,5,5,5,5,5,5,  # 80 - 87
+    5,5,5,5,5,5,1,3,  # 88 - 8f
+    5,5,5,5,5,5,5,5,  # 90 - 97
+    5,5,5,5,5,5,5,5,  # 98 - 9f
+    5,2,2,2,2,2,2,2,  # a0 - a7
+    2,2,2,2,2,2,2,2,  # a8 - af
+    2,2,2,2,2,2,2,2,  # b0 - b7
+    2,2,2,2,2,2,2,2,  # b8 - bf
+    2,2,2,2,2,2,2,2,  # c0 - c7
+    2,2,2,2,2,2,2,2,  # c8 - cf
+    2,2,2,2,2,2,2,2,  # d0 - d7
+    2,2,2,2,2,2,2,2,  # d8 - df
+    0,0,0,0,0,0,0,0,  # e0 - e7
+    0,0,0,0,0,0,0,0,  # e8 - ef
+    0,0,0,0,0,0,0,0,  # f0 - f7
+    0,0,0,0,0,0,0,5  # f8 - ff
 )
 
 EUCJP_ST = (
@@ -169,163 +162,100 @@
      MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,     3,MachineState.ERROR,#18-1f
           3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START#20-27
 )
-# fmt: on
 
 EUCJP_CHAR_LEN_TABLE = (2, 2, 2, 3, 1, 0)
 
-EUCJP_SM_MODEL = {
-    "class_table": EUCJP_CLS,
-    "class_factor": 6,
-    "state_table": EUCJP_ST,
-    "char_len_table": EUCJP_CHAR_LEN_TABLE,
-    "name": "EUC-JP",
-}
+EUCJP_SM_MODEL = {'class_table': EUCJP_CLS,
+                  'class_factor': 6,
+                  'state_table': EUCJP_ST,
+                  'char_len_table': EUCJP_CHAR_LEN_TABLE,
+                  'name': 'EUC-JP'}
 
 # EUC-KR
-# fmt: off
-EUCKR_CLS  = (
-    1, 1, 1, 1, 1, 1, 1, 1,  # 00 - 07
-    1, 1, 1, 1, 1, 1, 0, 0,  # 08 - 0f
-    1, 1, 1, 1, 1, 1, 1, 1,  # 10 - 17
-    1, 1, 1, 0, 1, 1, 1, 1,  # 18 - 1f
-    1, 1, 1, 1, 1, 1, 1, 1,  # 20 - 27
-    1, 1, 1, 1, 1, 1, 1, 1,  # 28 - 2f
-    1, 1, 1, 1, 1, 1, 1, 1,  # 30 - 37
-    1, 1, 1, 1, 1, 1, 1, 1,  # 38 - 3f
-    1, 1, 1, 1, 1, 1, 1, 1,  # 40 - 47
-    1, 1, 1, 1, 1, 1, 1, 1,  # 48 - 4f
-    1, 1, 1, 1, 1, 1, 1, 1,  # 50 - 57
-    1, 1, 1, 1, 1, 1, 1, 1,  # 58 - 5f
-    1, 1, 1, 1, 1, 1, 1, 1,  # 60 - 67
-    1, 1, 1, 1, 1, 1, 1, 1,  # 68 - 6f
-    1, 1, 1, 1, 1, 1, 1, 1,  # 70 - 77
-    1, 1, 1, 1, 1, 1, 1, 1,  # 78 - 7f
-    0, 0, 0, 0, 0, 0, 0, 0,  # 80 - 87
-    0, 0, 0, 0, 0, 0, 0, 0,  # 88 - 8f
-    0, 0, 0, 0, 0, 0, 0, 0,  # 90 - 97
-    0, 0, 0, 0, 0, 0, 0, 0,  # 98 - 9f
-    0, 2, 2, 2, 2, 2, 2, 2,  # a0 - a7
-    2, 2, 2, 2, 2, 3, 3, 3,  # a8 - af
-    2, 2, 2, 2, 2, 2, 2, 2,  # b0 - b7
-    2, 2, 2, 2, 2, 2, 2, 2,  # b8 - bf
-    2, 2, 2, 2, 2, 2, 2, 2,  # c0 - c7
-    2, 3, 2, 2, 2, 2, 2, 2,  # c8 - cf
-    2, 2, 2, 2, 2, 2, 2, 2,  # d0 - d7
-    2, 2, 2, 2, 2, 2, 2, 2,  # d8 - df
-    2, 2, 2, 2, 2, 2, 2, 2,  # e0 - e7
-    2, 2, 2, 2, 2, 2, 2, 2,  # e8 - ef
-    2, 2, 2, 2, 2, 2, 2, 2,  # f0 - f7
-    2, 2, 2, 2, 2, 2, 2, 0   # f8 - ff
-)
 
-EUCKR_ST = (
-    MachineState.ERROR,MachineState.START,     3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07
-    MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START #08-0f
-)
-# fmt: on
-
-EUCKR_CHAR_LEN_TABLE = (0, 1, 2, 0)
-
-EUCKR_SM_MODEL = {
-    "class_table": EUCKR_CLS,
-    "class_factor": 4,
-    "state_table": EUCKR_ST,
-    "char_len_table": EUCKR_CHAR_LEN_TABLE,
-    "name": "EUC-KR",
-}
-
-# JOHAB
-# fmt: off
-JOHAB_CLS = (
-    4,4,4,4,4,4,4,4,  # 00 - 07
-    4,4,4,4,4,4,0,0,  # 08 - 0f
-    4,4,4,4,4,4,4,4,  # 10 - 17
-    4,4,4,0,4,4,4,4,  # 18 - 1f
-    4,4,4,4,4,4,4,4,  # 20 - 27
-    4,4,4,4,4,4,4,4,  # 28 - 2f
-    4,3,3,3,3,3,3,3,  # 30 - 37
-    3,3,3,3,3,3,3,3,  # 38 - 3f
-    3,1,1,1,1,1,1,1,  # 40 - 47
+EUCKR_CLS  = (
+    1,1,1,1,1,1,1,1,  # 00 - 07
+    1,1,1,1,1,1,0,0,  # 08 - 0f
+    1,1,1,1,1,1,1,1,  # 10 - 17
+    1,1,1,0,1,1,1,1,  # 18 - 1f
+    1,1,1,1,1,1,1,1,  # 20 - 27
+    1,1,1,1,1,1,1,1,  # 28 - 2f
+    1,1,1,1,1,1,1,1,  # 30 - 37
+    1,1,1,1,1,1,1,1,  # 38 - 3f
+    1,1,1,1,1,1,1,1,  # 40 - 47
     1,1,1,1,1,1,1,1,  # 48 - 4f
     1,1,1,1,1,1,1,1,  # 50 - 57
     1,1,1,1,1,1,1,1,  # 58 - 5f
     1,1,1,1,1,1,1,1,  # 60 - 67
     1,1,1,1,1,1,1,1,  # 68 - 6f
     1,1,1,1,1,1,1,1,  # 70 - 77
-    1,1,1,1,1,1,1,2,  # 78 - 7f
-    6,6,6,6,8,8,8,8,  # 80 - 87
-    8,8,8,8,8,8,8,8,  # 88 - 8f
-    8,7,7,7,7,7,7,7,  # 90 - 97
-    7,7,7,7,7,7,7,7,  # 98 - 9f
-    7,7,7,7,7,7,7,7,  # a0 - a7
-    7,7,7,7,7,7,7,7,  # a8 - af
-    7,7,7,7,7,7,7,7,  # b0 - b7
-    7,7,7,7,7,7,7,7,  # b8 - bf
-    7,7,7,7,7,7,7,7,  # c0 - c7
-    7,7,7,7,7,7,7,7,  # c8 - cf
-    7,7,7,7,5,5,5,5,  # d0 - d7
-    5,9,9,9,9,9,9,5,  # d8 - df
-    9,9,9,9,9,9,9,9,  # e0 - e7
-    9,9,9,9,9,9,9,9,  # e8 - ef
-    9,9,9,9,9,9,9,9,  # f0 - f7
-    9,9,5,5,5,5,5,0   # f8 - ff
+    1,1,1,1,1,1,1,1,  # 78 - 7f
+    0,0,0,0,0,0,0,0,  # 80 - 87
+    0,0,0,0,0,0,0,0,  # 88 - 8f
+    0,0,0,0,0,0,0,0,  # 90 - 97
+    0,0,0,0,0,0,0,0,  # 98 - 9f
+    0,2,2,2,2,2,2,2,  # a0 - a7
+    2,2,2,2,2,3,3,3,  # a8 - af
+    2,2,2,2,2,2,2,2,  # b0 - b7
+    2,2,2,2,2,2,2,2,  # b8 - bf
+    2,2,2,2,2,2,2,2,  # c0 - c7
+    2,3,2,2,2,2,2,2,  # c8 - cf
+    2,2,2,2,2,2,2,2,  # d0 - d7
+    2,2,2,2,2,2,2,2,  # d8 - df
+    2,2,2,2,2,2,2,2,  # e0 - e7
+    2,2,2,2,2,2,2,2,  # e8 - ef
+    2,2,2,2,2,2,2,2,  # f0 - f7
+    2,2,2,2,2,2,2,0   # f8 - ff
 )
 
-JOHAB_ST = (
-# cls = 0                   1                   2                   3                   4                   5                   6                   7                   8                   9
-    MachineState.ERROR ,MachineState.START ,MachineState.START ,MachineState.START ,MachineState.START ,MachineState.ERROR ,MachineState.ERROR ,3                  ,3                  ,4                  ,  # MachineState.START
-    MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,  # MachineState.ITS_ME
-    MachineState.ERROR ,MachineState.ERROR ,MachineState.ERROR ,MachineState.ERROR ,MachineState.ERROR ,MachineState.ERROR ,MachineState.ERROR ,MachineState.ERROR ,MachineState.ERROR ,MachineState.ERROR ,  # MachineState.ERROR
-    MachineState.ERROR ,MachineState.START ,MachineState.START ,MachineState.ERROR ,MachineState.ERROR ,MachineState.START ,MachineState.START ,MachineState.START ,MachineState.START ,MachineState.START ,  # 3
-    MachineState.ERROR ,MachineState.START ,MachineState.ERROR ,MachineState.START ,MachineState.ERROR ,MachineState.START ,MachineState.ERROR ,MachineState.START ,MachineState.ERROR ,MachineState.START ,  # 4
+EUCKR_ST = (
+    MachineState.ERROR,MachineState.START,     3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07
+    MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START #08-0f
 )
-# fmt: on
 
-JOHAB_CHAR_LEN_TABLE = (0, 1, 1, 1, 1, 0, 0, 2, 2, 2)
+EUCKR_CHAR_LEN_TABLE = (0, 1, 2, 0)
 
-JOHAB_SM_MODEL = {
-    "class_table": JOHAB_CLS,
-    "class_factor": 10,
-    "state_table": JOHAB_ST,
-    "char_len_table": JOHAB_CHAR_LEN_TABLE,
-    "name": "Johab",
-}
+EUCKR_SM_MODEL = {'class_table': EUCKR_CLS,
+                'class_factor': 4,
+                'state_table': EUCKR_ST,
+                'char_len_table': EUCKR_CHAR_LEN_TABLE,
+                'name': 'EUC-KR'}
 
 # EUC-TW
-# fmt: off
+
 EUCTW_CLS = (
-    2, 2, 2, 2, 2, 2, 2, 2,  # 00 - 07
-    2, 2, 2, 2, 2, 2, 0, 0,  # 08 - 0f
-    2, 2, 2, 2, 2, 2, 2, 2,  # 10 - 17
-    2, 2, 2, 0, 2, 2, 2, 2,  # 18 - 1f
-    2, 2, 2, 2, 2, 2, 2, 2,  # 20 - 27
-    2, 2, 2, 2, 2, 2, 2, 2,  # 28 - 2f
-    2, 2, 2, 2, 2, 2, 2, 2,  # 30 - 37
-    2, 2, 2, 2, 2, 2, 2, 2,  # 38 - 3f
-    2, 2, 2, 2, 2, 2, 2, 2,  # 40 - 47
-    2, 2, 2, 2, 2, 2, 2, 2,  # 48 - 4f
-    2, 2, 2, 2, 2, 2, 2, 2,  # 50 - 57
-    2, 2, 2, 2, 2, 2, 2, 2,  # 58 - 5f
-    2, 2, 2, 2, 2, 2, 2, 2,  # 60 - 67
-    2, 2, 2, 2, 2, 2, 2, 2,  # 68 - 6f
-    2, 2, 2, 2, 2, 2, 2, 2,  # 70 - 77
-    2, 2, 2, 2, 2, 2, 2, 2,  # 78 - 7f
-    0, 0, 0, 0, 0, 0, 0, 0,  # 80 - 87
-    0, 0, 0, 0, 0, 0, 6, 0,  # 88 - 8f
-    0, 0, 0, 0, 0, 0, 0, 0,  # 90 - 97
-    0, 0, 0, 0, 0, 0, 0, 0,  # 98 - 9f
-    0, 3, 4, 4, 4, 4, 4, 4,  # a0 - a7
-    5, 5, 1, 1, 1, 1, 1, 1,  # a8 - af
-    1, 1, 1, 1, 1, 1, 1, 1,  # b0 - b7
-    1, 1, 1, 1, 1, 1, 1, 1,  # b8 - bf
-    1, 1, 3, 1, 3, 3, 3, 3,  # c0 - c7
-    3, 3, 3, 3, 3, 3, 3, 3,  # c8 - cf
-    3, 3, 3, 3, 3, 3, 3, 3,  # d0 - d7
-    3, 3, 3, 3, 3, 3, 3, 3,  # d8 - df
-    3, 3, 3, 3, 3, 3, 3, 3,  # e0 - e7
-    3, 3, 3, 3, 3, 3, 3, 3,  # e8 - ef
-    3, 3, 3, 3, 3, 3, 3, 3,  # f0 - f7
-    3, 3, 3, 3, 3, 3, 3, 0   # f8 - ff
+    2,2,2,2,2,2,2,2,  # 00 - 07
+    2,2,2,2,2,2,0,0,  # 08 - 0f
+    2,2,2,2,2,2,2,2,  # 10 - 17
+    2,2,2,0,2,2,2,2,  # 18 - 1f
+    2,2,2,2,2,2,2,2,  # 20 - 27
+    2,2,2,2,2,2,2,2,  # 28 - 2f
+    2,2,2,2,2,2,2,2,  # 30 - 37
+    2,2,2,2,2,2,2,2,  # 38 - 3f
+    2,2,2,2,2,2,2,2,  # 40 - 47
+    2,2,2,2,2,2,2,2,  # 48 - 4f
+    2,2,2,2,2,2,2,2,  # 50 - 57
+    2,2,2,2,2,2,2,2,  # 58 - 5f
+    2,2,2,2,2,2,2,2,  # 60 - 67
+    2,2,2,2,2,2,2,2,  # 68 - 6f
+    2,2,2,2,2,2,2,2,  # 70 - 77
+    2,2,2,2,2,2,2,2,  # 78 - 7f
+    0,0,0,0,0,0,0,0,  # 80 - 87
+    0,0,0,0,0,0,6,0,  # 88 - 8f
+    0,0,0,0,0,0,0,0,  # 90 - 97
+    0,0,0,0,0,0,0,0,  # 98 - 9f
+    0,3,4,4,4,4,4,4,  # a0 - a7
+    5,5,1,1,1,1,1,1,  # a8 - af
+    1,1,1,1,1,1,1,1,  # b0 - b7
+    1,1,1,1,1,1,1,1,  # b8 - bf
+    1,1,3,1,3,3,3,3,  # c0 - c7
+    3,3,3,3,3,3,3,3,  # c8 - cf
+    3,3,3,3,3,3,3,3,  # d0 - d7
+    3,3,3,3,3,3,3,3,  # d8 - df
+    3,3,3,3,3,3,3,3,  # e0 - e7
+    3,3,3,3,3,3,3,3,  # e8 - ef
+    3,3,3,3,3,3,3,3,  # f0 - f7
+    3,3,3,3,3,3,3,0   # f8 - ff
 )
 
 EUCTW_ST = (
@@ -336,53 +266,50 @@
          5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.START,MachineState.START,#20-27
     MachineState.START,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START #28-2f
 )
-# fmt: on
 
 EUCTW_CHAR_LEN_TABLE = (0, 0, 1, 2, 2, 2, 3)
 
-EUCTW_SM_MODEL = {
-    "class_table": EUCTW_CLS,
-    "class_factor": 7,
-    "state_table": EUCTW_ST,
-    "char_len_table": EUCTW_CHAR_LEN_TABLE,
-    "name": "x-euc-tw",
-}
+EUCTW_SM_MODEL = {'class_table': EUCTW_CLS,
+                'class_factor': 7,
+                'state_table': EUCTW_ST,
+                'char_len_table': EUCTW_CHAR_LEN_TABLE,
+                'name': 'x-euc-tw'}
 
 # GB2312
-# fmt: off
+
 GB2312_CLS = (
-    1, 1, 1, 1, 1, 1, 1, 1,  # 00 - 07
-    1, 1, 1, 1, 1, 1, 0, 0,  # 08 - 0f
-    1, 1, 1, 1, 1, 1, 1, 1,  # 10 - 17
-    1, 1, 1, 0, 1, 1, 1, 1,  # 18 - 1f
-    1, 1, 1, 1, 1, 1, 1, 1,  # 20 - 27
-    1, 1, 1, 1, 1, 1, 1, 1,  # 28 - 2f
-    3, 3, 3, 3, 3, 3, 3, 3,  # 30 - 37
-    3, 3, 1, 1, 1, 1, 1, 1,  # 38 - 3f
-    2, 2, 2, 2, 2, 2, 2, 2,  # 40 - 47
-    2, 2, 2, 2, 2, 2, 2, 2,  # 48 - 4f
-    2, 2, 2, 2, 2, 2, 2, 2,  # 50 - 57
-    2, 2, 2, 2, 2, 2, 2, 2,  # 58 - 5f
-    2, 2, 2, 2, 2, 2, 2, 2,  # 60 - 67
-    2, 2, 2, 2, 2, 2, 2, 2,  # 68 - 6f
-    2, 2, 2, 2, 2, 2, 2, 2,  # 70 - 77
-    2, 2, 2, 2, 2, 2, 2, 4,  # 78 - 7f
-    5, 6, 6, 6, 6, 6, 6, 6,  # 80 - 87
-    6, 6, 6, 6, 6, 6, 6, 6,  # 88 - 8f
-    6, 6, 6, 6, 6, 6, 6, 6,  # 90 - 97
-    6, 6, 6, 6, 6, 6, 6, 6,  # 98 - 9f
-    6, 6, 6, 6, 6, 6, 6, 6,  # a0 - a7
-    6, 6, 6, 6, 6, 6, 6, 6,  # a8 - af
-    6, 6, 6, 6, 6, 6, 6, 6,  # b0 - b7
-    6, 6, 6, 6, 6, 6, 6, 6,  # b8 - bf
-    6, 6, 6, 6, 6, 6, 6, 6,  # c0 - c7
-    6, 6, 6, 6, 6, 6, 6, 6,  # c8 - cf
-    6, 6, 6, 6, 6, 6, 6, 6,  # d0 - d7
-    6, 6, 6, 6, 6, 6, 6, 6,  # d8 - df
-    6, 6, 6, 6, 6, 6, 6, 6,  # e0 - e7
-    6, 6, 6, 6, 6, 6, 6, 6,  # e8 - ef
-    6, 6, 6, 6, 6, 6, 6, 6,  # f0 - f7
-    6, 6, 6, 6, 6, 6, 6, 0   # f8 - ff
+    1,1,1,1,1,1,1,1,  # 00 - 07
+    1,1,1,1,1,1,0,0,  # 08 - 0f
+    1,1,1,1,1,1,1,1,  # 10 - 17
+    1,1,1,0,1,1,1,1,  # 18 - 1f
+    1,1,1,1,1,1,1,1,  # 20 - 27
+    1,1,1,1,1,1,1,1,  # 28 - 2f
+    3,3,3,3,3,3,3,3,  # 30 - 37
+    3,3,1,1,1,1,1,1,  # 38 - 3f
+    2,2,2,2,2,2,2,2,  # 40 - 47
+    2,2,2,2,2,2,2,2,  # 48 - 4f
+    2,2,2,2,2,2,2,2,  # 50 - 57
+    2,2,2,2,2,2,2,2,  # 58 - 5f
+    2,2,2,2,2,2,2,2,  # 60 - 67
+    2,2,2,2,2,2,2,2,  # 68 - 6f
+    2,2,2,2,2,2,2,2,  # 70 - 77
+    2,2,2,2,2,2,2,4,  # 78 - 7f
+    5,6,6,6,6,6,6,6,  # 80 - 87
+    6,6,6,6,6,6,6,6,  # 88 - 8f
+    6,6,6,6,6,6,6,6,  # 90 - 97
+    6,6,6,6,6,6,6,6,  # 98 - 9f
+    6,6,6,6,6,6,6,6,  # a0 - a7
+    6,6,6,6,6,6,6,6,  # a8 - af
+    6,6,6,6,6,6,6,6,  # b0 - b7
+    6,6,6,6,6,6,6,6,  # b8 - bf
+    6,6,6,6,6,6,6,6,  # c0 - c7
+    6,6,6,6,6,6,6,6,  # c8 - cf
+    6,6,6,6,6,6,6,6,  # d0 - d7
+    6,6,6,6,6,6,6,6,  # d8 - df
+    6,6,6,6,6,6,6,6,  # e0 - e7
+    6,6,6,6,6,6,6,6,  # e8 - ef
+    6,6,6,6,6,6,6,6,  # f0 - f7
+    6,6,6,6,6,6,6,0   # f8 - ff
 )
 
 GB2312_ST = (
@@ -393,7 +320,6 @@
     MachineState.ERROR,MachineState.ERROR,     5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,#20-27
     MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START #28-2f
 )
-# fmt: on
 
 # To be accurate, the length of class 6 can be either 2 or 4.
 # But it is not necessary to discriminate between the two since
@@ -402,105 +328,100 @@
 # 2 here.
 GB2312_CHAR_LEN_TABLE = (0, 1, 1, 1, 1, 1, 2)
 
-GB2312_SM_MODEL = {
-    "class_table": GB2312_CLS,
-    "class_factor": 7,
-    "state_table": GB2312_ST,
-    "char_len_table": GB2312_CHAR_LEN_TABLE,
-    "name": "GB2312",
-}
+GB2312_SM_MODEL = {'class_table': GB2312_CLS,
+                   'class_factor': 7,
+                   'state_table': GB2312_ST,
+                   'char_len_table': GB2312_CHAR_LEN_TABLE,
+                   'name': 'GB2312'}
 
 # Shift_JIS
-# fmt: off
+
 SJIS_CLS = (
-    1, 1, 1, 1, 1, 1, 1, 1,  # 00 - 07
-    1, 1, 1, 1, 1, 1, 0, 0,  # 08 - 0f
-    1, 1, 1, 1, 1, 1, 1, 1,  # 10 - 17
-    1, 1, 1, 0, 1, 1, 1, 1,  # 18 - 1f
-    1, 1, 1, 1, 1, 1, 1, 1,  # 20 - 27
-    1, 1, 1, 1, 1, 1, 1, 1,  # 28 - 2f
-    1, 1, 1, 1, 1, 1, 1, 1,  # 30 - 37
-    1, 1, 1, 1, 1, 1, 1, 1,  # 38 - 3f
-    2, 2, 2, 2, 2, 2, 2, 2,  # 40 - 47
-    2, 2, 2, 2, 2, 2, 2, 2,  # 48 - 4f
-    2, 2, 2, 2, 2, 2, 2, 2,  # 50 - 57
-    2, 2, 2, 2, 2, 2, 2, 2,  # 58 - 5f
-    2, 2, 2, 2, 2, 2, 2, 2,  # 60 - 67
-    2, 2, 2, 2, 2, 2, 2, 2,  # 68 - 6f
-    2, 2, 2, 2, 2, 2, 2, 2,  # 70 - 77
-    2, 2, 2, 2, 2, 2, 2, 1,  # 78 - 7f
-    3, 3, 3, 3, 3, 2, 2, 3,  # 80 - 87
-    3, 3, 3, 3, 3, 3, 3, 3,  # 88 - 8f
-    3, 3, 3, 3, 3, 3, 3, 3,  # 90 - 97
-    3, 3, 3, 3, 3, 3, 3, 3,  # 98 - 9f
+    1,1,1,1,1,1,1,1,  # 00 - 07
+    1,1,1,1,1,1,0,0,  # 08 - 0f
+    1,1,1,1,1,1,1,1,  # 10 - 17
+    1,1,1,0,1,1,1,1,  # 18 - 1f
+    1,1,1,1,1,1,1,1,  # 20 - 27
+    1,1,1,1,1,1,1,1,  # 28 - 2f
+    1,1,1,1,1,1,1,1,  # 30 - 37
+    1,1,1,1,1,1,1,1,  # 38 - 3f
+    2,2,2,2,2,2,2,2,  # 40 - 47
+    2,2,2,2,2,2,2,2,  # 48 - 4f
+    2,2,2,2,2,2,2,2,  # 50 - 57
+    2,2,2,2,2,2,2,2,  # 58 - 5f
+    2,2,2,2,2,2,2,2,  # 60 - 67
+    2,2,2,2,2,2,2,2,  # 68 - 6f
+    2,2,2,2,2,2,2,2,  # 70 - 77
+    2,2,2,2,2,2,2,1,  # 78 - 7f
+    3,3,3,3,3,2,2,3,  # 80 - 87
+    3,3,3,3,3,3,3,3,  # 88 - 8f
+    3,3,3,3,3,3,3,3,  # 90 - 97
+    3,3,3,3,3,3,3,3,  # 98 - 9f
     #0xa0 is illegal in Shift_JIS encoding, but some pages do
     #contain such bytes. We need to be more forgiving of errors.
-    2, 2, 2, 2, 2, 2, 2, 2,  # a0 - a7
-    2, 2, 2, 2, 2, 2, 2, 2,  # a8 - af
-    2, 2, 2, 2, 2, 2, 2, 2,  # b0 - b7
-    2, 2, 2, 2, 2, 2, 2, 2,  # b8 - bf
-    2, 2, 2, 2, 2, 2, 2, 2,  # c0 - c7
-    2, 2, 2, 2, 2, 2, 2, 2,  # c8 - cf
-    2, 2, 2, 2, 2, 2, 2, 2,  # d0 - d7
-    2, 2, 2, 2, 2, 2, 2, 2,  # d8 - df
-    3, 3, 3, 3, 3, 3, 3, 3,  # e0 - e7
-    3, 3, 3, 3, 3, 4, 4, 4,  # e8 - ef
-    3, 3, 3, 3, 3, 3, 3, 3,  # f0 - f7
-    3, 3, 3, 3, 3, 0, 0, 0,  # f8 - ff
-)
+    2,2,2,2,2,2,2,2,  # a0 - a7
+    2,2,2,2,2,2,2,2,  # a8 - af
+    2,2,2,2,2,2,2,2,  # b0 - b7
+    2,2,2,2,2,2,2,2,  # b8 - bf
+    2,2,2,2,2,2,2,2,  # c0 - c7
+    2,2,2,2,2,2,2,2,  # c8 - cf
+    2,2,2,2,2,2,2,2,  # d0 - d7
+    2,2,2,2,2,2,2,2,  # d8 - df
+    3,3,3,3,3,3,3,3,  # e0 - e7
+    3,3,3,3,3,4,4,4,  # e8 - ef
+    3,3,3,3,3,3,3,3,  # f0 - f7
+    3,3,3,3,3,0,0,0)  # f8 - ff
+
 
 SJIS_ST = (
     MachineState.ERROR,MachineState.START,MachineState.START,     3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07
     MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
     MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START #10-17
 )
-# fmt: on
 
 SJIS_CHAR_LEN_TABLE = (0, 1, 1, 2, 0, 0)
 
-SJIS_SM_MODEL = {
-    "class_table": SJIS_CLS,
-    "class_factor": 6,
-    "state_table": SJIS_ST,
-    "char_len_table": SJIS_CHAR_LEN_TABLE,
-    "name": "Shift_JIS",
-}
+SJIS_SM_MODEL = {'class_table': SJIS_CLS,
+               'class_factor': 6,
+               'state_table': SJIS_ST,
+               'char_len_table': SJIS_CHAR_LEN_TABLE,
+               'name': 'Shift_JIS'}
 
 # UCS2-BE
-# fmt: off
+
 UCS2BE_CLS = (
-    0, 0, 0, 0, 0, 0, 0, 0,  # 00 - 07
-    0, 0, 1, 0, 0, 2, 0, 0,  # 08 - 0f
-    0, 0, 0, 0, 0, 0, 0, 0,  # 10 - 17
-    0, 0, 0, 3, 0, 0, 0, 0,  # 18 - 1f
-    0, 0, 0, 0, 0, 0, 0, 0,  # 20 - 27
-    0, 3, 3, 3, 3, 3, 0, 0,  # 28 - 2f
-    0, 0, 0, 0, 0, 0, 0, 0,  # 30 - 37
-    0, 0, 0, 0, 0, 0, 0, 0,  # 38 - 3f
-    0, 0, 0, 0, 0, 0, 0, 0,  # 40 - 47
-    0, 0, 0, 0, 0, 0, 0, 0,  # 48 - 4f
-    0, 0, 0, 0, 0, 0, 0, 0,  # 50 - 57
-    0, 0, 0, 0, 0, 0, 0, 0,  # 58 - 5f
-    0, 0, 0, 0, 0, 0, 0, 0,  # 60 - 67
-    0, 0, 0, 0, 0, 0, 0, 0,  # 68 - 6f
-    0, 0, 0, 0, 0, 0, 0, 0,  # 70 - 77
-    0, 0, 0, 0, 0, 0, 0, 0,  # 78 - 7f
-    0, 0, 0, 0, 0, 0, 0, 0,  # 80 - 87
-    0, 0, 0, 0, 0, 0, 0, 0,  # 88 - 8f
-    0, 0, 0, 0, 0, 0, 0, 0,  # 90 - 97
-    0, 0, 0, 0, 0, 0, 0, 0,  # 98 - 9f
-    0, 0, 0, 0, 0, 0, 0, 0,  # a0 - a7
-    0, 0, 0, 0, 0, 0, 0, 0,  # a8 - af
-    0, 0, 0, 0, 0, 0, 0, 0,  # b0 - b7
-    0, 0, 0, 0, 0, 0, 0, 0,  # b8 - bf
-    0, 0, 0, 0, 0, 0, 0, 0,  # c0 - c7
-    0, 0, 0, 0, 0, 0, 0, 0,  # c8 - cf
-    0, 0, 0, 0, 0, 0, 0, 0,  # d0 - d7
-    0, 0, 0, 0, 0, 0, 0, 0,  # d8 - df
-    0, 0, 0, 0, 0, 0, 0, 0,  # e0 - e7
-    0, 0, 0, 0, 0, 0, 0, 0,  # e8 - ef
-    0, 0, 0, 0, 0, 0, 0, 0,  # f0 - f7
-    0, 0, 0, 0, 0, 0, 4, 5   # f8 - ff
+    0,0,0,0,0,0,0,0,  # 00 - 07
+    0,0,1,0,0,2,0,0,  # 08 - 0f
+    0,0,0,0,0,0,0,0,  # 10 - 17
+    0,0,0,3,0,0,0,0,  # 18 - 1f
+    0,0,0,0,0,0,0,0,  # 20 - 27
+    0,3,3,3,3,3,0,0,  # 28 - 2f
+    0,0,0,0,0,0,0,0,  # 30 - 37
+    0,0,0,0,0,0,0,0,  # 38 - 3f
+    0,0,0,0,0,0,0,0,  # 40 - 47
+    0,0,0,0,0,0,0,0,  # 48 - 4f
+    0,0,0,0,0,0,0,0,  # 50 - 57
+    0,0,0,0,0,0,0,0,  # 58 - 5f
+    0,0,0,0,0,0,0,0,  # 60 - 67
+    0,0,0,0,0,0,0,0,  # 68 - 6f
+    0,0,0,0,0,0,0,0,  # 70 - 77
+    0,0,0,0,0,0,0,0,  # 78 - 7f
+    0,0,0,0,0,0,0,0,  # 80 - 87
+    0,0,0,0,0,0,0,0,  # 88 - 8f
+    0,0,0,0,0,0,0,0,  # 90 - 97
+    0,0,0,0,0,0,0,0,  # 98 - 9f
+    0,0,0,0,0,0,0,0,  # a0 - a7
+    0,0,0,0,0,0,0,0,  # a8 - af
+    0,0,0,0,0,0,0,0,  # b0 - b7
+    0,0,0,0,0,0,0,0,  # b8 - bf
+    0,0,0,0,0,0,0,0,  # c0 - c7
+    0,0,0,0,0,0,0,0,  # c8 - cf
+    0,0,0,0,0,0,0,0,  # d0 - d7
+    0,0,0,0,0,0,0,0,  # d8 - df
+    0,0,0,0,0,0,0,0,  # e0 - e7
+    0,0,0,0,0,0,0,0,  # e8 - ef
+    0,0,0,0,0,0,0,0,  # f0 - f7
+    0,0,0,0,0,0,4,5   # f8 - ff
 )
 
 UCS2BE_ST  = (
@@ -512,53 +433,50 @@
           5,     8,     6,     6,MachineState.ERROR,     6,     6,     6,#28-2f
           6,     6,     6,     6,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START #30-37
 )
-# fmt: on
 
 UCS2BE_CHAR_LEN_TABLE = (2, 2, 2, 0, 2, 2)
 
-UCS2BE_SM_MODEL = {
-    "class_table": UCS2BE_CLS,
-    "class_factor": 6,
-    "state_table": UCS2BE_ST,
-    "char_len_table": UCS2BE_CHAR_LEN_TABLE,
-    "name": "UTF-16BE",
-}
+UCS2BE_SM_MODEL = {'class_table': UCS2BE_CLS,
+                   'class_factor': 6,
+                   'state_table': UCS2BE_ST,
+                   'char_len_table': UCS2BE_CHAR_LEN_TABLE,
+                   'name': 'UTF-16BE'}
 
 # UCS2-LE
-# fmt: off
+
 UCS2LE_CLS = (
-    0, 0, 0, 0, 0, 0, 0, 0,  # 00 - 07
-    0, 0, 1, 0, 0, 2, 0, 0,  # 08 - 0f
-    0, 0, 0, 0, 0, 0, 0, 0,  # 10 - 17
-    0, 0, 0, 3, 0, 0, 0, 0,  # 18 - 1f
-    0, 0, 0, 0, 0, 0, 0, 0,  # 20 - 27
-    0, 3, 3, 3, 3, 3, 0, 0,  # 28 - 2f
-    0, 0, 0, 0, 0, 0, 0, 0,  # 30 - 37
-    0, 0, 0, 0, 0, 0, 0, 0,  # 38 - 3f
-    0, 0, 0, 0, 0, 0, 0, 0,  # 40 - 47
-    0, 0, 0, 0, 0, 0, 0, 0,  # 48 - 4f
-    0, 0, 0, 0, 0, 0, 0, 0,  # 50 - 57
-    0, 0, 0, 0, 0, 0, 0, 0,  # 58 - 5f
-    0, 0, 0, 0, 0, 0, 0, 0,  # 60 - 67
-    0, 0, 0, 0, 0, 0, 0, 0,  # 68 - 6f
-    0, 0, 0, 0, 0, 0, 0, 0,  # 70 - 77
-    0, 0, 0, 0, 0, 0, 0, 0,  # 78 - 7f
-    0, 0, 0, 0, 0, 0, 0, 0,  # 80 - 87
-    0, 0, 0, 0, 0, 0, 0, 0,  # 88 - 8f
-    0, 0, 0, 0, 0, 0, 0, 0,  # 90 - 97
-    0, 0, 0, 0, 0, 0, 0, 0,  # 98 - 9f
-    0, 0, 0, 0, 0, 0, 0, 0,  # a0 - a7
-    0, 0, 0, 0, 0, 0, 0, 0,  # a8 - af
-    0, 0, 0, 0, 0, 0, 0, 0,  # b0 - b7
-    0, 0, 0, 0, 0, 0, 0, 0,  # b8 - bf
-    0, 0, 0, 0, 0, 0, 0, 0,  # c0 - c7
-    0, 0, 0, 0, 0, 0, 0, 0,  # c8 - cf
-    0, 0, 0, 0, 0, 0, 0, 0,  # d0 - d7
-    0, 0, 0, 0, 0, 0, 0, 0,  # d8 - df
-    0, 0, 0, 0, 0, 0, 0, 0,  # e0 - e7
-    0, 0, 0, 0, 0, 0, 0, 0,  # e8 - ef
-    0, 0, 0, 0, 0, 0, 0, 0,  # f0 - f7
-    0, 0, 0, 0, 0, 0, 4, 5   # f8 - ff
+    0,0,0,0,0,0,0,0,  # 00 - 07
+    0,0,1,0,0,2,0,0,  # 08 - 0f
+    0,0,0,0,0,0,0,0,  # 10 - 17
+    0,0,0,3,0,0,0,0,  # 18 - 1f
+    0,0,0,0,0,0,0,0,  # 20 - 27
+    0,3,3,3,3,3,0,0,  # 28 - 2f
+    0,0,0,0,0,0,0,0,  # 30 - 37
+    0,0,0,0,0,0,0,0,  # 38 - 3f
+    0,0,0,0,0,0,0,0,  # 40 - 47
+    0,0,0,0,0,0,0,0,  # 48 - 4f
+    0,0,0,0,0,0,0,0,  # 50 - 57
+    0,0,0,0,0,0,0,0,  # 58 - 5f
+    0,0,0,0,0,0,0,0,  # 60 - 67
+    0,0,0,0,0,0,0,0,  # 68 - 6f
+    0,0,0,0,0,0,0,0,  # 70 - 77
+    0,0,0,0,0,0,0,0,  # 78 - 7f
+    0,0,0,0,0,0,0,0,  # 80 - 87
+    0,0,0,0,0,0,0,0,  # 88 - 8f
+    0,0,0,0,0,0,0,0,  # 90 - 97
+    0,0,0,0,0,0,0,0,  # 98 - 9f
+    0,0,0,0,0,0,0,0,  # a0 - a7
+    0,0,0,0,0,0,0,0,  # a8 - af
+    0,0,0,0,0,0,0,0,  # b0 - b7
+    0,0,0,0,0,0,0,0,  # b8 - bf
+    0,0,0,0,0,0,0,0,  # c0 - c7
+    0,0,0,0,0,0,0,0,  # c8 - cf
+    0,0,0,0,0,0,0,0,  # d0 - d7
+    0,0,0,0,0,0,0,0,  # d8 - df
+    0,0,0,0,0,0,0,0,  # e0 - e7
+    0,0,0,0,0,0,0,0,  # e8 - ef
+    0,0,0,0,0,0,0,0,  # f0 - f7
+    0,0,0,0,0,0,4,5   # f8 - ff
 )
 
 UCS2LE_ST = (
@@ -570,53 +488,50 @@
           5,     5,     5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,     5,     5,#28-2f
           5,     5,     5,MachineState.ERROR,     5,MachineState.ERROR,MachineState.START,MachineState.START #30-37
 )
-# fmt: on
 
 UCS2LE_CHAR_LEN_TABLE = (2, 2, 2, 2, 2, 2)
 
-UCS2LE_SM_MODEL = {
-    "class_table": UCS2LE_CLS,
-    "class_factor": 6,
-    "state_table": UCS2LE_ST,
-    "char_len_table": UCS2LE_CHAR_LEN_TABLE,
-    "name": "UTF-16LE",
-}
+UCS2LE_SM_MODEL = {'class_table': UCS2LE_CLS,
+                 'class_factor': 6,
+                 'state_table': UCS2LE_ST,
+                 'char_len_table': UCS2LE_CHAR_LEN_TABLE,
+                 'name': 'UTF-16LE'}
 
 # UTF-8
-# fmt: off
+
 UTF8_CLS = (
-    1, 1, 1, 1, 1, 1, 1, 1,  # 00 - 07  #allow 0x00 as a legal value
-    1, 1, 1, 1, 1, 1, 0, 0,  # 08 - 0f
-    1, 1, 1, 1, 1, 1, 1, 1,  # 10 - 17
-    1, 1, 1, 0, 1, 1, 1, 1,  # 18 - 1f
-    1, 1, 1, 1, 1, 1, 1, 1,  # 20 - 27
-    1, 1, 1, 1, 1, 1, 1, 1,  # 28 - 2f
-    1, 1, 1, 1, 1, 1, 1, 1,  # 30 - 37
-    1, 1, 1, 1, 1, 1, 1, 1,  # 38 - 3f
-    1, 1, 1, 1, 1, 1, 1, 1,  # 40 - 47
-    1, 1, 1, 1, 1, 1, 1, 1,  # 48 - 4f
-    1, 1, 1, 1, 1, 1, 1, 1,  # 50 - 57
-    1, 1, 1, 1, 1, 1, 1, 1,  # 58 - 5f
-    1, 1, 1, 1, 1, 1, 1, 1,  # 60 - 67
-    1, 1, 1, 1, 1, 1, 1, 1,  # 68 - 6f
-    1, 1, 1, 1, 1, 1, 1, 1,  # 70 - 77
-    1, 1, 1, 1, 1, 1, 1, 1,  # 78 - 7f
-    2, 2, 2, 2, 3, 3, 3, 3,  # 80 - 87
-    4, 4, 4, 4, 4, 4, 4, 4,  # 88 - 8f
-    4, 4, 4, 4, 4, 4, 4, 4,  # 90 - 97
-    4, 4, 4, 4, 4, 4, 4, 4,  # 98 - 9f
-    5, 5, 5, 5, 5, 5, 5, 5,  # a0 - a7
-    5, 5, 5, 5, 5, 5, 5, 5,  # a8 - af
-    5, 5, 5, 5, 5, 5, 5, 5,  # b0 - b7
-    5, 5, 5, 5, 5, 5, 5, 5,  # b8 - bf
-    0, 0, 6, 6, 6, 6, 6, 6,  # c0 - c7
-    6, 6, 6, 6, 6, 6, 6, 6,  # c8 - cf
-    6, 6, 6, 6, 6, 6, 6, 6,  # d0 - d7
-    6, 6, 6, 6, 6, 6, 6, 6,  # d8 - df
-    7, 8, 8, 8, 8, 8, 8, 8,  # e0 - e7
-    8, 8, 8, 8, 8, 9, 8, 8,  # e8 - ef
-    10, 11, 11, 11, 11, 11, 11, 11,  # f0 - f7
-    12, 13, 13, 13, 14, 15, 0, 0    # f8 - ff
+    1,1,1,1,1,1,1,1,  # 00 - 07  #allow 0x00 as a legal value
+    1,1,1,1,1,1,0,0,  # 08 - 0f
+    1,1,1,1,1,1,1,1,  # 10 - 17
+    1,1,1,0,1,1,1,1,  # 18 - 1f
+    1,1,1,1,1,1,1,1,  # 20 - 27
+    1,1,1,1,1,1,1,1,  # 28 - 2f
+    1,1,1,1,1,1,1,1,  # 30 - 37
+    1,1,1,1,1,1,1,1,  # 38 - 3f
+    1,1,1,1,1,1,1,1,  # 40 - 47
+    1,1,1,1,1,1,1,1,  # 48 - 4f
+    1,1,1,1,1,1,1,1,  # 50 - 57
+    1,1,1,1,1,1,1,1,  # 58 - 5f
+    1,1,1,1,1,1,1,1,  # 60 - 67
+    1,1,1,1,1,1,1,1,  # 68 - 6f
+    1,1,1,1,1,1,1,1,  # 70 - 77
+    1,1,1,1,1,1,1,1,  # 78 - 7f
+    2,2,2,2,3,3,3,3,  # 80 - 87
+    4,4,4,4,4,4,4,4,  # 88 - 8f
+    4,4,4,4,4,4,4,4,  # 90 - 97
+    4,4,4,4,4,4,4,4,  # 98 - 9f
+    5,5,5,5,5,5,5,5,  # a0 - a7
+    5,5,5,5,5,5,5,5,  # a8 - af
+    5,5,5,5,5,5,5,5,  # b0 - b7
+    5,5,5,5,5,5,5,5,  # b8 - bf
+    0,0,6,6,6,6,6,6,  # c0 - c7
+    6,6,6,6,6,6,6,6,  # c8 - cf
+    6,6,6,6,6,6,6,6,  # d0 - d7
+    6,6,6,6,6,6,6,6,  # d8 - df
+    7,8,8,8,8,8,8,8,  # e0 - e7
+    8,8,8,8,8,9,8,8,  # e8 - ef
+    10,11,11,11,11,11,11,11,  # f0 - f7
+    12,13,13,13,14,15,0,0    # f8 - ff
 )
 
 UTF8_ST = (
@@ -647,14 +562,11 @@
     MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,#c0-c7
     MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR #c8-cf
 )
-# fmt: on
 
 UTF8_CHAR_LEN_TABLE = (0, 1, 0, 0, 0, 0, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6)
 
-UTF8_SM_MODEL = {
-    "class_table": UTF8_CLS,
-    "class_factor": 16,
-    "state_table": UTF8_ST,
-    "char_len_table": UTF8_CHAR_LEN_TABLE,
-    "name": "UTF-8",
-}
+UTF8_SM_MODEL = {'class_table': UTF8_CLS,
+                 'class_factor': 16,
+                 'state_table': UTF8_ST,
+                 'char_len_table': UTF8_CHAR_LEN_TABLE,
+                 'name': 'UTF-8'}
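All of these *_SM_MODEL dicts are consumed by chardet's CodingStateMachine; a short
illustrative sketch (assuming a standalone chardet whose tables match the ones above):

    from chardet.codingstatemachine import CodingStateMachine
    from chardet.enums import MachineState
    from chardet.mbcssm import UTF8_SM_MODEL

    sm = CodingStateMachine(UTF8_SM_MODEL)
    state = None
    for byte in "é".encode("utf-8"):  # 0xC3 0xA9: one two-byte UTF-8 sequence
        state = sm.next_state(byte)
    print(state == MachineState.START)  # True: back at a character boundary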
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/metadata/languages.py b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/metadata/languages.py
index 1d37884..3237d5a 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/metadata/languages.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/metadata/languages.py
@@ -1,16 +1,19 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
 """
 Metadata about languages used by our model training code for our
 SingleByteCharSetProbers.  Could be used for other things in the future.
 
 This code is based on the language metadata from the uchardet project.
 """
+from __future__ import absolute_import, print_function
 
 from string import ascii_letters
 
-# TODO: Add Ukrainian (KOI8-U)
 
+# TODO: Add Ukrainian (KOI8-U)
 
-class Language:
+class Language(object):
     """Metadata about a language useful for training models
 
     :ivar name: The human name for the language, in English.
@@ -30,17 +33,9 @@ class Language:
                             Wikipedia for training data.
     :type wiki_start_pages: list of str
     """
-
-    def __init__(
-        self,
-        name=None,
-        iso_code=None,
-        use_ascii=True,
-        charsets=None,
-        alphabet=None,
-        wiki_start_pages=None,
-    ):
-        super().__init__()
+    def __init__(self, name=None, iso_code=None, use_ascii=True, charsets=None,
+                 alphabet=None, wiki_start_pages=None):
+        super(Language, self).__init__()
         self.name = name
         self.iso_code = iso_code
         self.use_ascii = use_ascii
@@ -51,301 +46,265 @@ def __init__(
             else:
                 alphabet = ascii_letters
         elif not alphabet:
-            raise ValueError("Must supply alphabet if use_ascii is False")
-        self.alphabet = "".join(sorted(set(alphabet))) if alphabet else None
+            raise ValueError('Must supply alphabet if use_ascii is False')
+        self.alphabet = ''.join(sorted(set(alphabet))) if alphabet else None
         self.wiki_start_pages = wiki_start_pages
 
     def __repr__(self):
-        param_str = ", ".join(
-            f"{k}={v!r}" for k, v in self.__dict__.items() if not k.startswith("_")
-        )
-        return f"{self.__class__.__name__}({param_str})"
+        return '{}({})'.format(self.__class__.__name__,
+                               ', '.join('{}={!r}'.format(k, v)
+                                         for k, v in self.__dict__.items()
+                                         if not k.startswith('_')))
 
 
-LANGUAGES = {
-    "Arabic": Language(
-        name="Arabic",
-        iso_code="ar",
-        use_ascii=False,
-        # We only support encodings that use isolated
-        # forms, because the current recommendation is
-        # that the rendering system handles presentation
-        # forms. This means we purposefully skip IBM864.
-        charsets=["ISO-8859-6", "WINDOWS-1256", "CP720", "CP864"],
-        alphabet="ءآأؤإئابةتثجحخدذرزسشصضطظعغػؼؽؾؿـفقكلمنهوىيًٌٍَُِّ",
-        wiki_start_pages=["الصفحة_الرئيسية"],
-    ),
-    "Belarusian": Language(
-        name="Belarusian",
-        iso_code="be",
-        use_ascii=False,
-        charsets=["ISO-8859-5", "WINDOWS-1251", "IBM866", "MacCyrillic"],
-        alphabet="АБВГДЕЁЖЗІЙКЛМНОПРСТУЎФХЦЧШЫЬЭЮЯабвгдеёжзійклмнопрстуўфхцчшыьэюяʼ",
-        wiki_start_pages=["Галоўная_старонка"],
-    ),
-    "Bulgarian": Language(
-        name="Bulgarian",
-        iso_code="bg",
-        use_ascii=False,
-        charsets=["ISO-8859-5", "WINDOWS-1251", "IBM855"],
-        alphabet="АБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЬЮЯабвгдежзийклмнопрстуфхцчшщъьюя",
-        wiki_start_pages=["Начална_страница"],
-    ),
-    "Czech": Language(
-        name="Czech",
-        iso_code="cz",
-        use_ascii=True,
-        charsets=["ISO-8859-2", "WINDOWS-1250"],
-        alphabet="áčďéěíňóřšťúůýžÁČĎÉĚÍŇÓŘŠŤÚŮÝŽ",
-        wiki_start_pages=["Hlavní_strana"],
-    ),
-    "Danish": Language(
-        name="Danish",
-        iso_code="da",
-        use_ascii=True,
-        charsets=["ISO-8859-1", "ISO-8859-15", "WINDOWS-1252"],
-        alphabet="æøåÆØÅ",
-        wiki_start_pages=["Forside"],
-    ),
-    "German": Language(
-        name="German",
-        iso_code="de",
-        use_ascii=True,
-        charsets=["ISO-8859-1", "WINDOWS-1252"],
-        alphabet="äöüßÄÖÜ",
-        wiki_start_pages=["Wikipedia:Hauptseite"],
-    ),
-    "Greek": Language(
-        name="Greek",
-        iso_code="el",
-        use_ascii=False,
-        charsets=["ISO-8859-7", "WINDOWS-1253"],
-        alphabet="αβγδεζηθικλμνξοπρσςτυφχψωάέήίόύώΑΒΓΔΕΖΗΘΙΚΛΜΝΞΟΠΡΣΣΤΥΦΧΨΩΆΈΉΊΌΎΏ",
-        wiki_start_pages=["Πύλη:Κύρια"],
-    ),
-    "English": Language(
-        name="English",
-        iso_code="en",
-        use_ascii=True,
-        charsets=["ISO-8859-1", "WINDOWS-1252"],
-        wiki_start_pages=["Main_Page"],
-    ),
-    "Esperanto": Language(
-        name="Esperanto",
-        iso_code="eo",
-        # Q, W, X, and Y not used at all
-        use_ascii=False,
-        charsets=["ISO-8859-3"],
-        alphabet="abcĉdefgĝhĥijĵklmnoprsŝtuŭvzABCĈDEFGĜHĤIJĴKLMNOPRSŜTUŬVZ",
-        wiki_start_pages=["Vikipedio:Ĉefpaĝo"],
-    ),
-    "Spanish": Language(
-        name="Spanish",
-        iso_code="es",
-        use_ascii=True,
-        charsets=["ISO-8859-1", "ISO-8859-15", "WINDOWS-1252"],
-        alphabet="ñáéíóúüÑÁÉÍÓÚÜ",
-        wiki_start_pages=["Wikipedia:Portada"],
-    ),
-    "Estonian": Language(
-        name="Estonian",
-        iso_code="et",
-        use_ascii=False,
-        charsets=["ISO-8859-4", "ISO-8859-13", "WINDOWS-1257"],
-        # C, F, Š, Q, W, X, Y, Z, Ž are only for
-        # loanwords
-        alphabet="ABDEGHIJKLMNOPRSTUVÕÄÖÜabdeghijklmnoprstuvõäöü",
-        wiki_start_pages=["Esileht"],
-    ),
-    "Finnish": Language(
-        name="Finnish",
-        iso_code="fi",
-        use_ascii=True,
-        charsets=["ISO-8859-1", "ISO-8859-15", "WINDOWS-1252"],
-        alphabet="ÅÄÖŠŽåäöšž",
-        wiki_start_pages=["Wikipedia:Etusivu"],
-    ),
-    "French": Language(
-        name="French",
-        iso_code="fr",
-        use_ascii=True,
-        charsets=["ISO-8859-1", "ISO-8859-15", "WINDOWS-1252"],
-        alphabet="œàâçèéîïùûêŒÀÂÇÈÉÎÏÙÛÊ",
-        wiki_start_pages=["Wikipédia:Accueil_principal", "Bœuf (animal)"],
-    ),
-    "Hebrew": Language(
-        name="Hebrew",
-        iso_code="he",
-        use_ascii=False,
-        charsets=["ISO-8859-8", "WINDOWS-1255"],
-        alphabet="אבגדהוזחטיךכלםמןנסעףפץצקרשתװױײ",
-        wiki_start_pages=["עמוד_ראשי"],
-    ),
-    "Croatian": Language(
-        name="Croatian",
-        iso_code="hr",
-        # Q, W, X, Y are only used for foreign words.
-        use_ascii=False,
-        charsets=["ISO-8859-2", "WINDOWS-1250"],
-        alphabet="abcčćdđefghijklmnoprsštuvzžABCČĆDĐEFGHIJKLMNOPRSŠTUVZŽ",
-        wiki_start_pages=["Glavna_stranica"],
-    ),
-    "Hungarian": Language(
-        name="Hungarian",
-        iso_code="hu",
-        # Q, W, X, Y are only used for foreign words.
-        use_ascii=False,
-        charsets=["ISO-8859-2", "WINDOWS-1250"],
-        alphabet="abcdefghijklmnoprstuvzáéíóöőúüűABCDEFGHIJKLMNOPRSTUVZÁÉÍÓÖŐÚÜŰ",
-        wiki_start_pages=["Kezdőlap"],
-    ),
-    "Italian": Language(
-        name="Italian",
-        iso_code="it",
-        use_ascii=True,
-        charsets=["ISO-8859-1", "ISO-8859-15", "WINDOWS-1252"],
-        alphabet="ÀÈÉÌÒÓÙàèéìòóù",
-        wiki_start_pages=["Pagina_principale"],
-    ),
-    "Lithuanian": Language(
-        name="Lithuanian",
-        iso_code="lt",
-        use_ascii=False,
-        charsets=["ISO-8859-13", "WINDOWS-1257", "ISO-8859-4"],
-        # Q, W, and X not used at all
-        alphabet="AĄBCČDEĘĖFGHIĮYJKLMNOPRSŠTUŲŪVZŽaąbcčdeęėfghiįyjklmnoprsštuųūvzž",
-        wiki_start_pages=["Pagrindinis_puslapis"],
-    ),
-    "Latvian": Language(
-        name="Latvian",
-        iso_code="lv",
-        use_ascii=False,
-        charsets=["ISO-8859-13", "WINDOWS-1257", "ISO-8859-4"],
-        # Q, W, X, Y are only for loanwords
-        alphabet="AĀBCČDEĒFGĢHIĪJKĶLĻMNŅOPRSŠTUŪVZŽaābcčdeēfgģhiījkķlļmnņoprsštuūvzž",
-        wiki_start_pages=["Sākumlapa"],
-    ),
-    "Macedonian": Language(
-        name="Macedonian",
-        iso_code="mk",
-        use_ascii=False,
-        charsets=["ISO-8859-5", "WINDOWS-1251", "MacCyrillic", "IBM855"],
-        alphabet="АБВГДЃЕЖЗЅИЈКЛЉМНЊОПРСТЌУФХЦЧЏШабвгдѓежзѕијклљмнњопрстќуфхцчџш",
-        wiki_start_pages=["Главна_страница"],
-    ),
-    "Dutch": Language(
-        name="Dutch",
-        iso_code="nl",
-        use_ascii=True,
-        charsets=["ISO-8859-1", "WINDOWS-1252"],
-        wiki_start_pages=["Hoofdpagina"],
-    ),
-    "Polish": Language(
-        name="Polish",
-        iso_code="pl",
-        # Q and X are only used for foreign words.
-        use_ascii=False,
-        charsets=["ISO-8859-2", "WINDOWS-1250"],
-        alphabet="AĄBCĆDEĘFGHIJKLŁMNŃOÓPRSŚTUWYZŹŻaąbcćdeęfghijklłmnńoóprsśtuwyzźż",
-        wiki_start_pages=["Wikipedia:Strona_główna"],
-    ),
-    "Portuguese": Language(
-        name="Portuguese",
-        iso_code="pt",
-        use_ascii=True,
-        charsets=["ISO-8859-1", "ISO-8859-15", "WINDOWS-1252"],
-        alphabet="ÁÂÃÀÇÉÊÍÓÔÕÚáâãàçéêíóôõú",
-        wiki_start_pages=["Wikipédia:Página_principal"],
-    ),
-    "Romanian": Language(
-        name="Romanian",
-        iso_code="ro",
-        use_ascii=True,
-        charsets=["ISO-8859-2", "WINDOWS-1250"],
-        alphabet="ăâîșțĂÂÎȘȚ",
-        wiki_start_pages=["Pagina_principală"],
-    ),
-    "Russian": Language(
-        name="Russian",
-        iso_code="ru",
-        use_ascii=False,
-        charsets=[
-            "ISO-8859-5",
-            "WINDOWS-1251",
-            "KOI8-R",
-            "MacCyrillic",
-            "IBM866",
-            "IBM855",
-        ],
-        alphabet="абвгдеёжзийклмнопрстуфхцчшщъыьэюяАБВГДЕЁЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯ",
-        wiki_start_pages=["Заглавная_страница"],
-    ),
-    "Slovak": Language(
-        name="Slovak",
-        iso_code="sk",
-        use_ascii=True,
-        charsets=["ISO-8859-2", "WINDOWS-1250"],
-        alphabet="áäčďéíĺľňóôŕšťúýžÁÄČĎÉÍĹĽŇÓÔŔŠŤÚÝŽ",
-        wiki_start_pages=["Hlavná_stránka"],
-    ),
-    "Slovene": Language(
-        name="Slovene",
-        iso_code="sl",
-        # Q, W, X, Y are only used for foreign words.
-        use_ascii=False,
-        charsets=["ISO-8859-2", "WINDOWS-1250"],
-        alphabet="abcčdefghijklmnoprsštuvzžABCČDEFGHIJKLMNOPRSŠTUVZŽ",
-        wiki_start_pages=["Glavna_stran"],
-    ),
-    # Serbian can be written in both Latin and Cyrillic, but there's no
-    # simple way to get the Latin alphabet pages from Wikipedia through
-    # the API, so for now we just support Cyrillic.
-    "Serbian": Language(
-        name="Serbian",
-        iso_code="sr",
-        alphabet="АБВГДЂЕЖЗИЈКЛЉМНЊОПРСТЋУФХЦЧЏШабвгдђежзијклљмнњопрстћуфхцчџш",
-        charsets=["ISO-8859-5", "WINDOWS-1251", "MacCyrillic", "IBM855"],
-        wiki_start_pages=["Главна_страна"],
-    ),
-    "Thai": Language(
-        name="Thai",
-        iso_code="th",
-        use_ascii=False,
-        charsets=["ISO-8859-11", "TIS-620", "CP874"],
-        alphabet="กขฃคฅฆงจฉชซฌญฎฏฐฑฒณดตถทธนบปผฝพฟภมยรฤลฦวศษสหฬอฮฯะัาำิีึืฺุู฿เแโใไๅๆ็่้๊๋์ํ๎๏๐๑๒๓๔๕๖๗๘๙๚๛",
-        wiki_start_pages=["หน้าหลัก"],
-    ),
-    "Turkish": Language(
-        name="Turkish",
-        iso_code="tr",
-        # Q, W, and X are not used by Turkish
-        use_ascii=False,
-        charsets=["ISO-8859-3", "ISO-8859-9", "WINDOWS-1254"],
-        alphabet="abcçdefgğhıijklmnoöprsştuüvyzâîûABCÇDEFGĞHIİJKLMNOÖPRSŞTUÜVYZÂÎÛ",
-        wiki_start_pages=["Ana_Sayfa"],
-    ),
-    "Vietnamese": Language(
-        name="Vietnamese",
-        iso_code="vi",
-        use_ascii=False,
-        # Windows-1258 is the only common 8-bit
-        # Vietnamese encoding supported by Python.
-        # From Wikipedia:
-        # For systems that lack support for Unicode,
-        # dozens of 8-bit Vietnamese code pages are
-        # available.[1] The most common are VISCII
-        # (TCVN 5712:1993), VPS, and Windows-1258.[3]
-        # Where ASCII is required, such as when
-        # ensuring readability in plain text e-mail,
-        # Vietnamese letters are often encoded
-        # according to Vietnamese Quoted-Readable
-        # (VIQR) or VSCII Mnemonic (VSCII-MNEM),[4]
-        # though usage of either variable-width
-        # scheme has declined dramatically following
-        # the adoption of Unicode on the World Wide
-        # Web.
-        charsets=["WINDOWS-1258"],
-        alphabet="aăâbcdđeêghiklmnoôơpqrstuưvxyAĂÂBCDĐEÊGHIKLMNOÔƠPQRSTUƯVXY",
-        wiki_start_pages=["Chữ_Quốc_ngữ"],
-    ),
-}
+LANGUAGES = {'Arabic': Language(name='Arabic',
+                                iso_code='ar',
+                                use_ascii=False,
+                                # We only support encodings that use isolated
+                                # forms, because the current recommendation is
+                                # that the rendering system handles presentation
+                                # forms. This means we purposefully skip IBM864.
+                                charsets=['ISO-8859-6', 'WINDOWS-1256',
+                                          'CP720', 'CP864'],
+                                alphabet=u'ءآأؤإئابةتثجحخدذرزسشصضطظعغػؼؽؾؿـفقكلمنهوىيًٌٍَُِّ',
+                                wiki_start_pages=[u'الصفحة_الرئيسية']),
+             'Belarusian': Language(name='Belarusian',
+                                    iso_code='be',
+                                    use_ascii=False,
+                                    charsets=['ISO-8859-5', 'WINDOWS-1251',
+                                              'IBM866', 'MacCyrillic'],
+                                    alphabet=(u'АБВГДЕЁЖЗІЙКЛМНОПРСТУЎФХЦЧШЫЬЭЮЯ'
+                                              u'абвгдеёжзійклмнопрстуўфхцчшыьэюяʼ'),
+                                    wiki_start_pages=[u'Галоўная_старонка']),
+             'Bulgarian': Language(name='Bulgarian',
+                                   iso_code='bg',
+                                   use_ascii=False,
+                                   charsets=['ISO-8859-5', 'WINDOWS-1251',
+                                             'IBM855'],
+                                   alphabet=(u'АБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЬЮЯ'
+                                             u'абвгдежзийклмнопрстуфхцчшщъьюя'),
+                                   wiki_start_pages=[u'Начална_страница']),
+             'Czech': Language(name='Czech',
+                               iso_code='cz',
+                               use_ascii=True,
+                               charsets=['ISO-8859-2', 'WINDOWS-1250'],
+                               alphabet=u'áčďéěíňóřšťúůýžÁČĎÉĚÍŇÓŘŠŤÚŮÝŽ',
+                               wiki_start_pages=[u'Hlavní_strana']),
+             'Danish': Language(name='Danish',
+                                iso_code='da',
+                                use_ascii=True,
+                                charsets=['ISO-8859-1', 'ISO-8859-15',
+                                          'WINDOWS-1252'],
+                                alphabet=u'æøåÆØÅ',
+                                wiki_start_pages=[u'Forside']),
+             'German': Language(name='German',
+                                iso_code='de',
+                                use_ascii=True,
+                                charsets=['ISO-8859-1', 'WINDOWS-1252'],
+                                alphabet=u'äöüßÄÖÜ',
+                                wiki_start_pages=[u'Wikipedia:Hauptseite']),
+             'Greek': Language(name='Greek',
+                               iso_code='el',
+                               use_ascii=False,
+                               charsets=['ISO-8859-7', 'WINDOWS-1253'],
+                               alphabet=(u'αβγδεζηθικλμνξοπρσςτυφχψωάέήίόύώ'
+                                         u'ΑΒΓΔΕΖΗΘΙΚΛΜΝΞΟΠΡΣΣΤΥΦΧΨΩΆΈΉΊΌΎΏ'),
+                               wiki_start_pages=[u'Πύλη:Κύρια']),
+             'English': Language(name='English',
+                                 iso_code='en',
+                                 use_ascii=True,
+                                 charsets=['ISO-8859-1', 'WINDOWS-1252'],
+                                 wiki_start_pages=[u'Main_Page']),
+             'Esperanto': Language(name='Esperanto',
+                                   iso_code='eo',
+                                   # Q, W, X, and Y not used at all
+                                   use_ascii=False,
+                                   charsets=['ISO-8859-3'],
+                                   alphabet=(u'abcĉdefgĝhĥijĵklmnoprsŝtuŭvz'
+                                             u'ABCĈDEFGĜHĤIJĴKLMNOPRSŜTUŬVZ'),
+                                   wiki_start_pages=[u'Vikipedio:Ĉefpaĝo']),
+             'Spanish': Language(name='Spanish',
+                                 iso_code='es',
+                                 use_ascii=True,
+                                 charsets=['ISO-8859-1', 'ISO-8859-15',
+                                           'WINDOWS-1252'],
+                                 alphabet=u'ñáéíóúüÑÁÉÍÓÚÜ',
+                                 wiki_start_pages=[u'Wikipedia:Portada']),
+             'Estonian': Language(name='Estonian',
+                                  iso_code='et',
+                                  use_ascii=False,
+                                  charsets=['ISO-8859-4', 'ISO-8859-13',
+                                            'WINDOWS-1257'],
+                                  # C, F, Š, Q, W, X, Y, Z, Ž are only for
+                                  # loanwords
+                                  alphabet=(u'ABDEGHIJKLMNOPRSTUVÕÄÖÜ'
+                                            u'abdeghijklmnoprstuvõäöü'),
+                                  wiki_start_pages=[u'Esileht']),
+             'Finnish': Language(name='Finnish',
+                                 iso_code='fi',
+                                 use_ascii=True,
+                                 charsets=['ISO-8859-1', 'ISO-8859-15',
+                                           'WINDOWS-1252'],
+                                 alphabet=u'ÅÄÖŠŽåäöšž',
+                                 wiki_start_pages=[u'Wikipedia:Etusivu']),
+             'French': Language(name='French',
+                                iso_code='fr',
+                                use_ascii=True,
+                                charsets=['ISO-8859-1', 'ISO-8859-15',
+                                          'WINDOWS-1252'],
+                                alphabet=u'œàâçèéîïùûêŒÀÂÇÈÉÎÏÙÛÊ',
+                                wiki_start_pages=[u'Wikipédia:Accueil_principal',
+                                                  u'Bœuf (animal)']),
+             'Hebrew': Language(name='Hebrew',
+                                iso_code='he',
+                                use_ascii=False,
+                                charsets=['ISO-8859-8', 'WINDOWS-1255'],
+                                alphabet=u'אבגדהוזחטיךכלםמןנסעףפץצקרשתװױײ',
+                                wiki_start_pages=[u'עמוד_ראשי']),
+             'Croatian': Language(name='Croatian',
+                                  iso_code='hr',
+                                  # Q, W, X, Y are only used for foreign words.
+                                  use_ascii=False,
+                                  charsets=['ISO-8859-2', 'WINDOWS-1250'],
+                                  alphabet=(u'abcčćdđefghijklmnoprsštuvzž'
+                                            u'ABCČĆDĐEFGHIJKLMNOPRSŠTUVZŽ'),
+                                  wiki_start_pages=[u'Glavna_stranica']),
+             'Hungarian': Language(name='Hungarian',
+                                   iso_code='hu',
+                                   # Q, W, X, Y are only used for foreign words.
+                                   use_ascii=False,
+                                   charsets=['ISO-8859-2', 'WINDOWS-1250'],
+                                   alphabet=(u'abcdefghijklmnoprstuvzáéíóöőúüű'
+                                             u'ABCDEFGHIJKLMNOPRSTUVZÁÉÍÓÖŐÚÜŰ'),
+                                   wiki_start_pages=[u'Kezdőlap']),
+             'Italian': Language(name='Italian',
+                                 iso_code='it',
+                                 use_ascii=True,
+                                 charsets=['ISO-8859-1', 'ISO-8859-15',
+                                           'WINDOWS-1252'],
+                                 alphabet=u'ÀÈÉÌÒÓÙàèéìòóù',
+                                 wiki_start_pages=[u'Pagina_principale']),
+             'Lithuanian': Language(name='Lithuanian',
+                                    iso_code='lt',
+                                    use_ascii=False,
+                                    charsets=['ISO-8859-13', 'WINDOWS-1257',
+                                              'ISO-8859-4'],
+                                    # Q, W, and X not used at all
+                                    alphabet=(u'AĄBCČDEĘĖFGHIĮYJKLMNOPRSŠTUŲŪVZŽ'
+                                              u'aąbcčdeęėfghiįyjklmnoprsštuųūvzž'),
+                                    wiki_start_pages=[u'Pagrindinis_puslapis']),
+             'Latvian': Language(name='Latvian',
+                                 iso_code='lv',
+                                 use_ascii=False,
+                                 charsets=['ISO-8859-13', 'WINDOWS-1257',
+                                           'ISO-8859-4'],
+                                 # Q, W, X, Y are only for loanwords
+                                 alphabet=(u'AĀBCČDEĒFGĢHIĪJKĶLĻMNŅOPRSŠTUŪVZŽ'
+                                           u'aābcčdeēfgģhiījkķlļmnņoprsštuūvzž'),
+                                 wiki_start_pages=[u'Sākumlapa']),
+             'Macedonian': Language(name='Macedonian',
+                                    iso_code='mk',
+                                    use_ascii=False,
+                                    charsets=['ISO-8859-5', 'WINDOWS-1251',
+                                              'MacCyrillic', 'IBM855'],
+                                    alphabet=(u'АБВГДЃЕЖЗЅИЈКЛЉМНЊОПРСТЌУФХЦЧЏШ'
+                                              u'абвгдѓежзѕијклљмнњопрстќуфхцчџш'),
+                                    wiki_start_pages=[u'Главна_страница']),
+             'Dutch': Language(name='Dutch',
+                               iso_code='nl',
+                               use_ascii=True,
+                               charsets=['ISO-8859-1', 'WINDOWS-1252'],
+                               wiki_start_pages=[u'Hoofdpagina']),
+             'Polish': Language(name='Polish',
+                                iso_code='pl',
+                                # Q and X are only used for foreign words.
+                                use_ascii=False,
+                                charsets=['ISO-8859-2', 'WINDOWS-1250'],
+                                alphabet=(u'AĄBCĆDEĘFGHIJKLŁMNŃOÓPRSŚTUWYZŹŻ'
+                                          u'aąbcćdeęfghijklłmnńoóprsśtuwyzźż'),
+                                wiki_start_pages=[u'Wikipedia:Strona_główna']),
+             'Portuguese': Language(name='Portuguese',
+                                    iso_code='pt',
+                                    use_ascii=True,
+                                    charsets=['ISO-8859-1', 'ISO-8859-15',
+                                              'WINDOWS-1252'],
+                                    alphabet=u'ÁÂÃÀÇÉÊÍÓÔÕÚáâãàçéêíóôõú',
+                                    wiki_start_pages=[u'Wikipédia:Página_principal']),
+             'Romanian': Language(name='Romanian',
+                                  iso_code='ro',
+                                  use_ascii=True,
+                                  charsets=['ISO-8859-2', 'WINDOWS-1250'],
+                                  alphabet=u'ăâîșțĂÂÎȘȚ',
+                                  wiki_start_pages=[u'Pagina_principală']),
+             'Russian': Language(name='Russian',
+                                 iso_code='ru',
+                                 use_ascii=False,
+                                 charsets=['ISO-8859-5', 'WINDOWS-1251',
+                                           'KOI8-R', 'MacCyrillic', 'IBM866',
+                                           'IBM855'],
+                                 alphabet=(u'абвгдеёжзийклмнопрстуфхцчшщъыьэюя'
+                                           u'АБВГДЕЁЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯ'),
+                                 wiki_start_pages=[u'Заглавная_страница']),
+             'Slovak': Language(name='Slovak',
+                                iso_code='sk',
+                                use_ascii=True,
+                                charsets=['ISO-8859-2', 'WINDOWS-1250'],
+                                alphabet=u'áäčďéíĺľňóôŕšťúýžÁÄČĎÉÍĹĽŇÓÔŔŠŤÚÝŽ',
+                                wiki_start_pages=[u'Hlavná_stránka']),
+             'Slovene': Language(name='Slovene',
+                                 iso_code='sl',
+                                 # Q, W, X, Y are only used for foreign words.
+                                 use_ascii=False,
+                                 charsets=['ISO-8859-2', 'WINDOWS-1250'],
+                                 alphabet=(u'abcčdefghijklmnoprsštuvzž'
+                                           u'ABCČDEFGHIJKLMNOPRSŠTUVZŽ'),
+                                 wiki_start_pages=[u'Glavna_stran']),
+             # Serbian can be written in both Latin and Cyrillic, but there's no
+             # simple way to get the Latin alphabet pages from Wikipedia through
+             # the API, so for now we just support Cyrillic.
+             'Serbian': Language(name='Serbian',
+                                 iso_code='sr',
+                                 alphabet=(u'АБВГДЂЕЖЗИЈКЛЉМНЊОПРСТЋУФХЦЧЏШ'
+                                           u'абвгдђежзијклљмнњопрстћуфхцчџш'),
+                                 charsets=['ISO-8859-5', 'WINDOWS-1251',
+                                           'MacCyrillic', 'IBM855'],
+                                 wiki_start_pages=[u'Главна_страна']),
+             'Thai': Language(name='Thai',
+                              iso_code='th',
+                              use_ascii=False,
+                              charsets=['ISO-8859-11', 'TIS-620', 'CP874'],
+                              alphabet=u'กขฃคฅฆงจฉชซฌญฎฏฐฑฒณดตถทธนบปผฝพฟภมยรฤลฦวศษสหฬอฮฯะัาำิีึืฺุู฿เแโใไๅๆ็่้๊๋์ํ๎๏๐๑๒๓๔๕๖๗๘๙๚๛',
+                              wiki_start_pages=[u'หน้าหลัก']),
+             'Turkish': Language(name='Turkish',
+                                 iso_code='tr',
+                                 # Q, W, and X are not used by Turkish
+                                 use_ascii=False,
+                                 charsets=['ISO-8859-3', 'ISO-8859-9',
+                                           'WINDOWS-1254'],
+                                 alphabet=(u'abcçdefgğhıijklmnoöprsştuüvyzâîû'
+                                           u'ABCÇDEFGĞHIİJKLMNOÖPRSŞTUÜVYZÂÎÛ'),
+                                 wiki_start_pages=[u'Ana_Sayfa']),
+             'Vietnamese': Language(name='Vietnamese',
+                                    iso_code='vi',
+                                    use_ascii=False,
+                                    # Windows-1258 is the only common 8-bit
+                                    # Vietnamese encoding supported by Python.
+                                    # From Wikipedia:
+                                    # For systems that lack support for Unicode,
+                                    # dozens of 8-bit Vietnamese code pages are
+                                    # available.[1] The most common are VISCII
+                                    # (TCVN 5712:1993), VPS, and Windows-1258.[3]
+                                    # Where ASCII is required, such as when
+                                    # ensuring readability in plain text e-mail,
+                                    # Vietnamese letters are often encoded
+                                    # according to Vietnamese Quoted-Readable
+                                    # (VIQR) or VSCII Mnemonic (VSCII-MNEM),[4]
+                                    # though usage of either variable-width
+                                    # scheme has declined dramatically following
+                                    # the adoption of Unicode on the World Wide
+                                    # Web.
+                                    charsets=['WINDOWS-1258'],
+                                    alphabet=(u'aăâbcdđeêghiklmnoôơpqrstuưvxy'
+                                              u'AĂÂBCDĐEÊGHIKLMNOÔƠPQRSTUƯVXY'),
+                                    wiki_start_pages=[u'Chữ_Quốc_ngữ']),
+            }
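The LANGUAGES table above is consumed by chardet's model-training tooling rather than by the probers at detection time: each record names the single-byte charsets to build models for, and its wiki_start_pages seed the crawl for sample text. A minimal sketch of walking the table; the Language stand-in below is an assumption kept to the fields visible above (the real class, in chardet's metadata/languages.py, carries the same fields):

    from dataclasses import dataclass, field
    from typing import List, Optional

    @dataclass
    class Language:
        # Hypothetical stand-in for chardet's Language record, limited to
        # the fields used in the table above.
        name: str
        iso_code: str
        use_ascii: bool = True
        charsets: List[str] = field(default_factory=list)
        alphabet: Optional[str] = None
        wiki_start_pages: List[str] = field(default_factory=list)

    LANGUAGES = {
        'Danish': Language(name='Danish',
                           iso_code='da',
                           use_ascii=True,
                           charsets=['ISO-8859-1', 'ISO-8859-15', 'WINDOWS-1252'],
                           alphabet=u'æøåÆØÅ',
                           wiki_start_pages=[u'Forside']),
    }

    # Enumerate every (language, charset) pair a trainer would build a model for.
    for lang in LANGUAGES.values():
        for charset in lang.charsets:
            print('%s (%s): %s' % (lang.name, lang.iso_code, charset))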
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/sbcharsetprober.py b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/sbcharsetprober.py
index 31d70e1..46ba835 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/sbcharsetprober.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/sbcharsetprober.py
@@ -31,49 +31,44 @@
 from .charsetprober import CharSetProber
 from .enums import CharacterCategory, ProbingState, SequenceLikelihood
 
-SingleByteCharSetModel = namedtuple(
-    "SingleByteCharSetModel",
-    [
-        "charset_name",
-        "language",
-        "char_to_order_map",
-        "language_model",
-        "typical_positive_ratio",
-        "keep_ascii_letters",
-        "alphabet",
-    ],
-)
+
+SingleByteCharSetModel = namedtuple('SingleByteCharSetModel',
+                                    ['charset_name',
+                                     'language',
+                                     'char_to_order_map',
+                                     'language_model',
+                                     'typical_positive_ratio',
+                                     'keep_ascii_letters',
+                                     'alphabet'])
 
 
 class SingleByteCharSetProber(CharSetProber):
     SAMPLE_SIZE = 64
-    SB_ENOUGH_REL_THRESHOLD = 1024  # 0.25 * SAMPLE_SIZE^2
+    SB_ENOUGH_REL_THRESHOLD = 1024  # 0.25 * SAMPLE_SIZE^2
     POSITIVE_SHORTCUT_THRESHOLD = 0.95
     NEGATIVE_SHORTCUT_THRESHOLD = 0.05
 
-    def __init__(self, model, is_reversed=False, name_prober=None):
-        super().__init__()
+    def __init__(self, model, reversed=False, name_prober=None):
+        super(SingleByteCharSetProber, self).__init__()
         self._model = model
         # TRUE if we need to reverse every pair in the model lookup
-        self._reversed = is_reversed
+        self._reversed = reversed
         # Optional auxiliary prober for name decision
         self._name_prober = name_prober
         self._last_order = None
         self._seq_counters = None
         self._total_seqs = None
         self._total_char = None
-        self._control_char = None
         self._freq_char = None
         self.reset()
 
     def reset(self):
-        super().reset()
+        super(SingleByteCharSetProber, self).reset()
         # char order of last character
         self._last_order = 255
         self._seq_counters = [0] * SequenceLikelihood.get_num_categories()
         self._total_seqs = 0
         self._total_char = 0
-        self._control_char = 0
         # characters that fall in our sampling range
         self._freq_char = 0
 
@@ -81,20 +76,20 @@ def reset(self):
     def charset_name(self):
         if self._name_prober:
             return self._name_prober.charset_name
-        return self._model.charset_name
+        else:
+            return self._model.charset_name
 
     @property
     def language(self):
         if self._name_prober:
             return self._name_prober.language
-        return self._model.language
+        else:
+            return self._model.language
 
     def feed(self, byte_str):
         # TODO: Make filter_international_words keep things in self.alphabet
         if not self._model.keep_ascii_letters:
             byte_str = self.filter_international_words(byte_str)
-        else:
-            byte_str = self.remove_xml_tags(byte_str)
         if not byte_str:
             return self.state
         char_to_order_map = self._model.char_to_order_map
@@ -108,6 +103,9 @@ def feed(self, byte_str):
             #      _total_char purposes.
             if order < CharacterCategory.CONTROL:
                 self._total_char += 1
+            # TODO: Follow uchardet's lead and discount confidence for frequent
+            #       control characters.
+            #       See https://github.com/BYVoid/uchardet/commit/55b4f23971db61
             if order < self.SAMPLE_SIZE:
                 self._freq_char += 1
                 if self._last_order < self.SAMPLE_SIZE:
@@ -124,17 +122,14 @@ def feed(self, byte_str):
             if self._total_seqs > self.SB_ENOUGH_REL_THRESHOLD:
                 confidence = self.get_confidence()
                 if confidence > self.POSITIVE_SHORTCUT_THRESHOLD:
-                    self.logger.debug(
-                        "%s confidence = %s, we have a winner", charset_name, confidence
-                    )
+                    self.logger.debug('%s confidence = %s, we have a winner',
+                                      charset_name, confidence)
                     self._state = ProbingState.FOUND_IT
                 elif confidence < self.NEGATIVE_SHORTCUT_THRESHOLD:
-                    self.logger.debug(
-                        "%s confidence = %s, below negative shortcut threshold %s",
-                        charset_name,
-                        confidence,
-                        self.NEGATIVE_SHORTCUT_THRESHOLD,
-                    )
+                    self.logger.debug('%s confidence = %s, below negative '
+                                      'shortcut threshold %s', charset_name,
+                                      confidence,
+                                      self.NEGATIVE_SHORTCUT_THRESHOLD)
                     self._state = ProbingState.NOT_ME
 
         return self.state
@@ -142,18 +137,8 @@ def feed(self, byte_str):
     def get_confidence(self):
         r = 0.01
         if self._total_seqs > 0:
-            r = (
-                (
-                    self._seq_counters[SequenceLikelihood.POSITIVE]
-                    + 0.25 * self._seq_counters[SequenceLikelihood.LIKELY]
-                )
-                / self._total_seqs
-                / self._model.typical_positive_ratio
-            )
-            # The more control characters (proportionally to the size
-            # of the text), the less confident we become in the current
-            # charset.
-            r = r * (self._total_char - self._control_char) / self._total_char
+            r = ((1.0 * self._seq_counters[SequenceLikelihood.POSITIVE]) /
+                 self._total_seqs / self._model.typical_positive_ratio)
             r = r * self._freq_char / self._total_char
             if r >= 1.0:
                 r = 0.99
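The get_confidence hunk is the substantive change in sbcharsetprober.py: the removed version credits SequenceLikelihood.LIKELY bigrams at quarter weight and discounts texts with many control characters, while the restored version counts POSITIVE bigrams only. A side-by-side sketch of the two formulas, with made-up counter values for illustration:

    # Illustrative numbers only: 600 positive and 200 "likely" bigrams out of
    # 1000, 900 of 1000 characters inside the sampling range, 10 control chars.
    positive, likely, total_seqs = 600, 200, 1000
    freq_char, total_char, control_char = 900, 1000, 10
    typical_positive_ratio = 0.97   # per-model constant from the language model

    # Restored (older) formula: positive bigrams only.
    r_old = positive / total_seqs / typical_positive_ratio
    r_old *= freq_char / total_char                       # ~0.557

    # Removed (newer) formula: quarter-weight LIKELY bigrams, then a penalty
    # proportional to the share of control characters.
    r_new = (positive + 0.25 * likely) / total_seqs / typical_positive_ratio
    r_new *= (total_char - control_char) / total_char
    r_new *= freq_char / total_char                       # ~0.597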
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/sbcsgroupprober.py b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/sbcsgroupprober.py
index cad001c..bdeef4e 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/sbcsgroupprober.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/sbcsgroupprober.py
@@ -28,20 +28,16 @@
 
 from .charsetgroupprober import CharSetGroupProber
 from .hebrewprober import HebrewProber
-from .langbulgarianmodel import ISO_8859_5_BULGARIAN_MODEL, WINDOWS_1251_BULGARIAN_MODEL
+from .langbulgarianmodel import (ISO_8859_5_BULGARIAN_MODEL,
+                                 WINDOWS_1251_BULGARIAN_MODEL)
 from .langgreekmodel import ISO_8859_7_GREEK_MODEL, WINDOWS_1253_GREEK_MODEL
 from .langhebrewmodel import WINDOWS_1255_HEBREW_MODEL
-
 # from .langhungarianmodel import (ISO_8859_2_HUNGARIAN_MODEL,
 #                                  WINDOWS_1250_HUNGARIAN_MODEL)
-from .langrussianmodel import (
-    IBM855_RUSSIAN_MODEL,
-    IBM866_RUSSIAN_MODEL,
-    ISO_8859_5_RUSSIAN_MODEL,
-    KOI8_R_RUSSIAN_MODEL,
-    MACCYRILLIC_RUSSIAN_MODEL,
-    WINDOWS_1251_RUSSIAN_MODEL,
-)
+from .langrussianmodel import (IBM855_RUSSIAN_MODEL, IBM866_RUSSIAN_MODEL,
+                               ISO_8859_5_RUSSIAN_MODEL, KOI8_R_RUSSIAN_MODEL,
+                               MACCYRILLIC_RUSSIAN_MODEL,
+                               WINDOWS_1251_RUSSIAN_MODEL)
 from .langthaimodel import TIS_620_THAI_MODEL
 from .langturkishmodel import ISO_8859_9_TURKISH_MODEL
 from .sbcharsetprober import SingleByteCharSetProber
@@ -49,17 +45,16 @@
 
 class SBCSGroupProber(CharSetGroupProber):
     def __init__(self):
-        super().__init__()
+        super(SBCSGroupProber, self).__init__()
         hebrew_prober = HebrewProber()
-        logical_hebrew_prober = SingleByteCharSetProber(
-            WINDOWS_1255_HEBREW_MODEL, is_reversed=False, name_prober=hebrew_prober
-        )
+        logical_hebrew_prober = SingleByteCharSetProber(WINDOWS_1255_HEBREW_MODEL,
+                                                        False, hebrew_prober)
         # TODO: See if using ISO-8859-8 Hebrew model works better here, since
         #       it's actually the visual one
-        visual_hebrew_prober = SingleByteCharSetProber(
-            WINDOWS_1255_HEBREW_MODEL, is_reversed=True, name_prober=hebrew_prober
-        )
-        hebrew_prober.set_model_probers(logical_hebrew_prober, visual_hebrew_prober)
+        visual_hebrew_prober = SingleByteCharSetProber(WINDOWS_1255_HEBREW_MODEL,
+                                                       True, hebrew_prober)
+        hebrew_prober.set_model_probers(logical_hebrew_prober,
+                                        visual_hebrew_prober)
         # TODO: ORDER MATTERS HERE. I changed the order vs what was in master
         #       and several tests failed that did not before. Some thought
         #       should be put into the ordering, and we should consider making
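The positional False/True arguments restored here are the is_reversed flag of SingleByteCharSetProber: visual Hebrew is stored in display order, so logical-order bigrams show up reversed, and one language model can serve both probers by swapping each pair before the lookup (per the "reverse every pair in the model lookup" comment in sbcharsetprober.py). A sketch of that mechanism; seq_likelihood and the toy model are illustrative, not the vendored API:

    # Toy 2x2 bigram model: MODEL[last_order][cur_order] -> likelihood class.
    MODEL = {0: {0: 3, 1: 1},
             1: {0: 0, 1: 2}}

    def seq_likelihood(last_order, cur_order, is_reversed):
        # Visual-order text replays the logical bigram (a, b) as (b, a).
        if is_reversed:
            last_order, cur_order = cur_order, last_order
        return MODEL[last_order][cur_order]

    assert seq_likelihood(0, 1, is_reversed=False) == 1
    assert seq_likelihood(0, 1, is_reversed=True) == 0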
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/sjisprober.py b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/sjisprober.py
index 3bcbdb7..9e29623 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/sjisprober.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/sjisprober.py
@@ -25,24 +25,24 @@
 # 02110-1301  USA
 ######################### END LICENSE BLOCK #########################
 
-from .chardistribution import SJISDistributionAnalysis
+from .mbcharsetprober import MultiByteCharSetProber
 from .codingstatemachine import CodingStateMachine
-from .enums import MachineState, ProbingState
+from .chardistribution import SJISDistributionAnalysis
 from .jpcntx import SJISContextAnalysis
-from .mbcharsetprober import MultiByteCharSetProber
 from .mbcssm import SJIS_SM_MODEL
+from .enums import ProbingState, MachineState
 
 
 class SJISProber(MultiByteCharSetProber):
     def __init__(self):
-        super().__init__()
+        super(SJISProber, self).__init__()
         self.coding_sm = CodingStateMachine(SJIS_SM_MODEL)
         self.distribution_analyzer = SJISDistributionAnalysis()
         self.context_analyzer = SJISContextAnalysis()
         self.reset()
 
     def reset(self):
-        super().reset()
+        super(SJISProber, self).reset()
         self.context_analyzer.reset()
 
     @property
@@ -54,40 +54,34 @@ def language(self):
         return "Japanese"
 
     def feed(self, byte_str):
-        for i, byte in enumerate(byte_str):
-            coding_state = self.coding_sm.next_state(byte)
+        for i in range(len(byte_str)):
+            coding_state = self.coding_sm.next_state(byte_str[i])
             if coding_state == MachineState.ERROR:
-                self.logger.debug(
-                    "%s %s prober hit error at byte %s",
-                    self.charset_name,
-                    self.language,
-                    i,
-                )
+                self.logger.debug('%s %s prober hit error at byte %s',
+                                  self.charset_name, self.language, i)
                 self._state = ProbingState.NOT_ME
                 break
-            if coding_state == MachineState.ITS_ME:
+            elif coding_state == MachineState.ITS_ME:
                 self._state = ProbingState.FOUND_IT
                 break
-            if coding_state == MachineState.START:
+            elif coding_state == MachineState.START:
                 char_len = self.coding_sm.get_current_charlen()
                 if i == 0:
-                    self._last_char[1] = byte
-                    self.context_analyzer.feed(
-                        self._last_char[2 - char_len :], char_len
-                    )
+                    self._last_char[1] = byte_str[0]
+                    self.context_analyzer.feed(self._last_char[2 - char_len:],
+                                               char_len)
                     self.distribution_analyzer.feed(self._last_char, char_len)
                 else:
-                    self.context_analyzer.feed(
-                        byte_str[i + 1 - char_len : i + 3 - char_len], char_len
-                    )
-                    self.distribution_analyzer.feed(byte_str[i - 1 : i + 1], char_len)
+                    self.context_analyzer.feed(byte_str[i + 1 - char_len:i + 3
+                                                        - char_len], char_len)
+                    self.distribution_analyzer.feed(byte_str[i - 1:i + 1],
+                                                    char_len)
 
         self._last_char[0] = byte_str[-1]
 
         if self.state == ProbingState.DETECTING:
-            if self.context_analyzer.got_enough_data() and (
-                self.get_confidence() > self.SHORTCUT_THRESHOLD
-            ):
+            if (self.context_analyzer.got_enough_data() and
+               (self.get_confidence() > self.SHORTCUT_THRESHOLD)):
                 self._state = ProbingState.FOUND_IT
 
         return self.state
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/universaldetector.py b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/universaldetector.py
index 22fcf82..055a8ac 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/universaldetector.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/universaldetector.py
@@ -46,10 +46,9 @@ class a user of ``chardet`` should use.
 from .latin1prober import Latin1Prober
 from .mbcsgroupprober import MBCSGroupProber
 from .sbcsgroupprober import SBCSGroupProber
-from .utf1632prober import UTF1632Prober
 
 
-class UniversalDetector:
+class UniversalDetector(object):
     """
     The ``UniversalDetector`` class underlies the ``chardet.detect`` function
     and coordinates all of the different charset probers.
@@ -67,23 +66,20 @@ class UniversalDetector:
     """
 
     MINIMUM_THRESHOLD = 0.20
-    HIGH_BYTE_DETECTOR = re.compile(b"[\x80-\xFF]")
-    ESC_DETECTOR = re.compile(b"(\033|~{)")
-    WIN_BYTE_DETECTOR = re.compile(b"[\x80-\x9F]")
-    ISO_WIN_MAP = {
-        "iso-8859-1": "Windows-1252",
-        "iso-8859-2": "Windows-1250",
-        "iso-8859-5": "Windows-1251",
-        "iso-8859-6": "Windows-1256",
-        "iso-8859-7": "Windows-1253",
-        "iso-8859-8": "Windows-1255",
-        "iso-8859-9": "Windows-1254",
-        "iso-8859-13": "Windows-1257",
-    }
+    HIGH_BYTE_DETECTOR = re.compile(b'[\x80-\xFF]')
+    ESC_DETECTOR = re.compile(b'(\033|~{)')
+    WIN_BYTE_DETECTOR = re.compile(b'[\x80-\x9F]')
+    ISO_WIN_MAP = {'iso-8859-1': 'Windows-1252',
+                   'iso-8859-2': 'Windows-1250',
+                   'iso-8859-5': 'Windows-1251',
+                   'iso-8859-6': 'Windows-1256',
+                   'iso-8859-7': 'Windows-1253',
+                   'iso-8859-8': 'Windows-1255',
+                   'iso-8859-9': 'Windows-1254',
+                   'iso-8859-13': 'Windows-1257'}
 
     def __init__(self, lang_filter=LanguageFilter.ALL):
         self._esc_charset_prober = None
-        self._utf1632_prober = None
         self._charset_probers = []
         self.result = None
         self.done = None
@@ -95,34 +91,20 @@ def __init__(self, lang_filter=LanguageFilter.ALL):
         self._has_win_bytes = None
         self.reset()
 
-    @property
-    def input_state(self):
-        return self._input_state
-
-    @property
-    def has_win_bytes(self):
-        return self._has_win_bytes
-
-    @property
-    def charset_probers(self):
-        return self._charset_probers
-
     def reset(self):
         """
         Reset the UniversalDetector and all of its probers back to their
         initial states.  This is called by ``__init__``, so you only need to
         call this directly in between analyses of different documents.
         """
-        self.result = {"encoding": None, "confidence": 0.0, "language": None}
+        self.result = {'encoding': None, 'confidence': 0.0, 'language': None}
         self.done = False
         self._got_data = False
         self._has_win_bytes = False
         self._input_state = InputState.PURE_ASCII
-        self._last_char = b""
+        self._last_char = b''
         if self._esc_charset_prober:
             self._esc_charset_prober.reset()
-        if self._utf1632_prober:
-            self._utf1632_prober.reset()
         for prober in self._charset_probers:
             prober.reset()
 
@@ -143,7 +125,7 @@ def feed(self, byte_str):
         if self.done:
             return
 
-        if not byte_str:
+        if not len(byte_str):
             return
 
         if not isinstance(byte_str, bytearray):
@@ -154,36 +136,35 @@ def feed(self, byte_str):
             # If the data starts with BOM, we know it is UTF
             if byte_str.startswith(codecs.BOM_UTF8):
                 # EF BB BF  UTF-8 with BOM
-                self.result = {
-                    "encoding": "UTF-8-SIG",
-                    "confidence": 1.0,
-                    "language": "",
-                }
-            elif byte_str.startswith((codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE)):
+                self.result = {'encoding': "UTF-8-SIG",
+                               'confidence': 1.0,
+                               'language': ''}
+            elif byte_str.startswith((codecs.BOM_UTF32_LE,
+                                      codecs.BOM_UTF32_BE)):
                 # FF FE 00 00  UTF-32, little-endian BOM
                 # 00 00 FE FF  UTF-32, big-endian BOM
-                self.result = {"encoding": "UTF-32", "confidence": 1.0, "language": ""}
-            elif byte_str.startswith(b"\xFE\xFF\x00\x00"):
+                self.result = {'encoding': "UTF-32",
+                               'confidence': 1.0,
+                               'language': ''}
+            elif byte_str.startswith(b'\xFE\xFF\x00\x00'):
                 # FE FF 00 00  UCS-4, unusual octet order BOM (3412)
-                self.result = {
-                    "encoding": "X-ISO-10646-UCS-4-3412",
-                    "confidence": 1.0,
-                    "language": "",
-                }
-            elif byte_str.startswith(b"\x00\x00\xFF\xFE"):
+                self.result = {'encoding': "X-ISO-10646-UCS-4-3412",
+                               'confidence': 1.0,
+                               'language': ''}
+            elif byte_str.startswith(b'\x00\x00\xFF\xFE'):
                 # 00 00 FF FE  UCS-4, unusual octet order BOM (2143)
-                self.result = {
-                    "encoding": "X-ISO-10646-UCS-4-2143",
-                    "confidence": 1.0,
-                    "language": "",
-                }
+                self.result = {'encoding': "X-ISO-10646-UCS-4-2143",
+                               'confidence': 1.0,
+                               'language': ''}
             elif byte_str.startswith((codecs.BOM_LE, codecs.BOM_BE)):
                 # FF FE  UTF-16, little endian BOM
                 # FE FF  UTF-16, big endian BOM
-                self.result = {"encoding": "UTF-16", "confidence": 1.0, "language": ""}
+                self.result = {'encoding': "UTF-16",
+                               'confidence': 1.0,
+                               'language': ''}
 
             self._got_data = True
-            if self.result["encoding"] is not None:
+            if self.result['encoding'] is not None:
                 self.done = True
                 return
 
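The BOM branches above compare against constants from Python's codecs module; spelled out as bytes, each prefix is unambiguous, which is why a hit is reported with confidence 1.0 immediately. Note that the UTF-32 check has to precede the UTF-16 one, since FF FE is a prefix of FF FE 00 00:

    import codecs

    assert codecs.BOM_UTF8 == b'\xef\xbb\xbf'          # EF BB BF -> "UTF-8-SIG"
    assert codecs.BOM_UTF32_LE == b'\xff\xfe\x00\x00'  # FF FE 00 00 -> "UTF-32"
    assert codecs.BOM_UTF32_BE == b'\x00\x00\xfe\xff'  # 00 00 FE FF -> "UTF-32"
    assert codecs.BOM_LE == b'\xff\xfe'                # FF FE -> "UTF-16" (LE)
    assert codecs.BOM_BE == b'\xfe\xff'                # FE FF -> "UTF-16" (BE)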
@@ -192,29 +173,12 @@ def feed(self, byte_str):
         if self._input_state == InputState.PURE_ASCII:
             if self.HIGH_BYTE_DETECTOR.search(byte_str):
                 self._input_state = InputState.HIGH_BYTE
-            elif (
-                self._input_state == InputState.PURE_ASCII
-                and self.ESC_DETECTOR.search(self._last_char + byte_str)
-            ):
+            elif self._input_state == InputState.PURE_ASCII and \
+                    self.ESC_DETECTOR.search(self._last_char + byte_str):
                 self._input_state = InputState.ESC_ASCII
 
         self._last_char = byte_str[-1:]
 
-        # next we will look to see if it is appears to be either a UTF-16 or
-        # UTF-32 encoding
-        if not self._utf1632_prober:
-            self._utf1632_prober = UTF1632Prober()
-
-        if self._utf1632_prober.state == ProbingState.DETECTING:
-            if self._utf1632_prober.feed(byte_str) == ProbingState.FOUND_IT:
-                self.result = {
-                    "encoding": self._utf1632_prober.charset_name,
-                    "confidence": self._utf1632_prober.get_confidence(),
-                    "language": "",
-                }
-                self.done = True
-                return
-
         # If we've seen escape sequences, use the EscCharSetProber, which
         # uses a simple state machine to check for known escape sequences in
         # HZ and ISO-2022 encodings, since those are the only encodings that
@@ -223,11 +187,12 @@ def feed(self, byte_str):
             if not self._esc_charset_prober:
                 self._esc_charset_prober = EscCharSetProber(self.lang_filter)
             if self._esc_charset_prober.feed(byte_str) == ProbingState.FOUND_IT:
-                self.result = {
-                    "encoding": self._esc_charset_prober.charset_name,
-                    "confidence": self._esc_charset_prober.get_confidence(),
-                    "language": self._esc_charset_prober.language,
-                }
+                self.result = {'encoding':
+                               self._esc_charset_prober.charset_name,
+                               'confidence':
+                               self._esc_charset_prober.get_confidence(),
+                               'language':
+                               self._esc_charset_prober.language}
                 self.done = True
         # If we've seen high bytes (i.e., those with values greater than 127),
         # we need to do more complicated checks using all our multi-byte and
@@ -244,11 +209,9 @@ def feed(self, byte_str):
                 self._charset_probers.append(Latin1Prober())
             for prober in self._charset_probers:
                 if prober.feed(byte_str) == ProbingState.FOUND_IT:
-                    self.result = {
-                        "encoding": prober.charset_name,
-                        "confidence": prober.get_confidence(),
-                        "language": prober.language,
-                    }
+                    self.result = {'encoding': prober.charset_name,
+                                   'confidence': prober.get_confidence(),
+                                   'language': prober.language}
                     self.done = True
                     break
             if self.WIN_BYTE_DETECTOR.search(byte_str):
@@ -268,11 +231,13 @@ def close(self):
         self.done = True
 
         if not self._got_data:
-            self.logger.debug("no data received!")
+            self.logger.debug('no data received!')
 
         # Default to ASCII if it is all we've seen so far
         elif self._input_state == InputState.PURE_ASCII:
-            self.result = {"encoding": "ascii", "confidence": 1.0, "language": ""}
+            self.result = {'encoding': 'ascii',
+                           'confidence': 1.0,
+                           'language': ''}
 
         # If we have seen non-ASCII, return the best that met MINIMUM_THRESHOLD
         elif self._input_state == InputState.HIGH_BYTE:
@@ -292,37 +257,30 @@ def close(self):
                 confidence = max_prober.get_confidence()
                 # Use Windows encoding name instead of ISO-8859 if we saw any
                 # extra Windows-specific bytes
-                if lower_charset_name.startswith("iso-8859"):
+                if lower_charset_name.startswith('iso-8859'):
                     if self._has_win_bytes:
-                        charset_name = self.ISO_WIN_MAP.get(
-                            lower_charset_name, charset_name
-                        )
-                self.result = {
-                    "encoding": charset_name,
-                    "confidence": confidence,
-                    "language": max_prober.language,
-                }
+                        charset_name = self.ISO_WIN_MAP.get(lower_charset_name,
+                                                            charset_name)
+                self.result = {'encoding': charset_name,
+                               'confidence': confidence,
+                               'language': max_prober.language}
 
         # Log all prober confidences if none met MINIMUM_THRESHOLD
         if self.logger.getEffectiveLevel() <= logging.DEBUG:
-            if self.result["encoding"] is None:
-                self.logger.debug("no probers hit minimum threshold")
+            if self.result['encoding'] is None:
+                self.logger.debug('no probers hit minimum threshold')
                 for group_prober in self._charset_probers:
                     if not group_prober:
                         continue
                     if isinstance(group_prober, CharSetGroupProber):
                         for prober in group_prober.probers:
-                            self.logger.debug(
-                                "%s %s confidence = %s",
-                                prober.charset_name,
-                                prober.language,
-                                prober.get_confidence(),
-                            )
+                            self.logger.debug('%s %s confidence = %s',
+                                              prober.charset_name,
+                                              prober.language,
+                                              prober.get_confidence())
                     else:
-                        self.logger.debug(
-                            "%s %s confidence = %s",
-                            group_prober.charset_name,
-                            group_prober.language,
-                            group_prober.get_confidence(),
-                        )
+                        self.logger.debug('%s %s confidence = %s',
+                                          group_prober.charset_name,
+                                          group_prober.language,
+                                          group_prober.get_confidence())
         return self.result
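Through all of the reformatting, the public workflow of this module is unchanged: feed chunks until done flips, then close() and read the result dict with its encoding, confidence, and language keys. A usage sketch against the standalone chardet distribution (the input file name is hypothetical; under pip the import path would be pip._vendor.chardet instead):

    from chardet.universaldetector import UniversalDetector

    detector = UniversalDetector()
    with open('unknown.txt', 'rb') as f:        # hypothetical input file
        for chunk in iter(lambda: f.read(4096), b''):
            detector.feed(chunk)
            if detector.done:                   # a prober already hit FOUND_IT
                break
    detector.close()
    # e.g. {'encoding': 'windows-1251', 'confidence': 0.84, 'language': 'Russian'}
    print(detector.result)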
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/utf1632prober.py b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/utf1632prober.py
deleted file mode 100644
index 9fd1580..0000000
--- a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/utf1632prober.py
+++ /dev/null
@@ -1,223 +0,0 @@
-######################## BEGIN LICENSE BLOCK ########################
-#
-# Contributor(s):
-#   Jason Zavaglia
-#
-# This library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License, or (at your option) any later version.
-#
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-# Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
-# 02110-1301  USA
-######################### END LICENSE BLOCK #########################
-from .charsetprober import CharSetProber
-from .enums import ProbingState
-
-
-class UTF1632Prober(CharSetProber):
-    """
-    This class simply looks for occurrences of zero bytes, and infers
-    whether the file is UTF16 or UTF32 (little-endian or big-endian)
-    For instance, files looking like ( \0 \0 \0 [nonzero] )+
-    have a good probability to be UTF32BE.  Files looking like ( \0 [nonzero] )+
-    may be guessed to be UTF16BE, and inversely for little-endian varieties.
-    """
-
-    # how many logical characters to scan before feeling confident of prediction
-    MIN_CHARS_FOR_DETECTION = 20
-    # a fixed constant ratio of expected zeros or non-zeros in modulo-position.
-    EXPECTED_RATIO = 0.94
-
-    def __init__(self):
-        super().__init__()
-        self.position = 0
-        self.zeros_at_mod = [0] * 4
-        self.nonzeros_at_mod = [0] * 4
-        self._state = ProbingState.DETECTING
-        self.quad = [0, 0, 0, 0]
-        self.invalid_utf16be = False
-        self.invalid_utf16le = False
-        self.invalid_utf32be = False
-        self.invalid_utf32le = False
-        self.first_half_surrogate_pair_detected_16be = False
-        self.first_half_surrogate_pair_detected_16le = False
-        self.reset()
-
-    def reset(self):
-        super().reset()
-        self.position = 0
-        self.zeros_at_mod = [0] * 4
-        self.nonzeros_at_mod = [0] * 4
-        self._state = ProbingState.DETECTING
-        self.invalid_utf16be = False
-        self.invalid_utf16le = False
-        self.invalid_utf32be = False
-        self.invalid_utf32le = False
-        self.first_half_surrogate_pair_detected_16be = False
-        self.first_half_surrogate_pair_detected_16le = False
-        self.quad = [0, 0, 0, 0]
-
-    @property
-    def charset_name(self):
-        if self.is_likely_utf32be():
-            return "utf-32be"
-        if self.is_likely_utf32le():
-            return "utf-32le"
-        if self.is_likely_utf16be():
-            return "utf-16be"
-        if self.is_likely_utf16le():
-            return "utf-16le"
-        # default to something valid
-        return "utf-16"
-
-    @property
-    def language(self):
-        return ""
-
-    def approx_32bit_chars(self):
-        return max(1.0, self.position / 4.0)
-
-    def approx_16bit_chars(self):
-        return max(1.0, self.position / 2.0)
-
-    def is_likely_utf32be(self):
-        approx_chars = self.approx_32bit_chars()
-        return approx_chars >= self.MIN_CHARS_FOR_DETECTION and (
-            self.zeros_at_mod[0] / approx_chars > self.EXPECTED_RATIO
-            and self.zeros_at_mod[1] / approx_chars > self.EXPECTED_RATIO
-            and self.zeros_at_mod[2] / approx_chars > self.EXPECTED_RATIO
-            and self.nonzeros_at_mod[3] / approx_chars > self.EXPECTED_RATIO
-            and not self.invalid_utf32be
-        )
-
-    def is_likely_utf32le(self):
-        approx_chars = self.approx_32bit_chars()
-        return approx_chars >= self.MIN_CHARS_FOR_DETECTION and (
-            self.nonzeros_at_mod[0] / approx_chars > self.EXPECTED_RATIO
-            and self.zeros_at_mod[1] / approx_chars > self.EXPECTED_RATIO
-            and self.zeros_at_mod[2] / approx_chars > self.EXPECTED_RATIO
-            and self.zeros_at_mod[3] / approx_chars > self.EXPECTED_RATIO
-            and not self.invalid_utf32le
-        )
-
-    def is_likely_utf16be(self):
-        approx_chars = self.approx_16bit_chars()
-        return approx_chars >= self.MIN_CHARS_FOR_DETECTION and (
-            (self.nonzeros_at_mod[1] + self.nonzeros_at_mod[3]) / approx_chars
-            > self.EXPECTED_RATIO
-            and (self.zeros_at_mod[0] + self.zeros_at_mod[2]) / approx_chars
-            > self.EXPECTED_RATIO
-            and not self.invalid_utf16be
-        )
-
-    def is_likely_utf16le(self):
-        approx_chars = self.approx_16bit_chars()
-        return approx_chars >= self.MIN_CHARS_FOR_DETECTION and (
-            (self.nonzeros_at_mod[0] + self.nonzeros_at_mod[2]) / approx_chars
-            > self.EXPECTED_RATIO
-            and (self.zeros_at_mod[1] + self.zeros_at_mod[3]) / approx_chars
-            > self.EXPECTED_RATIO
-            and not self.invalid_utf16le
-        )
-
-    def validate_utf32_characters(self, quad):
-        """
-        Validate if the quad of bytes is valid UTF-32.
-
-        UTF-32 is valid in the range 0x00000000 - 0x0010FFFF
-        excluding 0x0000D800 - 0x0000DFFF
-
-        https://en.wikipedia.org/wiki/UTF-32
-        """
-        if (
-            quad[0] != 0
-            or quad[1] > 0x10
-            or (quad[0] == 0 and quad[1] == 0 and 0xD8 <= quad[2] <= 0xDF)
-        ):
-            self.invalid_utf32be = True
-        if (
-            quad[3] != 0
-            or quad[2] > 0x10
-            or (quad[3] == 0 and quad[2] == 0 and 0xD8 <= quad[1] <= 0xDF)
-        ):
-            self.invalid_utf32le = True
-
-    def validate_utf16_characters(self, pair):
-        """
-        Validate if the pair of bytes is valid UTF-16.
-
-        UTF-16 is valid in the range 0x0000 - 0xFFFF excluding 0xD800 - 0xDFFF
-        with an exception for surrogate pairs, which must be in the range
-        0xD800-0xDBFF followed by 0xDC00-0xDFFF
-
-        https://en.wikipedia.org/wiki/UTF-16
-        """
-        if not self.first_half_surrogate_pair_detected_16be:
-            if 0xD8 <= pair[0] <= 0xDB:
-                self.first_half_surrogate_pair_detected_16be = True
-            elif 0xDC <= pair[0] <= 0xDF:
-                self.invalid_utf16be = True
-        else:
-            if 0xDC <= pair[0] <= 0xDF:
-                self.first_half_surrogate_pair_detected_16be = False
-            else:
-                self.invalid_utf16be = True
-
-        if not self.first_half_surrogate_pair_detected_16le:
-            if 0xD8 <= pair[1] <= 0xDB:
-                self.first_half_surrogate_pair_detected_16le = True
-            elif 0xDC <= pair[1] <= 0xDF:
-                self.invalid_utf16le = True
-        else:
-            if 0xDC <= pair[1] <= 0xDF:
-                self.first_half_surrogate_pair_detected_16le = False
-            else:
-                self.invalid_utf16le = True
-
-    def feed(self, byte_str):
-        for c in byte_str:
-            mod4 = self.position % 4
-            self.quad[mod4] = c
-            if mod4 == 3:
-                self.validate_utf32_characters(self.quad)
-                self.validate_utf16_characters(self.quad[0:2])
-                self.validate_utf16_characters(self.quad[2:4])
-            if c == 0:
-                self.zeros_at_mod[mod4] += 1
-            else:
-                self.nonzeros_at_mod[mod4] += 1
-            self.position += 1
-        return self.state
-
-    @property
-    def state(self):
-        if self._state in {ProbingState.NOT_ME, ProbingState.FOUND_IT}:
-            # terminal, decided states
-            return self._state
-        if self.get_confidence() > 0.80:
-            self._state = ProbingState.FOUND_IT
-        elif self.position > 4 * 1024:
-            # if we get to 4kb into the file, and we can't conclude it's UTF,
-            # let's give up
-            self._state = ProbingState.NOT_ME
-        return self._state
-
-    def get_confidence(self):
-        return (
-            0.85
-            if (
-                self.is_likely_utf16le()
-                or self.is_likely_utf16be()
-                or self.is_likely_utf32le()
-                or self.is_likely_utf32be()
-            )
-            else 0.00
-        )
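
The prober deleted above classifies a BOM-less stream as one of the four UTF-16/32 variants by counting zero and non-zero bytes at each offset modulo 4 and comparing the ratios to a threshold. A minimal sketch of the UTF-16LE case (hypothetical standalone helper, not chardet API; the 20-character minimum and 0.94 ratio are assumed to match the MIN_CHARS_FOR_DETECTION and EXPECTED_RATIO constants referenced above but defined outside this hunk):

# ASCII-heavy text encoded as UTF-16LE has a non-zero byte at every even
# offset and a zero byte at every odd offset; count both and compare ratios.
def looks_like_utf16le(data: bytes, min_chars: int = 20, ratio: float = 0.94) -> bool:
    chars = len(data) // 2
    if chars < min_chars:
        return False
    even_nonzero = sum(1 for i in range(0, len(data) - 1, 2) if data[i] != 0)
    odd_zero = sum(1 for i in range(1, len(data), 2) if data[i] == 0)
    return even_nonzero / chars > ratio and odd_zero / chars > ratio

print(looks_like_utf16le("plain ascii text, no BOM needed".encode("utf-16-le")))  # True
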
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/utf8prober.py b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/utf8prober.py
index 3aae09e..6c3196c 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/utf8prober.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/utf8prober.py
@@ -26,22 +26,23 @@
 ######################### END LICENSE BLOCK #########################
 
 from .charsetprober import CharSetProber
+from .enums import ProbingState, MachineState
 from .codingstatemachine import CodingStateMachine
-from .enums import MachineState, ProbingState
 from .mbcssm import UTF8_SM_MODEL
 
 
+
 class UTF8Prober(CharSetProber):
     ONE_CHAR_PROB = 0.5
 
     def __init__(self):
-        super().__init__()
+        super(UTF8Prober, self).__init__()
         self.coding_sm = CodingStateMachine(UTF8_SM_MODEL)
         self._num_mb_chars = None
         self.reset()
 
     def reset(self):
-        super().reset()
+        super(UTF8Prober, self).reset()
         self.coding_sm.reset()
         self._num_mb_chars = 0
 
@@ -59,10 +60,10 @@ def feed(self, byte_str):
             if coding_state == MachineState.ERROR:
                 self._state = ProbingState.NOT_ME
                 break
-            if coding_state == MachineState.ITS_ME:
+            elif coding_state == MachineState.ITS_ME:
                 self._state = ProbingState.FOUND_IT
                 break
-            if coding_state == MachineState.START:
+            elif coding_state == MachineState.START:
                 if self.coding_sm.get_current_charlen() >= 2:
                     self._num_mb_chars += 1
 
@@ -75,6 +76,7 @@ def feed(self, byte_str):
     def get_confidence(self):
         unlike = 0.99
         if self._num_mb_chars < 6:
-            unlike *= self.ONE_CHAR_PROB**self._num_mb_chars
+            unlike *= self.ONE_CHAR_PROB ** self._num_mb_chars
             return 1.0 - unlike
-        return unlike
+        else:
+            return unlike
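
For the hunk above: get_confidence() returns 1 - 0.99 * 0.5**n while fewer than six multibyte sequences have been seen, then saturates at 0.99. A quick numeric check of that curve:

# Confidence of UTF8Prober.get_confidence() after n multibyte sequences (n < 6)
for n in range(6):
    print(n, 1.0 - 0.99 * 0.5 ** n)
# n = 0..5 gives approximately 0.01, 0.505, 0.7525, 0.876, 0.938, 0.969;
# from six multibyte sequences onward the prober returns 0.99 directly.
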
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/version.py b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/version.py
index a08a06b..70369b9 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/chardet/version.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/chardet/version.py
@@ -5,5 +5,5 @@
 :author: Dan Blanchard (dan.blanchard@gmail.com)
 """
 
-__version__ = "5.0.0"
-VERSION = __version__.split(".")
+__version__ = "4.0.0"
+VERSION = __version__.split('.')
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/colorama/__init__.py b/venv/lib/python3.10/site-packages/pip/_vendor/colorama/__init__.py
index 9138a8c..b149ed7 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/colorama/__init__.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/colorama/__init__.py
@@ -3,4 +3,4 @@
 from .ansi import Fore, Back, Style, Cursor
 from .ansitowin32 import AnsiToWin32
 
-__version__ = '0.4.5'
+__version__ = '0.4.4'
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/colorama/ansitowin32.py b/venv/lib/python3.10/site-packages/pip/_vendor/colorama/ansitowin32.py
index 3db248b..6039a05 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/colorama/ansitowin32.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/colorama/ansitowin32.py
@@ -37,12 +37,6 @@ def __enter__(self, *args, **kwargs):
     def __exit__(self, *args, **kwargs):
         return self.__wrapped.__exit__(*args, **kwargs)
 
-    def __setstate__(self, state):
-        self.__dict__ = state
-
-    def __getstate__(self):
-        return self.__dict__
-
     def write(self, text):
         self.__convertor.write(text)
 
@@ -63,9 +57,7 @@ def closed(self):
         stream = self.__wrapped
         try:
             return stream.closed
-        # AttributeError in the case that the stream doesn't support being closed
-        # ValueError for the case that the stream has already been detached when atexit runs
-        except (AttributeError, ValueError):
+        except AttributeError:
             return True
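
The other change in this hunk narrows the closed property's exception handling: colorama 0.4.5 also caught ValueError because, per the removed comment, a wrapped stream may already be detached by the time atexit runs. A small stdlib-only repro of the case the reverted except clause no longer covers:

import io

stream = io.TextIOWrapper(io.BytesIO())
stream.detach()
try:
    stream.closed
except ValueError as exc:
    print("closed check raised:", exc)  # escapes the AttributeError-only clause
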
 
 
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/distlib/__init__.py b/venv/lib/python3.10/site-packages/pip/_vendor/distlib/__init__.py
index 962173c..6878387 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/distlib/__init__.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/distlib/__init__.py
@@ -1,12 +1,12 @@
 # -*- coding: utf-8 -*-
 #
-# Copyright (C) 2012-2022 Vinay Sajip.
+# Copyright (C) 2012-2019 Vinay Sajip.
 # Licensed to the Python Software Foundation under a contributor agreement.
 # See LICENSE.txt and CONTRIBUTORS.txt.
 #
 import logging
 
-__version__ = '0.3.6'
+__version__ = '0.3.4'
 
 class DistlibException(Exception):
     pass
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/distlib/database.py b/venv/lib/python3.10/site-packages/pip/_vendor/distlib/database.py
index 5db5d7f..f486994 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/distlib/database.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/distlib/database.py
@@ -385,9 +385,8 @@ def provides(self):
 
     def _get_requirements(self, req_attr):
         md = self.metadata
+        logger.debug('Getting requirements from metadata %r', md.todict())
         reqts = getattr(md, req_attr)
-        logger.debug('%s: got requirements %r from metadata: %r', self.name, req_attr,
-                     reqts)
         return set(md.get_requirements(reqts, extras=self.extras,
                                        env=self.context))
 
@@ -1315,26 +1314,22 @@ def get_required_dists(dists, dist):
 
     :param dists: a list of distributions
     :param dist: a distribution, member of *dists* for which we are interested
-                 in finding the dependencies.
     """
     if dist not in dists:
         raise DistlibException('given distribution %r is not a member '
                                'of the list' % dist.name)
     graph = make_graph(dists)
 
-    req = set()  # required distributions
+    req = []  # required distributions
     todo = graph.adjacency_list[dist]  # list of nodes we should inspect
-    seen = set(t[0] for t in todo) # already added to todo
 
     while todo:
         d = todo.pop()[0]
-        req.add(d)
-        pred_list = graph.adjacency_list[d]
-        for pred in pred_list:
-            d = pred[0]
-            if d not in req and d not in seen:
-                seen.add(d)
+        req.append(d)
+        for pred in graph.adjacency_list[d]:
+            if pred not in req:
                 todo.append(pred)
+
     return req
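
The reverted traversal stores results in a plain list and tests pred not in req, where pred is a (node, label) edge tuple rather than a node, so cyclic dependency graphs keep re-enqueuing edges. A sketch of the set-based walk being removed, over a hypothetical adjacency list shaped like distlib's:

def required(adjacency, start):
    req, seen = set(), set()
    todo = list(adjacency[start])
    seen.update(node for node, _label in todo)
    while todo:
        node, _label = todo.pop()
        req.add(node)
        for pred in adjacency[node]:
            if pred[0] not in req and pred[0] not in seen:
                seen.add(pred[0])
                todo.append(pred)
    return req

adj = {"app": [("lib", "")], "lib": [("core", "")], "core": [("lib", "")]}
print(required(adj, "app"))  # {'core', 'lib'}: terminates despite the cycle
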
 
 
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/distlib/index.py b/venv/lib/python3.10/site-packages/pip/_vendor/distlib/index.py
index 9b6d129..b1fbbf8 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/distlib/index.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/distlib/index.py
@@ -12,7 +12,7 @@
 import tempfile
 try:
     from threading import Thread
-except ImportError:  # pragma: no cover
+except ImportError:
     from dummy_threading import Thread
 
 from . import DistlibException
@@ -104,7 +104,7 @@ def check_credentials(self):
         pm.add_password(self.realm, netloc, self.username, self.password)
         self.password_handler = HTTPBasicAuthHandler(pm)
 
-    def register(self, metadata):  # pragma: no cover
+    def register(self, metadata):
         """
         Register a distribution on PyPI, using the provided metadata.
 
@@ -142,7 +142,8 @@ def _reader(self, name, stream, outbuf):
             logger.debug('%s: %s' % (name, s))
         stream.close()
 
-    def get_sign_command(self, filename, signer, sign_password, keystore=None):  # pragma: no cover
+    def get_sign_command(self, filename, signer, sign_password,
+                         keystore=None):
         """
         Return a suitable command for signing a file.
 
@@ -205,7 +206,7 @@ def run_command(self, cmd, input_data=None):
         t2.join()
         return p.returncode, stdout, stderr
 
-    def sign_file(self, filename, signer, sign_password, keystore=None):  # pragma: no cover
+    def sign_file(self, filename, signer, sign_password, keystore=None):
         """
         Sign a file.
 
@@ -285,7 +286,7 @@ def upload_file(self, metadata, filename, signer=None, sign_password=None,
         request = self.encode_request(d.items(), files)
         return self.send_request(request)
 
-    def upload_documentation(self, metadata, doc_dir):  # pragma: no cover
+    def upload_documentation(self, metadata, doc_dir):
         """
         Upload documentation to the index.
 
@@ -498,7 +499,7 @@ def encode_request(self, fields, files):
         }
         return Request(self.url, body, headers)
 
-    def search(self, terms, operator=None):  # pragma: no cover
+    def search(self, terms, operator=None):
         if isinstance(terms, string_types):
             terms = {'name': terms}
         rpc_proxy = ServerProxy(self.url, timeout=3.0)
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/distlib/locators.py b/venv/lib/python3.10/site-packages/pip/_vendor/distlib/locators.py
index 966ebc0..c78bc9e 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/distlib/locators.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/distlib/locators.py
@@ -1053,9 +1053,9 @@ def get_distribution_names(self):
 
 
 # We use a legacy scheme simply because most of the dists on PyPI use legacy
-# versions which don't conform to PEP 440.
+# versions which don't conform to PEP 426 / PEP 440.
 default_locator = AggregatingLocator(
-                    # JSONLocator(), # don't use as PEP 426 is withdrawn
+                    JSONLocator(),
                     SimpleScrapingLocator('https://pypi.org/simple/',
                                           timeout=3.0),
                     scheme='legacy')
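
A usage sketch for the locator stack configured above (network access to pypi.org assumed; locate is distlib's convenience wrapper over default_locator):

from distlib.locators import locate

dist = locate("requests")  # walks JSONLocator, then SimpleScrapingLocator
if dist is not None:
    print(dist.name, dist.version)
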
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/distlib/metadata.py b/venv/lib/python3.10/site-packages/pip/_vendor/distlib/metadata.py
index c329e19..6a26b0a 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/distlib/metadata.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/distlib/metadata.py
@@ -5,7 +5,7 @@
 #
 """Implementation of the Metadata for Python packages PEPs.
 
-Supports all metadata formats (1.0, 1.1, 1.2, 1.3/2.1 and 2.2).
+Supports all metadata formats (1.0, 1.1, 1.2, 1.3/2.1 and withdrawn 2.0).
 """
 from __future__ import unicode_literals
 
@@ -100,17 +100,12 @@ class MetadataInvalidError(DistlibException):
 
 _566_MARKERS = ('Description-Content-Type',)
 
-_643_MARKERS = ('Dynamic', 'License-File')
-
-_643_FIELDS = _566_FIELDS + _643_MARKERS
-
 _ALL_FIELDS = set()
 _ALL_FIELDS.update(_241_FIELDS)
 _ALL_FIELDS.update(_314_FIELDS)
 _ALL_FIELDS.update(_345_FIELDS)
 _ALL_FIELDS.update(_426_FIELDS)
 _ALL_FIELDS.update(_566_FIELDS)
-_ALL_FIELDS.update(_643_FIELDS)
 
 EXTRA_RE = re.compile(r'''extra\s*==\s*("([^"]+)"|'([^']+)')''')
 
@@ -126,10 +121,7 @@ def _version2fieldlist(version):
         # avoid adding field names if already there
         return _345_FIELDS + tuple(f for f in _566_FIELDS if f not in _345_FIELDS)
     elif version == '2.0':
-        raise ValueError('Metadata 2.0 is withdrawn and not supported')
-        # return _426_FIELDS
-    elif version == '2.2':
-        return _643_FIELDS
+        return _426_FIELDS
     raise MetadataUnrecognizedVersionError(version)
 
 
@@ -147,7 +139,7 @@ def _has_marker(keys, markers):
             continue
         keys.append(key)
 
-    possible_versions = ['1.0', '1.1', '1.2', '1.3', '2.1', '2.2']  # 2.0 removed
+    possible_versions = ['1.0', '1.1', '1.2', '1.3', '2.0', '2.1']
 
     # first let's try to see if a field is not part of one of the version
     for key in keys:
@@ -167,12 +159,9 @@ def _has_marker(keys, markers):
             if key != 'Description':  # In 2.1, description allowed after headers
                 possible_versions.remove('2.1')
                 logger.debug('Removed 2.1 due to %s', key)
-        if key not in _643_FIELDS and '2.2' in possible_versions:
-            possible_versions.remove('2.2')
-            logger.debug('Removed 2.2 due to %s', key)
-        # if key not in _426_FIELDS and '2.0' in possible_versions:
-            # possible_versions.remove('2.0')
-            # logger.debug('Removed 2.0 due to %s', key)
+        if key not in _426_FIELDS and '2.0' in possible_versions:
+            possible_versions.remove('2.0')
+            logger.debug('Removed 2.0 due to %s', key)
 
     # possible_version contains qualified versions
     if len(possible_versions) == 1:
@@ -185,18 +174,16 @@ def _has_marker(keys, markers):
     is_1_1 = '1.1' in possible_versions and _has_marker(keys, _314_MARKERS)
     is_1_2 = '1.2' in possible_versions and _has_marker(keys, _345_MARKERS)
     is_2_1 = '2.1' in possible_versions and _has_marker(keys, _566_MARKERS)
-    # is_2_0 = '2.0' in possible_versions and _has_marker(keys, _426_MARKERS)
-    is_2_2 = '2.2' in possible_versions and _has_marker(keys, _643_MARKERS)
-    if int(is_1_1) + int(is_1_2) + int(is_2_1) + int(is_2_2) > 1:
-        raise MetadataConflictError('You used incompatible 1.1/1.2/2.1/2.2 fields')
+    is_2_0 = '2.0' in possible_versions and _has_marker(keys, _426_MARKERS)
+    if int(is_1_1) + int(is_1_2) + int(is_2_1) + int(is_2_0) > 1:
+        raise MetadataConflictError('You used incompatible 1.1/1.2/2.0/2.1 fields')
 
-    # we have the choice, 1.0, or 1.2, 2.1 or 2.2
+    # we have the choice, 1.0, or 1.2, or 2.0
     #   - 1.0 has a broken Summary field but works with all tools
     #   - 1.1 is to avoid
     #   - 1.2 fixes Summary but has little adoption
-    #   - 2.1 adds more features
-    #   - 2.2 is the latest
-    if not is_1_1 and not is_1_2 and not is_2_1 and not is_2_2:
+    #   - 2.0 adds more features and is very new
+    if not is_1_1 and not is_1_2 and not is_2_1 and not is_2_0:
         # we couldn't find any specific marker
         if PKG_INFO_PREFERRED_VERSION in possible_versions:
             return PKG_INFO_PREFERRED_VERSION
@@ -206,10 +193,8 @@ def _has_marker(keys, markers):
         return '1.2'
     if is_2_1:
         return '2.1'
-    # if is_2_2:
-        # return '2.2'
 
-    return '2.2'
+    return '2.0'
 
 # This follows the rules about transforming keys as described in
 # https://www.python.org/dev/peps/pep-0566/#id17
@@ -225,7 +210,7 @@ def _has_marker(keys, markers):
                'Requires', 'Provides', 'Obsoletes-Dist',
                'Provides-Dist', 'Requires-Dist', 'Requires-External',
                'Project-URL', 'Supported-Platform', 'Setup-Requires-Dist',
-               'Provides-Extra', 'Extension', 'License-File')
+               'Provides-Extra', 'Extension')
 _LISTTUPLEFIELDS = ('Project-URL',)
 
 _ELEMENTSFIELD = ('Keywords',)
@@ -617,7 +602,7 @@ def __repr__(self):
 
 class Metadata(object):
     """
-    The metadata of a release. This implementation uses 2.1
+    The metadata of a release. This implementation uses 2.0 (JSON)
     metadata where possible. If not possible, it wraps a LegacyMetadata
     instance which handles the key-value metadata format.
     """
@@ -626,8 +611,6 @@ class Metadata(object):
 
     NAME_MATCHER = re.compile('^[0-9A-Z]([0-9A-Z_.-]*[0-9A-Z])?$', re.I)
 
-    FIELDNAME_MATCHER = re.compile('^[A-Z]([0-9A-Z-]*[0-9A-Z])?$', re.I)
-
     VERSION_MATCHER = PEP440_VERSION_RE
 
     SUMMARY_MATCHER = re.compile('.{1,2047}')
@@ -655,7 +638,6 @@ class Metadata(object):
         'name': (NAME_MATCHER, ('legacy',)),
         'version': (VERSION_MATCHER, ('legacy',)),
         'summary': (SUMMARY_MATCHER, ('legacy',)),
-        'dynamic': (FIELDNAME_MATCHER, ('legacy',)),
     }
 
     __slots__ = ('_legacy', '_data', 'scheme')
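
The surviving _best_version logic infers a metadata version from which marker fields are present. A hedged sketch of that inference through distlib's LegacyMetadata key-value interface (the field choice here is illustrative):

from distlib.metadata import LegacyMetadata

md = LegacyMetadata()
md["Name"] = "demo"
md["Version"] = "1.0"
md["Requires-Dist"] = ["requests"]  # marker field introduced in metadata 1.2
md.set_metadata_version()
print(md["Metadata-Version"])  # '1.2' expected: no 2.x marker fields are set
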
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/distlib/scripts.py b/venv/lib/python3.10/site-packages/pip/_vendor/distlib/scripts.py
index d270624..913912c 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/distlib/scripts.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/distlib/scripts.py
@@ -10,8 +10,6 @@
 import re
 import struct
 import sys
-import time
-from zipfile import ZipInfo
 
 from .compat import sysconfig, detect_encoding, ZipFile
 from .resources import finder
@@ -251,13 +249,7 @@ def _write_script(self, names, shebang, script_bytes, filenames, ext):
                 launcher = self._get_launcher('w')
             stream = BytesIO()
             with ZipFile(stream, 'w') as zf:
-                source_date_epoch = os.environ.get('SOURCE_DATE_EPOCH')
-                if source_date_epoch:
-                    date_time = time.gmtime(int(source_date_epoch))[:6]
-                    zinfo = ZipInfo(filename='__main__.py', date_time=date_time)
-                    zf.writestr(zinfo, script_bytes)
-                else:
-                    zf.writestr('__main__.py', script_bytes)
+                zf.writestr('__main__.py', script_bytes)
             zip_data = stream.getvalue()
             script_bytes = launcher + shebang + zip_data
         for name in names:
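
The removed branch implements reproducible builds: it pins the timestamp of the embedded __main__.py to SOURCE_DATE_EPOCH so repeated builds of the same script are byte-identical. The same idea in isolation:

import io, os, time
from zipfile import ZipFile, ZipInfo

def pack(script_bytes: bytes) -> bytes:
    stream = io.BytesIO()
    with ZipFile(stream, "w") as zf:
        epoch = os.environ.get("SOURCE_DATE_EPOCH")
        if epoch:
            zinfo = ZipInfo("__main__.py", date_time=time.gmtime(int(epoch))[:6])
            zf.writestr(zinfo, script_bytes)
        else:
            zf.writestr("__main__.py", script_bytes)  # stamps wall-clock time
    return stream.getvalue()

os.environ["SOURCE_DATE_EPOCH"] = "1577836800"  # 2020-01-01T00:00:00Z
assert pack(b"print('hi')") == pack(b"print('hi')")  # byte-identical archives
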
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/distlib/wheel.py b/venv/lib/python3.10/site-packages/pip/_vendor/distlib/wheel.py
index 028c2d9..48abfde 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/distlib/wheel.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/distlib/wheel.py
@@ -11,6 +11,7 @@
 import datetime
 from email import message_from_file
 import hashlib
+import imp
 import json
 import logging
 import os
@@ -60,18 +61,10 @@ def _derive_abi():
         parts = ['cp', VER_SUFFIX]
         if sysconfig.get_config_var('Py_DEBUG'):
             parts.append('d')
-        if IMP_PREFIX == 'cp':
-            vi = sys.version_info[:2]
-            if vi < (3, 8):
-                wpm = sysconfig.get_config_var('WITH_PYMALLOC')
-                if wpm is None:
-                    wpm = True
-                if wpm:
-                    parts.append('m')
-                if vi < (3, 3):
-                    us = sysconfig.get_config_var('Py_UNICODE_SIZE')
-                    if us == 4 or (us is None and sys.maxunicode == 0x10FFFF):
-                        parts.append('u')
+        if sysconfig.get_config_var('WITH_PYMALLOC'):
+            parts.append('m')
+        if sysconfig.get_config_var('Py_UNICODE_SIZE') == 4:
+            parts.append('u')
         return ''.join(parts)
     ABI = _derive_abi()
     del _derive_abi
@@ -102,29 +95,6 @@ def _derive_abi():
 else:
     to_posix = lambda o: o.replace(os.sep, '/')
 
-if sys.version_info[0] < 3:
-    import imp
-else:
-    imp = None
-    import importlib.machinery
-    import importlib.util
-
-def _get_suffixes():
-    if imp:
-        return [s[0] for s in imp.get_suffixes()]
-    else:
-        return importlib.machinery.EXTENSION_SUFFIXES
-
-def _load_dynamic(name, path):
-    # https://docs.python.org/3/library/importlib.html#importing-a-source-file-directly
-    if imp:
-        return imp.load_dynamic(name, path)
-    else:
-        spec = importlib.util.spec_from_file_location(name, path)
-        module = importlib.util.module_from_spec(spec)
-        sys.modules[name] = module
-        spec.loader.exec_module(module)
-        return module
 
 class Mounter(object):
     def __init__(self):
@@ -154,7 +124,7 @@ def load_module(self, fullname):
         else:
             if fullname not in self.libs:
                 raise ImportError('unable to find extension for %s' % fullname)
-            result = _load_dynamic(fullname, self.libs[fullname])
+            result = imp.load_dynamic(fullname, self.libs[fullname])
             result.__loader__ = self
             parts = fullname.rsplit('.', 1)
             if len(parts) > 1:
@@ -331,9 +301,10 @@ def get_hash(self, data, hash_kind=None):
         result = base64.urlsafe_b64encode(result).rstrip(b'=').decode('ascii')
         return hash_kind, result
 
-    def write_record(self, records, record_path, archive_record_path):
+    def write_record(self, records, record_path, base):
         records = list(records) # make a copy, as mutated
-        records.append((archive_record_path, '', ''))
+        p = to_posix(os.path.relpath(record_path, base))
+        records.append((p, '', ''))
         with CSVWriter(record_path) as writer:
             for row in records:
                 writer.writerow(row)
@@ -350,8 +321,8 @@ def write_records(self, info, libdir, archive_paths):
             records.append((ap, digest, size))
 
         p = os.path.join(distinfo, 'RECORD')
+        self.write_record(records, p, libdir)
         ap = to_posix(os.path.join(info_dir, 'RECORD'))
-        self.write_record(records, p, ap)
         archive_paths.append((ap, p))
 
     def build_zip(self, pathname, archive_paths):
@@ -994,7 +965,7 @@ def compatible_tags():
         versions.append(''.join([major, str(minor)]))
 
     abis = []
-    for suffix in _get_suffixes():
+    for suffix, _, _ in imp.get_suffixes():
         if suffix.startswith('.abi'):
             abis.append(suffix.split('.', 2)[1])
     abis.sort()
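
Reinstating imp is a regression risk: it has been deprecated since Python 3.4 and was removed in 3.12. The deleted shim above maps both uses onto importlib; shown here in isolation:

import importlib.machinery
import importlib.util
import sys

# Modern source of extension suffixes, replacing imp.get_suffixes()
print(importlib.machinery.EXTENSION_SUFFIXES)  # e.g. ['...-linux-gnu.so', '.abi3.so', '.so']

def load_dynamic(name, path):
    # importlib-based replacement for imp.load_dynamic(), per the deleted shim
    spec = importlib.util.spec_from_file_location(name, path)
    module = importlib.util.module_from_spec(spec)
    sys.modules[name] = module
    spec.loader.exec_module(module)
    return module
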
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/distro/__init__.py b/venv/lib/python3.10/site-packages/pip/_vendor/distro/__init__.py
deleted file mode 100644
index 7686fe8..0000000
--- a/venv/lib/python3.10/site-packages/pip/_vendor/distro/__init__.py
+++ /dev/null
@@ -1,54 +0,0 @@
-from .distro import (
-    NORMALIZED_DISTRO_ID,
-    NORMALIZED_LSB_ID,
-    NORMALIZED_OS_ID,
-    LinuxDistribution,
-    __version__,
-    build_number,
-    codename,
-    distro_release_attr,
-    distro_release_info,
-    id,
-    info,
-    like,
-    linux_distribution,
-    lsb_release_attr,
-    lsb_release_info,
-    major_version,
-    minor_version,
-    name,
-    os_release_attr,
-    os_release_info,
-    uname_attr,
-    uname_info,
-    version,
-    version_parts,
-)
-
-__all__ = [
-    "NORMALIZED_DISTRO_ID",
-    "NORMALIZED_LSB_ID",
-    "NORMALIZED_OS_ID",
-    "LinuxDistribution",
-    "build_number",
-    "codename",
-    "distro_release_attr",
-    "distro_release_info",
-    "id",
-    "info",
-    "like",
-    "linux_distribution",
-    "lsb_release_attr",
-    "lsb_release_info",
-    "major_version",
-    "minor_version",
-    "name",
-    "os_release_attr",
-    "os_release_info",
-    "uname_attr",
-    "uname_info",
-    "version",
-    "version_parts",
-]
-
-__version__ = __version__
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/distro/__main__.py b/venv/lib/python3.10/site-packages/pip/_vendor/distro/__main__.py
deleted file mode 100644
index 0c01d5b..0000000
--- a/venv/lib/python3.10/site-packages/pip/_vendor/distro/__main__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-from .distro import main
-
-if __name__ == "__main__":
-    main()
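
For reference, the public API of the package being deleted here (python -m distro exercises the __main__ entry point removed above):

import distro

print(distro.id())       # e.g. 'ubuntu'
print(distro.version())  # e.g. '22.04'
print(distro.info())     # dict with id, version, version_parts, like, codename
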
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/distro/distro.py b/venv/lib/python3.10/site-packages/pip/_vendor/distro/distro.py
deleted file mode 100644
index 49066ae..0000000
--- a/venv/lib/python3.10/site-packages/pip/_vendor/distro/distro.py
+++ /dev/null
@@ -1,1374 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2015,2016,2017 Nir Cohen
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-The ``distro`` package (``distro`` stands for Linux Distribution) provides
-information about the Linux distribution it runs on, such as a reliable
-machine-readable distro ID, or version information.
-
-It is the recommended replacement for Python's original
-:py:func:`platform.linux_distribution` function, but it provides much more
-functionality. An alternative implementation became necessary because Python
-3.5 deprecated this function, and Python 3.8 removed it altogether. Its
-predecessor function :py:func:`platform.dist` was already deprecated since
-Python 2.6 and removed in Python 3.8. Still, there are many cases in which
-access to OS distribution information is needed. See `Python issue 1322
-`_ for more information.
-"""
-
-import argparse
-import json
-import logging
-import os
-import re
-import shlex
-import subprocess
-import sys
-import warnings
-from typing import (
-    Any,
-    Callable,
-    Dict,
-    Iterable,
-    Optional,
-    Sequence,
-    TextIO,
-    Tuple,
-    Type,
-)
-
-try:
-    from typing import TypedDict
-except ImportError:
-    # Python 3.7
-    TypedDict = dict
-
-__version__ = "1.7.0"
-
-
-class VersionDict(TypedDict):
-    major: str
-    minor: str
-    build_number: str
-
-
-class InfoDict(TypedDict):
-    id: str
-    version: str
-    version_parts: VersionDict
-    like: str
-    codename: str
-
-
-_UNIXCONFDIR = os.environ.get("UNIXCONFDIR", "/etc")
-_UNIXUSRLIBDIR = os.environ.get("UNIXUSRLIBDIR", "/usr/lib")
-_OS_RELEASE_BASENAME = "os-release"
-
-#: Translation table for normalizing the "ID" attribute defined in os-release
-#: files, for use by the :func:`distro.id` method.
-#:
-#: * Key: Value as defined in the os-release file, translated to lower case,
-#:   with blanks translated to underscores.
-#:
-#: * Value: Normalized value.
-NORMALIZED_OS_ID = {
-    "ol": "oracle",  # Oracle Linux
-    "opensuse-leap": "opensuse",  # Newer versions of OpenSuSE report as opensuse-leap
-}
-
-#: Translation table for normalizing the "Distributor ID" attribute returned by
-#: the lsb_release command, for use by the :func:`distro.id` method.
-#:
-#: * Key: Value as returned by the lsb_release command, translated to lower
-#:   case, with blanks translated to underscores.
-#:
-#: * Value: Normalized value.
-NORMALIZED_LSB_ID = {
-    "enterpriseenterpriseas": "oracle",  # Oracle Enterprise Linux 4
-    "enterpriseenterpriseserver": "oracle",  # Oracle Linux 5
-    "redhatenterpriseworkstation": "rhel",  # RHEL 6, 7 Workstation
-    "redhatenterpriseserver": "rhel",  # RHEL 6, 7 Server
-    "redhatenterprisecomputenode": "rhel",  # RHEL 6 ComputeNode
-}
-
-#: Translation table for normalizing the distro ID derived from the file name
-#: of distro release files, for use by the :func:`distro.id` method.
-#:
-#: * Key: Value as derived from the file name of a distro release file,
-#:   translated to lower case, with blanks translated to underscores.
-#:
-#: * Value: Normalized value.
-NORMALIZED_DISTRO_ID = {
-    "redhat": "rhel",  # RHEL 6.x, 7.x
-}
-
-# Pattern for content of distro release file (reversed)
-_DISTRO_RELEASE_CONTENT_REVERSED_PATTERN = re.compile(
-    r"(?:[^)]*\)(.*)\()? *(?:STL )?([\d.+\-a-z]*\d) *(?:esaeler *)?(.+)"
-)
-
-# Pattern for base file name of distro release file
-_DISTRO_RELEASE_BASENAME_PATTERN = re.compile(r"(\w+)[-_](release|version)$")
-
-# Base file names to be ignored when searching for distro release file
-_DISTRO_RELEASE_IGNORE_BASENAMES = (
-    "debian_version",
-    "lsb-release",
-    "oem-release",
-    _OS_RELEASE_BASENAME,
-    "system-release",
-    "plesk-release",
-    "iredmail-release",
-)
-
-
-def linux_distribution(full_distribution_name: bool = True) -> Tuple[str, str, str]:
-    """
-    .. deprecated:: 1.6.0
-
-        :func:`distro.linux_distribution()` is deprecated. It should only be
-        used as a compatibility shim with Python's
-        :py:func:`platform.linux_distribution()`. Please use :func:`distro.id`,
-        :func:`distro.version` and :func:`distro.name` instead.
-
-    Return information about the current OS distribution as a tuple
-    ``(id_name, version, codename)`` with items as follows:
-
-    * ``id_name``:  If *full_distribution_name* is false, the result of
-      :func:`distro.id`. Otherwise, the result of :func:`distro.name`.
-
-    * ``version``:  The result of :func:`distro.version`.
-
-    * ``codename``:  The extra item (usually in parentheses) after the
-      os-release version number, or the result of :func:`distro.codename`.
-
-    The interface of this function is compatible with the original
-    :py:func:`platform.linux_distribution` function, supporting a subset of
-    its parameters.
-
-    The data it returns may not exactly be the same, because it uses more data
-    sources than the original function, and that may lead to different data if
-    the OS distribution is not consistent across multiple data sources it
-    provides (there are indeed such distributions ...).
-
-    Another reason for differences is the fact that the :func:`distro.id`
-    method normalizes the distro ID string to a reliable machine-readable value
-    for a number of popular OS distributions.
-    """
-    warnings.warn(
-        "distro.linux_distribution() is deprecated. It should only be used as a "
-        "compatibility shim with Python's platform.linux_distribution(). Please use "
-        "distro.id(), distro.version() and distro.name() instead.",
-        DeprecationWarning,
-        stacklevel=2,
-    )
-    return _distro.linux_distribution(full_distribution_name)
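
The migration this deprecation message recommends, side by side (sketch; the legacy call emits a DeprecationWarning):

import distro

legacy = distro.linux_distribution(full_distribution_name=False)
modern = (distro.id(), distro.version(), distro.codename())
print(legacy, modern)  # typically the same triple, minus the warning
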
-
-
-def id() -> str:
-    """
-    Return the distro ID of the current distribution, as a
-    machine-readable string.
-
-    For a number of OS distributions, the returned distro ID value is
-    *reliable*, in the sense that it is documented and that it does not change
-    across releases of the distribution.
-
-    This package maintains the following reliable distro ID values:
-
-    ==============  =========================================
-    Distro ID       Distribution
-    ==============  =========================================
-    "ubuntu"        Ubuntu
-    "debian"        Debian
-    "rhel"          RedHat Enterprise Linux
-    "centos"        CentOS
-    "fedora"        Fedora
-    "sles"          SUSE Linux Enterprise Server
-    "opensuse"      openSUSE
-    "amzn"          Amazon Linux
-    "arch"          Arch Linux
-    "cloudlinux"    CloudLinux OS
-    "exherbo"       Exherbo Linux
-    "gentoo"        GenToo Linux
-    "ibm_powerkvm"  IBM PowerKVM
-    "kvmibm"        KVM for IBM z Systems
-    "linuxmint"     Linux Mint
-    "mageia"        Mageia
-    "mandriva"      Mandriva Linux
-    "parallels"     Parallels
-    "pidora"        Pidora
-    "raspbian"      Raspbian
-    "oracle"        Oracle Linux (and Oracle Enterprise Linux)
-    "scientific"    Scientific Linux
-    "slackware"     Slackware
-    "xenserver"     XenServer
-    "openbsd"       OpenBSD
-    "netbsd"        NetBSD
-    "freebsd"       FreeBSD
-    "midnightbsd"   MidnightBSD
-    "rocky"         Rocky Linux
-    "aix"           AIX
-    ==============  =========================================
-
-    If you have a need to get distros for reliable IDs added into this set,
-    or if you find that the :func:`distro.id` function returns a different
-    distro ID for one of the listed distros, please create an issue in the
-    `distro issue tracker`_.
-
-    **Lookup hierarchy and transformations:**
-
-    First, the ID is obtained from the following sources, in the specified
-    order. The first available and non-empty value is used:
-
-    * the value of the "ID" attribute of the os-release file,
-
-    * the value of the "Distributor ID" attribute returned by the lsb_release
-      command,
-
-    * the first part of the file name of the distro release file,
-
-    The so determined ID value then passes the following transformations,
-    before it is returned by this method:
-
-    * it is translated to lower case,
-
-    * blanks (which should not be there anyway) are translated to underscores,
-
-    * a normalization of the ID is performed, based upon
-      `normalization tables`_. The purpose of this normalization is to ensure
-      that the ID is as reliable as possible, even across incompatible changes
-      in the OS distributions. A common reason for an incompatible change is
-      the addition of an os-release file, or the addition of the lsb_release
-      command, with ID values that differ from what was previously determined
-      from the distro release file name.
-    """
-    return _distro.id()
-
-
-def name(pretty: bool = False) -> str:
-    """
-    Return the name of the current OS distribution, as a human-readable
-    string.
-
-    If *pretty* is false, the name is returned without version or codename.
-    (e.g. "CentOS Linux")
-
-    If *pretty* is true, the version and codename are appended.
-    (e.g. "CentOS Linux 7.1.1503 (Core)")
-
-    **Lookup hierarchy:**
-
-    The name is obtained from the following sources, in the specified order.
-    The first available and non-empty value is used:
-
-    * If *pretty* is false:
-
-      - the value of the "NAME" attribute of the os-release file,
-
-      - the value of the "Distributor ID" attribute returned by the lsb_release
-        command,
-
-      - the value of the "" field of the distro release file.
-
-    * If *pretty* is true:
-
-      - the value of the "PRETTY_NAME" attribute of the os-release file,
-
-      - the value of the "Description" attribute returned by the lsb_release
-        command,
-
-      - the value of the "" field of the distro release file, appended
-        with the value of the pretty version ("" and ""
-        fields) of the distro release file, if available.
-    """
-    return _distro.name(pretty)
-
-
-def version(pretty: bool = False, best: bool = False) -> str:
-    """
-    Return the version of the current OS distribution, as a human-readable
-    string.
-
-    If *pretty* is false, the version is returned without codename (e.g.
-    "7.0").
-
-    If *pretty* is true, the codename in parenthesis is appended, if the
-    codename is non-empty (e.g. "7.0 (Maipo)").
-
-    Some distributions provide version numbers with different precisions in
-    the different sources of distribution information. Examining the different
-    sources in a fixed priority order does not always yield the most precise
-    version (e.g. for Debian 8.2, or CentOS 7.1).
-
-    Some other distributions may not provide this kind of information. In these
-    cases, an empty string would be returned. This behavior can be observed
-    with rolling releases distributions (e.g. Arch Linux).
-
-    The *best* parameter can be used to control the approach for the returned
-    version:
-
-    If *best* is false, the first non-empty version number in priority order of
-    the examined sources is returned.
-
-    If *best* is true, the most precise version number out of all examined
-    sources is returned.
-
-    **Lookup hierarchy:**
-
-    In all cases, the version number is obtained from the following sources.
-    If *best* is false, this order represents the priority order:
-
-    * the value of the "VERSION_ID" attribute of the os-release file,
-    * the value of the "Release" attribute returned by the lsb_release
-      command,
-    * the version number parsed from the "" field of the first line
-      of the distro release file,
-    * the version number parsed from the "PRETTY_NAME" attribute of the
-      os-release file, if it follows the format of the distro release files.
-    * the version number parsed from the "Description" attribute returned by
-      the lsb_release command, if it follows the format of the distro release
-      files.
-    """
-    return _distro.version(pretty, best)
-
-
-def version_parts(best: bool = False) -> Tuple[str, str, str]:
-    """
-    Return the version of the current OS distribution as a tuple
-    ``(major, minor, build_number)`` with items as follows:
-
-    * ``major``:  The result of :func:`distro.major_version`.
-
-    * ``minor``:  The result of :func:`distro.minor_version`.
-
-    * ``build_number``:  The result of :func:`distro.build_number`.
-
-    For a description of the *best* parameter, see the :func:`distro.version`
-    method.
-    """
-    return _distro.version_parts(best)
-
-
-def major_version(best: bool = False) -> str:
-    """
-    Return the major version of the current OS distribution, as a string,
-    if provided.
-    Otherwise, the empty string is returned. The major version is the first
-    part of the dot-separated version string.
-
-    For a description of the *best* parameter, see the :func:`distro.version`
-    method.
-    """
-    return _distro.major_version(best)
-
-
-def minor_version(best: bool = False) -> str:
-    """
-    Return the minor version of the current OS distribution, as a string,
-    if provided.
-    Otherwise, the empty string is returned. The minor version is the second
-    part of the dot-separated version string.
-
-    For a description of the *best* parameter, see the :func:`distro.version`
-    method.
-    """
-    return _distro.minor_version(best)
-
-
-def build_number(best: bool = False) -> str:
-    """
-    Return the build number of the current OS distribution, as a string,
-    if provided.
-    Otherwise, the empty string is returned. The build number is the third part
-    of the dot-separated version string.
-
-    For a description of the *best* parameter, see the :func:`distro.version`
-    method.
-    """
-    return _distro.build_number(best)
-
-
-def like() -> str:
-    """
-    Return a space-separated list of distro IDs of distributions that are
-    closely related to the current OS distribution in regards to packaging
-    and programming interfaces, for example distributions the current
-    distribution is a derivative from.
-
-    **Lookup hierarchy:**
-
-    This information item is only provided by the os-release file.
-    For details, see the description of the "ID_LIKE" attribute in the
-    `os-release man page
-    <https://www.freedesktop.org/software/systemd/man/os-release.html>`_.
-    """
-    return _distro.like()
-
-
-def codename() -> str:
-    """
-    Return the codename for the release of the current OS distribution,
-    as a string.
-
-    If the distribution does not have a codename, an empty string is returned.
-
-    Note that the returned codename is not always really a codename. For
-    example, openSUSE returns "x86_64". This function does not handle such
-    cases in any special way and just returns the string it finds, if any.
-
-    **Lookup hierarchy:**
-
-    * the codename within the "VERSION" attribute of the os-release file, if
-      provided,
-
-    * the value of the "Codename" attribute returned by the lsb_release
-      command,
-
-    * the value of the "" field of the distro release file.
-    """
-    return _distro.codename()
-
-
-def info(pretty: bool = False, best: bool = False) -> InfoDict:
-    """
-    Return certain machine-readable information items about the current OS
-    distribution in a dictionary, as shown in the following example:
-
-    .. sourcecode:: python
-
-        {
-            'id': 'rhel',
-            'version': '7.0',
-            'version_parts': {
-                'major': '7',
-                'minor': '0',
-                'build_number': ''
-            },
-            'like': 'fedora',
-            'codename': 'Maipo'
-        }
-
-    The dictionary structure and keys are always the same, regardless of which
-    information items are available in the underlying data sources. The values
-    for the various keys are as follows:
-
-    * ``id``:  The result of :func:`distro.id`.
-
-    * ``version``:  The result of :func:`distro.version`.
-
-    * ``version_parts -> major``:  The result of :func:`distro.major_version`.
-
-    * ``version_parts -> minor``:  The result of :func:`distro.minor_version`.
-
-    * ``version_parts -> build_number``:  The result of
-      :func:`distro.build_number`.
-
-    * ``like``:  The result of :func:`distro.like`.
-
-    * ``codename``:  The result of :func:`distro.codename`.
-
-    For a description of the *pretty* and *best* parameters, see the
-    :func:`distro.version` method.
-    """
-    return _distro.info(pretty, best)
-
-
-def os_release_info() -> Dict[str, str]:
-    """
-    Return a dictionary containing key-value pairs for the information items
-    from the os-release file data source of the current OS distribution.
-
-    See `os-release file`_ for details about these information items.
-    """
-    return _distro.os_release_info()
-
-
-def lsb_release_info() -> Dict[str, str]:
-    """
-    Return a dictionary containing key-value pairs for the information items
-    from the lsb_release command data source of the current OS distribution.
-
-    See `lsb_release command output`_ for details about these information
-    items.
-    """
-    return _distro.lsb_release_info()
-
-
-def distro_release_info() -> Dict[str, str]:
-    """
-    Return a dictionary containing key-value pairs for the information items
-    from the distro release file data source of the current OS distribution.
-
-    See `distro release file`_ for details about these information items.
-    """
-    return _distro.distro_release_info()
-
-
-def uname_info() -> Dict[str, str]:
-    """
-    Return a dictionary containing key-value pairs for the information items
-    from the uname command data source of the current OS distribution.
-    """
-    return _distro.uname_info()
-
-
-def os_release_attr(attribute: str) -> str:
-    """
-    Return a single named information item from the os-release file data source
-    of the current OS distribution.
-
-    Parameters:
-
-    * ``attribute`` (string): Key of the information item.
-
-    Returns:
-
-    * (string): Value of the information item, if the item exists.
-      The empty string, if the item does not exist.
-
-    See `os-release file`_ for details about these information items.
-    """
-    return _distro.os_release_attr(attribute)
-
-
-def lsb_release_attr(attribute: str) -> str:
-    """
-    Return a single named information item from the lsb_release command output
-    data source of the current OS distribution.
-
-    Parameters:
-
-    * ``attribute`` (string): Key of the information item.
-
-    Returns:
-
-    * (string): Value of the information item, if the item exists.
-      The empty string, if the item does not exist.
-
-    See `lsb_release command output`_ for details about these information
-    items.
-    """
-    return _distro.lsb_release_attr(attribute)
-
-
-def distro_release_attr(attribute: str) -> str:
-    """
-    Return a single named information item from the distro release file
-    data source of the current OS distribution.
-
-    Parameters:
-
-    * ``attribute`` (string): Key of the information item.
-
-    Returns:
-
-    * (string): Value of the information item, if the item exists.
-      The empty string, if the item does not exist.
-
-    See `distro release file`_ for details about these information items.
-    """
-    return _distro.distro_release_attr(attribute)
-
-
-def uname_attr(attribute: str) -> str:
-    """
-    Return a single named information item from the uname command output
-    data source of the current OS distribution.
-
-    Parameters:
-
-    * ``attribute`` (string): Key of the information item.
-
-    Returns:
-
-    * (string): Value of the information item, if the item exists.
-                The empty string, if the item does not exist.
-    """
-    return _distro.uname_attr(attribute)
-
-
-try:
-    from functools import cached_property
-except ImportError:
-    # Python < 3.8
-    class cached_property:  # type: ignore
-        """A version of @property which caches the value.  On access, it calls the
-        underlying function and sets the value in `__dict__` so future accesses
-        will not re-call the property.
-        """
-
-        def __init__(self, f: Callable[[Any], Any]) -> None:
-            self._fname = f.__name__
-            self._f = f
-
-        def __get__(self, obj: Any, owner: Type[Any]) -> Any:
-            assert obj is not None, f"call {self._fname} on an instance"
-            ret = obj.__dict__[self._fname] = self._f(obj)
-            return ret
-
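
A quick check of the caching contract this fallback reproduces, using the stdlib original it stands in for (Python >= 3.8):

from functools import cached_property

class Demo:
    calls = 0

    @cached_property
    def value(self):
        Demo.calls += 1
        return 42

d = Demo()
print(d.value, d.value, Demo.calls)  # 42 42 1: second access skips the call
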
-
-class LinuxDistribution:
-    """
-    Provides information about an OS distribution.
-
-    This package creates a private module-global instance of this class with
-    default initialization arguments, that is used by the
-    `consolidated accessor functions`_ and `single source accessor functions`_.
-    By using default initialization arguments, that module-global instance
-    returns data about the current OS distribution (i.e. the distro this
-    package runs on).
-
-    Normally, it is not necessary to create additional instances of this class.
-    However, in situations where control is needed over the exact data sources
-    that are used, instances of this class can be created with a specific
-    distro release file, or a specific os-release file, or without invoking the
-    lsb_release command.
-    """
-
-    def __init__(
-        self,
-        include_lsb: Optional[bool] = None,
-        os_release_file: str = "",
-        distro_release_file: str = "",
-        include_uname: Optional[bool] = None,
-        root_dir: Optional[str] = None,
-        include_oslevel: Optional[bool] = None,
-    ) -> None:
-        """
-        The initialization method of this class gathers information from the
-        available data sources, and stores that in private instance attributes.
-        Subsequent access to the information items uses these private instance
-        attributes, so that the data sources are read only once.
-
-        Parameters:
-
-        * ``include_lsb`` (bool): Controls whether the
-          `lsb_release command output`_ is included as a data source.
-
-          If the lsb_release command is not available in the program execution
-          path, the data source for the lsb_release command will be empty.
-
-        * ``os_release_file`` (string): The path name of the
-          `os-release file`_ that is to be used as a data source.
-
-          An empty string (the default) will cause the default path name to
-          be used (see `os-release file`_ for details).
-
-          If the specified or defaulted os-release file does not exist, the
-          data source for the os-release file will be empty.
-
-        * ``distro_release_file`` (string): The path name of the
-          `distro release file`_ that is to be used as a data source.
-
-          An empty string (the default) will cause a default search algorithm
-          to be used (see `distro release file`_ for details).
-
-          If the specified distro release file does not exist, or if no default
-          distro release file can be found, the data source for the distro
-          release file will be empty.
-
-        * ``include_uname`` (bool): Controls whether uname command output is
-          included as a data source. If the uname command is not available in
-          the program execution path the data source for the uname command will
-          be empty.
-
-        * ``root_dir`` (string): The absolute path to the root directory to use
-          to find distro-related information files. Note that ``include_*``
-          parameters must not be enabled in combination with ``root_dir``.
-
-        * ``include_oslevel`` (bool): Controls whether (AIX) oslevel command
-          output is included as a data source. If the oslevel command is not
-          available in the program execution path the data source will be
-          empty.
-
-        Public instance attributes:
-
-        * ``os_release_file`` (string): The path name of the
-          `os-release file`_ that is actually used as a data source. The
-          empty string if no distro release file is used as a data source.
-
-        * ``distro_release_file`` (string): The path name of the
-          `distro release file`_ that is actually used as a data source. The
-          empty string if no distro release file is used as a data source.
-
-        * ``include_lsb`` (bool): The result of the ``include_lsb`` parameter.
-          This controls whether the lsb information will be loaded.
-
-        * ``include_uname`` (bool): The result of the ``include_uname``
-          parameter. This controls whether the uname information will
-          be loaded.
-
-        * ``include_oslevel`` (bool): The result of the ``include_oslevel``
-          parameter. This controls whether (AIX) oslevel information will be
-          loaded.
-
-        * ``root_dir`` (string): The result of the ``root_dir`` parameter.
-          The absolute path to the root directory to use to find distro-related
-          information files.
-
-        Raises:
-
-        * :py:exc:`ValueError`: Initialization parameters combination is not
-           supported.
-
-        * :py:exc:`OSError`: Some I/O issue with an os-release file or distro
-          release file.
-
-        * :py:exc:`UnicodeError`: A data source has unexpected characters or
-          uses an unexpected encoding.
-        """
-        self.root_dir = root_dir
-        self.etc_dir = os.path.join(root_dir, "etc") if root_dir else _UNIXCONFDIR
-        self.usr_lib_dir = (
-            os.path.join(root_dir, "usr/lib") if root_dir else _UNIXUSRLIBDIR
-        )
-
-        if os_release_file:
-            self.os_release_file = os_release_file
-        else:
-            etc_dir_os_release_file = os.path.join(self.etc_dir, _OS_RELEASE_BASENAME)
-            usr_lib_os_release_file = os.path.join(
-                self.usr_lib_dir, _OS_RELEASE_BASENAME
-            )
-
-            # NOTE: The idea is to respect order **and** have it set
-            #       at all times for API backwards compatibility.
-            if os.path.isfile(etc_dir_os_release_file) or not os.path.isfile(
-                usr_lib_os_release_file
-            ):
-                self.os_release_file = etc_dir_os_release_file
-            else:
-                self.os_release_file = usr_lib_os_release_file
-
-        self.distro_release_file = distro_release_file or ""  # updated later
-
-        is_root_dir_defined = root_dir is not None
-        if is_root_dir_defined and (include_lsb or include_uname or include_oslevel):
-            raise ValueError(
-                "Including subprocess data sources from specific root_dir is disallowed"
-                " to prevent false information"
-            )
-        self.include_lsb = (
-            include_lsb if include_lsb is not None else not is_root_dir_defined
-        )
-        self.include_uname = (
-            include_uname if include_uname is not None else not is_root_dir_defined
-        )
-        self.include_oslevel = (
-            include_oslevel if include_oslevel is not None else not is_root_dir_defined
-        )
-
-    def __repr__(self) -> str:
-        """Return repr of all info"""
-        return (
-            "LinuxDistribution("
-            "os_release_file={self.os_release_file!r}, "
-            "distro_release_file={self.distro_release_file!r}, "
-            "include_lsb={self.include_lsb!r}, "
-            "include_uname={self.include_uname!r}, "
-            "include_oslevel={self.include_oslevel!r}, "
-            "root_dir={self.root_dir!r}, "
-            "_os_release_info={self._os_release_info!r}, "
-            "_lsb_release_info={self._lsb_release_info!r}, "
-            "_distro_release_info={self._distro_release_info!r}, "
-            "_uname_info={self._uname_info!r}, "
-            "_oslevel_info={self._oslevel_info!r})".format(self=self)
-        )
-
-    def linux_distribution(
-        self, full_distribution_name: bool = True
-    ) -> Tuple[str, str, str]:
-        """
-        Return information about the OS distribution that is compatible
-        with Python's :func:`platform.linux_distribution`, supporting a subset
-        of its parameters.
-
-        For details, see :func:`distro.linux_distribution`.
-        """
-        return (
-            self.name() if full_distribution_name else self.id(),
-            self.version(),
-            self._os_release_info.get("release_codename") or self.codename(),
-        )
-
-    def id(self) -> str:
-        """Return the distro ID of the OS distribution, as a string.
-
-        For details, see :func:`distro.id`.
-        """
-
-        def normalize(distro_id: str, table: Dict[str, str]) -> str:
-            distro_id = distro_id.lower().replace(" ", "_")
-            return table.get(distro_id, distro_id)
-
-        distro_id = self.os_release_attr("id")
-        if distro_id:
-            return normalize(distro_id, NORMALIZED_OS_ID)
-
-        distro_id = self.lsb_release_attr("distributor_id")
-        if distro_id:
-            return normalize(distro_id, NORMALIZED_LSB_ID)
-
-        distro_id = self.distro_release_attr("id")
-        if distro_id:
-            return normalize(distro_id, NORMALIZED_DISTRO_ID)
-
-        distro_id = self.uname_attr("id")
-        if distro_id:
-            return normalize(distro_id, NORMALIZED_DISTRO_ID)
-
-        return ""
-
-    def name(self, pretty: bool = False) -> str:
-        """
-        Return the name of the OS distribution, as a string.
-
-        For details, see :func:`distro.name`.
-        """
-        name = (
-            self.os_release_attr("name")
-            or self.lsb_release_attr("distributor_id")
-            or self.distro_release_attr("name")
-            or self.uname_attr("name")
-        )
-        if pretty:
-            name = self.os_release_attr("pretty_name") or self.lsb_release_attr(
-                "description"
-            )
-            if not name:
-                name = self.distro_release_attr("name") or self.uname_attr("name")
-                version = self.version(pretty=True)
-                if version:
-                    name = f"{name} {version}"
-        return name or ""
-
-    def version(self, pretty: bool = False, best: bool = False) -> str:
-        """
-        Return the version of the OS distribution, as a string.
-
-        For details, see :func:`distro.version`.
-        """
-        versions = [
-            self.os_release_attr("version_id"),
-            self.lsb_release_attr("release"),
-            self.distro_release_attr("version_id"),
-            self._parse_distro_release_content(self.os_release_attr("pretty_name")).get(
-                "version_id", ""
-            ),
-            self._parse_distro_release_content(
-                self.lsb_release_attr("description")
-            ).get("version_id", ""),
-            self.uname_attr("release"),
-        ]
-        if self.uname_attr("id").startswith("aix"):
-            # On AIX platforms, prefer oslevel command output.
-            versions.insert(0, self.oslevel_info())
-        version = ""
-        if best:
-            # This algorithm uses the last version in priority order that has
-            # the best precision. If the versions are not in conflict, that
-            # does not matter; otherwise, using the last one instead of the
-            # first one might be considered a surprise.
-            for v in versions:
-                if v.count(".") > version.count(".") or version == "":
-                    version = v
-        else:
-            for v in versions:
-                if v != "":
-                    version = v
-                    break
-        if pretty and version and self.codename():
-            version = f"{version} ({self.codename()})"
-        return version
-
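
The best=True branch above keeps the candidate with the most dots, scanning in priority order; the selection rule in isolation:

# Later, more precise entries win; empty strings never displace a value.
versions = ["8", "", "8.2", "8.2.0", ""]
best = ""
for v in versions:
    if v.count(".") > best.count(".") or best == "":
        best = v
print(best)  # '8.2.0'
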
-    def version_parts(self, best: bool = False) -> Tuple[str, str, str]:
-        """
-        Return the version of the OS distribution, as a tuple of version
-        numbers.
-
-        For details, see :func:`distro.version_parts`.
-        """
-        version_str = self.version(best=best)
-        if version_str:
-            version_regex = re.compile(r"(\d+)\.?(\d+)?\.?(\d+)?")
-            matches = version_regex.match(version_str)
-            if matches:
-                major, minor, build_number = matches.groups()
-                return major, minor or "", build_number or ""
-        return "", "", ""
-
-    def major_version(self, best: bool = False) -> str:
-        """
-        Return the major version number of the current distribution.
-
-        For details, see :func:`distro.major_version`.
-        """
-        return self.version_parts(best)[0]
-
-    def minor_version(self, best: bool = False) -> str:
-        """
-        Return the minor version number of the current distribution.
-
-        For details, see :func:`distro.minor_version`.
-        """
-        return self.version_parts(best)[1]
-
-    def build_number(self, best: bool = False) -> str:
-        """
-        Return the build number of the current distribution.
-
-        For details, see :func:`distro.build_number`.
-        """
-        return self.version_parts(best)[2]
-
-    def like(self) -> str:
-        """
-        Return the IDs of distributions that are like the OS distribution.
-
-        For details, see :func:`distro.like`.
-        """
-        return self.os_release_attr("id_like") or ""
-
-    def codename(self) -> str:
-        """
-        Return the codename of the OS distribution.
-
-        For details, see :func:`distro.codename`.
-        """
-        try:
-            # Handle os_release specially since distros might purposefully set
-            # this to empty string to have no codename
-            return self._os_release_info["codename"]
-        except KeyError:
-            return (
-                self.lsb_release_attr("codename")
-                or self.distro_release_attr("codename")
-                or ""
-            )
-
-    def info(self, pretty: bool = False, best: bool = False) -> InfoDict:
-        """
-        Return certain machine-readable information about the OS
-        distribution.
-
-        For details, see :func:`distro.info`.
-        """
-        return dict(
-            id=self.id(),
-            version=self.version(pretty, best),
-            version_parts=dict(
-                major=self.major_version(best),
-                minor=self.minor_version(best),
-                build_number=self.build_number(best),
-            ),
-            like=self.like(),
-            codename=self.codename(),
-        )
-
-    def os_release_info(self) -> Dict[str, str]:
-        """
-        Return a dictionary containing key-value pairs for the information
-        items from the os-release file data source of the OS distribution.
-
-        For details, see :func:`distro.os_release_info`.
-        """
-        return self._os_release_info
-
-    def lsb_release_info(self) -> Dict[str, str]:
-        """
-        Return a dictionary containing key-value pairs for the information
-        items from the lsb_release command data source of the OS
-        distribution.
-
-        For details, see :func:`distro.lsb_release_info`.
-        """
-        return self._lsb_release_info
-
-    def distro_release_info(self) -> Dict[str, str]:
-        """
-        Return a dictionary containing key-value pairs for the information
-        items from the distro release file data source of the OS
-        distribution.
-
-        For details, see :func:`distro.distro_release_info`.
-        """
-        return self._distro_release_info
-
-    def uname_info(self) -> Dict[str, str]:
-        """
-        Return a dictionary containing key-value pairs for the information
-        items from the uname command data source of the OS distribution.
-
-        For details, see :func:`distro.uname_info`.
-        """
-        return self._uname_info
-
-    def oslevel_info(self) -> str:
-        """
-        Return AIX's oslevel command output.
-        """
-        return self._oslevel_info
-
-    def os_release_attr(self, attribute: str) -> str:
-        """
-        Return a single named information item from the os-release file data
-        source of the OS distribution.
-
-        For details, see :func:`distro.os_release_attr`.
-        """
-        return self._os_release_info.get(attribute, "")
-
-    def lsb_release_attr(self, attribute: str) -> str:
-        """
-        Return a single named information item from the lsb_release command
-        output data source of the OS distribution.
-
-        For details, see :func:`distro.lsb_release_attr`.
-        """
-        return self._lsb_release_info.get(attribute, "")
-
-    def distro_release_attr(self, attribute: str) -> str:
-        """
-        Return a single named information item from the distro release file
-        data source of the OS distribution.
-
-        For details, see :func:`distro.distro_release_attr`.
-        """
-        return self._distro_release_info.get(attribute, "")
-
-    def uname_attr(self, attribute: str) -> str:
-        """
-        Return a single named information item from the uname command
-        output data source of the OS distribution.
-
-        For details, see :func:`distro.uname_attr`.
-        """
-        return self._uname_info.get(attribute, "")
-
-    @cached_property
-    def _os_release_info(self) -> Dict[str, str]:
-        """
-        Get the information items from the specified os-release file.
-
-        Returns:
-            A dictionary containing all information items.
-        """
-        if os.path.isfile(self.os_release_file):
-            with open(self.os_release_file, encoding="utf-8") as release_file:
-                return self._parse_os_release_content(release_file)
-        return {}
-
-    @staticmethod
-    def _parse_os_release_content(lines: TextIO) -> Dict[str, str]:
-        """
-        Parse the lines of an os-release file.
-
-        Parameters:
-
-        * lines: Iterable through the lines in the os-release file.
-                 Each line must be a unicode string or a UTF-8 encoded byte
-                 string.
-
-        Returns:
-            A dictionary containing all information items.
-        """
-        props = {}
-        lexer = shlex.shlex(lines, posix=True)
-        lexer.whitespace_split = True
-
-        tokens = list(lexer)
-        for token in tokens:
-            # At this point, all shell-like parsing has been done (i.e.
-            # comments processed, quotes and backslash escape sequences
-            # processed, multi-line values assembled, trailing newlines
-            # stripped, etc.), so the tokens are now either:
-            # * variable assignments: var=value
-            # * commands or their arguments (not allowed in os-release)
-            # Ignore any tokens that are not variable assignments
-            if "=" in token:
-                k, v = token.split("=", 1)
-                props[k.lower()] = v
-
-        if "version" in props:
-            # extract release codename (if any) from version attribute
-            match = re.search(r"\((\D+)\)|,\s*(\D+)", props["version"])
-            if match:
-                release_codename = match.group(1) or match.group(2)
-                props["codename"] = props["release_codename"] = release_codename
-
-        if "version_codename" in props:
-            # os-release added a version_codename field.  Use that in
-            # preference to anything else. Note that some distros purposefully
-            # do not have code names.  They should be setting
-            # version_codename=""
-            props["codename"] = props["version_codename"]
-        elif "ubuntu_codename" in props:
-            # Same as above but a non-standard field name used on older Ubuntus
-            props["codename"] = props["ubuntu_codename"]
-
-        return props
-
-    @cached_property
-    def _lsb_release_info(self) -> Dict[str, str]:
-        """
-        Get the information items from the lsb_release command output.
-
-        Returns:
-            A dictionary containing all information items.
-        """
-        if not self.include_lsb:
-            return {}
-        try:
-            cmd = ("lsb_release", "-a")
-            stdout = subprocess.check_output(cmd, stderr=subprocess.DEVNULL)
-        # Command not found or lsb_release returned error
-        except (OSError, subprocess.CalledProcessError):
-            return {}
-        content = self._to_str(stdout).splitlines()
-        return self._parse_lsb_release_content(content)
-
-    @staticmethod
-    def _parse_lsb_release_content(lines: Iterable[str]) -> Dict[str, str]:
-        """
-        Parse the output of the lsb_release command.
-
-        Parameters:
-
-        * lines: Iterable through the lines of the lsb_release output.
-                 Each line must be a unicode string or a UTF-8 encoded byte
-                 string.
-
-        Returns:
-            A dictionary containing all information items.
-        """
-        props = {}
-        for line in lines:
-            kv = line.strip("\n").split(":", 1)
-            if len(kv) != 2:
-                # Ignore lines without colon.
-                continue
-            k, v = kv
-            props.update({k.replace(" ", "_").lower(): v.strip()})
-        return props
-
-    @cached_property
-    def _uname_info(self) -> Dict[str, str]:
-        if not self.include_uname:
-            return {}
-        try:
-            cmd = ("uname", "-rs")
-            stdout = subprocess.check_output(cmd, stderr=subprocess.DEVNULL)
-        except OSError:
-            return {}
-        content = self._to_str(stdout).splitlines()
-        return self._parse_uname_content(content)
-
-    @cached_property
-    def _oslevel_info(self) -> str:
-        if not self.include_oslevel:
-            return ""
-        try:
-            stdout = subprocess.check_output("oslevel", stderr=subprocess.DEVNULL)
-        except (OSError, subprocess.CalledProcessError):
-            return ""
-        return self._to_str(stdout).strip()
-
-    @staticmethod
-    def _parse_uname_content(lines: Sequence[str]) -> Dict[str, str]:
-        if not lines:
-            return {}
-        props = {}
-        match = re.search(r"^([^\s]+)\s+([\d\.]+)", lines[0].strip())
-        if match:
-            name, version = match.groups()
-
-            # This is to prevent the Linux kernel version from
-            # appearing as the 'best' version on otherwise
-            # identifiable distributions.
-            if name == "Linux":
-                return {}
-            props["id"] = name.lower()
-            props["name"] = name
-            props["release"] = version
-        return props
-
-    @staticmethod
-    def _to_str(bytestring: bytes) -> str:
-        encoding = sys.getfilesystemencoding()
-        return bytestring.decode(encoding)
-
-    @cached_property
-    def _distro_release_info(self) -> Dict[str, str]:
-        """
-        Get the information items from the specified distro release file.
-
-        Returns:
-            A dictionary containing all information items.
-        """
-        if self.distro_release_file:
-            # If it was specified, we use it and parse what we can, even if
-            # its file name or content does not match the expected pattern.
-            distro_info = self._parse_distro_release_file(self.distro_release_file)
-            basename = os.path.basename(self.distro_release_file)
-            # The file name pattern for user-specified distro release files
-            # is somewhat more tolerant (compared to when searching for the
-            # file), because we want to use what was specified as best as
-            # possible.
-            match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename)
-            if "name" in distro_info and "cloudlinux" in distro_info["name"].lower():
-                distro_info["id"] = "cloudlinux"
-            elif match:
-                distro_info["id"] = match.group(1)
-            return distro_info
-        else:
-            try:
-                basenames = os.listdir(self.etc_dir)
-                # We sort for repeatability in cases where there are multiple
-                # distro specific files; e.g. CentOS, Oracle, Enterprise all
-                # containing `redhat-release` on top of their own.
-                basenames.sort()
-            except OSError:
-                # This may occur when /etc is not readable but we can't be
-                # sure about the *-release files. Check common entries of
-                # /etc for information. If they turn out to not be there the
-                # error is handled in `_parse_distro_release_file()`.
-                basenames = [
-                    "SuSE-release",
-                    "arch-release",
-                    "base-release",
-                    "centos-release",
-                    "fedora-release",
-                    "gentoo-release",
-                    "mageia-release",
-                    "mandrake-release",
-                    "mandriva-release",
-                    "mandrivalinux-release",
-                    "manjaro-release",
-                    "oracle-release",
-                    "redhat-release",
-                    "rocky-release",
-                    "sl-release",
-                    "slackware-version",
-                ]
-            for basename in basenames:
-                if basename in _DISTRO_RELEASE_IGNORE_BASENAMES:
-                    continue
-                match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename)
-                if match:
-                    filepath = os.path.join(self.etc_dir, basename)
-                    distro_info = self._parse_distro_release_file(filepath)
-                    if "name" in distro_info:
-                        # The name is always present if the pattern matches
-                        self.distro_release_file = filepath
-                        distro_info["id"] = match.group(1)
-                        if "cloudlinux" in distro_info["name"].lower():
-                            distro_info["id"] = "cloudlinux"
-                        return distro_info
-            return {}
-
-    def _parse_distro_release_file(self, filepath: str) -> Dict[str, str]:
-        """
-        Parse a distro release file.
-
-        Parameters:
-
-        * filepath: Path name of the distro release file.
-
-        Returns:
-            A dictionary containing all information items.
-        """
-        try:
-            with open(filepath, encoding="utf-8") as fp:
-                # Only parse the first line. For instance, on SLES there
-                # are multiple lines. We don't want them...
-                return self._parse_distro_release_content(fp.readline())
-        except OSError:
-            # Ignore not being able to read a specific, seemingly version
-            # related file.
-            # See https://github.com/python-distro/distro/issues/162
-            return {}
-
-    @staticmethod
-    def _parse_distro_release_content(line: str) -> Dict[str, str]:
-        """
-        Parse a line from a distro release file.
-
-        Parameters:
-        * line: Line from the distro release file. Must be a unicode string
-                or a UTF-8 encoded byte string.
-
-        Returns:
-            A dictionary containing all information items.
-        """
-        matches = _DISTRO_RELEASE_CONTENT_REVERSED_PATTERN.match(line.strip()[::-1])
-        distro_info = {}
-        if matches:
-            # regexp ensures non-None
-            distro_info["name"] = matches.group(3)[::-1]
-            if matches.group(2):
-                distro_info["version_id"] = matches.group(2)[::-1]
-            if matches.group(1):
-                distro_info["codename"] = matches.group(1)[::-1]
-        elif line:
-            distro_info["name"] = line.strip()
-        return distro_info
-
-
-_distro = LinuxDistribution()
-
-
-def main() -> None:
-    logger = logging.getLogger(__name__)
-    logger.setLevel(logging.DEBUG)
-    logger.addHandler(logging.StreamHandler(sys.stdout))
-
-    parser = argparse.ArgumentParser(description="OS distro info tool")
-    parser.add_argument(
-        "--json", "-j", help="Output in machine readable format", action="store_true"
-    )
-
-    parser.add_argument(
-        "--root-dir",
-        "-r",
-        type=str,
-        dest="root_dir",
-        help="Path to the root filesystem directory (defaults to /)",
-    )
-
-    args = parser.parse_args()
-
-    if args.root_dir:
-        dist = LinuxDistribution(
-            include_lsb=False,
-            include_uname=False,
-            include_oslevel=False,
-            root_dir=args.root_dir,
-        )
-    else:
-        dist = _distro
-
-    if args.json:
-        logger.info(json.dumps(dist.info(), indent=4, sort_keys=True))
-    else:
-        logger.info("Name: %s", dist.name(pretty=True))
-        distribution_version = dist.version(pretty=True)
-        logger.info("Version: %s", distribution_version)
-        distribution_codename = dist.codename()
-        logger.info("Codename: %s", distribution_codename)
-
-
-if __name__ == "__main__":
-    main()
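
For reference, the distro module deleted above resolves the distribution's id, name, and version by consulting os-release, lsb_release, the distro release file, and uname in that order, and it parses os-release with a shell-style lexer. A minimal, self-contained sketch of that parsing step (the sample input is hypothetical):

    import shlex

    def parse_os_release(content: str) -> dict:
        # Shell-like lexing handles comments, quotes and escapes up front;
        # only var=value tokens are kept, mirroring the deleted method.
        props = {}
        lexer = shlex.shlex(content, posix=True)
        lexer.whitespace_split = True
        for token in lexer:
            if "=" in token:
                key, value = token.split("=", 1)
                props[key.lower()] = value
        return props

    sample = 'NAME="Ubuntu"\nVERSION_ID="22.04"\nID=ubuntu\n'
    print(parse_os_release(sample))
    # {'name': 'Ubuntu', 'version_id': '22.04', 'id': 'ubuntu'}
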
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/idna/core.py b/venv/lib/python3.10/site-packages/pip/_vendor/idna/core.py
index 4f30037..55ab967 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/idna/core.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/idna/core.py
@@ -339,10 +339,7 @@ def uts46_remap(domain: str, std3_rules: bool = True, transitional: bool = False
 
 def encode(s: Union[str, bytes, bytearray], strict: bool = False, uts46: bool = False, std3_rules: bool = False, transitional: bool = False) -> bytes:
     if isinstance(s, (bytes, bytearray)):
-        try:
-            s = s.decode('ascii')
-        except UnicodeDecodeError:
-            raise IDNAError('should pass a unicode string to the function rather than a byte string.')
+        s = s.decode('ascii')
     if uts46:
         s = uts46_remap(s, std3_rules, transitional)
     trailing_dot = False
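
The hunk above removes the try/except around the ASCII decode in idna's encode(), so passing a non-ASCII byte string now surfaces as a bare UnicodeDecodeError rather than the library's IDNAError. A small sketch of the difference; the IDNAError class here is a stand-in for illustration, not imported from idna:

    class IDNAError(UnicodeError):
        """Stand-in for idna's exception type."""

    def decode_with_guard(s: bytes) -> str:
        # The pre-change behavior: translate the decode failure.
        try:
            return s.decode('ascii')
        except UnicodeDecodeError:
            raise IDNAError('should pass a unicode string to the function '
                            'rather than a byte string.')

    for fn in (decode_with_guard, lambda s: s.decode('ascii')):
        try:
            fn('bücher.example'.encode('utf-8'))
        except Exception as exc:
            print(type(exc).__name__)  # IDNAError, then UnicodeDecodeError
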
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/idna/idnadata.py b/venv/lib/python3.10/site-packages/pip/_vendor/idna/idnadata.py
index 67db462..1b5805d 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/idna/idnadata.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/idna/idnadata.py
@@ -1,6 +1,6 @@
 # This file is automatically generated by tools/idna-data
 
-__version__ = '15.0.0'
+__version__ = '14.0.0'
 scripts = {
     'Greek': (
         0x37000000374,
@@ -55,13 +55,12 @@
         0x16fe200016fe4,
         0x16ff000016ff2,
         0x200000002a6e0,
-        0x2a7000002b73a,
+        0x2a7000002b739,
         0x2b7400002b81e,
         0x2b8200002cea2,
         0x2ceb00002ebe1,
         0x2f8000002fa1e,
         0x300000003134b,
-        0x31350000323b0,
     ),
     'Hebrew': (
         0x591000005c8,
@@ -78,7 +77,6 @@
         0x304100003097,
         0x309d000030a0,
         0x1b0010001b120,
-        0x1b1320001b133,
         0x1b1500001b153,
         0x1f2000001f201,
     ),
@@ -95,7 +93,6 @@
         0x1affd0001afff,
         0x1b0000001b001,
         0x1b1200001b123,
-        0x1b1550001b156,
         0x1b1640001b168,
     ),
 }
@@ -1334,7 +1331,7 @@
         0xcdd00000cdf,
         0xce000000ce4,
         0xce600000cf0,
-        0xcf100000cf4,
+        0xcf100000cf3,
         0xd0000000d0d,
         0xd0e00000d11,
         0xd1200000d45,
@@ -1369,7 +1366,7 @@
         0xeb400000ebe,
         0xec000000ec5,
         0xec600000ec7,
-        0xec800000ecf,
+        0xec800000ece,
         0xed000000eda,
         0xede00000ee0,
         0xf0000000f01,
@@ -1862,7 +1859,7 @@
         0xab200000ab27,
         0xab280000ab2f,
         0xab300000ab5b,
-        0xab600000ab69,
+        0xab600000ab6a,
         0xabc00000abeb,
         0xabec0000abee,
         0xabf00000abfa,
@@ -1946,7 +1943,7 @@
         0x10e8000010eaa,
         0x10eab00010ead,
         0x10eb000010eb2,
-        0x10efd00010f1d,
+        0x10f0000010f1d,
         0x10f2700010f28,
         0x10f3000010f51,
         0x10f7000010f86,
@@ -1969,7 +1966,7 @@
         0x111dc000111dd,
         0x1120000011212,
         0x1121300011238,
-        0x1123e00011242,
+        0x1123e0001123f,
         0x1128000011287,
         0x1128800011289,
         0x1128a0001128e,
@@ -2050,16 +2047,11 @@
         0x11d9300011d99,
         0x11da000011daa,
         0x11ee000011ef7,
-        0x11f0000011f11,
-        0x11f1200011f3b,
-        0x11f3e00011f43,
-        0x11f5000011f5a,
         0x11fb000011fb1,
         0x120000001239a,
         0x1248000012544,
         0x12f9000012ff1,
-        0x1300000013430,
-        0x1344000013456,
+        0x130000001342f,
         0x1440000014647,
         0x1680000016a39,
         0x16a4000016a5f,
@@ -2087,9 +2079,7 @@
         0x1aff50001affc,
         0x1affd0001afff,
         0x1b0000001b123,
-        0x1b1320001b133,
         0x1b1500001b153,
-        0x1b1550001b156,
         0x1b1640001b168,
         0x1b1700001b2fc,
         0x1bc000001bc6b,
@@ -2106,21 +2096,17 @@
         0x1da9b0001daa0,
         0x1daa10001dab0,
         0x1df000001df1f,
-        0x1df250001df2b,
         0x1e0000001e007,
         0x1e0080001e019,
         0x1e01b0001e022,
         0x1e0230001e025,
         0x1e0260001e02b,
-        0x1e0300001e06e,
-        0x1e08f0001e090,
         0x1e1000001e12d,
         0x1e1300001e13e,
         0x1e1400001e14a,
         0x1e14e0001e14f,
         0x1e2900001e2af,
         0x1e2c00001e2fa,
-        0x1e4d00001e4fa,
         0x1e7e00001e7e7,
         0x1e7e80001e7ec,
         0x1e7ed0001e7ef,
@@ -2129,13 +2115,13 @@
         0x1e8d00001e8d7,
         0x1e9220001e94c,
         0x1e9500001e95a,
+        0x1fbf00001fbfa,
         0x200000002a6e0,
-        0x2a7000002b73a,
+        0x2a7000002b739,
         0x2b7400002b81e,
         0x2b8200002cea2,
         0x2ceb00002ebe1,
         0x300000003134b,
-        0x31350000323b0,
     ),
     'CONTEXTJ': (
         0x200c0000200e,
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/idna/package_data.py b/venv/lib/python3.10/site-packages/pip/_vendor/idna/package_data.py
index 8501893..f5ea87c 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/idna/package_data.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/idna/package_data.py
@@ -1,2 +1,2 @@
-__version__ = '3.4'
+__version__ = '3.3'
 
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/idna/uts46data.py b/venv/lib/python3.10/site-packages/pip/_vendor/idna/uts46data.py
index 186796c..8f65705 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/idna/uts46data.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/idna/uts46data.py
@@ -7,7 +7,7 @@
 """IDNA Mapping Table from UTS46."""
 
 
-__version__ = '15.0.0'
+__version__ = '14.0.0'
 def _seg_0() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     return [
     (0x0, '3'),
@@ -1300,7 +1300,7 @@ def _seg_12() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     (0xCE6, 'V'),
     (0xCF0, 'X'),
     (0xCF1, 'V'),
-    (0xCF4, 'X'),
+    (0xCF3, 'X'),
     (0xD00, 'V'),
     (0xD0D, 'X'),
     (0xD0E, 'V'),
@@ -1368,7 +1368,7 @@ def _seg_13() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     (0xEC6, 'V'),
     (0xEC7, 'X'),
     (0xEC8, 'V'),
-    (0xECF, 'X'),
+    (0xECE, 'X'),
     (0xED0, 'V'),
     (0xEDA, 'X'),
     (0xEDC, 'M', 'ຫນ'),
@@ -5917,7 +5917,7 @@ def _seg_56() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     (0x10EAE, 'X'),
     (0x10EB0, 'V'),
     (0x10EB2, 'X'),
-    (0x10EFD, 'V'),
+    (0x10F00, 'V'),
     (0x10F28, 'X'),
     (0x10F30, 'V'),
     (0x10F5A, 'X'),
@@ -5956,7 +5956,7 @@ def _seg_57() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     (0x11200, 'V'),
     (0x11212, 'X'),
     (0x11213, 'V'),
-    (0x11242, 'X'),
+    (0x1123F, 'X'),
     (0x11280, 'V'),
     (0x11287, 'X'),
     (0x11288, 'V'),
@@ -6097,8 +6097,6 @@ def _seg_58() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     (0x11AA3, 'X'),
     (0x11AB0, 'V'),
     (0x11AF9, 'X'),
-    (0x11B00, 'V'),
-    (0x11B0A, 'X'),
     (0x11C00, 'V'),
     (0x11C09, 'X'),
     (0x11C0A, 'V'),
@@ -6141,19 +6139,13 @@ def _seg_58() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     (0x11DAA, 'X'),
     (0x11EE0, 'V'),
     (0x11EF9, 'X'),
-    (0x11F00, 'V'),
+    (0x11FB0, 'V'),
+    (0x11FB1, 'X'),
+    (0x11FC0, 'V'),
     ]
 
 def _seg_59() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     return [
-    (0x11F11, 'X'),
-    (0x11F12, 'V'),
-    (0x11F3B, 'X'),
-    (0x11F3E, 'V'),
-    (0x11F5A, 'X'),
-    (0x11FB0, 'V'),
-    (0x11FB1, 'X'),
-    (0x11FC0, 'V'),
     (0x11FF2, 'X'),
     (0x11FFF, 'V'),
     (0x1239A, 'X'),
@@ -6166,9 +6158,7 @@ def _seg_59() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     (0x12F90, 'V'),
     (0x12FF3, 'X'),
     (0x13000, 'V'),
-    (0x13430, 'X'),
-    (0x13440, 'V'),
-    (0x13456, 'X'),
+    (0x1342F, 'X'),
     (0x14400, 'V'),
     (0x14647, 'X'),
     (0x16800, 'V'),
@@ -6246,10 +6236,6 @@ def _seg_59() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     (0x18D00, 'V'),
     (0x18D09, 'X'),
     (0x1AFF0, 'V'),
-    ]
-
-def _seg_60() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
-    return [
     (0x1AFF4, 'X'),
     (0x1AFF5, 'V'),
     (0x1AFFC, 'X'),
@@ -6257,13 +6243,13 @@ def _seg_60() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     (0x1AFFF, 'X'),
     (0x1B000, 'V'),
     (0x1B123, 'X'),
-    (0x1B132, 'V'),
-    (0x1B133, 'X'),
     (0x1B150, 'V'),
     (0x1B153, 'X'),
-    (0x1B155, 'V'),
-    (0x1B156, 'X'),
     (0x1B164, 'V'),
+    ]
+
+def _seg_60() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+    return [
     (0x1B168, 'X'),
     (0x1B170, 'V'),
     (0x1B2FC, 'X'),
@@ -6309,8 +6295,6 @@ def _seg_60() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     (0x1D1EB, 'X'),
     (0x1D200, 'V'),
     (0x1D246, 'X'),
-    (0x1D2C0, 'V'),
-    (0x1D2D4, 'X'),
     (0x1D2E0, 'V'),
     (0x1D2F4, 'X'),
     (0x1D300, 'V'),
@@ -6350,10 +6334,6 @@ def _seg_60() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     (0x1D41E, 'M', 'e'),
     (0x1D41F, 'M', 'f'),
     (0x1D420, 'M', 'g'),
-    ]
-
-def _seg_61() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
-    return [
     (0x1D421, 'M', 'h'),
     (0x1D422, 'M', 'i'),
     (0x1D423, 'M', 'j'),
@@ -6370,6 +6350,10 @@ def _seg_61() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     (0x1D42E, 'M', 'u'),
     (0x1D42F, 'M', 'v'),
     (0x1D430, 'M', 'w'),
+    ]
+
+def _seg_61() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+    return [
     (0x1D431, 'M', 'x'),
     (0x1D432, 'M', 'y'),
     (0x1D433, 'M', 'z'),
@@ -6454,10 +6438,6 @@ def _seg_61() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     (0x1D482, 'M', 'a'),
     (0x1D483, 'M', 'b'),
     (0x1D484, 'M', 'c'),
-    ]
-
-def _seg_62() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
-    return [
     (0x1D485, 'M', 'd'),
     (0x1D486, 'M', 'e'),
     (0x1D487, 'M', 'f'),
@@ -6474,6 +6454,10 @@ def _seg_62() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     (0x1D492, 'M', 'q'),
     (0x1D493, 'M', 'r'),
     (0x1D494, 'M', 's'),
+    ]
+
+def _seg_62() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+    return [
     (0x1D495, 'M', 't'),
     (0x1D496, 'M', 'u'),
     (0x1D497, 'M', 'v'),
@@ -6558,10 +6542,6 @@ def _seg_62() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     (0x1D4E9, 'M', 'z'),
     (0x1D4EA, 'M', 'a'),
     (0x1D4EB, 'M', 'b'),
-    ]
-
-def _seg_63() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
-    return [
     (0x1D4EC, 'M', 'c'),
     (0x1D4ED, 'M', 'd'),
     (0x1D4EE, 'M', 'e'),
@@ -6578,6 +6558,10 @@ def _seg_63() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     (0x1D4F9, 'M', 'p'),
     (0x1D4FA, 'M', 'q'),
     (0x1D4FB, 'M', 'r'),
+    ]
+
+def _seg_63() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+    return [
     (0x1D4FC, 'M', 's'),
     (0x1D4FD, 'M', 't'),
     (0x1D4FE, 'M', 'u'),
@@ -6662,10 +6646,6 @@ def _seg_63() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     (0x1D550, 'M', 'y'),
     (0x1D551, 'X'),
     (0x1D552, 'M', 'a'),
-    ]
-
-def _seg_64() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
-    return [
     (0x1D553, 'M', 'b'),
     (0x1D554, 'M', 'c'),
     (0x1D555, 'M', 'd'),
@@ -6682,6 +6662,10 @@ def _seg_64() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     (0x1D560, 'M', 'o'),
     (0x1D561, 'M', 'p'),
     (0x1D562, 'M', 'q'),
+    ]
+
+def _seg_64() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+    return [
     (0x1D563, 'M', 'r'),
     (0x1D564, 'M', 's'),
     (0x1D565, 'M', 't'),
@@ -6766,10 +6750,6 @@ def _seg_64() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     (0x1D5B4, 'M', 'u'),
     (0x1D5B5, 'M', 'v'),
     (0x1D5B6, 'M', 'w'),
-    ]
-
-def _seg_65() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
-    return [
     (0x1D5B7, 'M', 'x'),
     (0x1D5B8, 'M', 'y'),
     (0x1D5B9, 'M', 'z'),
@@ -6786,6 +6766,10 @@ def _seg_65() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     (0x1D5C4, 'M', 'k'),
     (0x1D5C5, 'M', 'l'),
     (0x1D5C6, 'M', 'm'),
+    ]
+
+def _seg_65() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+    return [
     (0x1D5C7, 'M', 'n'),
     (0x1D5C8, 'M', 'o'),
     (0x1D5C9, 'M', 'p'),
@@ -6870,10 +6854,6 @@ def _seg_65() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     (0x1D618, 'M', 'q'),
     (0x1D619, 'M', 'r'),
     (0x1D61A, 'M', 's'),
-    ]
-
-def _seg_66() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
-    return [
     (0x1D61B, 'M', 't'),
     (0x1D61C, 'M', 'u'),
     (0x1D61D, 'M', 'v'),
@@ -6890,6 +6870,10 @@ def _seg_66() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     (0x1D628, 'M', 'g'),
     (0x1D629, 'M', 'h'),
     (0x1D62A, 'M', 'i'),
+    ]
+
+def _seg_66() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+    return [
     (0x1D62B, 'M', 'j'),
     (0x1D62C, 'M', 'k'),
     (0x1D62D, 'M', 'l'),
@@ -6974,10 +6958,6 @@ def _seg_66() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     (0x1D67C, 'M', 'm'),
     (0x1D67D, 'M', 'n'),
     (0x1D67E, 'M', 'o'),
-    ]
-
-def _seg_67() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
-    return [
     (0x1D67F, 'M', 'p'),
     (0x1D680, 'M', 'q'),
     (0x1D681, 'M', 'r'),
@@ -6994,6 +6974,10 @@ def _seg_67() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     (0x1D68C, 'M', 'c'),
     (0x1D68D, 'M', 'd'),
     (0x1D68E, 'M', 'e'),
+    ]
+
+def _seg_67() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+    return [
     (0x1D68F, 'M', 'f'),
     (0x1D690, 'M', 'g'),
     (0x1D691, 'M', 'h'),
@@ -7078,10 +7062,6 @@ def _seg_67() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     (0x1D6E2, 'M', 'α'),
     (0x1D6E3, 'M', 'β'),
     (0x1D6E4, 'M', 'γ'),
-    ]
-
-def _seg_68() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
-    return [
     (0x1D6E5, 'M', 'δ'),
     (0x1D6E6, 'M', 'ε'),
     (0x1D6E7, 'M', 'ζ'),
@@ -7098,6 +7078,10 @@ def _seg_68() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     (0x1D6F2, 'M', 'ρ'),
     (0x1D6F3, 'M', 'θ'),
     (0x1D6F4, 'M', 'σ'),
+    ]
+
+def _seg_68() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+    return [
     (0x1D6F5, 'M', 'τ'),
     (0x1D6F6, 'M', 'υ'),
     (0x1D6F7, 'M', 'φ'),
@@ -7182,10 +7166,6 @@ def _seg_68() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     (0x1D747, 'M', 'σ'),
     (0x1D749, 'M', 'τ'),
     (0x1D74A, 'M', 'υ'),
-    ]
-
-def _seg_69() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
-    return [
     (0x1D74B, 'M', 'φ'),
     (0x1D74C, 'M', 'χ'),
     (0x1D74D, 'M', 'ψ'),
@@ -7202,6 +7182,10 @@ def _seg_69() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     (0x1D758, 'M', 'γ'),
     (0x1D759, 'M', 'δ'),
     (0x1D75A, 'M', 'ε'),
+    ]
+
+def _seg_69() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+    return [
     (0x1D75B, 'M', 'ζ'),
     (0x1D75C, 'M', 'η'),
     (0x1D75D, 'M', 'θ'),
@@ -7286,10 +7270,6 @@ def _seg_69() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     (0x1D7AD, 'M', 'δ'),
     (0x1D7AE, 'M', 'ε'),
     (0x1D7AF, 'M', 'ζ'),
-    ]
-
-def _seg_70() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
-    return [
     (0x1D7B0, 'M', 'η'),
     (0x1D7B1, 'M', 'θ'),
     (0x1D7B2, 'M', 'ι'),
@@ -7306,6 +7286,10 @@ def _seg_70() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     (0x1D7BE, 'M', 'υ'),
     (0x1D7BF, 'M', 'φ'),
     (0x1D7C0, 'M', 'χ'),
+    ]
+
+def _seg_70() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+    return [
     (0x1D7C1, 'M', 'ψ'),
     (0x1D7C2, 'M', 'ω'),
     (0x1D7C3, 'M', '∂'),
@@ -7375,8 +7359,6 @@ def _seg_70() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     (0x1DAB0, 'X'),
     (0x1DF00, 'V'),
     (0x1DF1F, 'X'),
-    (0x1DF25, 'V'),
-    (0x1DF2B, 'X'),
     (0x1E000, 'V'),
     (0x1E007, 'X'),
     (0x1E008, 'V'),
@@ -7387,75 +7369,6 @@ def _seg_70() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     (0x1E025, 'X'),
     (0x1E026, 'V'),
     (0x1E02B, 'X'),
-    (0x1E030, 'M', 'а'),
-    (0x1E031, 'M', 'б'),
-    (0x1E032, 'M', 'в'),
-    ]
-
-def _seg_71() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
-    return [
-    (0x1E033, 'M', 'г'),
-    (0x1E034, 'M', 'д'),
-    (0x1E035, 'M', 'е'),
-    (0x1E036, 'M', 'ж'),
-    (0x1E037, 'M', 'з'),
-    (0x1E038, 'M', 'и'),
-    (0x1E039, 'M', 'к'),
-    (0x1E03A, 'M', 'л'),
-    (0x1E03B, 'M', 'м'),
-    (0x1E03C, 'M', 'о'),
-    (0x1E03D, 'M', 'п'),
-    (0x1E03E, 'M', 'р'),
-    (0x1E03F, 'M', 'с'),
-    (0x1E040, 'M', 'т'),
-    (0x1E041, 'M', 'у'),
-    (0x1E042, 'M', 'ф'),
-    (0x1E043, 'M', 'х'),
-    (0x1E044, 'M', 'ц'),
-    (0x1E045, 'M', 'ч'),
-    (0x1E046, 'M', 'ш'),
-    (0x1E047, 'M', 'ы'),
-    (0x1E048, 'M', 'э'),
-    (0x1E049, 'M', 'ю'),
-    (0x1E04A, 'M', 'ꚉ'),
-    (0x1E04B, 'M', 'ә'),
-    (0x1E04C, 'M', 'і'),
-    (0x1E04D, 'M', 'ј'),
-    (0x1E04E, 'M', 'ө'),
-    (0x1E04F, 'M', 'ү'),
-    (0x1E050, 'M', 'ӏ'),
-    (0x1E051, 'M', 'а'),
-    (0x1E052, 'M', 'б'),
-    (0x1E053, 'M', 'в'),
-    (0x1E054, 'M', 'г'),
-    (0x1E055, 'M', 'д'),
-    (0x1E056, 'M', 'е'),
-    (0x1E057, 'M', 'ж'),
-    (0x1E058, 'M', 'з'),
-    (0x1E059, 'M', 'и'),
-    (0x1E05A, 'M', 'к'),
-    (0x1E05B, 'M', 'л'),
-    (0x1E05C, 'M', 'о'),
-    (0x1E05D, 'M', 'п'),
-    (0x1E05E, 'M', 'с'),
-    (0x1E05F, 'M', 'у'),
-    (0x1E060, 'M', 'ф'),
-    (0x1E061, 'M', 'х'),
-    (0x1E062, 'M', 'ц'),
-    (0x1E063, 'M', 'ч'),
-    (0x1E064, 'M', 'ш'),
-    (0x1E065, 'M', 'ъ'),
-    (0x1E066, 'M', 'ы'),
-    (0x1E067, 'M', 'ґ'),
-    (0x1E068, 'M', 'і'),
-    (0x1E069, 'M', 'ѕ'),
-    (0x1E06A, 'M', 'џ'),
-    (0x1E06B, 'M', 'ҫ'),
-    (0x1E06C, 'M', 'ꙑ'),
-    (0x1E06D, 'M', 'ұ'),
-    (0x1E06E, 'X'),
-    (0x1E08F, 'V'),
-    (0x1E090, 'X'),
     (0x1E100, 'V'),
     (0x1E12D, 'X'),
     (0x1E130, 'V'),
@@ -7470,8 +7383,6 @@ def _seg_71() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     (0x1E2FA, 'X'),
     (0x1E2FF, 'V'),
     (0x1E300, 'X'),
-    (0x1E4D0, 'V'),
-    (0x1E4FA, 'X'),
     (0x1E7E0, 'V'),
     (0x1E7E7, 'X'),
     (0x1E7E8, 'V'),
@@ -7479,6 +7390,10 @@ def _seg_71() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     (0x1E7ED, 'V'),
     (0x1E7EF, 'X'),
     (0x1E7F0, 'V'),
+    ]
+
+def _seg_71() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+    return [
     (0x1E7FF, 'X'),
     (0x1E800, 'V'),
     (0x1E8C5, 'X'),
@@ -7494,10 +7409,6 @@ def _seg_71() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     (0x1E907, 'M', '𞤩'),
     (0x1E908, 'M', '𞤪'),
     (0x1E909, 'M', '𞤫'),
-    ]
-
-def _seg_72() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
-    return [
     (0x1E90A, 'M', '𞤬'),
     (0x1E90B, 'M', '𞤭'),
     (0x1E90C, 'M', '𞤮'),
@@ -7583,6 +7494,10 @@ def _seg_72() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     (0x1EE31, 'M', 'ص'),
     (0x1EE32, 'M', 'ق'),
     (0x1EE33, 'X'),
+    ]
+
+def _seg_72() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+    return [
     (0x1EE34, 'M', 'ش'),
     (0x1EE35, 'M', 'ت'),
     (0x1EE36, 'M', 'ث'),
@@ -7598,10 +7513,6 @@ def _seg_72() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     (0x1EE48, 'X'),
     (0x1EE49, 'M', 'ي'),
     (0x1EE4A, 'X'),
-    ]
-
-def _seg_73() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
-    return [
     (0x1EE4B, 'M', 'ل'),
     (0x1EE4C, 'X'),
     (0x1EE4D, 'M', 'ن'),
@@ -7687,6 +7598,10 @@ def _seg_73() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     (0x1EEA3, 'M', 'د'),
     (0x1EEA4, 'X'),
     (0x1EEA5, 'M', 'و'),
+    ]
+
+def _seg_73() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+    return [
     (0x1EEA6, 'M', 'ز'),
     (0x1EEA7, 'M', 'ح'),
     (0x1EEA8, 'M', 'ط'),
@@ -7702,10 +7617,6 @@ def _seg_73() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     (0x1EEB2, 'M', 'ق'),
     (0x1EEB3, 'M', 'ر'),
     (0x1EEB4, 'M', 'ش'),
-    ]
-
-def _seg_74() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
-    return [
     (0x1EEB5, 'M', 'ت'),
     (0x1EEB6, 'M', 'ث'),
     (0x1EEB7, 'M', 'خ'),
@@ -7791,6 +7702,10 @@ def _seg_74() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     (0x1F141, 'M', 'r'),
     (0x1F142, 'M', 's'),
     (0x1F143, 'M', 't'),
+    ]
+
+def _seg_74() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+    return [
     (0x1F144, 'M', 'u'),
     (0x1F145, 'M', 'v'),
     (0x1F146, 'M', 'w'),
@@ -7806,10 +7721,6 @@ def _seg_74() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     (0x1F150, 'V'),
     (0x1F16A, 'M', 'mc'),
     (0x1F16B, 'M', 'md'),
-    ]
-
-def _seg_75() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
-    return [
     (0x1F16C, 'M', 'mr'),
     (0x1F16D, 'V'),
     (0x1F190, 'M', 'dj'),
@@ -7882,19 +7793,23 @@ def _seg_75() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     (0x1F266, 'X'),
     (0x1F300, 'V'),
     (0x1F6D8, 'X'),
-    (0x1F6DC, 'V'),
+    (0x1F6DD, 'V'),
     (0x1F6ED, 'X'),
     (0x1F6F0, 'V'),
     (0x1F6FD, 'X'),
     (0x1F700, 'V'),
-    (0x1F777, 'X'),
-    (0x1F77B, 'V'),
-    (0x1F7DA, 'X'),
+    (0x1F774, 'X'),
+    (0x1F780, 'V'),
+    (0x1F7D9, 'X'),
     (0x1F7E0, 'V'),
     (0x1F7EC, 'X'),
     (0x1F7F0, 'V'),
     (0x1F7F1, 'X'),
     (0x1F800, 'V'),
+    ]
+
+def _seg_75() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+    return [
     (0x1F80C, 'X'),
     (0x1F810, 'V'),
     (0x1F848, 'X'),
@@ -7910,24 +7825,24 @@ def _seg_75() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     (0x1FA54, 'X'),
     (0x1FA60, 'V'),
     (0x1FA6E, 'X'),
-    ]
-
-def _seg_76() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
-    return [
     (0x1FA70, 'V'),
+    (0x1FA75, 'X'),
+    (0x1FA78, 'V'),
     (0x1FA7D, 'X'),
     (0x1FA80, 'V'),
-    (0x1FA89, 'X'),
+    (0x1FA87, 'X'),
     (0x1FA90, 'V'),
-    (0x1FABE, 'X'),
-    (0x1FABF, 'V'),
+    (0x1FAAD, 'X'),
+    (0x1FAB0, 'V'),
+    (0x1FABB, 'X'),
+    (0x1FAC0, 'V'),
     (0x1FAC6, 'X'),
-    (0x1FACE, 'V'),
-    (0x1FADC, 'X'),
+    (0x1FAD0, 'V'),
+    (0x1FADA, 'X'),
     (0x1FAE0, 'V'),
-    (0x1FAE9, 'X'),
+    (0x1FAE8, 'X'),
     (0x1FAF0, 'V'),
-    (0x1FAF9, 'X'),
+    (0x1FAF7, 'X'),
     (0x1FB00, 'V'),
     (0x1FB93, 'X'),
     (0x1FB94, 'V'),
@@ -7946,7 +7861,7 @@ def _seg_76() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     (0x20000, 'V'),
     (0x2A6E0, 'X'),
     (0x2A700, 'V'),
-    (0x2B73A, 'X'),
+    (0x2B739, 'X'),
     (0x2B740, 'V'),
     (0x2B81E, 'X'),
     (0x2B820, 'V'),
@@ -7995,6 +7910,10 @@ def _seg_76() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     (0x2F827, 'M', '勤'),
     (0x2F828, 'M', '勺'),
     (0x2F829, 'M', '包'),
+    ]
+
+def _seg_76() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+    return [
     (0x2F82A, 'M', '匆'),
     (0x2F82B, 'M', '北'),
     (0x2F82C, 'M', '卉'),
@@ -8014,10 +7933,6 @@ def _seg_76() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     (0x2F83C, 'M', '咞'),
     (0x2F83D, 'M', '吸'),
     (0x2F83E, 'M', '呈'),
-    ]
-
-def _seg_77() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
-    return [
     (0x2F83F, 'M', '周'),
     (0x2F840, 'M', '咢'),
     (0x2F841, 'M', '哶'),
@@ -8099,6 +8014,10 @@ def _seg_77() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     (0x2F88F, 'M', '𪎒'),
     (0x2F890, 'M', '廾'),
     (0x2F891, 'M', '𢌱'),
+    ]
+
+def _seg_77() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+    return [
     (0x2F893, 'M', '舁'),
     (0x2F894, 'M', '弢'),
     (0x2F896, 'M', '㣇'),
@@ -8118,10 +8037,6 @@ def _seg_77() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     (0x2F8A4, 'M', '𢛔'),
     (0x2F8A5, 'M', '惇'),
     (0x2F8A6, 'M', '慈'),
-    ]
-
-def _seg_78() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
-    return [
     (0x2F8A7, 'M', '慌'),
     (0x2F8A8, 'M', '慎'),
     (0x2F8A9, 'M', '慌'),
@@ -8203,6 +8118,10 @@ def _seg_78() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     (0x2F8F5, 'M', '殺'),
     (0x2F8F6, 'M', '殻'),
     (0x2F8F7, 'M', '𣪍'),
+    ]
+
+def _seg_78() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+    return [
     (0x2F8F8, 'M', '𡴋'),
     (0x2F8F9, 'M', '𣫺'),
     (0x2F8FA, 'M', '汎'),
@@ -8222,10 +8141,6 @@ def _seg_78() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     (0x2F908, 'M', '港'),
     (0x2F909, 'M', '湮'),
     (0x2F90A, 'M', '㴳'),
-    ]
-
-def _seg_79() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
-    return [
     (0x2F90B, 'M', '滋'),
     (0x2F90C, 'M', '滇'),
     (0x2F90D, 'M', '𣻑'),
@@ -8307,6 +8222,10 @@ def _seg_79() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     (0x2F95B, 'M', '穏'),
     (0x2F95C, 'M', '𥥼'),
     (0x2F95D, 'M', '𥪧'),
+    ]
+
+def _seg_79() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+    return [
     (0x2F95F, 'X'),
     (0x2F960, 'M', '䈂'),
     (0x2F961, 'M', '𥮫'),
@@ -8326,10 +8245,6 @@ def _seg_79() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     (0x2F96F, 'M', '縂'),
     (0x2F970, 'M', '繅'),
     (0x2F971, 'M', '䌴'),
-    ]
-
-def _seg_80() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
-    return [
     (0x2F972, 'M', '𦈨'),
     (0x2F973, 'M', '𦉇'),
     (0x2F974, 'M', '䍙'),
@@ -8411,6 +8326,10 @@ def _seg_80() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     (0x2F9C0, 'M', '蟡'),
     (0x2F9C1, 'M', '蠁'),
     (0x2F9C2, 'M', '䗹'),
+    ]
+
+def _seg_80() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+    return [
     (0x2F9C3, 'M', '衠'),
     (0x2F9C4, 'M', '衣'),
     (0x2F9C5, 'M', '𧙧'),
@@ -8430,10 +8349,6 @@ def _seg_80() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     (0x2F9D3, 'M', '𧲨'),
     (0x2F9D4, 'M', '貫'),
     (0x2F9D5, 'M', '賁'),
-    ]
-
-def _seg_81() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
-    return [
     (0x2F9D6, 'M', '贛'),
     (0x2F9D7, 'M', '起'),
     (0x2F9D8, 'M', '𧼯'),
@@ -8508,8 +8423,6 @@ def _seg_81() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     (0x2FA1E, 'X'),
     (0x30000, 'V'),
     (0x3134B, 'X'),
-    (0x31350, 'V'),
-    (0x323B0, 'X'),
     (0xE0100, 'I'),
     (0xE01F0, 'X'),
     ]
@@ -8596,5 +8509,4 @@ def _seg_81() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
     + _seg_78()
     + _seg_79()
     + _seg_80()
-    + _seg_81()
 )  # type: Tuple[Union[Tuple[int, str], Tuple[int, str, str]], ...]
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/msgpack/__init__.py b/venv/lib/python3.10/site-packages/pip/_vendor/msgpack/__init__.py
index 5071021..d6705e2 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/msgpack/__init__.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/msgpack/__init__.py
@@ -1,4 +1,5 @@
 # coding: utf-8
+from ._version import version
 from .exceptions import *
 from .ext import ExtType, Timestamp
 
@@ -6,10 +7,6 @@
 import sys
 
 
-version = (1, 0, 4)
-__version__ = "1.0.4"
-
-
 if os.environ.get("MSGPACK_PUREPYTHON") or sys.version_info[0] == 2:
     from .fallback import Packer, unpackb, Unpacker
 else:
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/msgpack/ext.py b/venv/lib/python3.10/site-packages/pip/_vendor/msgpack/ext.py
index 25544c5..4eb9dd6 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/msgpack/ext.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/msgpack/ext.py
@@ -59,7 +59,7 @@ def __init__(self, seconds, nanoseconds=0):
             raise TypeError("seconds must be an integer")
         if not isinstance(nanoseconds, int_types):
             raise TypeError("nanoseconds must be an integer")
-        if not (0 <= nanoseconds < 10**9):
+        if not (0 <= nanoseconds < 10 ** 9):
             raise ValueError(
                 "nanoseconds must be a non-negative integer less than 999999999."
             )
@@ -143,7 +143,7 @@ def from_unix(unix_sec):
         :type unix_float: int or float.
         """
         seconds = int(unix_sec // 1)
-        nanoseconds = int((unix_sec % 1) * 10**9)
+        nanoseconds = int((unix_sec % 1) * 10 ** 9)
         return Timestamp(seconds, nanoseconds)
 
     def to_unix(self):
@@ -161,7 +161,7 @@ def from_unix_nano(unix_ns):
         :param int unix_ns: Posix timestamp in nanoseconds.
         :rtype: Timestamp
         """
-        return Timestamp(*divmod(unix_ns, 10**9))
+        return Timestamp(*divmod(unix_ns, 10 ** 9))
 
     def to_unix_nano(self):
         """Get the timestamp as a unixtime in nanoseconds.
@@ -169,7 +169,7 @@ def to_unix_nano(self):
         :returns: posix timestamp in nanoseconds
         :rtype: int
         """
-        return self.seconds * 10**9 + self.nanoseconds
+        return self.seconds * 10 ** 9 + self.nanoseconds
 
     def to_datetime(self):
         """Get the timestamp as a UTC datetime.
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/msgpack/fallback.py b/venv/lib/python3.10/site-packages/pip/_vendor/msgpack/fallback.py
index f560c7b..b27acb2 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/msgpack/fallback.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/msgpack/fallback.py
@@ -11,6 +11,7 @@
     def dict_iteritems(d):
         return d.iteritems()
 
+
 else:
     int_types = int
     unicode = str
@@ -31,6 +32,7 @@ def _is_recursionerror(e):
             and e.args[0].startswith("maximum recursion depth exceeded")
         )
 
+
 else:
 
     def _is_recursionerror(e):
@@ -66,6 +68,7 @@ def write(self, s):
         def getvalue(self):
             return self.builder.build()
 
+
 else:
     USING_STRINGBUILDER = False
     from io import BytesIO as StringIO
@@ -140,6 +143,7 @@ def _unpack_from(f, b, o=0):
         """Explicit type cast for legacy struct.unpack_from"""
         return struct.unpack_from(f, bytes(b), o)
 
+
 else:
     _unpack_from = struct.unpack_from
 
@@ -318,7 +322,7 @@ def __init__(
         self._buf_checkpoint = 0
 
         if not max_buffer_size:
-            max_buffer_size = 2**31 - 1
+            max_buffer_size = 2 ** 31 - 1
         if max_str_len == -1:
             max_str_len = max_buffer_size
         if max_bin_len == -1:
@@ -423,8 +427,6 @@ def _reserve(self, n, raise_outofdata=True):
 
         # Read from file
         remain_bytes = -remain_bytes
-        if remain_bytes + len(self._buffer) > self._max_buffer_size:
-            raise BufferFull
         while remain_bytes > 0:
             to_read_bytes = max(self._read_size, remain_bytes)
             read_data = self.file_like.read(to_read_bytes)
@@ -802,20 +804,20 @@ def _pack(
                 raise OverflowError("Integer value out of range")
             if check(obj, (bytes, bytearray)):
                 n = len(obj)
-                if n >= 2**32:
+                if n >= 2 ** 32:
                     raise ValueError("%s is too large" % type(obj).__name__)
                 self._pack_bin_header(n)
                 return self._buffer.write(obj)
             if check(obj, unicode):
                 obj = obj.encode("utf-8", self._unicode_errors)
                 n = len(obj)
-                if n >= 2**32:
+                if n >= 2 ** 32:
                     raise ValueError("String is too large")
                 self._pack_raw_header(n)
                 return self._buffer.write(obj)
             if check(obj, memoryview):
                 n = len(obj) * obj.itemsize
-                if n >= 2**32:
+                if n >= 2 ** 32:
                     raise ValueError("Memoryview is too large")
                 self._pack_bin_header(n)
                 return self._buffer.write(obj)
@@ -897,7 +899,7 @@ def pack_map_pairs(self, pairs):
             return ret
 
     def pack_array_header(self, n):
-        if n >= 2**32:
+        if n >= 2 ** 32:
             raise ValueError
         self._pack_array_header(n)
         if self._autoreset:
@@ -906,7 +908,7 @@ def pack_array_header(self, n):
             return ret
 
     def pack_map_header(self, n):
-        if n >= 2**32:
+        if n >= 2 ** 32:
             raise ValueError
         self._pack_map_header(n)
         if self._autoreset:
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/pep517/__init__.py b/venv/lib/python3.10/site-packages/pip/_vendor/pep517/__init__.py
index 38ea0f5..2b6b885 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/pep517/__init__.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/pep517/__init__.py
@@ -1,6 +1,6 @@
 """Wrappers to build Python packages using PEP 517 hooks
 """
 
-__version__ = '0.13.0'
+__version__ = '0.12.0'
 
 from .wrappers import *  # noqa: F401, F403
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/pep517/_compat.py b/venv/lib/python3.10/site-packages/pip/_vendor/pep517/_compat.py
deleted file mode 100644
index 95e509c..0000000
--- a/venv/lib/python3.10/site-packages/pip/_vendor/pep517/_compat.py
+++ /dev/null
@@ -1,8 +0,0 @@
-__all__ = ("tomllib",)
-
-import sys
-
-if sys.version_info >= (3, 11):
-    import tomllib
-else:
-    from pip._vendor import tomli as tomllib
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/pep517/build.py b/venv/lib/python3.10/site-packages/pip/_vendor/pep517/build.py
index b30909c..bc463b2 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/pep517/build.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/pep517/build.py
@@ -1,14 +1,15 @@
 """Build a project using PEP 517 hooks.
 """
 import argparse
+import io
 import logging
 import os
 import shutil
-import tempfile
 
-from ._compat import tomllib
 from .envbuild import BuildEnvironment
 from .wrappers import Pep517HookCaller
+from .dirtools import tempdir, mkdir_p
+from .compat import FileNotFoundError, toml_load
 
 log = logging.getLogger(__name__)
 
@@ -30,8 +31,8 @@ def load_system(source_dir):
     Load the build system from a source dir (pyproject.toml).
     """
     pyproject = os.path.join(source_dir, 'pyproject.toml')
-    with open(pyproject, 'rb') as f:
-        pyproject_data = tomllib.load(f)
+    with io.open(pyproject, 'rb') as f:
+        pyproject_data = toml_load(f)
     return pyproject_data['build-system']
 
 
@@ -63,7 +64,7 @@ def _do_build(hooks, env, dist, dest):
     env.pip_install(reqs)
     log.info('Installed dynamic build dependencies')
 
-    with tempfile.TemporaryDirectory() as td:
+    with tempdir() as td:
         log.info('Trying to build %s in %s', dist, td)
         build_name = 'build_{dist}'.format(**locals())
         build = getattr(hooks, build_name)
@@ -75,7 +76,7 @@ def _do_build(hooks, env, dist, dest):
 def build(source_dir, dist, dest=None, system=None):
     system = system or load_system(source_dir)
     dest = os.path.join(source_dir, dest or 'dist')
-    os.makedirs(dest, exist_ok=True)
+    mkdir_p(dest)
 
     validate_system(system)
     hooks = Pep517HookCaller(
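
load_system() above switches from the stdlib tomllib to the vendored toml_load shim, but either way it is just a lookup of the [build-system] table from pyproject.toml. A rough standalone equivalent, assuming Python 3.11+ where tomllib is in the standard library:

    import os
    import tomllib  # stdlib from Python 3.11

    def load_system(source_dir: str) -> dict:
        pyproject = os.path.join(source_dir, 'pyproject.toml')
        with open(pyproject, 'rb') as f:
            pyproject_data = tomllib.load(f)
        # Typically {'requires': [...], 'build-backend': '...'}
        return pyproject_data['build-system']
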
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/pep517/check.py b/venv/lib/python3.10/site-packages/pip/_vendor/pep517/check.py
index b79f627..bf3c722 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/pep517/check.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/pep517/check.py
@@ -1,19 +1,19 @@
 """Check a project and backend by attempting to build using PEP 517 hooks.
 """
 import argparse
+import io
 import logging
 import os
+from os.path import isfile, join as pjoin
 import shutil
+from subprocess import CalledProcessError
 import sys
 import tarfile
-import zipfile
-from os.path import isfile
-from os.path import join as pjoin
-from subprocess import CalledProcessError
 from tempfile import mkdtemp
+import zipfile
 
-from ._compat import tomllib
 from .colorlog import enable_colourful_output
+from .compat import TOMLDecodeError, toml_load
 from .envbuild import BuildEnvironment
 from .wrappers import Pep517HookCaller
 
@@ -142,15 +142,15 @@ def check(source_dir):
         return False
 
     try:
-        with open(pyproject, 'rb') as f:
-            pyproject_data = tomllib.load(f)
+        with io.open(pyproject, 'rb') as f:
+            pyproject_data = toml_load(f)
         # Ensure the mandatory data can be loaded
         buildsys = pyproject_data['build-system']
         requires = buildsys['requires']
         backend = buildsys['build-backend']
         backend_path = buildsys.get('backend-path')
         log.info('Loaded pyproject.toml')
-    except (tomllib.TOMLDecodeError, KeyError):
+    except (TOMLDecodeError, KeyError):
         log.error("Invalid pyproject.toml", exc_info=True)
         return False
 
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/pep517/colorlog.py b/venv/lib/python3.10/site-packages/pip/_vendor/pep517/colorlog.py
index 66310a7..69c8a59 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/pep517/colorlog.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/pep517/colorlog.py
@@ -73,6 +73,8 @@ def __init__(self, color=True, datefmt=None):
             # right conversion in python 3.
             fg_color = (curses.tigetstr("setaf") or
                         curses.tigetstr("setf") or "")
+            if (3, 0) < sys.version_info < (3, 2, 3):
+                fg_color = str(fg_color, "ascii")
 
             for levelno, code in self.DEFAULT_COLORS.items():
                 self._colors[levelno] = str(
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/pep517/dirtools.py b/venv/lib/python3.10/site-packages/pip/_vendor/pep517/dirtools.py
index 3eff4d8..58c6ca0 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/pep517/dirtools.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/pep517/dirtools.py
@@ -1,8 +1,33 @@
-import io
 import os
+import io
+import contextlib
+import tempfile
+import shutil
+import errno
 import zipfile
 
 
+@contextlib.contextmanager
+def tempdir():
+    """Create a temporary directory in a context manager."""
+    td = tempfile.mkdtemp()
+    try:
+        yield td
+    finally:
+        shutil.rmtree(td)
+
+
+def mkdir_p(*args, **kwargs):
+    """Like `mkdir`, but does not raise an exception if the
+    directory already exists.
+    """
+    try:
+        return os.mkdir(*args, **kwargs)
+    except OSError as exc:
+        if exc.errno != errno.EEXIST:
+            raise
+
+
 def dir_to_zipfile(root):
     """Construct an in-memory zip file for a directory."""
     buffer = io.BytesIO()
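
The dirtools hunk above reintroduces two small helpers: tempdir(), a context manager that deletes its directory on exit, and mkdir_p(), which swallows only the "already exists" error. A usage sketch, with the definitions restated so it runs standalone:

    import contextlib
    import errno
    import os
    import shutil
    import tempfile

    @contextlib.contextmanager
    def tempdir():
        td = tempfile.mkdtemp()
        try:
            yield td
        finally:
            shutil.rmtree(td)      # cleaned up even if the body raises

    def mkdir_p(*args, **kwargs):
        try:
            return os.mkdir(*args, **kwargs)
        except OSError as exc:
            if exc.errno != errno.EEXIST:
                raise              # re-raise anything but "already exists"

    with tempdir() as td:
        mkdir_p(os.path.join(td, 'dist'))
        mkdir_p(os.path.join(td, 'dist'))  # second call is a harmless no-op
        print(os.listdir(td))              # ['dist']
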
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/pep517/envbuild.py b/venv/lib/python3.10/site-packages/pip/_vendor/pep517/envbuild.py
index c0415c4..fe8873c 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/pep517/envbuild.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/pep517/envbuild.py
@@ -1,26 +1,27 @@
 """Build wheels/sdists by installing build deps to a temporary environment.
 """
 
-import logging
+import io
 import os
+import logging
 import shutil
-import sys
 from subprocess import check_call
+import sys
 from sysconfig import get_paths
 from tempfile import mkdtemp
 
-from ._compat import tomllib
-from .wrappers import LoggerWrapper, Pep517HookCaller
+from .compat import toml_load
+from .wrappers import Pep517HookCaller, LoggerWrapper
 
 log = logging.getLogger(__name__)
 
 
 def _load_pyproject(source_dir):
-    with open(
+    with io.open(
             os.path.join(source_dir, 'pyproject.toml'),
             'rb',
             ) as f:
-        pyproject_data = tomllib.load(f)
+        pyproject_data = toml_load(f)
     buildsys = pyproject_data['build-system']
     return (
         buildsys['requires'],
@@ -29,7 +30,7 @@ def _load_pyproject(source_dir):
     )
 
 
-class BuildEnvironment:
+class BuildEnvironment(object):
     """Context manager to install build deps in a simple temporary environment
 
     Based on code I wrote for pip, which is MIT licensed.
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/pep517/in_process/__init__.py b/venv/lib/python3.10/site-packages/pip/_vendor/pep517/in_process/__init__.py
index 281a356..c932313 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/pep517/in_process/__init__.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/pep517/in_process/__init__.py
@@ -3,24 +3,15 @@
 The subpackage should stay as empty as possible to avoid shadowing modules that
 the backend might import.
 """
+from os.path import dirname, abspath, join as pjoin
 from contextlib import contextmanager
-from os.path import abspath, dirname
-from os.path import join as pjoin
 
 try:
     import importlib.resources as resources
-    try:
-        resources.files
-    except AttributeError:
-        # Python 3.8 compatibility
-        def _in_proc_script_path():
-            return resources.path(__package__, '_in_process.py')
-    else:
-        def _in_proc_script_path():
-            return resources.as_file(
-                resources.files(__package__).joinpath('_in_process.py'))
+
+    def _in_proc_script_path():
+        return resources.path(__package__, '_in_process.py')
 except ImportError:
-    # Python 3.6 compatibility
     @contextmanager
     def _in_proc_script_path():
         yield pjoin(dirname(abspath(__file__)), '_in_process.py')
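
Both branches above expose the same contract: a context manager that yields a real filesystem path to `_in_process.py`. A usage sketch, assuming the standalone pep517 distribution is installed (the vendored import path differs):

```python
from pep517.in_process import _in_proc_script_path

# The hook caller resolves the helper script like this before spawning it:
with _in_proc_script_path() as script:
    print(script)  # .../pep517/in_process/_in_process.py
```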
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/pep517/in_process/_in_process.py b/venv/lib/python3.10/site-packages/pip/_vendor/pep517/in_process/_in_process.py
index ae4cf9e..954a4ab 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/pep517/in_process/_in_process.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/pep517/in_process/_in_process.py
@@ -12,29 +12,41 @@
 - control_dir/output.json
   - {"return_val": ...}
 """
+from glob import glob
+from importlib import import_module
 import json
 import os
 import os.path
+from os.path import join as pjoin
 import re
 import shutil
 import sys
 import traceback
-from glob import glob
-from importlib import import_module
-from os.path import join as pjoin
 
-# This file is run as a script, and `import wrappers` is not zip-safe, so we
-# include write_json() and read_json() from wrappers.py.
+# This file is run as a script, and `import compat` is not zip-safe, so we
+# include write_json() and read_json() from compat.py.
+#
+# Handle reading and writing JSON in UTF-8, on Python 3 and 2.
 
+if sys.version_info[0] >= 3:
+    # Python 3
+    def write_json(obj, path, **kwargs):
+        with open(path, 'w', encoding='utf-8') as f:
+            json.dump(obj, f, **kwargs)
 
-def write_json(obj, path, **kwargs):
-    with open(path, 'w', encoding='utf-8') as f:
-        json.dump(obj, f, **kwargs)
+    def read_json(path):
+        with open(path, 'r', encoding='utf-8') as f:
+            return json.load(f)
 
+else:
+    # Python 2
+    def write_json(obj, path, **kwargs):
+        with open(path, 'wb') as f:
+            json.dump(obj, f, encoding='utf-8', **kwargs)
 
-def read_json(path):
-    with open(path, encoding='utf-8') as f:
-        return json.load(f)
+    def read_json(path):
+        with open(path, 'rb') as f:
+            return json.load(f)
 
 
 class BackendUnavailable(Exception):
@@ -52,7 +64,7 @@ def __init__(self, message):
 class HookMissing(Exception):
     """Raised if a hook is missing and we are not executing the fallback"""
     def __init__(self, hook_name=None):
-        super().__init__(hook_name)
+        super(HookMissing, self).__init__(hook_name)
         self.hook_name = hook_name
 
 
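
The net effect of the version split above is the same on both interpreters: UTF-8 JSON on disk. A self-contained round-trip sketch of the Python 3 branch (the scratch file name is hypothetical):

```python
import json


def write_json(obj, path, **kwargs):
    with open(path, 'w', encoding='utf-8') as f:
        json.dump(obj, f, **kwargs)


def read_json(path):
    with open(path, 'r', encoding='utf-8') as f:
        return json.load(f)


hook_input = {'kwargs': {'config_settings': None}}
write_json(hook_input, 'input.json', indent=2)
assert read_json('input.json') == hook_input
```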
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/pep517/meta.py b/venv/lib/python3.10/site-packages/pip/_vendor/pep517/meta.py
index 4afc3c0..d525de5 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/pep517/meta.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/pep517/meta.py
@@ -1,11 +1,10 @@
 """Build metadata for a project using PEP 517 hooks.
 """
 import argparse
-import functools
 import logging
 import os
 import shutil
-import tempfile
+import functools
 
 try:
     import importlib.metadata as imp_meta
@@ -17,10 +16,10 @@
 except ImportError:
     from zipp import Path
 
-from .build import compat_system, load_system, validate_system
-from .dirtools import dir_to_zipfile
 from .envbuild import BuildEnvironment
 from .wrappers import Pep517HookCaller, quiet_subprocess_runner
+from .dirtools import tempdir, mkdir_p, dir_to_zipfile
+from .build import validate_system, load_system, compat_system
 
 log = logging.getLogger(__name__)
 
@@ -32,7 +31,7 @@ def _prep_meta(hooks, env, dest):
     env.pip_install(reqs)
     log.info('Installed dynamic build dependencies')
 
-    with tempfile.TemporaryDirectory() as td:
+    with tempdir() as td:
         log.info('Trying to build metadata in %s', td)
         filename = hooks.prepare_metadata_for_build_wheel(td, {})
         source = os.path.join(td, filename)
@@ -42,7 +41,7 @@ def _prep_meta(hooks, env, dest):
 def build(source_dir='.', dest=None, system=None):
     system = system or load_system(source_dir)
     dest = os.path.join(source_dir, dest or 'dist')
-    os.makedirs(dest, exist_ok=True)
+    mkdir_p(dest)
     validate_system(system)
     hooks = Pep517HookCaller(
         source_dir, system['build-backend'], system.get('backend-path')
@@ -55,7 +54,7 @@ def build(source_dir='.', dest=None, system=None):
 
 
 def build_as_zip(builder=build):
-    with tempfile.TemporaryDirectory() as out_dir:
+    with tempdir() as out_dir:
         builder(dest=out_dir)
         return dir_to_zipfile(out_dir)
 
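
One subtlety in the swap above: `os.makedirs(dest, exist_ok=True)` creates any missing parent directories, while the `mkdir_p` from `dirtools` wraps single-level `os.mkdir`. Here the destination is typically one level below `source_dir`, so the two agree in practice; a sketch of where they would not (paths are hypothetical):

```python
import errno
import os

from pep517.dirtools import mkdir_p  # import path differs when vendored

os.makedirs('a/b/c', exist_ok=True)  # creates a, a/b and a/b/c as needed

try:
    mkdir_p('x/y/z')  # only creates the leaf; fails if x/y does not exist
except OSError as exc:
    assert exc.errno == errno.ENOENT
```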
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/pep517/wrappers.py b/venv/lib/python3.10/site-packages/pip/_vendor/pep517/wrappers.py
index 987a62a..e031ed7 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/pep517/wrappers.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/pep517/wrappers.py
@@ -1,13 +1,13 @@
-import json
-import os
-import sys
-import tempfile
 import threading
 from contextlib import contextmanager
-from os.path import abspath
-from os.path import join as pjoin
-from subprocess import STDOUT, check_call, check_output
+import os
+from os.path import abspath, join as pjoin
+import shutil
+from subprocess import check_call, check_output, STDOUT
+import sys
+from tempfile import mkdtemp
 
+from . import compat
 from .in_process import _in_proc_script_path
 
 __all__ = [
@@ -21,14 +21,13 @@
 ]
 
 
-def write_json(obj, path, **kwargs):
-    with open(path, 'w', encoding='utf-8') as f:
-        json.dump(obj, f, **kwargs)
-
-
-def read_json(path):
-    with open(path, encoding='utf-8') as f:
-        return json.load(f)
+@contextmanager
+def tempdir():
+    td = mkdtemp()
+    try:
+        yield td
+    finally:
+        shutil.rmtree(td)
 
 
 class BackendUnavailable(Exception):
@@ -48,7 +47,7 @@ def __init__(self, backend_name, backend_path, message):
 class HookMissing(Exception):
     """Will be raised on missing hooks."""
     def __init__(self, hook_name):
-        super().__init__(hook_name)
+        super(HookMissing, self).__init__(hook_name)
         self.hook_name = hook_name
 
 
@@ -100,7 +99,7 @@ def norm_and_check(source_tree, requested):
     return abs_requested
 
 
-class Pep517HookCaller:
+class Pep517HookCaller(object):
     """A wrapper around a source directory to be built with a PEP 517 backend.
 
     :param source_dir: The path to the source directory, containing
@@ -293,15 +292,29 @@ def build_sdist(self, sdist_directory, config_settings=None):
         })
 
     def _call_hook(self, hook_name, kwargs):
-        extra_environ = {'PEP517_BUILD_BACKEND': self.build_backend}
+        # On Python 2, pytoml returns Unicode values (which is correct) but the
+        # environment passed to check_call needs to contain string values. We
+        # convert here by encoding using ASCII (the backend can only contain
+        # letters, digits and _, . and : characters, and will be used as a
+        # Python identifier, so non-ASCII content is wrong on Python 2 in
+        # any case).
+        # For backend_path, we use sys.getfilesystemencoding.
+        if sys.version_info[0] == 2:
+            build_backend = self.build_backend.encode('ASCII')
+        else:
+            build_backend = self.build_backend
+        extra_environ = {'PEP517_BUILD_BACKEND': build_backend}
 
         if self.backend_path:
             backend_path = os.pathsep.join(self.backend_path)
+            if sys.version_info[0] == 2:
+                backend_path = backend_path.encode(sys.getfilesystemencoding())
             extra_environ['PEP517_BACKEND_PATH'] = backend_path
 
-        with tempfile.TemporaryDirectory() as td:
+        with tempdir() as td:
             hook_input = {'kwargs': kwargs}
-            write_json(hook_input, pjoin(td, 'input.json'), indent=2)
+            compat.write_json(hook_input, pjoin(td, 'input.json'),
+                              indent=2)
 
             # Run the hook in a subprocess
             with _in_proc_script_path() as script:
@@ -312,7 +325,7 @@ def _call_hook(self, hook_name, kwargs):
                     extra_environ=extra_environ
                 )
 
-            data = read_json(pjoin(td, 'output.json'))
+            data = compat.read_json(pjoin(td, 'output.json'))
             if data.get('unsupported'):
                 raise UnsupportedOperation(data.get('traceback', ''))
             if data.get('no_backend'):
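
The encoding dance added to `_call_hook` above exists because the environment passed to `check_call` on Python 2 must contain byte strings, while the TOML parser hands back text. The same guard in isolation (the helper name is made up for illustration):

```python
import sys


def env_value(text, encoding='ASCII'):
    """Coerce text to the str type os.environ expects on this interpreter."""
    # On Python 3 the `and` short-circuits before `unicode` is looked up.
    if sys.version_info[0] == 2 and isinstance(text, unicode):  # noqa: F821
        return text.encode(encoding)
    return text


extra_environ = {'PEP517_BUILD_BACKEND': env_value(u'setuptools.build_meta')}
```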
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/platformdirs/__init__.py b/venv/lib/python3.10/site-packages/pip/_vendor/platformdirs/__init__.py
index 9d513dc..089b515 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/platformdirs/__init__.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/platformdirs/__init__.py
@@ -4,6 +4,7 @@
 """
 from __future__ import annotations
 
+import importlib
 import os
 import sys
 from pathlib import Path
@@ -17,26 +18,16 @@
 
 
 def _set_platform_dir_class() -> type[PlatformDirsABC]:
-    if sys.platform == "win32":
-        from pip._vendor.platformdirs.windows import Windows as Result
+    if os.getenv("ANDROID_DATA") == "/data" and os.getenv("ANDROID_ROOT") == "/system":
+        module, name = "pip._vendor.platformdirs.android", "Android"
+    elif sys.platform == "win32":
+        module, name = "pip._vendor.platformdirs.windows", "Windows"
     elif sys.platform == "darwin":
-        from pip._vendor.platformdirs.macos import MacOS as Result
+        module, name = "pip._vendor.platformdirs.macos", "MacOS"
     else:
-        from pip._vendor.platformdirs.unix import Unix as Result
-
-    if os.getenv("ANDROID_DATA") == "/data" and os.getenv("ANDROID_ROOT") == "/system":
-
-        if os.getenv("SHELL") is not None:
-            return Result
-
-        from pip._vendor.platformdirs.android import _android_folder
-
-        if _android_folder() is not None:
-            from pip._vendor.platformdirs.android import Android
-
-            return Android  # return to avoid redefinition of result
-
-    return Result
+        module, name = "pip._vendor.platformdirs.unix", "Unix"
+    result: type[PlatformDirsABC] = getattr(importlib.import_module(module), name)
+    return result
 
 
 PlatformDirs = _set_platform_dir_class()  #: Currently active platform
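
The rewrite above replaces eager imports of every backend with name-based dispatch, so only the selected platform module is ever imported. The same pattern in miniature (module and class names here are invented):

```python
import importlib


def load_backend(platform: str):
    module, name = {
        'win32': ('backends.windows', 'Windows'),
        'darwin': ('backends.macos', 'MacOS'),
    }.get(platform, ('backends.unix', 'Unix'))
    # Import lazily: the losing backends are never loaded at all.
    return getattr(importlib.import_module(module), name)
```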
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/platformdirs/android.py b/venv/lib/python3.10/site-packages/pip/_vendor/platformdirs/android.py
index eda8093..a684058 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/platformdirs/android.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/platformdirs/android.py
@@ -4,7 +4,6 @@
 import re
 import sys
 from functools import lru_cache
-from typing import cast
 
 from .api import PlatformDirsABC
 
@@ -19,7 +18,7 @@ class Android(PlatformDirsABC):
     @property
     def user_data_dir(self) -> str:
         """:return: data directory tied to the user, e.g. ``/data/user///files/``"""
-        return self._append_app_name_and_version(cast(str, _android_folder()), "files")
+        return self._append_app_name_and_version(_android_folder(), "files")
 
     @property
     def site_data_dir(self) -> str:
@@ -31,7 +30,7 @@ def user_config_dir(self) -> str:
         """
         :return: config directory tied to the user, e.g. ``/data/user///shared_prefs/``
         """
-        return self._append_app_name_and_version(cast(str, _android_folder()), "shared_prefs")
+        return self._append_app_name_and_version(_android_folder(), "shared_prefs")
 
     @property
     def site_config_dir(self) -> str:
@@ -41,7 +40,7 @@ def site_config_dir(self) -> str:
     @property
     def user_cache_dir(self) -> str:
         """:return: cache directory tied to the user, e.g. e.g. ``/data/user///cache/``"""
-        return self._append_app_name_and_version(cast(str, _android_folder()), "cache")
+        return self._append_app_name_and_version(_android_folder(), "cache")
 
     @property
     def user_state_dir(self) -> str:
@@ -79,14 +78,14 @@ def user_runtime_dir(self) -> str:
 
 
 @lru_cache(maxsize=1)
-def _android_folder() -> str | None:
-    """:return: base folder for the Android OS or None if cannot be found"""
+def _android_folder() -> str:
+    """:return: base folder for the Android OS"""
     try:
         # First try to get path to android app via pyjnius
         from jnius import autoclass
 
         Context = autoclass("android.content.Context")  # noqa: N806
-        result: str | None = Context.getFilesDir().getParentFile().getAbsolutePath()
+        result: str = Context.getFilesDir().getParentFile().getAbsolutePath()
     except Exception:
         # if fails find an android folder looking path on the sys.path
         pattern = re.compile(r"/data/(data|user/\d+)/(.+)/files")
@@ -95,7 +94,7 @@ def _android_folder() -> str | None:
                 result = path.split("/files")[0]
                 break
         else:
-            result = None
+            raise OSError("Cannot find path to android app folder")
     return result
 
 
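
When pyjnius is unavailable, `_android_folder` falls back to scanning `sys.path` with the regex above. What that pattern does and does not accept (sample paths are illustrative):

```python
import re

pattern = re.compile(r"/data/(data|user/\d+)/(.+)/files")

assert pattern.match("/data/data/com.example.app/files")
assert pattern.match("/data/user/0/com.example.app/files")
assert pattern.match("/data/app/com.example.app") is None

# The caller keeps everything before "/files" as the app's base folder:
path = "/data/user/0/com.example.app/files"
assert path.split("/files")[0] == "/data/user/0/com.example.app"
```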
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/platformdirs/version.py b/venv/lib/python3.10/site-packages/pip/_vendor/platformdirs/version.py
index 4552c02..175ded8 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/platformdirs/version.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/platformdirs/version.py
@@ -1,4 +1,4 @@
-"""Version information"""
+""" Version information """
 
-__version__ = "2.5.2"
-__version_info__ = (2, 5, 2)
+__version__ = "2.4.1"
+__version_info__ = (2, 4, 1)
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/__init__.py b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/__init__.py
index 7185e53..22c50b3 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/__init__.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/__init__.py
@@ -21,12 +21,12 @@
     .. _Pygments master branch:
        https://github.com/pygments/pygments/archive/master.zip#egg=Pygments-dev
 
-    :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
 from io import StringIO, BytesIO
 
-__version__ = '2.13.0'
+__version__ = '2.11.2'
 __docformat__ = 'restructuredtext'
 
 __all__ = ['lex', 'format', 'highlight']
@@ -38,10 +38,10 @@ def lex(code, lexer):
     """
     try:
         return lexer.get_tokens(code)
-    except TypeError:
-        # Heuristic to catch a common mistake.
-        from pip._vendor.pygments.lexer import RegexLexer
-        if isinstance(lexer, type) and issubclass(lexer, RegexLexer):
+    except TypeError as err:
+        if (isinstance(err.args[0], str) and
+            ('unbound method get_tokens' in err.args[0] or
+             'missing 1 required positional argument' in err.args[0])):
             raise TypeError('lex() argument must be a lexer instance, '
                             'not a class')
         raise
@@ -62,10 +62,10 @@ def format(tokens, formatter, outfile=None):  # pylint: disable=redefined-builti
             return realoutfile.getvalue()
         else:
             formatter.format(tokens, outfile)
-    except TypeError:
-        # Heuristic to catch a common mistake.
-        from pip._vendor.pygments.formatter import Formatter
-        if isinstance(formatter, type) and issubclass(formatter, Formatter):
+    except TypeError as err:
+        if (isinstance(err.args[0], str) and
+            ('unbound method format' in err.args[0] or
+             'missing 1 required positional argument' in err.args[0])):
             raise TypeError('format() argument must be a formatter instance, '
                             'not a class')
         raise
@@ -80,3 +80,4 @@ def highlight(code, lexer, formatter, outfile=None):
     it is returned as a string.
     """
     return format(lex(code, lexer), formatter, outfile)
+
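
Both sides of the `lex()` hunk above guard against the same user error: passing a lexer class where an instance is expected. The newer code checks the type directly instead of string-matching the `TypeError` message, but the observable behaviour is the same:

```python
from pip._vendor.pygments import lex
from pip._vendor.pygments.lexers import PythonLexer

tokens = list(lex('print("hi")', PythonLexer()))  # correct: an instance

try:
    lex('print("hi")', PythonLexer)  # mistake: the class itself
except TypeError as err:
    print(err)  # lex() argument must be a lexer instance, not a class
```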
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/__main__.py b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/__main__.py
index 90cafd9..010896b 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/__main__.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/__main__.py
@@ -4,7 +4,7 @@
 
     Main entry point for ``python -m pygments``.
 
-    :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
 
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/cmdline.py b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/cmdline.py
index de73b06..908064e 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/cmdline.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/cmdline.py
@@ -4,7 +4,7 @@
 
     Command line interface.
 
-    :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
 
@@ -25,7 +25,7 @@
 from pip._vendor.pygments.formatters import get_all_formatters, get_formatter_by_name, \
     load_formatter_from_file, get_formatter_for_filename, find_formatter_class
 from pip._vendor.pygments.formatters.terminal import TerminalFormatter
-from pip._vendor.pygments.formatters.terminal256 import Terminal256Formatter, TerminalTrueColorFormatter
+from pip._vendor.pygments.formatters.terminal256 import Terminal256Formatter
 from pip._vendor.pygments.filters import get_all_filters, find_filter_class
 from pip._vendor.pygments.styles import get_all_styles, get_style_by_name
 
@@ -185,7 +185,7 @@ def main_inner(parser, argns):
         return 0
 
     if argns.V:
-        print('Pygments version %s, (c) 2006-2022 by Georg Brandl, Matthäus '
+        print('Pygments version %s, (c) 2006-2021 by Georg Brandl, Matthäus '
               'Chajdas and contributors.' % __version__)
         return 0
 
@@ -445,9 +445,7 @@ def is_only_option(opt):
             return 1
     else:
         if not fmter:
-            if os.environ.get('COLORTERM','') in ('truecolor', '24bit'):
-                fmter = TerminalTrueColorFormatter(**parsed_opts)
-            elif '256' in os.environ.get('TERM', ''):
+            if '256' in os.environ.get('TERM', ''):
                 fmter = Terminal256Formatter(**parsed_opts)
             else:
                 fmter = TerminalFormatter(**parsed_opts)
@@ -638,9 +636,6 @@ def main(args=sys.argv):
 
     try:
         return main_inner(parser, argns)
-    except BrokenPipeError:
-        # someone closed our stdout, e.g. by quitting a pager.
-        return 0
     except Exception:
         if argns.v:
             print(file=sys.stderr)
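
The downgrade above also drops true-color auto-detection. For reference, this is the selection logic the newer (left-hand) side implements, as standalone code:

```python
import os

from pip._vendor.pygments.formatters.terminal import TerminalFormatter
from pip._vendor.pygments.formatters.terminal256 import (
    Terminal256Formatter, TerminalTrueColorFormatter)

if os.environ.get('COLORTERM', '') in ('truecolor', '24bit'):
    fmter = TerminalTrueColorFormatter()
elif '256' in os.environ.get('TERM', ''):
    fmter = Terminal256Formatter()
else:
    fmter = TerminalFormatter()
```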
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/console.py b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/console.py
index 2ada68e..8dd08ab 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/console.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/console.py
@@ -4,7 +4,7 @@
 
     Format colored console output.
 
-    :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
 
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/filter.py b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/filter.py
index e5c9664..85b4829 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/filter.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/filter.py
@@ -4,7 +4,7 @@
 
     Module that implements the default filter.
 
-    :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
 
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/filters/__init__.py b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/filters/__init__.py
index c302a6c..1d5a808 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/filters/__init__.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/filters/__init__.py
@@ -5,7 +5,7 @@
     Module containing filter lookup functions and default
     filters.
 
-    :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
 
@@ -69,16 +69,13 @@ class CodeTagFilter(Filter):
 
     `codetags` : list of strings
        A list of strings that are flagged as code tags.  The default is to
-       highlight ``XXX``, ``TODO``, ``FIXME``, ``BUG`` and ``NOTE``.
-
-    .. versionchanged:: 2.13
-       Now recognizes ``FIXME`` by default.
+       highlight ``XXX``, ``TODO``, ``BUG`` and ``NOTE``.
     """
 
     def __init__(self, **options):
         Filter.__init__(self, **options)
         tags = get_list_opt(options, 'codetags',
-                            ['XXX', 'TODO', 'FIXME', 'BUG', 'NOTE'])
+                            ['XXX', 'TODO', 'BUG', 'NOTE'])
         self.tag_re = re.compile(r'\b(%s)\b' % '|'.join([
             re.escape(tag) for tag in tags if tag
         ]))
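
The filter compiles its tag list into a single word-bounded alternation; this is how the regex built above behaves:

```python
import re

tags = ['XXX', 'TODO', 'BUG', 'NOTE']
tag_re = re.compile(r'\b(%s)\b' % '|'.join(re.escape(tag) for tag in tags if tag))

assert tag_re.search('# TODO: cache this lookup')
assert tag_re.search('NOTEBOOK') is None  # \b rejects partial-word matches
```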
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatter.py b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatter.py
index a2349ef..b585562 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatter.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatter.py
@@ -4,7 +4,7 @@
 
     Base formatter class.
 
-    :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
 
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/__init__.py b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/__init__.py
index 43c4c89..34f2bec 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/__init__.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/__init__.py
@@ -4,14 +4,14 @@
 
     Pygments formatters.
 
-    :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
 
 import re
 import sys
 import types
-from fnmatch import fnmatch
+import fnmatch
 from os.path import basename
 
 from pip._vendor.pygments.formatters._mapping import FORMATTERS
@@ -22,6 +22,16 @@
            'get_all_formatters', 'load_formatter_from_file'] + list(FORMATTERS)
 
 _formatter_cache = {}  # classes by name
+_pattern_cache = {}
+
+
+def _fn_matches(fn, glob):
+    """Return whether the supplied file name fn matches pattern filename."""
+    if glob not in _pattern_cache:
+        pattern = _pattern_cache[glob] = re.compile(fnmatch.translate(glob))
+        return pattern.match(fn)
+    return _pattern_cache[glob].match(fn)
+
 
 def _load_formatters(module_name):
     """Load a formatter (and all others in the module too)."""
@@ -112,13 +122,13 @@ def get_formatter_for_filename(fn, **options):
     fn = basename(fn)
     for modname, name, _, filenames, _ in FORMATTERS.values():
         for filename in filenames:
-            if fnmatch(fn, filename):
+            if _fn_matches(fn, filename):
                 if name not in _formatter_cache:
                     _load_formatters(modname)
                 return _formatter_cache[name](**options)
     for cls in find_plugin_formatters():
         for filename in cls.filenames:
-            if fnmatch(fn, filename):
+            if _fn_matches(fn, filename):
                 return cls(**options)
     raise ClassNotFound("no formatter found for file name %r" % fn)
 
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/_mapping.py b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/_mapping.py
index 6e34f96..57445dc 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/_mapping.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/_mapping.py
@@ -1,5 +1,16 @@
-# Automatically generated by scripts/gen_mapfiles.py.
-# DO NOT EDIT BY HAND; run `make mapfiles` instead.
+"""
+    pygments.formatters._mapping
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    Formatter mapping definitions. This file is generated by itself. Everytime
+    you change something on a builtin formatter definition, run this script from
+    the formatters folder to update it.
+
+    Do not alter the FORMATTERS dictionary by hand.
+
+    :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
 
 FORMATTERS = {
     'BBCodeFormatter': ('pygments.formatters.bbcode', 'BBCode', ('bbcode', 'bb'), (), 'Format tokens with BBcodes. These formatting codes are used by many bulletin boards, so you can highlight your sourcecode with pygments before posting it there.'),
@@ -19,5 +30,55 @@
     'Terminal256Formatter': ('pygments.formatters.terminal256', 'Terminal256', ('terminal256', 'console256', '256'), (), 'Format tokens with ANSI color sequences, for output in a 256-color terminal or console.  Like in `TerminalFormatter` color sequences are terminated at newlines, so that paging the output works correctly.'),
     'TerminalFormatter': ('pygments.formatters.terminal', 'Terminal', ('terminal', 'console'), (), 'Format tokens with ANSI color sequences, for output in a text console. Color sequences are terminated at newlines, so that paging the output works correctly.'),
     'TerminalTrueColorFormatter': ('pygments.formatters.terminal256', 'TerminalTrueColor', ('terminal16m', 'console16m', '16m'), (), 'Format tokens with ANSI color sequences, for output in a true-color terminal or console.  Like in `TerminalFormatter` color sequences are terminated at newlines, so that paging the output works correctly.'),
-    'TestcaseFormatter': ('pygments.formatters.other', 'Testcase', ('testcase',), (), 'Format tokens as appropriate for a new testcase.'),
+    'TestcaseFormatter': ('pygments.formatters.other', 'Testcase', ('testcase',), (), 'Format tokens as appropriate for a new testcase.')
 }
+
+if __name__ == '__main__':  # pragma: no cover
+    import sys
+    import os
+
+    # lookup formatters
+    found_formatters = []
+    imports = []
+    sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
+    from pip._vendor.pygments.util import docstring_headline
+
+    for root, dirs, files in os.walk('.'):
+        for filename in files:
+            if filename.endswith('.py') and not filename.startswith('_'):
+                module_name = 'pygments.formatters%s.%s' % (
+                    root[1:].replace('/', '.'), filename[:-3])
+                print(module_name)
+                module = __import__(module_name, None, None, [''])
+                for formatter_name in module.__all__:
+                    formatter = getattr(module, formatter_name)
+                    found_formatters.append(
+                        '%r: %r' % (formatter_name,
+                                    (module_name,
+                                     formatter.name,
+                                     tuple(formatter.aliases),
+                                     tuple(formatter.filenames),
+                                     docstring_headline(formatter))))
+    # sort them to make the diff minimal
+    found_formatters.sort()
+
+    # extract useful sourcecode from this file
+    with open(__file__) as fp:
+        content = fp.read()
+        # replace crnl to nl for Windows.
+        #
+        # Note that, originally, contributers should keep nl of master
+        # repository, for example by using some kind of automatic
+        # management EOL, like `EolExtension
+        #  `.
+        content = content.replace("\r\n", "\n")
+    header = content[:content.find('FORMATTERS = {')]
+    footer = content[content.find("if __name__ == '__main__':"):]
+
+    # write new file
+    with open(__file__, 'w') as fp:
+        fp.write(header)
+        fp.write('FORMATTERS = {\n    %s\n}\n\n' % ',\n    '.join(found_formatters))
+        fp.write(footer)
+
+    print ('=== %d formatters processed.' % len(found_formatters))
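
The regenerator above rewrites only the middle of its own file, keeping everything before `FORMATTERS = {` and everything from the `__main__` guard onward. The splice itself, in miniature (the stand-in file and its single entry are made up):

```python
template = (
    "# header comment\n"
    "FORMATTERS = {\n    'Old': ()\n}\n\n"
    "if __name__ == '__main__':  # pragma: no cover\n    pass\n"
)
with open('mapping_demo.py', 'w') as fp:
    fp.write(template)

entries = ["'AFormatter': ('pkg.mod', 'A', ('a',), (), 'Example entry.')"]

with open('mapping_demo.py') as fp:
    content = fp.read().replace('\r\n', '\n')  # normalize Windows EOLs

header = content[:content.find('FORMATTERS = {')]
footer = content[content.find("if __name__ == '__main__':"):]

with open('mapping_demo.py', 'w') as fp:
    fp.write(header)
    fp.write('FORMATTERS = {\n    %s\n}\n\n' % ',\n    '.join(entries))
    fp.write(footer)
```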
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/bbcode.py b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/bbcode.py
index 2be2b4e..35a3732 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/bbcode.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/bbcode.py
@@ -4,7 +4,7 @@
 
     BBcode formatter.
 
-    :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
 
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/groff.py b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/groff.py
index f3dcbce..da9d7fc 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/groff.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/groff.py
@@ -4,7 +4,7 @@
 
     Formatter for groff output.
 
-    :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
 
@@ -144,8 +144,6 @@ def format_unencoded(self, tokensource, outfile):
             self._write_lineno(outfile)
 
         for ttype, value in tokensource:
-            while ttype not in self.styles:
-                ttype = ttype.parent
             start, end = self.styles[ttype]
 
             for line in value.splitlines(True):
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/html.py b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/html.py
index d5cda4c..47f5d9c 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/html.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/html.py
@@ -4,7 +4,7 @@
 
     Formatter for HTML output.
 
-    :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
 
@@ -62,7 +62,7 @@ def _get_ttype_class(ttype):
 CSSFILE_TEMPLATE = '''\
 /*
 generated by Pygments <https://pygments.org/>
-Copyright 2006-2022 by the Pygments team.
+Copyright 2006-2021 by the Pygments team.
 Licensed under the BSD license, see LICENSE for details.
 */
 %(styledefs)s
@@ -73,7 +73,7 @@ def _get_ttype_class(ttype):
    "http://www.w3.org/TR/html4/strict.dtd">
 <!--
 generated by Pygments <https://pygments.org/>
-Copyright 2006-2022 by the Pygments team.
+Copyright 2006-2021 by the Pygments team.
 Licensed under the BSD license, see LICENSE for details.
 -->
 <html>
@@ -385,7 +385,7 @@ class ``"special"`` (default: ``0``).
 
         class CodeHtmlFormatter(HtmlFormatter):
 
-            def wrap(self, source, *, include_div):
+            def wrap(self, source, outfile):
                 return self._wrap_code(source)
 
             def _wrap_code(self, source):
@@ -667,7 +667,7 @@ def _wrap_tablelinenos(self, inner):
         mw = len(str(lncount + fl - 1))
         sp = self.linenospecial
         st = self.linenostep
-        anchor_name = self.lineanchors or self.linespans
+        la = self.lineanchors
         aln = self.anchorlinenos
         nocls = self.noclasses
 
@@ -680,7 +680,7 @@ def _wrap_tablelinenos(self, inner):
             if print_line:
                 line = '%*d' % (mw, i)
                 if aln:
-                    line = '<a href="#%s-%d">%s</a>' % (anchor_name, i, line)
+                    line = '<a href="#%s-%d">%s</a>' % (la, i, line)
             else:
                 line = ' ' * mw
 
@@ -707,21 +707,20 @@ def _wrap_tablelinenos(self, inner):
         filename_tr = ""
         if self.filename:
             filename_tr = (
-                '<tr><th colspan="2" class="filename">'
-                '<span class="filename">' + self.filename + '</span>'
+                '<tr><th colspan="2" class="filename"><div class="highlight">'
+                '<span class="filename">' + self.filename + '</span></div>'
                 '</th></tr>')
 
         # in case you wonder about the seemingly redundant <div> here: since the
         # content in the other cell also is wrapped in a div, some browsers in
         # some configurations seem to mess up the formatting...
-        yield 0, (f'<table class="{self.cssclass}table">' + filename_tr +
-                  '<tr><td class="linenos"><div class="linenodiv"><pre>' +
-            ls + '</pre></div></td><td class="code">')
-        yield 0, '<div>'
+        yield 0, (
+            '<table class="%stable">' % self.cssclass + filename_tr +
+            '<tr><td class="linenos"><div class="linenodiv"><pre>' +
+            ls + '</pre></div></td><td class="code">'
+        )
         yield 0, dummyoutfile.getvalue()
-        yield 0, '</div>'
         yield 0, '</td></tr></table>'
 
     def _wrap_inlinelinenos(self, inner):
         # need a list of lines since we need the width of a single number :(
@@ -730,7 +729,7 @@ def _wrap_inlinelinenos(self, inner):
         st = self.linenostep
         num = self.linenostart
         mw = len(str(len(inner_lines) + num - 1))
-        anchor_name = self.lineanchors or self.linespans
+        la = self.lineanchors
         aln = self.anchorlinenos
         nocls = self.noclasses
 
@@ -760,7 +759,7 @@ def _wrap_inlinelinenos(self, inner):
                 linenos = line
 
             if aln:
-                yield 1, ('<a href="#%s-%d">%s</a>' % (anchor_name, num, linenos) +
+                yield 1, ('<a href="#%s-%d">%s</a>' % (la, num, linenos) +
                           inner_line)
             else:
                 yield 1, linenos + inner_line
@@ -934,20 +933,16 @@ def _highlight_lines(self, tokensource):
             else:
                 yield 1, value
 
-    def wrap(self, source):
+    def wrap(self, source, outfile):
         """
         Wrap the ``source``, which is a generator yielding
         individual lines, in custom generators. See docstring
         for `format`. Can be overridden.
         """
-
-        output = source
         if self.wrapcode:
-            output = self._wrap_code(output)
-
-        output = self._wrap_pre(output)
-
-        return output
+            return self._wrap_div(self._wrap_pre(self._wrap_code(source)))
+        else:
+            return self._wrap_div(self._wrap_pre(source))
 
     def format_unencoded(self, tokensource, outfile):
         """
@@ -978,10 +973,9 @@ def format_unencoded(self, tokensource, outfile):
             source = self._wrap_lineanchors(source)
         if self.linespans:
             source = self._wrap_linespans(source)
-        source = self.wrap(source)
+        source = self.wrap(source, outfile)
         if self.linenos == 1:
             source = self._wrap_tablelinenos(source)
-        source = self._wrap_div(source)
 
         if self.full:
             source = self._wrap_full(source, outfile)
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/img.py b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/img.py
index 0f36a32..9785592 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/img.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/img.py
@@ -4,7 +4,7 @@
 
     Formatter for Pixmap output.
 
-    :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
 
@@ -206,17 +206,13 @@ def get_char_size(self):
         """
         Get the character size.
         """
-        return self.get_text_size('M')
+        return self.fonts['NORMAL'].getsize('M')
 
     def get_text_size(self, text):
         """
-        Get the text size (width, height).
+        Get the text size(width, height).
         """
-        font = self.fonts['NORMAL']
-        if hasattr(font, 'getbbox'):  # Pillow >= 9.2.0
-            return font.getbbox(text)[2:4]
-        else:
-            return font.getsize(text)
+        return self.fonts['NORMAL'].getsize(text)
 
     def get_font(self, bold, oblique):
         """
@@ -524,7 +520,7 @@ def _create_drawables(self, tokensource):
                     text_fg = self._get_text_color(style),
                     text_bg = self._get_text_bg_color(style),
                 )
-                temp_width, _ = self.fonts.get_text_size(temp)
+                temp_width, temp_hight = self.fonts.get_text_size(temp)
                 linelength += temp_width
                 maxlinelength = max(maxlinelength, linelength)
                 charno += len(temp)
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/irc.py b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/irc.py
index 3f6d52d..ad986e0 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/irc.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/irc.py
@@ -4,7 +4,7 @@
 
     Formatter for IRC output
 
-    :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/latex.py b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/latex.py
index 4a7375a..60e9892 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/latex.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/latex.py
@@ -4,7 +4,7 @@
 
     Formatter for LaTeX fancyvrb output.
 
-    :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
 
@@ -159,8 +159,6 @@ class LatexFormatter(Formatter):
         \PY{k}{pass}
     \end{Verbatim}
 
-    Wrapping can be disabled using the `nowrap` option.
-
     The special command used here (``\PY``) and all the other macros
     it needs are output by the `get_style_defs` method.
 
@@ -173,11 +171,6 @@ class LatexFormatter(Formatter):
 
     Additional options accepted:
 
-    `nowrap`
-        If set to ``True``, don't wrap the tokens at all, not even inside a
-        ``\begin{Verbatim}`` environment. This disables most other options
-        (default: ``False``).
-
     `style`
         The style to use, can be a string or a Style subclass (default:
         ``'default'``).
@@ -255,7 +248,6 @@ class LatexFormatter(Formatter):
 
     def __init__(self, **options):
         Formatter.__init__(self, **options)
-        self.nowrap = get_bool_opt(options, 'nowrap', False)
         self.docclass = options.get('docclass', 'article')
         self.preamble = options.get('preamble', '')
         self.linenos = get_bool_opt(options, 'linenos', False)
@@ -342,19 +334,18 @@ def format_unencoded(self, tokensource, outfile):
             realoutfile = outfile
             outfile = StringIO()
 
-        if not self.nowrap:
-            outfile.write('\\begin{' + self.envname + '}[commandchars=\\\\\\{\\}')
-            if self.linenos:
-                start, step = self.linenostart, self.linenostep
-                outfile.write(',numbers=left' +
-                              (start and ',firstnumber=%d' % start or '') +
-                              (step and ',stepnumber=%d' % step or ''))
-            if self.mathescape or self.texcomments or self.escapeinside:
-                outfile.write(',codes={\\catcode`\\$=3\\catcode`\\^=7'
-                              '\\catcode`\\_=8\\relax}')
-            if self.verboptions:
-                outfile.write(',' + self.verboptions)
-            outfile.write(']\n')
+        outfile.write('\\begin{' + self.envname + '}[commandchars=\\\\\\{\\}')
+        if self.linenos:
+            start, step = self.linenostart, self.linenostep
+            outfile.write(',numbers=left' +
+                          (start and ',firstnumber=%d' % start or '') +
+                          (step and ',stepnumber=%d' % step or ''))
+        if self.mathescape or self.texcomments or self.escapeinside:
+            outfile.write(',codes={\\catcode`\\$=3\\catcode`\\^=7'
+                          '\\catcode`\\_=8\\relax}')
+        if self.verboptions:
+            outfile.write(',' + self.verboptions)
+        outfile.write(']\n')
 
         for ttype, value in tokensource:
             if ttype in Token.Comment:
@@ -417,8 +408,7 @@ def format_unencoded(self, tokensource, outfile):
             else:
                 outfile.write(value)
 
-        if not self.nowrap:
-            outfile.write('\\end{' + self.envname + '}\n')
+        outfile.write('\\end{' + self.envname + '}\n')
 
         if self.full:
             encoding = self.encoding or 'utf8'
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/other.py b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/other.py
index 1e39cd4..4fdf5e7 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/other.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/other.py
@@ -4,7 +4,7 @@
 
     Other formatters: NullFormatter, RawTokenFormatter.
 
-    :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/pangomarkup.py b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/pangomarkup.py
index bd00866..b0657a5 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/pangomarkup.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/pangomarkup.py
@@ -4,7 +4,7 @@
 
     Formatter for Pango markup output.
 
-    :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/rtf.py b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/rtf.py
index 4114d16..b4b0aca 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/rtf.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/rtf.py
@@ -4,7 +4,7 @@
 
     A formatter that generates RTF files.
 
-    :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/svg.py b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/svg.py
index 075150a..d4de51f 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/svg.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/svg.py
@@ -4,7 +4,7 @@
 
     Formatter for SVG output.
 
-    :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/terminal.py b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/terminal.py
index e0bda16..ae66022 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/terminal.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/terminal.py
@@ -4,7 +4,7 @@
 
     Formatter for terminal output with ANSI sequences.
 
-    :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/terminal256.py b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/terminal256.py
index 201b3c3..b5eab14 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/terminal256.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/terminal256.py
@@ -10,7 +10,7 @@
 
     Formatter version 1.
 
-    :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/lexer.py b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/lexer.py
index ec7f4de..b6d4b23 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/lexer.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/lexer.py
@@ -4,7 +4,7 @@
 
     Base lexer classes.
 
-    :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
 
@@ -76,9 +76,6 @@ class Lexer(metaclass=LexerMeta):
     #: Name of the lexer
     name = None
 
-    #: URL of the language specification/definition
-    url = None
-
     #: Shortcuts for the lexer
     aliases = []
 
@@ -621,7 +618,7 @@ def get_tokens_unprocessed(self, text, stack=('root',)):
         """
         Split ``text`` into (tokentype, text) pairs.
 
-        ``stack`` is the initial stack (default: ``['root']``)
+        ``stack`` is the inital stack (default: ``['root']``)
         """
         pos = 0
         tokendefs = self._tokens
@@ -741,7 +738,7 @@ def get_tokens_unprocessed(self, text=None, context=None):
                 elif isinstance(new_state, int):
                     # see RegexLexer for why this check is made
                     if abs(new_state) >= len(ctx.stack):
-                        del ctx.stack[1:]
+                        del ctx.state[1:]
                     else:
                         del ctx.stack[new_state:]
                 elif new_state == '#push':
@@ -795,7 +792,7 @@ def do_insertions(insertions, tokens):
     # iterate over the token stream where we want to insert
     # the tokens from the insertion list.
     for i, t, v in tokens:
-        # first iteration. store the position of first item
+        # first iteration. store the postition of first item
         if realpos is None:
             realpos = i
             oldi = 0
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/lexers/__init__.py b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/lexers/__init__.py
index ed69f24..6981b8d 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/lexers/__init__.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/lexers/__init__.py
@@ -4,14 +4,14 @@
 
     Pygments lexers.
 
-    :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
 
 import re
 import sys
 import types
-from fnmatch import fnmatch
+import fnmatch
 from os.path import basename
 
 from pip._vendor.pygments.lexers._mapping import LEXERS
@@ -28,6 +28,16 @@
            'guess_lexer', 'load_lexer_from_file'] + list(LEXERS) + list(COMPAT)
 
 _lexer_cache = {}
+_pattern_cache = {}
+
+
+def _fn_matches(fn, glob):
+    """Return whether the supplied file name fn matches pattern filename."""
+    if glob not in _pattern_cache:
+        pattern = _pattern_cache[glob] = re.compile(fnmatch.translate(glob))
+        return pattern.match(fn)
+    return _pattern_cache[glob].match(fn)
+
 
 def _load_lexers(module_name):
     """Load a lexer (and all others in the module too)."""
@@ -37,18 +47,14 @@ def _load_lexers(module_name):
         _lexer_cache[cls.name] = cls
 
 
-def get_all_lexers(plugins=True):
+def get_all_lexers():
     """Return a generator of tuples in the form ``(name, aliases,
     filenames, mimetypes)`` of all know lexers.
-
-    If *plugins* is true (the default), plugin lexers supplied by entrypoints
-    are also returned. Otherwise, only builtin ones are considered.
     """
     for item in LEXERS.values():
         yield item[1:]
-    if plugins:
-        for lexer in find_plugin_lexers():
-            yield lexer.name, lexer.aliases, lexer.filenames, lexer.mimetypes
+    for lexer in find_plugin_lexers():
+        yield lexer.name, lexer.aliases, lexer.filenames, lexer.mimetypes
 
 
 def find_lexer_class(name):
@@ -159,13 +165,13 @@ def find_lexer_class_for_filename(_fn, code=None):
     fn = basename(_fn)
     for modname, name, _, filenames, _ in LEXERS.values():
         for filename in filenames:
-            if fnmatch(fn, filename):
+            if _fn_matches(fn, filename):
                 if name not in _lexer_cache:
                     _load_lexers(modname)
                 matches.append((_lexer_cache[name], filename))
     for cls in find_plugin_lexers():
         for filename in cls.filenames:
-            if fnmatch(fn, filename):
+            if _fn_matches(fn, filename):
                 matches.append((cls, filename))
 
     if isinstance(code, bytes):
@@ -252,11 +258,11 @@ def guess_lexer_for_filename(_fn, _text, **options):
     matching_lexers = set()
     for lexer in _iter_lexerclasses():
         for filename in lexer.filenames:
-            if fnmatch(fn, filename):
+            if _fn_matches(fn, filename):
                 matching_lexers.add(lexer)
                 primary[lexer] = True
         for filename in lexer.alias_filenames:
-            if fnmatch(fn, filename):
+            if _fn_matches(fn, filename):
                 matching_lexers.add(lexer)
                 primary[lexer] = False
     if not matching_lexers:
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/lexers/_mapping.py b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/lexers/_mapping.py
index 40dcaa3..c972e3a 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/lexers/_mapping.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/lexers/_mapping.py
@@ -1,5 +1,16 @@
-# Automatically generated by scripts/gen_mapfiles.py.
-# DO NOT EDIT BY HAND; run `make mapfiles` instead.
+"""
+    pygments.lexers._mapping
+    ~~~~~~~~~~~~~~~~~~~~~~~~
+
+    Lexer mapping definitions. This file is generated by itself. Everytime
+    you change something on a builtin lexer definition, run this script from
+    the lexers folder to update it.
+
+    Do not alter the LEXERS dictionary by hand.
+
+    :copyright: Copyright 2006-2014, 2016 by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
 
 LEXERS = {
     'ABAPLexer': ('pip._vendor.pygments.lexers.business', 'ABAP', ('abap',), ('*.abap', '*.ABAP'), ('text/x-abap',)),
@@ -8,7 +19,7 @@
     'AbnfLexer': ('pip._vendor.pygments.lexers.grammar_notation', 'ABNF', ('abnf',), ('*.abnf',), ('text/x-abnf',)),
     'ActionScript3Lexer': ('pip._vendor.pygments.lexers.actionscript', 'ActionScript 3', ('actionscript3', 'as3'), ('*.as',), ('application/x-actionscript3', 'text/x-actionscript3', 'text/actionscript3')),
     'ActionScriptLexer': ('pip._vendor.pygments.lexers.actionscript', 'ActionScript', ('actionscript', 'as'), ('*.as',), ('application/x-actionscript', 'text/x-actionscript', 'text/actionscript')),
-    'AdaLexer': ('pip._vendor.pygments.lexers.ada', 'Ada', ('ada', 'ada95', 'ada2005'), ('*.adb', '*.ads', '*.ada'), ('text/x-ada',)),
+    'AdaLexer': ('pip._vendor.pygments.lexers.pascal', 'Ada', ('ada', 'ada95', 'ada2005'), ('*.adb', '*.ads', '*.ada'), ('text/x-ada',)),
     'AdlLexer': ('pip._vendor.pygments.lexers.archetype', 'ADL', ('adl',), ('*.adl', '*.adls', '*.adlf', '*.adlx'), ()),
     'AgdaLexer': ('pip._vendor.pygments.lexers.haskell', 'Agda', ('agda',), ('*.agda',), ('text/x-agda',)),
     'AheuiLexer': ('pip._vendor.pygments.lexers.esoteric', 'Aheui', ('aheui',), ('*.aheui',), ()),
@@ -48,7 +59,6 @@
     'BatchLexer': ('pip._vendor.pygments.lexers.shell', 'Batchfile', ('batch', 'bat', 'dosbatch', 'winbatch'), ('*.bat', '*.cmd'), ('application/x-dos-batch',)),
     'BddLexer': ('pip._vendor.pygments.lexers.bdd', 'Bdd', ('bdd',), ('*.feature',), ('text/x-bdd',)),
     'BefungeLexer': ('pip._vendor.pygments.lexers.esoteric', 'Befunge', ('befunge',), ('*.befunge',), ('application/x-befunge',)),
-    'BerryLexer': ('pip._vendor.pygments.lexers.berry', 'Berry', ('berry', 'be'), ('*.be',), ('text/x-berry', 'application/x-berry')),
     'BibTeXLexer': ('pip._vendor.pygments.lexers.bibtex', 'BibTeX', ('bibtex', 'bib'), ('*.bib',), ('text/x-bibtex',)),
     'BlitzBasicLexer': ('pip._vendor.pygments.lexers.basic', 'BlitzBasic', ('blitzbasic', 'b3d', 'bplus'), ('*.bb', '*.decls'), ('text/x-bb',)),
     'BlitzMaxLexer': ('pip._vendor.pygments.lexers.basic', 'BlitzMax', ('blitzmax', 'bmax'), ('*.bmx',), ('text/x-bmx',)),
@@ -63,7 +73,6 @@
     'CMakeLexer': ('pip._vendor.pygments.lexers.make', 'CMake', ('cmake',), ('*.cmake', 'CMakeLists.txt'), ('text/x-cmake',)),
     'CObjdumpLexer': ('pip._vendor.pygments.lexers.asm', 'c-objdump', ('c-objdump',), ('*.c-objdump',), ('text/x-c-objdump',)),
     'CPSALexer': ('pip._vendor.pygments.lexers.lisp', 'CPSA', ('cpsa',), ('*.cpsa',), ()),
-    'CSSUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'CSS+UL4', ('css+ul4',), ('*.cssul4',), ()),
     'CSharpAspxLexer': ('pip._vendor.pygments.lexers.dotnet', 'aspx-cs', ('aspx-cs',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()),
     'CSharpLexer': ('pip._vendor.pygments.lexers.dotnet', 'C#', ('csharp', 'c#', 'cs'), ('*.cs',), ('text/x-csharp',)),
     'Ca65Lexer': ('pip._vendor.pygments.lexers.asm', 'ca65 assembler', ('ca65',), ('*.s',), ()),
@@ -84,7 +93,7 @@
     'CirruLexer': ('pip._vendor.pygments.lexers.webmisc', 'Cirru', ('cirru',), ('*.cirru',), ('text/x-cirru',)),
     'ClayLexer': ('pip._vendor.pygments.lexers.c_like', 'Clay', ('clay',), ('*.clay',), ('text/x-clay',)),
     'CleanLexer': ('pip._vendor.pygments.lexers.clean', 'Clean', ('clean',), ('*.icl', '*.dcl'), ()),
-    'ClojureLexer': ('pip._vendor.pygments.lexers.jvm', 'Clojure', ('clojure', 'clj'), ('*.clj', '*.cljc'), ('text/x-clojure', 'application/x-clojure')),
+    'ClojureLexer': ('pip._vendor.pygments.lexers.jvm', 'Clojure', ('clojure', 'clj'), ('*.clj',), ('text/x-clojure', 'application/x-clojure')),
     'ClojureScriptLexer': ('pip._vendor.pygments.lexers.jvm', 'ClojureScript', ('clojurescript', 'cljs'), ('*.cljs',), ('text/x-clojurescript', 'application/x-clojurescript')),
     'CobolFreeformatLexer': ('pip._vendor.pygments.lexers.business', 'COBOLFree', ('cobolfree',), ('*.cbl', '*.CBL'), ()),
     'CobolLexer': ('pip._vendor.pygments.lexers.business', 'COBOL', ('cobol',), ('*.cob', '*.COB', '*.cpy', '*.CPY'), ('text/x-cobol',)),
@@ -92,12 +101,10 @@
     'ColdfusionCFCLexer': ('pip._vendor.pygments.lexers.templates', 'Coldfusion CFC', ('cfc',), ('*.cfc',), ()),
     'ColdfusionHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'Coldfusion HTML', ('cfm',), ('*.cfm', '*.cfml'), ('application/x-coldfusion',)),
     'ColdfusionLexer': ('pip._vendor.pygments.lexers.templates', 'cfstatement', ('cfs',), (), ()),
-    'Comal80Lexer': ('pip._vendor.pygments.lexers.comal', 'COMAL-80', ('comal', 'comal80'), ('*.cml', '*.comal'), ()),
     'CommonLispLexer': ('pip._vendor.pygments.lexers.lisp', 'Common Lisp', ('common-lisp', 'cl', 'lisp'), ('*.cl', '*.lisp'), ('text/x-common-lisp',)),
     'ComponentPascalLexer': ('pip._vendor.pygments.lexers.oberon', 'Component Pascal', ('componentpascal', 'cp'), ('*.cp', '*.cps'), ('text/x-component-pascal',)),
     'CoqLexer': ('pip._vendor.pygments.lexers.theorem', 'Coq', ('coq',), ('*.v',), ('text/x-coq',)),
-    'CplintLexer': ('pip._vendor.pygments.lexers.cplint', 'cplint', ('cplint',), ('*.ecl', '*.prolog', '*.pro', '*.pl', '*.P', '*.lpad', '*.cpl'), ('text/x-cplint',)),
-    'CppLexer': ('pip._vendor.pygments.lexers.c_cpp', 'C++', ('cpp', 'c++'), ('*.cpp', '*.hpp', '*.c++', '*.h++', '*.cc', '*.hh', '*.cxx', '*.hxx', '*.C', '*.H', '*.cp', '*.CPP', '*.tpp'), ('text/x-c++hdr', 'text/x-c++src')),
+    'CppLexer': ('pip._vendor.pygments.lexers.c_cpp', 'C++', ('cpp', 'c++'), ('*.cpp', '*.hpp', '*.c++', '*.h++', '*.cc', '*.hh', '*.cxx', '*.hxx', '*.C', '*.H', '*.cp', '*.CPP'), ('text/x-c++hdr', 'text/x-c++src')),
     'CppObjdumpLexer': ('pip._vendor.pygments.lexers.asm', 'cpp-objdump', ('cpp-objdump', 'c++-objdumb', 'cxx-objdump'), ('*.cpp-objdump', '*.c++-objdump', '*.cxx-objdump'), ('text/x-cpp-objdump',)),
     'CrmshLexer': ('pip._vendor.pygments.lexers.dsls', 'Crmsh', ('crmsh', 'pcmk'), ('*.crmsh', '*.pcmk'), ()),
     'CrocLexer': ('pip._vendor.pygments.lexers.d', 'Croc', ('croc',), ('*.croc',), ('text/x-crocsrc',)),
@@ -106,7 +113,7 @@
     'CsoundDocumentLexer': ('pip._vendor.pygments.lexers.csound', 'Csound Document', ('csound-document', 'csound-csd'), ('*.csd',), ()),
     'CsoundOrchestraLexer': ('pip._vendor.pygments.lexers.csound', 'Csound Orchestra', ('csound', 'csound-orc'), ('*.orc', '*.udo'), ()),
     'CsoundScoreLexer': ('pip._vendor.pygments.lexers.csound', 'Csound Score', ('csound-score', 'csound-sco'), ('*.sco',), ()),
-    'CssDjangoLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Django/Jinja', ('css+django', 'css+jinja'), ('*.css.j2', '*.css.jinja2'), ('text/css+django', 'text/css+jinja')),
+    'CssDjangoLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Django/Jinja', ('css+django', 'css+jinja'), (), ('text/css+django', 'text/css+jinja')),
     'CssErbLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Ruby', ('css+ruby', 'css+erb'), (), ('text/css+ruby',)),
     'CssGenshiLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Genshi Text', ('css+genshitext', 'css+genshi'), (), ('text/css+genshi',)),
     'CssLexer': ('pip._vendor.pygments.lexers.css', 'CSS', ('css',), ('*.css',), ('text/css',)),
@@ -188,7 +195,6 @@
     'GroffLexer': ('pip._vendor.pygments.lexers.markup', 'Groff', ('groff', 'nroff', 'man'), ('*.[1-9]', '*.man', '*.1p', '*.3pm'), ('application/x-troff', 'text/troff')),
     'GroovyLexer': ('pip._vendor.pygments.lexers.jvm', 'Groovy', ('groovy',), ('*.groovy', '*.gradle'), ('text/x-groovy',)),
     'HLSLShaderLexer': ('pip._vendor.pygments.lexers.graphics', 'HLSL', ('hlsl',), ('*.hlsl', '*.hlsli'), ('text/x-hlsl',)),
-    'HTMLUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'HTML+UL4', ('html+ul4',), ('*.htmlul4',), ()),
     'HamlLexer': ('pip._vendor.pygments.lexers.html', 'Haml', ('haml',), ('*.haml',), ('text/x-haml',)),
     'HandlebarsHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Handlebars', ('html+handlebars',), ('*.handlebars', '*.hbs'), ('text/html+handlebars', 'text/x-handlebars-template')),
     'HandlebarsLexer': ('pip._vendor.pygments.lexers.templates', 'Handlebars', ('handlebars',), (), ()),
@@ -197,7 +203,7 @@
     'HexdumpLexer': ('pip._vendor.pygments.lexers.hexdump', 'Hexdump', ('hexdump',), (), ()),
     'HsailLexer': ('pip._vendor.pygments.lexers.asm', 'HSAIL', ('hsail', 'hsa'), ('*.hsail',), ('text/x-hsail',)),
     'HspecLexer': ('pip._vendor.pygments.lexers.haskell', 'Hspec', ('hspec',), (), ()),
-    'HtmlDjangoLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Django/Jinja', ('html+django', 'html+jinja', 'htmldjango'), ('*.html.j2', '*.htm.j2', '*.xhtml.j2', '*.html.jinja2', '*.htm.jinja2', '*.xhtml.jinja2'), ('text/html+django', 'text/html+jinja')),
+    'HtmlDjangoLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Django/Jinja', ('html+django', 'html+jinja', 'htmldjango'), (), ('text/html+django', 'text/html+jinja')),
     'HtmlGenshiLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Genshi', ('html+genshi', 'html+kid'), (), ('text/html+genshi',)),
     'HtmlLexer': ('pip._vendor.pygments.lexers.html', 'HTML', ('html',), ('*.html', '*.htm', '*.xhtml', '*.xslt'), ('text/html', 'application/xhtml+xml')),
     'HtmlPhpLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+PHP', ('html+php',), ('*.phtml',), ('application/x-php', 'application/x-httpd-php', 'application/x-httpd-php3', 'application/x-httpd-php4', 'application/x-httpd-php5')),
@@ -219,18 +225,16 @@
     'IrcLogsLexer': ('pip._vendor.pygments.lexers.textfmts', 'IRC logs', ('irc',), ('*.weechatlog',), ('text/x-irclog',)),
     'IsabelleLexer': ('pip._vendor.pygments.lexers.theorem', 'Isabelle', ('isabelle',), ('*.thy',), ('text/x-isabelle',)),
     'JLexer': ('pip._vendor.pygments.lexers.j', 'J', ('j',), ('*.ijs',), ('text/x-j',)),
-    'JMESPathLexer': ('pip._vendor.pygments.lexers.jmespath', 'JMESPath', ('jmespath', 'jp'), ('*.jp',), ()),
     'JSLTLexer': ('pip._vendor.pygments.lexers.jslt', 'JSLT', ('jslt',), ('*.jslt',), ('text/x-jslt',)),
     'JagsLexer': ('pip._vendor.pygments.lexers.modeling', 'JAGS', ('jags',), ('*.jag', '*.bug'), ()),
     'JasminLexer': ('pip._vendor.pygments.lexers.jvm', 'Jasmin', ('jasmin', 'jasminxt'), ('*.j',), ()),
     'JavaLexer': ('pip._vendor.pygments.lexers.jvm', 'Java', ('java',), ('*.java',), ('text/x-java',)),
-    'JavascriptDjangoLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Django/Jinja', ('javascript+django', 'js+django', 'javascript+jinja', 'js+jinja'), ('*.js.j2', '*.js.jinja2'), ('application/x-javascript+django', 'application/x-javascript+jinja', 'text/x-javascript+django', 'text/x-javascript+jinja', 'text/javascript+django', 'text/javascript+jinja')),
+    'JavascriptDjangoLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Django/Jinja', ('javascript+django', 'js+django', 'javascript+jinja', 'js+jinja'), (), ('application/x-javascript+django', 'application/x-javascript+jinja', 'text/x-javascript+django', 'text/x-javascript+jinja', 'text/javascript+django', 'text/javascript+jinja')),
     'JavascriptErbLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Ruby', ('javascript+ruby', 'js+ruby', 'javascript+erb', 'js+erb'), (), ('application/x-javascript+ruby', 'text/x-javascript+ruby', 'text/javascript+ruby')),
     'JavascriptGenshiLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Genshi Text', ('js+genshitext', 'js+genshi', 'javascript+genshitext', 'javascript+genshi'), (), ('application/x-javascript+genshi', 'text/x-javascript+genshi', 'text/javascript+genshi')),
     'JavascriptLexer': ('pip._vendor.pygments.lexers.javascript', 'JavaScript', ('javascript', 'js'), ('*.js', '*.jsm', '*.mjs', '*.cjs'), ('application/javascript', 'application/x-javascript', 'text/x-javascript', 'text/javascript')),
     'JavascriptPhpLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+PHP', ('javascript+php', 'js+php'), (), ('application/x-javascript+php', 'text/x-javascript+php', 'text/javascript+php')),
     'JavascriptSmartyLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Smarty', ('javascript+smarty', 'js+smarty'), (), ('application/x-javascript+smarty', 'text/x-javascript+smarty', 'text/javascript+smarty')),
-    'JavascriptUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'Javascript+UL4', ('js+ul4',), ('*.jsul4',), ()),
     'JclLexer': ('pip._vendor.pygments.lexers.scripting', 'JCL', ('jcl',), ('*.jcl',), ('text/x-jcl',)),
     'JsgfLexer': ('pip._vendor.pygments.lexers.grammar_notation', 'JSGF', ('jsgf',), ('*.jsgf',), ('application/jsgf', 'application/x-jsgf', 'text/jsgf')),
     'JsonBareObjectLexer': ('pip._vendor.pygments.lexers.data', 'JSONBareObject', (), (), ()),
@@ -240,7 +244,6 @@
     'JuliaConsoleLexer': ('pip._vendor.pygments.lexers.julia', 'Julia console', ('jlcon', 'julia-repl'), (), ()),
     'JuliaLexer': ('pip._vendor.pygments.lexers.julia', 'Julia', ('julia', 'jl'), ('*.jl',), ('text/x-julia', 'application/x-julia')),
     'JuttleLexer': ('pip._vendor.pygments.lexers.javascript', 'Juttle', ('juttle',), ('*.juttle',), ('application/juttle', 'application/x-juttle', 'text/x-juttle', 'text/juttle')),
-    'KLexer': ('pip._vendor.pygments.lexers.q', 'K', ('k',), ('*.k',), ()),
     'KalLexer': ('pip._vendor.pygments.lexers.javascript', 'Kal', ('kal',), ('*.kal',), ('text/kal', 'application/kal')),
     'KconfigLexer': ('pip._vendor.pygments.lexers.configs', 'Kconfig', ('kconfig', 'menuconfig', 'linux-config', 'kernel-config'), ('Kconfig*', '*Config.in*', 'external.in*', 'standard-modules.in'), ('text/x-kconfig',)),
     'KernelLogLexer': ('pip._vendor.pygments.lexers.textfmts', 'Kernel log', ('kmsg', 'dmesg'), ('*.kmsg', '*.dmesg'), ()),
@@ -270,11 +273,9 @@
     'LogosLexer': ('pip._vendor.pygments.lexers.objective', 'Logos', ('logos',), ('*.x', '*.xi', '*.xm', '*.xmi'), ('text/x-logos',)),
     'LogtalkLexer': ('pip._vendor.pygments.lexers.prolog', 'Logtalk', ('logtalk',), ('*.lgt', '*.logtalk'), ('text/x-logtalk',)),
     'LuaLexer': ('pip._vendor.pygments.lexers.scripting', 'Lua', ('lua',), ('*.lua', '*.wlua'), ('text/x-lua', 'application/x-lua')),
-    'MCFunctionLexer': ('pip._vendor.pygments.lexers.mcfunction', 'MCFunction', ('mcfunction', 'mcf'), ('*.mcfunction',), ('text/mcfunction',)),
     'MIMELexer': ('pip._vendor.pygments.lexers.mime', 'MIME', ('mime',), (), ('multipart/mixed', 'multipart/related', 'multipart/alternative')),
     'MOOCodeLexer': ('pip._vendor.pygments.lexers.scripting', 'MOOCode', ('moocode', 'moo'), ('*.moo',), ('text/x-moocode',)),
     'MSDOSSessionLexer': ('pip._vendor.pygments.lexers.shell', 'MSDOS Session', ('doscon',), (), ()),
-    'Macaulay2Lexer': ('pip._vendor.pygments.lexers.macaulay2', 'Macaulay2', ('macaulay2',), ('*.m2',), ()),
     'MakefileLexer': ('pip._vendor.pygments.lexers.make', 'Makefile', ('make', 'makefile', 'mf', 'bsdmake'), ('*.mak', '*.mk', 'Makefile', 'makefile', 'Makefile.*', 'GNUmakefile'), ('text/x-makefile',)),
     'MakoCssLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Mako', ('css+mako',), (), ('text/css+mako',)),
     'MakoHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Mako', ('html+mako',), (), ('text/html+mako',)),
@@ -378,11 +379,8 @@
     'PythonConsoleLexer': ('pip._vendor.pygments.lexers.python', 'Python console session', ('pycon',), (), ('text/x-python-doctest',)),
     'PythonLexer': ('pip._vendor.pygments.lexers.python', 'Python', ('python', 'py', 'sage', 'python3', 'py3'), ('*.py', '*.pyw', '*.jy', '*.sage', '*.sc', 'SConstruct', 'SConscript', '*.bzl', 'BUCK', 'BUILD', 'BUILD.bazel', 'WORKSPACE', '*.tac'), ('text/x-python', 'application/x-python', 'text/x-python3', 'application/x-python3')),
     'PythonTracebackLexer': ('pip._vendor.pygments.lexers.python', 'Python Traceback', ('pytb', 'py3tb'), ('*.pytb', '*.py3tb'), ('text/x-python-traceback', 'text/x-python3-traceback')),
-    'PythonUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'Python+UL4', ('py+ul4',), ('*.pyul4',), ()),
     'QBasicLexer': ('pip._vendor.pygments.lexers.basic', 'QBasic', ('qbasic', 'basic'), ('*.BAS', '*.bas'), ('text/basic',)),
-    'QLexer': ('pip._vendor.pygments.lexers.q', 'Q', ('q',), ('*.q',), ()),
     'QVToLexer': ('pip._vendor.pygments.lexers.qvt', 'QVTO', ('qvto', 'qvt'), ('*.qvto',), ()),
-    'QlikLexer': ('pip._vendor.pygments.lexers.qlik', 'Qlik', ('qlik', 'qlikview', 'qliksense', 'qlikscript'), ('*.qvs', '*.qvw'), ()),
     'QmlLexer': ('pip._vendor.pygments.lexers.webmisc', 'QML', ('qml', 'qbs'), ('*.qml', '*.qbs'), ('application/x-qml', 'application/x-qt.qbs+qml')),
     'RConsoleLexer': ('pip._vendor.pygments.lexers.r', 'RConsole', ('rconsole', 'rout'), ('*.Rout',), ()),
     'RNCCompactLexer': ('pip._vendor.pygments.lexers.rnc', 'Relax-NG Compact', ('rng-compact', 'rnc'), ('*.rnc',), ()),
@@ -410,7 +408,7 @@
     'RitaLexer': ('pip._vendor.pygments.lexers.rita', 'Rita', ('rita',), ('*.rita',), ('text/rita',)),
     'RoboconfGraphLexer': ('pip._vendor.pygments.lexers.roboconf', 'Roboconf Graph', ('roboconf-graph',), ('*.graph',), ()),
     'RoboconfInstancesLexer': ('pip._vendor.pygments.lexers.roboconf', 'Roboconf Instances', ('roboconf-instances',), ('*.instances',), ()),
-    'RobotFrameworkLexer': ('pip._vendor.pygments.lexers.robotframework', 'RobotFramework', ('robotframework',), ('*.robot', '*.resource'), ('text/x-robotframework',)),
+    'RobotFrameworkLexer': ('pip._vendor.pygments.lexers.robotframework', 'RobotFramework', ('robotframework',), ('*.robot',), ('text/x-robotframework',)),
     'RqlLexer': ('pip._vendor.pygments.lexers.sql', 'RQL', ('rql',), ('*.rql',), ('text/x-rql',)),
     'RslLexer': ('pip._vendor.pygments.lexers.dsls', 'RSL', ('rsl',), ('*.rsl',), ('text/rsl',)),
     'RstLexer': ('pip._vendor.pygments.lexers.markup', 'reStructuredText', ('restructuredtext', 'rst', 'rest'), ('*.rst', '*.rest'), ('text/x-rst', 'text/prs.fallenstein.rst')),
@@ -421,7 +419,6 @@
     'SASLexer': ('pip._vendor.pygments.lexers.sas', 'SAS', ('sas',), ('*.SAS', '*.sas'), ('text/x-sas', 'text/sas', 'application/x-sas')),
     'SLexer': ('pip._vendor.pygments.lexers.r', 'S', ('splus', 's', 'r'), ('*.S', '*.R', '.Rhistory', '.Rprofile', '.Renviron'), ('text/S-plus', 'text/S', 'text/x-r-source',
'text/x-r', 'text/x-R', 'text/x-r-history', 'text/x-r-profile')), 'SMLLexer': ('pip._vendor.pygments.lexers.ml', 'Standard ML', ('sml',), ('*.sml', '*.sig', '*.fun'), ('text/x-standardml', 'application/x-standardml')), - 'SNBTLexer': ('pip._vendor.pygments.lexers.mcfunction', 'SNBT', ('snbt',), ('*.snbt',), ('text/snbt',)), 'SarlLexer': ('pip._vendor.pygments.lexers.jvm', 'SARL', ('sarl',), ('*.sarl',), ('text/x-sarl',)), 'SassLexer': ('pip._vendor.pygments.lexers.css', 'Sass', ('sass',), ('*.sass',), ('text/x-sass',)), 'SaviLexer': ('pip._vendor.pygments.lexers.savi', 'Savi', ('savi',), ('*.savi',), ()), @@ -453,7 +450,6 @@ 'SourcesListLexer': ('pip._vendor.pygments.lexers.installers', 'Debian Sourcelist', ('debsources', 'sourceslist', 'sources.list'), ('sources.list',), ()), 'SparqlLexer': ('pip._vendor.pygments.lexers.rdf', 'SPARQL', ('sparql',), ('*.rq', '*.sparql'), ('application/sparql-query',)), 'SpiceLexer': ('pip._vendor.pygments.lexers.spice', 'Spice', ('spice', 'spicelang'), ('*.spice',), ('text/x-spice',)), - 'SqlJinjaLexer': ('pip._vendor.pygments.lexers.templates', 'SQL+Jinja', ('sql+jinja',), ('*.sql', '*.sql.j2', '*.sql.jinja2'), ()), 'SqlLexer': ('pip._vendor.pygments.lexers.sql', 'SQL', ('sql',), ('*.sql',), ('text/x-sql',)), 'SqliteConsoleLexer': ('pip._vendor.pygments.lexers.sql', 'sqlite3con', ('sqlite3',), ('*.sqlite3-console',), ('text/x-sqlite3-console',)), 'SquidConfLexer': ('pip._vendor.pygments.lexers.configs', 'SquidConf', ('squidconf', 'squid.conf', 'squid'), ('squid.conf',), ('text/x-squidconf',)), @@ -469,7 +465,6 @@ 'TNTLexer': ('pip._vendor.pygments.lexers.tnt', 'Typographic Number Theory', ('tnt',), ('*.tnt',), ()), 'TOMLLexer': ('pip._vendor.pygments.lexers.configs', 'TOML', ('toml',), ('*.toml', 'Pipfile', 'poetry.lock'), ()), 'Tads3Lexer': ('pip._vendor.pygments.lexers.int_fiction', 'TADS 3', ('tads3',), ('*.t',), ()), - 'TalLexer': ('pip._vendor.pygments.lexers.tal', 'Tal', ('tal', 'uxntal'), ('*.tal',), ('text/x-uxntal',)), 'TasmLexer': ('pip._vendor.pygments.lexers.asm', 'TASM', ('tasm',), ('*.asm', '*.ASM', '*.tasm'), ('text/x-tasm',)), 'TclLexer': ('pip._vendor.pygments.lexers.tcl', 'Tcl', ('tcl',), ('*.tcl', '*.rvt'), ('text/x-tcl', 'text/x-script.tcl', 'application/x-tcl')), 'TcshLexer': ('pip._vendor.pygments.lexers.shell', 'Tcsh', ('tcsh', 'csh'), ('*.tcsh', '*.csh'), ('application/x-csh',)), @@ -495,10 +490,8 @@ 'TypoScriptCssDataLexer': ('pip._vendor.pygments.lexers.typoscript', 'TypoScriptCssData', ('typoscriptcssdata',), (), ()), 'TypoScriptHtmlDataLexer': ('pip._vendor.pygments.lexers.typoscript', 'TypoScriptHtmlData', ('typoscripthtmldata',), (), ()), 'TypoScriptLexer': ('pip._vendor.pygments.lexers.typoscript', 'TypoScript', ('typoscript',), ('*.typoscript',), ('text/x-typoscript',)), - 'UL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'UL4', ('ul4',), ('*.ul4',), ()), 'UcodeLexer': ('pip._vendor.pygments.lexers.unicon', 'ucode', ('ucode',), ('*.u', '*.u1', '*.u2'), ()), 'UniconLexer': ('pip._vendor.pygments.lexers.unicon', 'Unicon', ('unicon',), ('*.icn',), ('text/unicon',)), - 'UnixConfigLexer': ('pip._vendor.pygments.lexers.configs', 'Unix/Linux config files', ('unixconfig', 'linuxconfig'), (), ()), 'UrbiscriptLexer': ('pip._vendor.pygments.lexers.urbi', 'UrbiScript', ('urbiscript',), ('*.u',), ('application/x-urbiscript',)), 'UsdLexer': ('pip._vendor.pygments.lexers.usd', 'USD', ('usd', 'usda'), ('*.usd', '*.usda'), ()), 'VBScriptLexer': ('pip._vendor.pygments.lexers.basic', 'VBScript', ('vbscript',), ('*.vbs', '*.VBS'), ()), @@ -508,7 
+501,7 @@ 'VGLLexer': ('pip._vendor.pygments.lexers.dsls', 'VGL', ('vgl',), ('*.rpf',), ()), 'ValaLexer': ('pip._vendor.pygments.lexers.c_like', 'Vala', ('vala', 'vapi'), ('*.vala', '*.vapi'), ('text/x-vala',)), 'VbNetAspxLexer': ('pip._vendor.pygments.lexers.dotnet', 'aspx-vb', ('aspx-vb',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()), - 'VbNetLexer': ('pip._vendor.pygments.lexers.dotnet', 'VB.net', ('vb.net', 'vbnet', 'lobas', 'oobas', 'sobas'), ('*.vb', '*.bas'), ('text/x-vbnet', 'text/x-vba')), + 'VbNetLexer': ('pip._vendor.pygments.lexers.dotnet', 'VB.net', ('vb.net', 'vbnet'), ('*.vb', '*.bas'), ('text/x-vbnet', 'text/x-vba')), 'VelocityHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Velocity', ('html+velocity',), (), ('text/html+velocity',)), 'VelocityLexer': ('pip._vendor.pygments.lexers.templates', 'Velocity', ('velocity',), ('*.vm', '*.fhtml'), ()), 'VelocityXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Velocity', ('xml+velocity',), (), ('application/xml+velocity',)), @@ -520,9 +513,8 @@ 'WebIDLLexer': ('pip._vendor.pygments.lexers.webidl', 'Web IDL', ('webidl',), ('*.webidl',), ()), 'WhileyLexer': ('pip._vendor.pygments.lexers.whiley', 'Whiley', ('whiley',), ('*.whiley',), ('text/x-whiley',)), 'X10Lexer': ('pip._vendor.pygments.lexers.x10', 'X10', ('x10', 'xten'), ('*.x10',), ('text/x-x10',)), - 'XMLUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'XML+UL4', ('xml+ul4',), ('*.xmlul4',), ()), 'XQueryLexer': ('pip._vendor.pygments.lexers.webmisc', 'XQuery', ('xquery', 'xqy', 'xq', 'xql', 'xqm'), ('*.xqy', '*.xquery', '*.xq', '*.xql', '*.xqm'), ('text/xquery', 'application/xquery')), - 'XmlDjangoLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Django/Jinja', ('xml+django', 'xml+jinja'), ('*.xml.j2', '*.xml.jinja2'), ('application/xml+django', 'application/xml+jinja')), + 'XmlDjangoLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Django/Jinja', ('xml+django', 'xml+jinja'), (), ('application/xml+django', 'application/xml+jinja')), 'XmlErbLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Ruby', ('xml+ruby', 'xml+erb'), (), ('application/xml+ruby',)), 'XmlLexer': ('pip._vendor.pygments.lexers.html', 'XML', ('xml',), ('*.xml', '*.xsl', '*.rss', '*.xslt', '*.xsd', '*.wsdl', '*.wsf'), ('text/xml', 'application/xml', 'image/svg+xml', 'application/rss+xml', 'application/atom+xml')), 'XmlPhpLexer': ('pip._vendor.pygments.lexers.templates', 'XML+PHP', ('xml+php',), (), ('application/xml+php',)), @@ -531,7 +523,7 @@ 'XsltLexer': ('pip._vendor.pygments.lexers.html', 'XSLT', ('xslt',), ('*.xsl', '*.xslt', '*.xpl'), ('application/xsl+xml', 'application/xslt+xml')), 'XtendLexer': ('pip._vendor.pygments.lexers.jvm', 'Xtend', ('xtend',), ('*.xtend',), ('text/x-xtend',)), 'XtlangLexer': ('pip._vendor.pygments.lexers.lisp', 'xtlang', ('extempore',), ('*.xtm',), ()), - 'YamlJinjaLexer': ('pip._vendor.pygments.lexers.templates', 'YAML+Jinja', ('yaml+jinja', 'salt', 'sls'), ('*.sls', '*.yaml.j2', '*.yml.j2', '*.yaml.jinja2', '*.yml.jinja2'), ('text/x-yaml+jinja', 'text/x-sls')), + 'YamlJinjaLexer': ('pip._vendor.pygments.lexers.templates', 'YAML+Jinja', ('yaml+jinja', 'salt', 'sls'), ('*.sls',), ('text/x-yaml+jinja', 'text/x-sls')), 'YamlLexer': ('pip._vendor.pygments.lexers.data', 'YAML', ('yaml',), ('*.yaml', '*.yml'), ('text/x-yaml',)), 'YangLexer': ('pip._vendor.pygments.lexers.yang', 'YANG', ('yang',), ('*.yang',), ('application/yang',)), 'ZeekLexer': ('pip._vendor.pygments.lexers.dsls', 'Zeek', ('zeek', 'bro'), ('*.zeek', '*.bro'), ()), @@ 
-539,3 +531,50 @@ 'ZigLexer': ('pip._vendor.pygments.lexers.zig', 'Zig', ('zig',), ('*.zig',), ('text/zig',)), 'apdlexer': ('pip._vendor.pygments.lexers.apdlexer', 'ANSYS parametric design language', ('ansys', 'apdl'), ('*.ans',), ()), } + +if __name__ == '__main__': # pragma: no cover + import sys + import os + + # lookup lexers + found_lexers = [] + sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..')) + for root, dirs, files in os.walk('.'): + for filename in files: + if filename.endswith('.py') and not filename.startswith('_'): + module_name = 'pygments.lexers%s.%s' % ( + root[1:].replace('/', '.'), filename[:-3]) + print(module_name) + module = __import__(module_name, None, None, ['']) + for lexer_name in module.__all__: + lexer = getattr(module, lexer_name) + found_lexers.append( + '%r: %r' % (lexer_name, + (module_name, + lexer.name, + tuple(lexer.aliases), + tuple(lexer.filenames), + tuple(lexer.mimetypes)))) + # sort them to make the diff minimal + found_lexers.sort() + + # extract useful sourcecode from this file + with open(__file__) as fp: + content = fp.read() + # replace crnl to nl for Windows. + # + # Note that, originally, contributers should keep nl of master + # repository, for example by using some kind of automatic + # management EOL, like `EolExtension + # `. + content = content.replace("\r\n", "\n") + header = content[:content.find('LEXERS = {')] + footer = content[content.find("if __name__ == '__main__':"):] + + # write new file + with open(__file__, 'w') as fp: + fp.write(header) + fp.write('LEXERS = {\n %s,\n}\n\n' % ',\n '.join(found_lexers)) + fp.write(footer) + + print ('=== %d lexers processed.' % len(found_lexers)) diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/lexers/python.py b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/lexers/python.py index c24e3c8..0e7bab9 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/lexers/python.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/lexers/python.py @@ -4,7 +4,7 @@ Lexers for Python and related languages. - :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. + :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ @@ -27,7 +27,7 @@ class PythonLexer(RegexLexer): """ - For Python source code (version 3.x). + For `Python `_ source code (version 3.x). .. 
versionadded:: 0.10 @@ -37,7 +37,6 @@ class PythonLexer(RegexLexer): """ name = 'Python' - url = 'http://www.python.org' aliases = ['python', 'py', 'sage', 'python3', 'py3'] filenames = [ '*.py', @@ -62,6 +61,8 @@ class PythonLexer(RegexLexer): mimetypes = ['text/x-python', 'application/x-python', 'text/x-python3', 'application/x-python3'] + flags = re.MULTILINE | re.UNICODE + uni_name = "[%s][%s]*" % (uni.xid_start, uni.xid_continue) def innerstring_rules(ttype): @@ -142,7 +143,7 @@ def fstring_rules(ttype): combined('fstringescape', 'dqf')), ("([fF])(')", bygroups(String.Affix, String.Single), combined('fstringescape', 'sqf')), - # raw bytes and strings + # raw strings ('(?i)(rb|br|r)(""")', bygroups(String.Affix, String.Double), 'tdqs'), ("(?i)(rb|br|r)(''')", @@ -152,24 +153,14 @@ def fstring_rules(ttype): ("(?i)(rb|br|r)(')", bygroups(String.Affix, String.Single), 'sqs'), # non-raw strings - ('([uU]?)(""")', bygroups(String.Affix, String.Double), + ('([uUbB]?)(""")', bygroups(String.Affix, String.Double), combined('stringescape', 'tdqs')), - ("([uU]?)(''')", bygroups(String.Affix, String.Single), + ("([uUbB]?)(''')", bygroups(String.Affix, String.Single), combined('stringescape', 'tsqs')), - ('([uU]?)(")', bygroups(String.Affix, String.Double), + ('([uUbB]?)(")', bygroups(String.Affix, String.Double), combined('stringescape', 'dqs')), - ("([uU]?)(')", bygroups(String.Affix, String.Single), + ("([uUbB]?)(')", bygroups(String.Affix, String.Single), combined('stringescape', 'sqs')), - # non-raw bytes - ('([bB])(""")', bygroups(String.Affix, String.Double), - combined('bytesescape', 'tdqs')), - ("([bB])(''')", bygroups(String.Affix, String.Single), - combined('bytesescape', 'tsqs')), - ('([bB])(")', bygroups(String.Affix, String.Double), - combined('bytesescape', 'dqs')), - ("([bB])(')", bygroups(String.Affix, String.Single), - combined('bytesescape', 'sqs')), - (r'[^\S\n]+', Text), include('numbers'), (r'!=|==|<<|>>|:=|[-~+/*%=<>&^|.]', Operator), @@ -223,7 +214,7 @@ def fstring_rules(ttype): (r'(^[ \t]*)' # at beginning of line + possible indentation r'(match|case)\b' # a possible keyword r'(?![ \t]*(?:' # not followed by... - r'[:,;=^&|@~)\]}]|(?:' + # characters and keywords that mean this isn't + r'[:,;=^&|@~)\]}]|(?:' + # characters and keywords that mean this isn't r'|'.join(keyword.kwlist) + r')\b))', # pattern matching bygroups(Text, Keyword), 'soft-keywords-inner'), ], @@ -268,8 +259,7 @@ def fstring_rules(ttype): 'InterruptedError', 'IsADirectoryError', 'NotADirectoryError', 'PermissionError', 'ProcessLookupError', 'TimeoutError', # others new in Python 3 - 'StopAsyncIteration', 'ModuleNotFoundError', 'RecursionError', - 'EncodingWarning'), + 'StopAsyncIteration', 'ModuleNotFoundError', 'RecursionError'), prefix=r'(?`_ source code. .. versionchanged:: 2.5 This class has been renamed from ``PythonLexer``. ``PythonLexer`` now @@ -425,7 +412,6 @@ class Python2Lexer(RegexLexer): """ name = 'Python 2.x' - url = 'http://www.python.org' aliases = ['python2', 'py2'] filenames = [] # now taken over by PythonLexer (3.x) mimetypes = ['text/x-python2', 'application/x-python2'] @@ -830,13 +816,12 @@ class Python2TracebackLexer(RegexLexer): class CythonLexer(RegexLexer): """ - For Pyrex and Cython source code. + For Pyrex and `Cython `_ source code. .. 
versionadded:: 1.1 """ name = 'Cython' - url = 'http://cython.org' aliases = ['cython', 'pyx', 'pyrex'] filenames = ['*.pyx', '*.pxd', '*.pxi'] mimetypes = ['text/x-cython', 'application/x-cython'] @@ -1010,7 +995,7 @@ class CythonLexer(RegexLexer): class DgLexer(RegexLexer): """ - Lexer for dg, + Lexer for `dg `_, a functional and object-oriented programming language running on the CPython 3 VM. @@ -1115,7 +1100,6 @@ class NumPyLexer(PythonLexer): """ name = 'NumPy' - url = 'https://numpy.org/' aliases = ['numpy'] # override the mimetypes to not inherit them from python diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/modeline.py b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/modeline.py index 4363083..047d86d 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/modeline.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/modeline.py @@ -4,7 +4,7 @@ A simple modeline parser (based on pymodeline). - :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. + :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/plugin.py b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/plugin.py index 3590bee..958ca21 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/plugin.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/plugin.py @@ -2,12 +2,9 @@ pygments.plugin ~~~~~~~~~~~~~~~ - Pygments plugin interface. By default, this tries to use - ``importlib.metadata``, which is in the Python standard - library since Python 3.8, or its ``importlib_metadata`` - backport for earlier versions of Python. It falls back on - ``pkg_resources`` if not found. Finally, if ``pkg_resources`` - is not found either, no plugins are loaded at all. + Pygments setuptools plugin interface. The methods defined + here also work if setuptools isn't installed but they just + return nothing. lexer plugins:: @@ -34,10 +31,9 @@ yourfilter = yourfilter:YourFilter - :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. + :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ - LEXER_ENTRY_POINT = 'pygments.lexers' FORMATTER_ENTRY_POINT = 'pygments.formatters' STYLE_ENTRY_POINT = 'pygments.styles' @@ -46,26 +42,11 @@ def iter_entry_points(group_name): try: - from importlib.metadata import entry_points - except ImportError: - try: - from importlib_metadata import entry_points - except ImportError: - try: - from pip._vendor.pkg_resources import iter_entry_points - except (ImportError, OSError): - return [] - else: - return iter_entry_points(group_name) - groups = entry_points() - if hasattr(groups, 'select'): - # New interface in Python 3.10 and newer versions of the - # importlib_metadata backport. - return groups.select(group=group_name) - else: - # Older interface, deprecated in Python 3.10 and recent - # importlib_metadata, but we need it in Python 3.8 and 3.9. 
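
(Illustrative note, not part of the patch: the plugin.py hunk above swaps entry-point discovery from importlib.metadata back to pkg_resources. Both backends consume the same registration, sketched below under stated assumptions -- "yourlexer"/YourLexer are the hypothetical names from the module's own docstring, not a real package.)

# setup.py of a hypothetical plugin package registering a lexer entry point;
# the group name matches LEXER_ENTRY_POINT = 'pygments.lexers' defined above.
from setuptools import setup

setup(
    name="yourlexer",
    py_modules=["yourlexer"],
    entry_points={
        "pygments.lexers": ["yourlexer = yourlexer:YourLexer"],
    },
)

# At runtime, pygments.plugin.find_plugin_lexers() yields YourLexer via
# whichever iter_entry_points() implementation is active.
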
- return groups.get(group_name, []) + from pip._vendor import pkg_resources + except (ImportError, OSError): + return [] + + return pkg_resources.iter_entry_points(group_name) def find_plugin_lexers(): diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/regexopt.py b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/regexopt.py index ae00791..cb2c8e2 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/regexopt.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/regexopt.py @@ -5,7 +5,7 @@ An algorithm that generates optimized regexes for matching long lists of literal strings. - :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. + :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/scanner.py b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/scanner.py index d47ed48..5f32a22 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/scanner.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/scanner.py @@ -11,7 +11,7 @@ Have a look at the `DelphiLexer` to get an idea of how to use this scanner. - :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. + :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re @@ -72,7 +72,7 @@ def test(self, pattern): def scan(self, pattern): """ Scan the text for the given pattern and update pos/match - and related fields. The return value is a boolean that + and related fields. The return value is a boolen that indicates if the pattern matched. The matched value is stored on the instance as ``match``, the last value is stored as ``last``. ``start_pos`` is the position of the diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/sphinxext.py b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/sphinxext.py index c41bd49..2412dee 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/sphinxext.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/sphinxext.py @@ -5,7 +5,7 @@ Sphinx extension to generate automatic documentation of lexers, formatters and filters. - :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. + :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/style.py b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/style.py index 84abbc2..6b7469c 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/style.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/style.py @@ -4,7 +4,7 @@ Basic style object. - :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. + :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/styles/__init__.py b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/styles/__init__.py index 44cc0ef..e437d17 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/styles/__init__.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/styles/__init__.py @@ -4,7 +4,7 @@ Contains built-in styles. - :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. + :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. 
""" @@ -48,7 +48,6 @@ 'solarized-dark': 'solarized::SolarizedDarkStyle', 'solarized-light': 'solarized::SolarizedLightStyle', 'sas': 'sas::SasStyle', - 'staroffice' : 'staroffice::StarofficeStyle', 'stata': 'stata_light::StataLightStyle', 'stata-light': 'stata_light::StataLightStyle', 'stata-dark': 'stata_dark::StataDarkStyle', @@ -59,9 +58,6 @@ 'dracula': 'dracula::DraculaStyle', 'one-dark': 'onedark::OneDarkStyle', 'lilypond' : 'lilypond::LilyPondStyle', - 'nord': 'nord::NordStyle', - 'nord-darker': 'nord::NordDarkerStyle', - 'github-dark': 'gh_dark::GhDarkStyle' } diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/token.py b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/token.py index e3e565a..9013acb 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/token.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/token.py @@ -4,7 +4,7 @@ Basic token types and the standard tokens. - :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. + :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ @@ -189,7 +189,6 @@ def string_to_tokentype(s): Operator.Word: 'ow', Punctuation: 'p', - Punctuation.Marker: 'pm', Comment: 'c', Comment.Hashbang: 'ch', diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/unistring.py b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/unistring.py index 2e3c808..2872985 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/unistring.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/unistring.py @@ -7,7 +7,7 @@ Inspired by chartypes_create.py from the MoinMoin project. - :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. + :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ @@ -122,7 +122,7 @@ def _handle_runs(char_list): # pragma: no cover c = chr(code) cat = unicodedata.category(c) if ord(c) == 0xdc00: - # Hack to avoid combining this combining with the preceding high + # Hack to avoid combining this combining with the preceeding high # surrogate, 0xdbff, when doing a repr. c = '\\' + c elif ord(c) in (0x2d, 0x5b, 0x5c, 0x5d, 0x5e): diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/util.py b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/util.py index 8032962..5d6ddc3 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/pygments/util.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/pygments/util.py @@ -4,7 +4,7 @@ Utility functions. - :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. + :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ @@ -23,7 +23,7 @@ [^>]*> ''', re.DOTALL | re.MULTILINE | re.VERBOSE) tag_re = re.compile(r'<(.+?)(\s.*?)?>.*?', - re.IGNORECASE | re.DOTALL | re.MULTILINE) + re.UNICODE | re.IGNORECASE | re.DOTALL | re.MULTILINE) xml_decl_re = re.compile(r'\s*<\?xml[^>]*\?>', re.I) diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/pyparsing/__init__.py b/venv/lib/python3.10/site-packages/pip/_vendor/pyparsing/__init__.py index 7537250..fbc6d8c 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/pyparsing/__init__.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/pyparsing/__init__.py @@ -1,6 +1,6 @@ # module pyparsing.py # -# Copyright (c) 2003-2022 Paul T. McGuire +# Copyright (c) 2003-2021 Paul T. 
McGuire # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -105,17 +105,14 @@ class version_info(NamedTuple): @property def __version__(self): - return ( - "{}.{}.{}".format(self.major, self.minor, self.micro) - + ( - "{}{}{}".format( - "r" if self.releaselevel[0] == "c" else "", - self.releaselevel[0], - self.serial, - ), - "", - )[self.releaselevel == "final"] - ) + return "{}.{}.{}".format(self.major, self.minor, self.micro) + ( + "{}{}{}".format( + "r" if self.releaselevel[0] == "c" else "", + self.releaselevel[0], + self.serial, + ), + "", + )[self.releaselevel == "final"] def __str__(self): return "{} {} / {}".format(__name__, self.__version__, __version_time__) @@ -128,8 +125,8 @@ def __repr__(self): ) -__version_info__ = version_info(3, 0, 9, "final", 0) -__version_time__ = "05 May 2022 07:02 UTC" +__version_info__ = version_info(3, 0, 7, "final", 0) +__version_time__ = "15 Jan 2022 04:10 UTC" __version__ = __version_info__.__version__ __versionTime__ = __version_time__ __author__ = "Paul McGuire " diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/pyparsing/actions.py b/venv/lib/python3.10/site-packages/pip/_vendor/pyparsing/actions.py index f72c66e..2bcc550 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/pyparsing/actions.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/pyparsing/actions.py @@ -55,7 +55,7 @@ def replace_with(repl_str): na = one_of("N/A NA").set_parse_action(replace_with(math.nan)) term = na | num - term[1, ...].parse_string("324 234 N/A 234") # -> [324, 234, nan, 234] + OneOrMore(term).parse_string("324 234 N/A 234") # -> [324, 234, nan, 234] """ return lambda s, l, t: [repl_str] diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/pyparsing/core.py b/venv/lib/python3.10/site-packages/pip/_vendor/pyparsing/core.py index 6ff3c76..6311815 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/pyparsing/core.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/pyparsing/core.py @@ -2,8 +2,9 @@ # core.py # import os -import typing from typing import ( + Optional as OptionalType, + Iterable as IterableType, NamedTuple, Union, Callable, @@ -13,6 +14,7 @@ List, TextIO, Set, + Dict as DictType, Sequence, ) from abc import ABC, abstractmethod @@ -21,6 +23,7 @@ import copy import warnings import re +import sre_constants import sys from collections.abc import Iterable import traceback @@ -50,7 +53,7 @@ str_type: Tuple[type, ...] = (str, bytes) # -# Copyright (c) 2003-2022 Paul T. McGuire +# Copyright (c) 2003-2021 Paul T. 
McGuire # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -73,19 +76,6 @@ # -if sys.version_info >= (3, 8): - from functools import cached_property -else: - - class cached_property: - def __init__(self, func): - self._func = func - - def __get__(self, instance, owner=None): - ret = instance.__dict__[self._func.__name__] = self._func(instance) - return ret - - class __compat__(__config_flags): """ A cross-version compatibility configuration for pyparsing features that will be @@ -190,7 +180,7 @@ def enable_all_warnings() -> None: def _should_enable_warnings( - cmd_line_warn_options: typing.Iterable[str], warn_env_var: typing.Optional[str] + cmd_line_warn_options: IterableType[str], warn_env_var: OptionalType[str] ) -> bool: enable = bool(warn_env_var) for warn_opt in cmd_line_warn_options: @@ -256,10 +246,10 @@ def _should_enable_warnings( alphanums = alphas + nums printables = "".join([c for c in string.printable if c not in string.whitespace]) -_trim_arity_call_line: traceback.StackSummary = None +_trim_arity_call_line = None -def _trim_arity(func, max_limit=3): +def _trim_arity(func, maxargs=2): """decorator to trim function calls to match the arity of the target""" global _trim_arity_call_line @@ -277,12 +267,16 @@ def extract_tb(tb, limit=0): # synthesize what would be returned by traceback.extract_stack at the call to # user's parse action 'func', so that we don't incur call penalty at parse time - # fmt: off - LINE_DIFF = 7 + LINE_DIFF = 11 # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!! - _trim_arity_call_line = (_trim_arity_call_line or traceback.extract_stack(limit=2)[-1]) - pa_call_line_synth = (_trim_arity_call_line[0], _trim_arity_call_line[1] + LINE_DIFF) + _trim_arity_call_line = ( + _trim_arity_call_line or traceback.extract_stack(limit=2)[-1] + ) + pa_call_line_synth = ( + _trim_arity_call_line[0], + _trim_arity_call_line[1] + LINE_DIFF, + ) def wrapper(*args): nonlocal found_arity, limit @@ -303,18 +297,16 @@ def wrapper(*args): del tb if trim_arity_type_error: - if limit < max_limit: + if limit <= maxargs: limit += 1 continue raise - # fmt: on # copy func name to wrapper for sensible debug output # (can't use functools.wraps, since that messes with function signature) func_name = getattr(func, "__name__", getattr(func, "__class__").__name__) wrapper.__name__ = func_name - wrapper.__doc__ = func.__doc__ return wrapper @@ -402,7 +394,7 @@ class ParserElement(ABC): DEFAULT_WHITE_CHARS: str = " \n\t\r" verbose_stacktrace: bool = False - _literalStringClass: typing.Optional[type] = None + _literalStringClass: OptionalType[type] = None @staticmethod def set_default_whitespace_chars(chars: str) -> None: @@ -412,11 +404,11 @@ def set_default_whitespace_chars(chars: str) -> None: Example:: # default whitespace chars are space, and newline - Word(alphas)[1, ...].parse_string("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl'] + OneOrMore(Word(alphas)).parse_string("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl'] # change to just treat newline as significant ParserElement.set_default_whitespace_chars(" \t") - Word(alphas)[1, ...].parse_string("abc def\nghi jkl") # -> ['abc', 'def'] + OneOrMore(Word(alphas)).parse_string("abc def\nghi jkl") # -> ['abc', 'def'] """ ParserElement.DEFAULT_WHITE_CHARS = chars @@ -448,13 +440,13 @@ def inline_literals_using(cls: type) -> None: 
ParserElement._literalStringClass = cls class DebugActions(NamedTuple): - debug_try: typing.Optional[DebugStartAction] - debug_match: typing.Optional[DebugSuccessAction] - debug_fail: typing.Optional[DebugExceptionAction] + debug_try: OptionalType[DebugStartAction] + debug_match: OptionalType[DebugSuccessAction] + debug_fail: OptionalType[DebugExceptionAction] def __init__(self, savelist: bool = False): self.parseAction: List[ParseAction] = list() - self.failAction: typing.Optional[ParseFailAction] = None + self.failAction: OptionalType[ParseFailAction] = None self.customName = None self._defaultName = None self.resultsName = None @@ -475,6 +467,7 @@ def __init__(self, savelist: bool = False): self.modalResults = True # custom debug actions self.debugActions = self.DebugActions(None, None, None) + self.re = None # avoid redundant calls to preParse self.callPreparse = True self.callDuringTry = False @@ -508,7 +501,7 @@ def copy(self) -> "ParserElement": integerK = integer.copy().add_parse_action(lambda toks: toks[0] * 1024) + Suppress("K") integerM = integer.copy().add_parse_action(lambda toks: toks[0] * 1024 * 1024) + Suppress("M") - print((integerK | integerM | integer)[1, ...].parse_string("5K 100 640K 256M")) + print(OneOrMore(integerK | integerM | integer).parse_string("5K 100 640K 256M")) prints:: @@ -893,7 +886,7 @@ def can_parse_next(self, instring: str, loc: int) -> bool: # cache for left-recursion in Forward references recursion_lock = RLock() - recursion_memos: typing.Dict[ + recursion_memos: DictType[ Tuple[int, "Forward", bool], Tuple[int, Union[ParseResults, Exception]] ] = {} @@ -983,7 +976,7 @@ def disable_memoization() -> None: @staticmethod def enable_left_recursion( - cache_size_limit: typing.Optional[int] = None, *, force=False + cache_size_limit: OptionalType[int] = None, *, force=False ) -> None: """ Enables "bounded recursion" parsing, which allows for both direct and indirect @@ -1349,7 +1342,7 @@ def split( last = e yield instring[last:] - def __add__(self, other) -> "ParserElement": + def __add__(self, other): """ Implementation of ``+`` operator - returns :class:`And`. Adding strings to a :class:`ParserElement` converts them to :class:`Literal`s by default. @@ -1389,7 +1382,7 @@ def __add__(self, other) -> "ParserElement": ) return And([self, other]) - def __radd__(self, other) -> "ParserElement": + def __radd__(self, other): """ Implementation of ``+`` operator when left operand is not a :class:`ParserElement` """ @@ -1406,7 +1399,7 @@ def __radd__(self, other) -> "ParserElement": ) return other + self - def __sub__(self, other) -> "ParserElement": + def __sub__(self, other): """ Implementation of ``-`` operator, returns :class:`And` with error stop """ @@ -1420,7 +1413,7 @@ def __sub__(self, other) -> "ParserElement": ) return self + And._ErrorStop() + other - def __rsub__(self, other) -> "ParserElement": + def __rsub__(self, other): """ Implementation of ``-`` operator when left operand is not a :class:`ParserElement` """ @@ -1434,7 +1427,7 @@ def __rsub__(self, other) -> "ParserElement": ) return other - self - def __mul__(self, other) -> "ParserElement": + def __mul__(self, other): """ Implementation of ``*`` operator, allows use of ``expr * 3`` in place of ``expr + expr + expr``. 
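
(Illustrative note, not part of the patch: the hunks in this region only strip return-type annotations from the operator overloads, so the ``*`` semantics described in the docstring are unchanged. A minimal runnable sketch of what ``__mul__`` supports, using the vendored import path of this tree:)

from pip._vendor import pyparsing as pp

word = pp.Word(pp.alphas)
exactly_three = word * 3          # same as word + word + word
two_to_four = word * (2, 4)       # between 2 and 4 repetitions, matched greedily
at_least_two = word * (2, None)   # 2 or more repetitions

print(exactly_three.parse_string("ab cd ef"))   # -> ['ab', 'cd', 'ef']
print(two_to_four.parse_string("ab cd ef gh"))  # -> ['ab', 'cd', 'ef', 'gh']
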
Expressions may also be multiplied by a 2-integer @@ -1520,10 +1513,10 @@ def makeOptionalList(n): ret = And([self] * minElements) return ret - def __rmul__(self, other) -> "ParserElement": + def __rmul__(self, other): return self.__mul__(other) - def __or__(self, other) -> "ParserElement": + def __or__(self, other): """ Implementation of ``|`` operator - returns :class:`MatchFirst` """ @@ -1540,7 +1533,7 @@ def __or__(self, other) -> "ParserElement": ) return MatchFirst([self, other]) - def __ror__(self, other) -> "ParserElement": + def __ror__(self, other): """ Implementation of ``|`` operator when left operand is not a :class:`ParserElement` """ @@ -1554,7 +1547,7 @@ def __ror__(self, other) -> "ParserElement": ) return other | self - def __xor__(self, other) -> "ParserElement": + def __xor__(self, other): """ Implementation of ``^`` operator - returns :class:`Or` """ @@ -1568,7 +1561,7 @@ def __xor__(self, other) -> "ParserElement": ) return Or([self, other]) - def __rxor__(self, other) -> "ParserElement": + def __rxor__(self, other): """ Implementation of ``^`` operator when left operand is not a :class:`ParserElement` """ @@ -1582,7 +1575,7 @@ def __rxor__(self, other) -> "ParserElement": ) return other ^ self - def __and__(self, other) -> "ParserElement": + def __and__(self, other): """ Implementation of ``&`` operator - returns :class:`Each` """ @@ -1596,7 +1589,7 @@ def __and__(self, other) -> "ParserElement": ) return Each([self, other]) - def __rand__(self, other) -> "ParserElement": + def __rand__(self, other): """ Implementation of ``&`` operator when left operand is not a :class:`ParserElement` """ @@ -1610,7 +1603,7 @@ def __rand__(self, other) -> "ParserElement": ) return other & self - def __invert__(self) -> "ParserElement": + def __invert__(self): """ Implementation of ``~`` operator - returns :class:`NotAny` """ @@ -1660,7 +1653,7 @@ def __getitem__(self, key): ret = self * tuple(key[:2]) return ret - def __call__(self, name: str = None) -> "ParserElement": + def __call__(self, name: str = None): """ Shortcut for :class:`set_results_name`, with ``list_all_matches=False``. @@ -1736,7 +1729,7 @@ def ignore(self, other: "ParserElement") -> "ParserElement": Example:: - patt = Word(alphas)[1, ...] 
+ patt = OneOrMore(Word(alphas)) patt.parse_string('ablaj /* comment */ lskjd') # -> ['ablaj'] @@ -1796,7 +1789,7 @@ def set_debug(self, flag: bool = True) -> "ParserElement": # turn on debugging for wd wd.set_debug() - term[1, ...].parse_string("abc 123 xyz 890") + OneOrMore(term).parse_string("abc 123 xyz 890") prints:: @@ -1951,12 +1944,12 @@ def run_tests( self, tests: Union[str, List[str]], parse_all: bool = True, - comment: typing.Optional[Union["ParserElement", str]] = "#", + comment: OptionalType[Union["ParserElement", str]] = "#", full_dump: bool = True, print_results: bool = True, failure_tests: bool = False, post_parse: Callable[[str, ParseResults], str] = None, - file: typing.Optional[TextIO] = None, + file: OptionalType[TextIO] = None, with_line_numbers: bool = False, *, parseAll: bool = True, @@ -2147,7 +2140,6 @@ def create_diagram( output_html: Union[TextIO, Path, str], vertical: int = 3, show_results_names: bool = False, - show_groups: bool = False, **kwargs, ) -> None: """ @@ -2160,7 +2152,7 @@ def create_diagram( instead of horizontally (default=3) - show_results_names - bool flag whether diagram should show annotations for defined results names - - show_groups - bool flag whether groups should be highlighted with an unlabeled surrounding box + Additional diagram-formatting keyword arguments can also be included; see railroad.Diagram class. """ @@ -2178,7 +2170,6 @@ def create_diagram( self, vertical=vertical, show_results_names=show_results_names, - show_groups=show_groups, diagram_kwargs=kwargs, ) if isinstance(output_html, (str, Path)): @@ -2228,7 +2219,7 @@ def __init__(self, expr: ParserElement, must_skip: bool = False): def _generateDefaultName(self): return str(self.anchor + Empty()).replace("Empty", "...") - def __add__(self, other) -> "ParserElement": + def __add__(self, other): skipper = SkipTo(other).set_name("...")("_skipped*") if self.must_skip: @@ -2383,11 +2374,11 @@ class Keyword(Token): def __init__( self, match_string: str = "", - ident_chars: typing.Optional[str] = None, + ident_chars: OptionalType[str] = None, caseless: bool = False, *, matchString: str = "", - identChars: typing.Optional[str] = None, + identChars: OptionalType[str] = None, ): super().__init__() identChars = identChars or ident_chars @@ -2477,7 +2468,7 @@ class CaselessLiteral(Literal): Example:: - CaselessLiteral("CMD")[1, ...].parse_string("cmd CMD Cmd10") + OneOrMore(CaselessLiteral("CMD")).parse_string("cmd CMD Cmd10") # -> ['CMD', 'CMD', 'CMD'] (Contrast with example for :class:`CaselessKeyword`.) @@ -2502,7 +2493,7 @@ class CaselessKeyword(Keyword): Example:: - CaselessKeyword("CMD")[1, ...].parse_string("cmd CMD Cmd10") + OneOrMore(CaselessKeyword("CMD")).parse_string("cmd CMD Cmd10") # -> ['CMD', 'CMD'] (Contrast with example for :class:`CaselessLiteral`.) 
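
(Illustrative note, not part of the patch: the two docstrings above contrast CaselessLiteral and CaselessKeyword. A minimal runnable version of that contrast; the expected outputs are taken from the docstrings themselves:)

from pip._vendor.pyparsing import CaselessKeyword, CaselessLiteral, OneOrMore

# CaselessLiteral also matches "CMD" as the prefix of "Cmd10" ...
print(OneOrMore(CaselessLiteral("CMD")).parse_string("cmd CMD Cmd10"))
# -> ['CMD', 'CMD', 'CMD']

# ... while CaselessKeyword requires a word boundary, so "Cmd10" is skipped.
print(OneOrMore(CaselessKeyword("CMD")).parse_string("cmd CMD Cmd10"))
# -> ['CMD', 'CMD']
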
@@ -2511,10 +2502,10 @@ class CaselessKeyword(Keyword): def __init__( self, match_string: str = "", - ident_chars: typing.Optional[str] = None, + ident_chars: OptionalType[str] = None, *, matchString: str = "", - identChars: typing.Optional[str] = None, + identChars: OptionalType[str] = None, ): identChars = identChars or ident_chars match_string = matchString or match_string @@ -2678,17 +2669,17 @@ class Word(Token): def __init__( self, init_chars: str = "", - body_chars: typing.Optional[str] = None, + body_chars: OptionalType[str] = None, min: int = 1, max: int = 0, exact: int = 0, as_keyword: bool = False, - exclude_chars: typing.Optional[str] = None, + exclude_chars: OptionalType[str] = None, *, - initChars: typing.Optional[str] = None, - bodyChars: typing.Optional[str] = None, + initChars: OptionalType[str] = None, + bodyChars: OptionalType[str] = None, asKeyword: bool = False, - excludeChars: typing.Optional[str] = None, + excludeChars: OptionalType[str] = None, ): initChars = initChars or init_chars bodyChars = bodyChars or body_chars @@ -2782,7 +2773,7 @@ def __init__( try: self.re = re.compile(self.reString) - except re.error: + except sre_constants.error: self.re = None else: self.re_match = self.re.match @@ -2870,10 +2861,10 @@ def __init__( self, charset: str, as_keyword: bool = False, - exclude_chars: typing.Optional[str] = None, + exclude_chars: OptionalType[str] = None, *, asKeyword: bool = False, - excludeChars: typing.Optional[str] = None, + excludeChars: OptionalType[str] = None, ): asKeyword = asKeyword or as_keyword excludeChars = excludeChars or exclude_chars @@ -2935,12 +2926,19 @@ def __init__( if not pattern: raise ValueError("null string passed to Regex; use Empty() instead") - self._re = None - self.reString = self.pattern = pattern + self.pattern = pattern self.flags = flags + try: + self.re = re.compile(self.pattern, self.flags) + self.reString = self.pattern + except sre_constants.error: + raise ValueError( + "invalid pattern ({!r}) passed to Regex".format(pattern) + ) + elif hasattr(pattern, "pattern") and hasattr(pattern, "match"): - self._re = pattern + self.re = pattern self.pattern = self.reString = pattern.pattern self.flags = flags @@ -2949,8 +2947,11 @@ def __init__( "Regex may only be constructed with a string or a compiled RE object" ) + self.re_match = self.re.match + self.errmsg = "Expected " + self.name self.mayIndexError = False + self.mayReturnEmpty = self.re_match("") is not None self.asGroupList = asGroupList self.asMatch = asMatch if self.asGroupList: @@ -2958,26 +2959,6 @@ def __init__( if self.asMatch: self.parseImpl = self.parseImplAsMatch - @cached_property - def re(self): - if self._re: - return self._re - else: - try: - return re.compile(self.pattern, self.flags) - except re.error: - raise ValueError( - "invalid pattern ({!r}) passed to Regex".format(self.pattern) - ) - - @cached_property - def re_match(self): - return self.re.match - - @cached_property - def mayReturnEmpty(self): - return self.re_match("") is not None - def _generateDefaultName(self): return "Re:({})".format(repr(self.pattern).replace("\\\\", "\\")) @@ -3086,18 +3067,18 @@ class QuotedString(Token): def __init__( self, quote_char: str = "", - esc_char: typing.Optional[str] = None, - esc_quote: typing.Optional[str] = None, + esc_char: OptionalType[str] = None, + esc_quote: OptionalType[str] = None, multiline: bool = False, unquote_results: bool = True, - end_quote_char: typing.Optional[str] = None, + end_quote_char: OptionalType[str] = None, convert_whitespace_escapes: 
bool = True, *, quoteChar: str = "", - escChar: typing.Optional[str] = None, - escQuote: typing.Optional[str] = None, + escChar: OptionalType[str] = None, + escQuote: OptionalType[str] = None, unquoteResults: bool = True, - endQuoteChar: typing.Optional[str] = None, + endQuoteChar: OptionalType[str] = None, convertWhitespaceEscapes: bool = True, ): super().__init__() @@ -3187,7 +3168,7 @@ def __init__( self.re = re.compile(self.pattern, self.flags) self.reString = self.pattern self.re_match = self.re.match - except re.error: + except sre_constants.error: raise ValueError( "invalid pattern {!r} passed to Regex".format(self.pattern) ) @@ -3598,7 +3579,7 @@ class ParseExpression(ParserElement): post-processing parsed tokens. """ - def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = False): + def __init__(self, exprs: IterableType[ParserElement], savelist: bool = False): super().__init__(savelist) self.exprs: List[ParserElement] if isinstance(exprs, _generatorType): @@ -3765,7 +3746,7 @@ class And(ParseExpression): Example:: integer = Word(nums) - name_expr = Word(alphas)[1, ...] + name_expr = OneOrMore(Word(alphas)) expr = And([integer("id"), name_expr("name"), integer("age")]) # more easily written as: @@ -3780,9 +3761,7 @@ def __init__(self, *args, **kwargs): def _generateDefaultName(self): return "-" - def __init__( - self, exprs_arg: typing.Iterable[ParserElement], savelist: bool = True - ): + def __init__(self, exprs_arg: IterableType[ParserElement], savelist: bool = True): exprs: List[ParserElement] = list(exprs_arg) if exprs and Ellipsis in exprs: tmp = [] @@ -3847,9 +3826,7 @@ def streamline(self) -> ParserElement: seen.add(id(cur)) if isinstance(cur, IndentedBlock): prev.add_parse_action( - lambda s, l, t, cur_=cur: setattr( - cur_, "parent_anchor", col(l, s) - ) + lambda s, l, t, cur_=cur: setattr(cur_, "parent_anchor", col(l, s)) ) break subs = cur.recurse() @@ -3926,7 +3903,7 @@ class Or(ParseExpression): [['123'], ['3.1416'], ['789']] """ - def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = False): + def __init__(self, exprs: IterableType[ParserElement], savelist: bool = False): super().__init__(exprs, savelist) if self.exprs: self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) @@ -4081,7 +4058,7 @@ class MatchFirst(ParseExpression): print(number.search_string("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']] """ - def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = False): + def __init__(self, exprs: IterableType[ParserElement], savelist: bool = False): super().__init__(exprs, savelist) if self.exprs: self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) @@ -4232,7 +4209,7 @@ class Each(ParseExpression): - size: 20 """ - def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = True): + def __init__(self, exprs: IterableType[ParserElement], savelist: bool = True): super().__init__(exprs, savelist) if self.exprs: self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) @@ -4568,7 +4545,7 @@ class FollowedBy(ParseElementEnhance): label = data_word + FollowedBy(':') attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)) - attr_expr[1, ...].parse_string("shape: SQUARE color: BLACK posn: upper left").pprint() + OneOrMore(attr_expr).parse_string("shape: SQUARE color: BLACK posn: upper left").pprint() prints:: @@ -4619,7 +4596,7 @@ class PrecededBy(ParseElementEnhance): """ def __init__( - self, expr: 
Union[ParserElement, str], retreat: typing.Optional[int] = None + self, expr: Union[ParserElement, str], retreat: OptionalType[int] = None ): super().__init__(expr) self.expr = self.expr().leave_whitespace() @@ -4730,7 +4707,7 @@ class NotAny(ParseElementEnhance): # very crude boolean expression - to support parenthesis groups and # operation hierarchy, use infix_notation - boolean_expr = boolean_term + ((AND | OR) + boolean_term)[...] + boolean_expr = boolean_term + ZeroOrMore((AND | OR) + boolean_term) # integers that are followed by "." are actually floats integer = Word(nums) + ~Char(".") @@ -4758,9 +4735,9 @@ class _MultipleMatch(ParseElementEnhance): def __init__( self, expr: ParserElement, - stop_on: typing.Optional[Union[ParserElement, str]] = None, + stop_on: OptionalType[Union[ParserElement, str]] = None, *, - stopOn: typing.Optional[Union[ParserElement, str]] = None, + stopOn: OptionalType[Union[ParserElement, str]] = None, ): super().__init__(expr) stopOn = stopOn or stop_on @@ -4849,7 +4826,7 @@ class OneOrMore(_MultipleMatch): attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).set_parse_action(' '.join)) text = "shape: SQUARE posn: upper left color: BLACK" - attr_expr[1, ...].parse_string(text).pprint() # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']] + OneOrMore(attr_expr).parse_string(text).pprint() # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']] # use stop_on attribute for OneOrMore to avoid reading label string as part of the data attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)) @@ -4879,9 +4856,9 @@ class ZeroOrMore(_MultipleMatch): def __init__( self, expr: ParserElement, - stop_on: typing.Optional[Union[ParserElement, str]] = None, + stop_on: OptionalType[Union[ParserElement, str]] = None, *, - stopOn: typing.Optional[Union[ParserElement, str]] = None, + stopOn: OptionalType[Union[ParserElement, str]] = None, ): super().__init__(expr, stopOn=stopOn or stop_on) self.mayReturnEmpty = True @@ -5025,20 +5002,20 @@ class SkipTo(ParseElementEnhance): prints:: ['101', 'Critical', 'Intermittent system crash', '6'] - - days_open: '6' - - desc: 'Intermittent system crash' - - issue_num: '101' - - sev: 'Critical' + - days_open: 6 + - desc: Intermittent system crash + - issue_num: 101 + - sev: Critical ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14'] - - days_open: '14' - - desc: "Spelling error on Login ('log|n')" - - issue_num: '94' - - sev: 'Cosmetic' + - days_open: 14 + - desc: Spelling error on Login ('log|n') + - issue_num: 94 + - sev: Cosmetic ['79', 'Minor', 'System slow when running too many reports', '47'] - - days_open: '47' - - desc: 'System slow when running too many reports' - - issue_num: '79' - - sev: 'Minor' + - days_open: 47 + - desc: System slow when running too many reports + - issue_num: 79 + - sev: Minor """ def __init__( @@ -5046,7 +5023,7 @@ def __init__( other: Union[ParserElement, str], include: bool = False, ignore: bool = None, - fail_on: typing.Optional[Union[ParserElement, str]] = None, + fail_on: OptionalType[Union[ParserElement, str]] = None, *, failOn: Union[ParserElement, str] = None, ): @@ -5143,7 +5120,7 @@ class Forward(ParseElementEnhance): parser created using ``Forward``. 
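
(Illustrative note, not part of the patch: Forward, whose __init__ is edited below, exists so a grammar can refer to itself before it is fully defined. A hedged sketch of that use case -- a parser for nested integer lists; the names are arbitrary:)

from pip._vendor.pyparsing import Forward, Group, Suppress, Word, delimited_list, nums

value = Forward()  # placeholder, referenced before its definition is known
integer = Word(nums)
nested = Group(Suppress("[") + delimited_list(value) + Suppress("]"))
value <<= integer | nested  # fill in the recursive definition with <<=

print(value.parse_string("[1, [2, 3], 4]"))  # -> [['1', ['2', '3'], '4']]
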
""" - def __init__(self, other: typing.Optional[Union[ParserElement, str]] = None): + def __init__(self, other: OptionalType[Union[ParserElement, str]] = None): self.caller_frame = traceback.extract_stack(limit=2)[0] super().__init__(other, savelist=False) self.lshift_line = None @@ -5395,7 +5372,7 @@ def __init__( join_string: str = "", adjacent: bool = True, *, - joinString: typing.Optional[str] = None, + joinString: OptionalType[str] = None, ): super().__init__(expr) joinString = joinString if joinString is not None else join_string @@ -5482,10 +5459,10 @@ class Dict(TokenConverter): attr_expr = (label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)) # print attributes as plain groups - print(attr_expr[1, ...].parse_string(text).dump()) + print(OneOrMore(attr_expr).parse_string(text).dump()) - # instead of OneOrMore(expr), parse using Dict(Group(expr)[1, ...]) - Dict will auto-assign names - result = Dict(Group(attr_expr)[1, ...]).parse_string(text) + # instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names + result = Dict(OneOrMore(Group(attr_expr))).parse_string(text) print(result.dump()) # access named fields as dict entries, or output as dict @@ -5496,10 +5473,10 @@ class Dict(TokenConverter): ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap'] [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']] - - color: 'light blue' - - posn: 'upper left' - - shape: 'SQUARE' - - texture: 'burlap' + - color: light blue + - posn: upper left + - shape: SQUARE + - texture: burlap SQUARE {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'} @@ -5558,12 +5535,12 @@ class Suppress(TokenConverter): source = "a, b, c,d" wd = Word(alphas) - wd_list1 = wd + (',' + wd)[...] + wd_list1 = wd + ZeroOrMore(',' + wd) print(wd_list1.parse_string(source)) # often, delimiters that are useful during parsing are just in the # way afterward - use Suppress to keep them out of the parsed output - wd_list2 = wd + (Suppress(',') + wd)[...] 
+ wd_list2 = wd + ZeroOrMore(Suppress(',') + wd) print(wd_list2.parse_string(source)) # Skipped text (using '...') can be suppressed as well @@ -5587,13 +5564,13 @@ def __init__(self, expr: Union[ParserElement, str], savelist: bool = False): expr = _PendingSkip(NoMatch()) super().__init__(expr) - def __add__(self, other) -> "ParserElement": + def __add__(self, other): if isinstance(self.expr, _PendingSkip): return Suppress(SkipTo(other)) + other else: return super().__add__(other) - def __sub__(self, other) -> "ParserElement": + def __sub__(self, other): if isinstance(self.expr, _PendingSkip): return Suppress(SkipTo(other)) - other else: @@ -5622,7 +5599,7 @@ def trace_parse_action(f: ParseAction) -> ParseAction: def remove_duplicate_chars(tokens): return ''.join(sorted(set(''.join(tokens)))) - wds = wd[1, ...].set_parse_action(remove_duplicate_chars) + wds = OneOrMore(wd).set_parse_action(remove_duplicate_chars) print(wds.parse_string("slkdjs sld sldd sdlf sdljf")) prints:: @@ -5728,18 +5705,18 @@ def token_map(func, *args) -> ParseAction: Example (compare the last to example in :class:`ParserElement.transform_string`:: - hex_ints = Word(hexnums)[1, ...].set_parse_action(token_map(int, 16)) + hex_ints = OneOrMore(Word(hexnums)).set_parse_action(token_map(int, 16)) hex_ints.run_tests(''' 00 11 22 aa FF 0a 0d 1a ''') upperword = Word(alphas).set_parse_action(token_map(str.upper)) - upperword[1, ...].run_tests(''' + OneOrMore(upperword).run_tests(''' my kingdom for a horse ''') wd = Word(alphas).set_parse_action(token_map(str.title)) - wd[1, ...].set_parse_action(' '.join).run_tests(''' + OneOrMore(wd).set_parse_action(' '.join).run_tests(''' now is the winter of our discontent made glorious summer by this sun of york ''') @@ -5795,9 +5772,7 @@ def autoname_elements() -> None: # build list of built-in expressions, for future reference if a global default value # gets updated -_builtin_exprs: List[ParserElement] = [ - v for v in vars().values() if isinstance(v, ParserElement) -] +_builtin_exprs = [v for v in vars().values() if isinstance(v, ParserElement)] # backward compatibility names tokenMap = token_map diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/pyparsing/diagram/__init__.py b/venv/lib/python3.10/site-packages/pip/_vendor/pyparsing/diagram/__init__.py index 1506d66..895b97b 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/pyparsing/diagram/__init__.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/pyparsing/diagram/__init__.py @@ -1,8 +1,9 @@ import railroad from pip._vendor import pyparsing -import typing +from pip._vendor.pkg_resources import resource_filename from typing import ( List, + Optional, NamedTuple, Generic, TypeVar, @@ -15,42 +16,13 @@ from io import StringIO import inspect - -jinja2_template_source = """\ - - - - {% if not head %} - - {% else %} - {{ head | safe }} - {% endif %} - - -{{ body | safe }} -{% for diagram in diagrams %} -
-    <div class="railroad-group">
-        <h1 class="railroad-heading">{{ diagram.title }}</h1>
-        <div class="railroad-description">{{ diagram.text }}</div>
-        <div class="railroad-svg">
-            {{ diagram.svg }}
-        </div>
-    </div>
-{% endfor %} - - -""" - -template = Template(jinja2_template_source) +with open(resource_filename(__name__, "template.jinja2"), encoding="utf-8") as fp: + template = Template(fp.read()) # Note: ideally this would be a dataclass, but we're supporting Python 3.5+ so we can't do this yet NamedDiagram = NamedTuple( "NamedDiagram", - [("name", str), ("diagram", typing.Optional[railroad.DiagramItem]), ("index", int)], + [("name", str), ("diagram", Optional[railroad.DiagramItem]), ("index", int)], ) """ A simple structure for associating a name with a railroad diagram @@ -82,7 +54,7 @@ class AnnotatedItem(railroad.Group): """ def __init__(self, label: str, item): - super().__init__(item=item, label="[{}]".format(label) if label else label) + super().__init__(item=item, label="[{}]".format(label)) class EditablePartial(Generic[T]): @@ -134,8 +106,6 @@ def railroad_to_html(diagrams: List[NamedDiagram], **kwargs) -> str: """ data = [] for diagram in diagrams: - if diagram.diagram is None: - continue io = StringIO() diagram.diagram.writeSvg(io.write) title = diagram.name @@ -164,10 +134,9 @@ def resolve_partial(partial: "EditablePartial[T]") -> T: def to_railroad( element: pyparsing.ParserElement, - diagram_kwargs: typing.Optional[dict] = None, + diagram_kwargs: Optional[dict] = None, vertical: int = 3, show_results_names: bool = False, - show_groups: bool = False, ) -> List[NamedDiagram]: """ Convert a pyparsing element tree into a list of diagrams. This is the recommended entrypoint to diagram @@ -178,8 +147,6 @@ def to_railroad( shown vertically instead of horizontally :param show_results_names - bool to indicate whether results name annotations should be included in the diagram - :param show_groups - bool to indicate whether groups should be highlighted with an unlabeled - surrounding box """ # Convert the whole tree underneath the root lookup = ConverterState(diagram_kwargs=diagram_kwargs or {}) @@ -189,7 +156,6 @@ def to_railroad( parent=None, vertical=vertical, show_results_names=show_results_names, - show_groups=show_groups, ) root_id = id(element) @@ -245,12 +211,12 @@ def __init__( parent: EditablePartial, number: int, name: str = None, - parent_index: typing.Optional[int] = None, + parent_index: Optional[int] = None, ): #: The pyparsing element that this represents self.element: pyparsing.ParserElement = element #: The name of the element - self.name: typing.Optional[str] = name + self.name: str = name #: The output Railroad element in an unconverted state self.converted: EditablePartial = converted #: The parent Railroad element, which we store so that we can extract this if it's duplicated @@ -258,7 +224,7 @@ def __init__( #: The order in which we found this element, used for sorting diagrams if this is extracted into a diagram self.number: int = number #: The index of this inside its parent - self.parent_index: typing.Optional[int] = parent_index + self.parent_index: Optional[int] = parent_index #: If true, we should extract this out into a subdiagram self.extract: bool = False #: If true, all of this element's children have been filled out @@ -299,7 +265,7 @@ class ConverterState: Stores some state that persists between recursions into the element tree """ - def __init__(self, diagram_kwargs: typing.Optional[dict] = None): + def __init__(self, diagram_kwargs: Optional[dict] = None): #: A dictionary mapping ParserElements to state relating to them self._element_diagram_states: Dict[int, ElementState] = {} #: A dictionary mapping ParserElement IDs to subdiagrams generated from them @@ 
-390,14 +356,13 @@ def _apply_diagram_item_enhancements(fn): def _inner( element: pyparsing.ParserElement, - parent: typing.Optional[EditablePartial], + parent: Optional[EditablePartial], lookup: ConverterState = None, vertical: int = None, index: int = 0, name_hint: str = None, show_results_names: bool = False, - show_groups: bool = False, - ) -> typing.Optional[EditablePartial]: + ) -> Optional[EditablePartial]: ret = fn( element, @@ -407,7 +372,6 @@ def _inner( index, name_hint, show_results_names, - show_groups, ) # apply annotation for results name, if present @@ -441,14 +405,13 @@ def _visible_exprs(exprs: Iterable[pyparsing.ParserElement]): @_apply_diagram_item_enhancements def _to_diagram_element( element: pyparsing.ParserElement, - parent: typing.Optional[EditablePartial], + parent: Optional[EditablePartial], lookup: ConverterState = None, vertical: int = None, index: int = 0, name_hint: str = None, show_results_names: bool = False, - show_groups: bool = False, -) -> typing.Optional[EditablePartial]: +) -> Optional[EditablePartial]: """ Recursively converts a PyParsing Element to a railroad Element :param lookup: The shared converter state that keeps track of useful things @@ -460,7 +423,6 @@ def _to_diagram_element( :param name_hint: If provided, this will override the generated name :param show_results_names: bool flag indicating whether to add annotations for results names :returns: The converted version of the input element, but as a Partial that hasn't yet been constructed - :param show_groups: bool flag indicating whether to show groups using bounding box """ exprs = element.recurse() name = name_hint or element.customName or element.__class__.__name__ @@ -475,7 +437,7 @@ def _to_diagram_element( if isinstance( element, ( - # pyparsing.TokenConverter, + pyparsing.TokenConverter, # pyparsing.Forward, pyparsing.Located, ), @@ -495,7 +457,6 @@ def _to_diagram_element( index=index, name_hint=propagated_name, show_results_names=show_results_names, - show_groups=show_groups, ) # If the element isn't worth extracting, we always treat it as the first time we say it @@ -549,15 +510,6 @@ def _to_diagram_element( ret = EditablePartial.from_call(AnnotatedItem, label="LOOKAHEAD", item="") elif isinstance(element, pyparsing.PrecededBy): ret = EditablePartial.from_call(AnnotatedItem, label="LOOKBEHIND", item="") - elif isinstance(element, pyparsing.Group): - if show_groups: - ret = EditablePartial.from_call(AnnotatedItem, label="", item="") - else: - ret = EditablePartial.from_call(railroad.Group, label="", item="") - elif isinstance(element, pyparsing.TokenConverter): - ret = EditablePartial.from_call( - AnnotatedItem, label=type(element).__name__.lower(), item="" - ) elif isinstance(element, pyparsing.Opt): ret = EditablePartial.from_call(railroad.Optional, item="") elif isinstance(element, pyparsing.OneOrMore): @@ -606,7 +558,6 @@ def _to_diagram_element( vertical=vertical, index=i, show_results_names=show_results_names, - show_groups=show_groups, ) # Some elements don't need to be shown in the diagram diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/pyparsing/exceptions.py b/venv/lib/python3.10/site-packages/pip/_vendor/pyparsing/exceptions.py index a38447b..e06513e 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/pyparsing/exceptions.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/pyparsing/exceptions.py @@ -2,7 +2,7 @@ import re import sys -import typing +from typing import Optional from .util import col, line, lineno, _collapse_string_to_ranges from 
.unicode import pyparsing_unicode as ppu @@ -25,7 +25,7 @@ def __init__( self, pstr: str, loc: int = 0, - msg: typing.Optional[str] = None, + msg: Optional[str] = None, elem=None, ): self.loc = loc diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/pyparsing/helpers.py b/venv/lib/python3.10/site-packages/pip/_vendor/pyparsing/helpers.py index 9588b3b..5e7b3ad 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/pyparsing/helpers.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/pyparsing/helpers.py @@ -1,7 +1,6 @@ # helpers.py import html.entities import re -import typing from . import __diag__ from .core import * @@ -15,8 +14,8 @@ def delimited_list( expr: Union[str, ParserElement], delim: Union[str, ParserElement] = ",", combine: bool = False, - min: typing.Optional[int] = None, - max: typing.Optional[int] = None, + min: OptionalType[int] = None, + max: OptionalType[int] = None, *, allow_trailing_delim: bool = False, ) -> ParserElement: @@ -70,9 +69,9 @@ def delimited_list( def counted_array( expr: ParserElement, - int_expr: typing.Optional[ParserElement] = None, + int_expr: OptionalType[ParserElement] = None, *, - intExpr: typing.Optional[ParserElement] = None, + intExpr: OptionalType[ParserElement] = None, ) -> ParserElement: """Helper to define a counted list of expressions. @@ -186,9 +185,7 @@ def copy_token_to_repeater(s, l, t): def must_match_these_tokens(s, l, t): theseTokens = _flatten(t.as_list()) if theseTokens != matchTokens: - raise ParseException( - s, l, "Expected {}, found{}".format(matchTokens, theseTokens) - ) + raise ParseException(s, l, "Expected {}, found{}".format(matchTokens, theseTokens)) rep.set_parse_action(must_match_these_tokens, callDuringTry=True) @@ -198,7 +195,7 @@ def must_match_these_tokens(s, l, t): def one_of( - strs: Union[typing.Iterable[str], str], + strs: Union[IterableType[str], str], caseless: bool = False, use_regex: bool = True, as_keyword: bool = False, @@ -313,7 +310,7 @@ def one_of( return ret - except re.error: + except sre_constants.error: warnings.warn( "Exception creating Regex for one_of, building MatchFirst", stacklevel=2 ) @@ -338,7 +335,7 @@ def dict_of(key: ParserElement, value: ParserElement) -> ParserElement: text = "shape: SQUARE posn: upper left color: light blue texture: burlap" attr_expr = (label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)) - print(attr_expr[1, ...].parse_string(text).dump()) + print(OneOrMore(attr_expr).parse_string(text).dump()) attr_label = label attr_value = Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join) @@ -353,10 +350,10 @@ def dict_of(key: ParserElement, value: ParserElement) -> ParserElement: prints:: [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']] - - color: 'light blue' - - posn: 'upper left' - - shape: 'SQUARE' - - texture: 'burlap' + - color: light blue + - posn: upper left + - shape: SQUARE + - texture: burlap SQUARE SQUARE {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'} @@ -462,7 +459,7 @@ def locatedExpr(expr: ParserElement) -> ParserElement: def nested_expr( opener: Union[str, ParserElement] = "(", closer: Union[str, ParserElement] = ")", - content: typing.Optional[ParserElement] = None, + content: OptionalType[ParserElement] = None, ignore_expr: ParserElement = quoted_string(), *, ignoreExpr: ParserElement = quoted_string(), @@ -683,8 +680,6 @@ def make_xml_tags( return _makeTags(tag_str, True) -any_open_tag: ParserElement 
-any_close_tag: ParserElement any_open_tag, any_close_tag = make_html_tags( Word(alphas, alphanums + "_:").set_name("any tag") ) @@ -713,7 +708,7 @@ class OpAssoc(Enum): InfixNotationOperatorArgType, int, OpAssoc, - typing.Optional[ParseAction], + OptionalType[ParseAction], ], Tuple[ InfixNotationOperatorArgType, @@ -763,14 +758,10 @@ def infix_notation( a tuple or list of functions, this is equivalent to calling ``set_parse_action(*fn)`` (:class:`ParserElement.set_parse_action`) - - ``lpar`` - expression for matching left-parentheses; if passed as a - str, then will be parsed as Suppress(lpar). If lpar is passed as - an expression (such as ``Literal('(')``), then it will be kept in - the parsed results, and grouped with them. (default= ``Suppress('(')``) - - ``rpar`` - expression for matching right-parentheses; if passed as a - str, then will be parsed as Suppress(rpar). If rpar is passed as - an expression (such as ``Literal(')')``), then it will be kept in - the parsed results, and grouped with them. (default= ``Suppress(')')``) + - ``lpar`` - expression for matching left-parentheses + (default= ``Suppress('(')``) + - ``rpar`` - expression for matching right-parentheses + (default= ``Suppress(')')``) Example:: @@ -812,17 +803,9 @@ def parseImpl(self, instring, loc, doActions=True): _FB.__name__ = "FollowedBy>" ret = Forward() - if isinstance(lpar, str): - lpar = Suppress(lpar) - if isinstance(rpar, str): - rpar = Suppress(rpar) - - # if lpar and rpar are not suppressed, wrap in group - if not (isinstance(rpar, Suppress) and isinstance(rpar, Suppress)): - lastExpr = base_expr | Group(lpar + ret + rpar) - else: - lastExpr = base_expr | (lpar + ret + rpar) - + lpar = Suppress(lpar) + rpar = Suppress(rpar) + lastExpr = base_expr | (lpar + ret + rpar) for i, operDef in enumerate(op_list): opExpr, arity, rightLeftAssoc, pa = (operDef + (None,))[:4] if isinstance(opExpr, str_type): @@ -843,7 +826,7 @@ def parseImpl(self, instring, loc, doActions=True): if rightLeftAssoc not in (OpAssoc.LEFT, OpAssoc.RIGHT): raise ValueError("operator must indicate right or left associativity") - thisExpr: Forward = Forward().set_name(term_name) + thisExpr = Forward().set_name(term_name) if rightLeftAssoc is OpAssoc.LEFT: if arity == 1: matchExpr = _FB(lastExpr + opExpr) + Group(lastExpr + opExpr[1, ...]) @@ -948,7 +931,7 @@ def eggs(z): assignment = Group(identifier + "=" + rvalue) stmt << (funcDef | assignment | identifier) - module_body = stmt[1, ...] + module_body = OneOrMore(stmt) parseTree = module_body.parseString(data) parseTree.pprint() @@ -1058,9 +1041,7 @@ def checkUnindent(s, l, t): # build list of built-in expressions, for future reference if a global default value # gets updated -_builtin_exprs: List[ParserElement] = [ - v for v in vars().values() if isinstance(v, ParserElement) -] +_builtin_exprs = [v for v in vars().values() if isinstance(v, ParserElement)] # pre-PEP8 compatible names diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/pyparsing/results.py b/venv/lib/python3.10/site-packages/pip/_vendor/pyparsing/results.py index 00c9421..9676f45 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/pyparsing/results.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/pyparsing/results.py @@ -65,9 +65,9 @@ def test(s, fn=repr): 'month' in result -> True 'minutes' in result -> False result.dump() -> ['1999', '/', '12', '/', '31'] - - day: '31' - - month: '12' - - year: '1999' + - day: 31 + - month: 12 + - year: 1999 """ _null_values: Tuple[Any, ...] 
= (None, [], "", ()) @@ -287,7 +287,7 @@ def remove_first(tokens): print(numlist.parse_string("0 123 321")) # -> ['123', '321'] label = Word(alphas) - patt = label("LABEL") + Word(nums)[1, ...] + patt = label("LABEL") + OneOrMore(Word(nums)) print(patt.parse_string("AAB 123 321").dump()) # Use pop() in a parse action to remove named result (note that corresponding value is not @@ -301,7 +301,7 @@ def remove_LABEL(tokens): prints:: ['AAB', '123', '321'] - - LABEL: 'AAB' + - LABEL: AAB ['AAB', '123', '321'] """ @@ -394,7 +394,7 @@ def extend(self, itemseq): Example:: - patt = Word(alphas)[1, ...] + patt = OneOrMore(Word(alphas)) # use a parse action to append the reverse of the matched strings, to make a palindrome def make_palindrome(tokens): @@ -487,7 +487,7 @@ def as_list(self) -> list: Example:: - patt = Word(alphas)[1, ...] + patt = OneOrMore(Word(alphas)) result = patt.parse_string("sldkj lsdkj sldkj") # even though the result prints in string-like form, it is actually a pyparsing ParseResults print(type(result), result) # -> ['sldkj', 'lsdkj', 'sldkj'] @@ -554,7 +554,7 @@ def get_name(self): user_data = (Group(house_number_expr)("house_number") | Group(ssn_expr)("ssn") | Group(integer)("age")) - user_info = user_data[1, ...] + user_info = OneOrMore(user_data) result = user_info.parse_string("22 111-22-3333 #221B") for item in result: @@ -603,15 +603,15 @@ def dump(self, indent="", full=True, include_list=True, _depth=0) -> str: integer = Word(nums) date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - result = date_str.parse_string('1999/12/31') + result = date_str.parse_string('12/31/1999') print(result.dump()) prints:: - ['1999', '/', '12', '/', '31'] - - day: '31' - - month: '12' - - year: '1999' + ['12', '/', '31', '/', '1999'] + - day: 1999 + - month: 31 + - year: 12 """ out = [] NL = "\n" diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/pyparsing/testing.py b/venv/lib/python3.10/site-packages/pip/_vendor/pyparsing/testing.py index 84a0ef1..991972f 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/pyparsing/testing.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/pyparsing/testing.py @@ -1,7 +1,7 @@ # testing.py from contextlib import contextmanager -import typing +from typing import Optional from .core import ( ParserElement, @@ -237,12 +237,12 @@ def assertRaisesParseException(self, exc_type=ParseException, msg=None): @staticmethod def with_line_numbers( s: str, - start_line: typing.Optional[int] = None, - end_line: typing.Optional[int] = None, + start_line: Optional[int] = None, + end_line: Optional[int] = None, expand_tabs: bool = True, eol_mark: str = "|", - mark_spaces: typing.Optional[str] = None, - mark_control: typing.Optional[str] = None, + mark_spaces: Optional[str] = None, + mark_control: Optional[str] = None, ) -> str: """ Helpful method for debugging a parser - prints a string with line and column numbers. diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/pyparsing/unicode.py b/venv/lib/python3.10/site-packages/pip/_vendor/pyparsing/unicode.py index 0652620..9226148 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/pyparsing/unicode.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/pyparsing/unicode.py @@ -120,18 +120,7 @@ class pyparsing_unicode(unicode_set): A namespace class for defining common language unicode_sets. 
""" - # fmt: off - - # define ranges in language character sets - _ranges: UnicodeRangeList = [ - (0x0020, sys.maxunicode), - ] - - class BasicMultilingualPlane(unicode_set): - "Unicode set for the Basic Multilingual Plane" - _ranges: UnicodeRangeList = [ - (0x0020, 0xFFFF), - ] + _ranges: UnicodeRangeList = [(32, sys.maxunicode)] class Latin1(unicode_set): "Unicode set for Latin-1 Unicode Character Range" @@ -289,13 +278,11 @@ class Hangul(unicode_set): class CJK(Chinese, Japanese, Hangul): "Unicode set for combined Chinese, Japanese, and Korean (CJK) Unicode Character Range" + pass class Thai(unicode_set): "Unicode set for Thai Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x0E01, 0x0E3A), - (0x0E3F, 0x0E5B) - ] + _ranges: UnicodeRangeList = [(0x0E01, 0x0E3A), (0x0E3F, 0x0E5B)] class Arabic(unicode_set): "Unicode set for Arabic Unicode Character Range" @@ -321,12 +308,7 @@ class Hebrew(unicode_set): class Devanagari(unicode_set): "Unicode set for Devanagari Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x0900, 0x097F), - (0xA8E0, 0xA8FF) - ] - - # fmt: on + _ranges: UnicodeRangeList = [(0x0900, 0x097F), (0xA8E0, 0xA8FF)] pyparsing_unicode.Japanese._ranges = ( @@ -335,9 +317,7 @@ class Devanagari(unicode_set): + pyparsing_unicode.Japanese.Katakana._ranges ) -pyparsing_unicode.BMP = pyparsing_unicode.BasicMultilingualPlane - -# add language identifiers using language Unicode +# define ranges in language character sets pyparsing_unicode.العربية = pyparsing_unicode.Arabic pyparsing_unicode.中文 = pyparsing_unicode.Chinese pyparsing_unicode.кириллица = pyparsing_unicode.Cyrillic diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/requests/__init__.py b/venv/lib/python3.10/site-packages/pip/_vendor/requests/__init__.py index 9e97059..75a633b 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/requests/__init__.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/requests/__init__.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- + # __ # /__) _ _ _ _ _/ _ # / ( (- (/ (/ (- _) / _) @@ -38,10 +40,8 @@ :license: Apache 2.0, see LICENSE for more details. """ -import warnings - from pip._vendor import urllib3 - +import warnings from .exceptions import RequestsDependencyWarning charset_normalizer_version = None @@ -51,14 +51,13 @@ except ImportError: chardet_version = None - def check_compatibility(urllib3_version, chardet_version, charset_normalizer_version): - urllib3_version = urllib3_version.split(".") - assert urllib3_version != ["dev"] # Verify urllib3 isn't installed from git. + urllib3_version = urllib3_version.split('.') + assert urllib3_version != ['dev'] # Verify urllib3 isn't installed from git. # Sometimes, urllib3 only reports its version as 16.1. if len(urllib3_version) == 2: - urllib3_version.append("0") + urllib3_version.append('0') # Check urllib3 for compatibility. major, minor, patch = urllib3_version # noqa: F811 @@ -70,46 +69,36 @@ def check_compatibility(urllib3_version, chardet_version, charset_normalizer_ver # Check charset_normalizer for compatibility. 
if chardet_version: - major, minor, patch = chardet_version.split(".")[:3] + major, minor, patch = chardet_version.split('.')[:3] major, minor, patch = int(major), int(minor), int(patch) - # chardet_version >= 3.0.2, < 6.0.0 - assert (3, 0, 2) <= (major, minor, patch) < (6, 0, 0) + # chardet_version >= 3.0.2, < 5.0.0 + assert (3, 0, 2) <= (major, minor, patch) < (5, 0, 0) elif charset_normalizer_version: - major, minor, patch = charset_normalizer_version.split(".")[:3] + major, minor, patch = charset_normalizer_version.split('.')[:3] major, minor, patch = int(major), int(minor), int(patch) # charset_normalizer >= 2.0.0 < 3.0.0 assert (2, 0, 0) <= (major, minor, patch) < (3, 0, 0) else: raise Exception("You need either charset_normalizer or chardet installed") - def _check_cryptography(cryptography_version): # cryptography < 1.3.4 try: - cryptography_version = list(map(int, cryptography_version.split("."))) + cryptography_version = list(map(int, cryptography_version.split('.'))) except ValueError: return if cryptography_version < [1, 3, 4]: - warning = "Old version of cryptography ({}) may cause slowdown.".format( - cryptography_version - ) + warning = 'Old version of cryptography ({}) may cause slowdown.'.format(cryptography_version) warnings.warn(warning, RequestsDependencyWarning) - # Check imported dependencies for compatibility. try: - check_compatibility( - urllib3.__version__, chardet_version, charset_normalizer_version - ) + check_compatibility(urllib3.__version__, chardet_version, charset_normalizer_version) except (AssertionError, ValueError): - warnings.warn( - "urllib3 ({}) or chardet ({})/charset_normalizer ({}) doesn't match a supported " - "version!".format( - urllib3.__version__, chardet_version, charset_normalizer_version - ), - RequestsDependencyWarning, - ) + warnings.warn("urllib3 ({}) or chardet ({})/charset_normalizer ({}) doesn't match a supported " + "version!".format(urllib3.__version__, chardet_version, charset_normalizer_version), + RequestsDependencyWarning) # Attempt to enable urllib3's fallback for SNI support # if the standard library doesn't support SNI or the @@ -127,56 +116,39 @@ def _check_cryptography(cryptography_version): if not getattr(ssl, "HAS_SNI", False): from pip._vendor.urllib3.contrib import pyopenssl - pyopenssl.inject_into_urllib3() # Check cryptography version from cryptography import __version__ as cryptography_version - _check_cryptography(cryptography_version) except ImportError: pass # urllib3's DependencyWarnings should be silenced. from pip._vendor.urllib3.exceptions import DependencyWarning +warnings.simplefilter('ignore', DependencyWarning) -warnings.simplefilter("ignore", DependencyWarning) +from .__version__ import __title__, __description__, __url__, __version__ +from .__version__ import __build__, __author__, __author_email__, __license__ +from .__version__ import __copyright__, __cake__ + +from . import utils +from . import packages +from .models import Request, Response, PreparedRequest +from .api import request, get, head, post, patch, put, delete, options +from .sessions import session, Session +from .status_codes import codes +from .exceptions import ( + RequestException, Timeout, URLRequired, + TooManyRedirects, HTTPError, ConnectionError, + FileModeWarning, ConnectTimeout, ReadTimeout, JSONDecodeError +) # Set default logging handler to avoid "No handler found" warnings. import logging from logging import NullHandler -from . 
import packages, utils -from .__version__ import ( - __author__, - __author_email__, - __build__, - __cake__, - __copyright__, - __description__, - __license__, - __title__, - __url__, - __version__, -) -from .api import delete, get, head, options, patch, post, put, request -from .exceptions import ( - ConnectionError, - ConnectTimeout, - FileModeWarning, - HTTPError, - JSONDecodeError, - ReadTimeout, - RequestException, - Timeout, - TooManyRedirects, - URLRequired, -) -from .models import PreparedRequest, Request, Response -from .sessions import Session, session -from .status_codes import codes - logging.getLogger(__name__).addHandler(NullHandler()) # FileModeWarnings go off per the default. -warnings.simplefilter("default", FileModeWarning, append=True) +warnings.simplefilter('default', FileModeWarning, append=True) diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/requests/__version__.py b/venv/lib/python3.10/site-packages/pip/_vendor/requests/__version__.py index e725ada..e973b03 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/requests/__version__.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/requests/__version__.py @@ -2,13 +2,13 @@ # |( |- |.| | | |- `-. | `-. # ' ' `-' `-`.`-' `-' `-' ' `-' -__title__ = "requests" -__description__ = "Python HTTP for Humans." -__url__ = "https://requests.readthedocs.io" -__version__ = "2.28.1" -__build__ = 0x022801 -__author__ = "Kenneth Reitz" -__author_email__ = "me@kennethreitz.org" -__license__ = "Apache 2.0" -__copyright__ = "Copyright 2022 Kenneth Reitz" -__cake__ = "\u2728 \U0001f370 \u2728" +__title__ = 'requests' +__description__ = 'Python HTTP for Humans.' +__url__ = 'https://requests.readthedocs.io' +__version__ = '2.27.1' +__build__ = 0x022701 +__author__ = 'Kenneth Reitz' +__author_email__ = 'me@kennethreitz.org' +__license__ = 'Apache 2.0' +__copyright__ = 'Copyright 2022 Kenneth Reitz' +__cake__ = u'\u2728 \U0001f370 \u2728' diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/requests/_internal_utils.py b/venv/lib/python3.10/site-packages/pip/_vendor/requests/_internal_utils.py index 7dc9bc5..759d9a5 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/requests/_internal_utils.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/requests/_internal_utils.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- + """ requests._internal_utils ~~~~~~~~~~~~~~ @@ -5,22 +7,11 @@ Provides utility functions that are consumed internally by Requests which depend on extremely few external helpers (such as compat) """ -import re - -from .compat import builtin_str - -_VALID_HEADER_NAME_RE_BYTE = re.compile(rb"^[^:\s][^:\r\n]*$") -_VALID_HEADER_NAME_RE_STR = re.compile(r"^[^:\s][^:\r\n]*$") -_VALID_HEADER_VALUE_RE_BYTE = re.compile(rb"^\S[^\r\n]*$|^$") -_VALID_HEADER_VALUE_RE_STR = re.compile(r"^\S[^\r\n]*$|^$") -HEADER_VALIDATORS = { - bytes: (_VALID_HEADER_NAME_RE_BYTE, _VALID_HEADER_VALUE_RE_BYTE), - str: (_VALID_HEADER_NAME_RE_STR, _VALID_HEADER_VALUE_RE_STR), -} +from .compat import is_py2, builtin_str, str -def to_native_string(string, encoding="ascii"): +def to_native_string(string, encoding='ascii'): """Given a string object, regardless of type, returns a representation of that string in the native string type, encoding and decoding where necessary. This assumes ASCII unless told otherwise. 
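Editor's aside: the hunk below reintroduces the Python 2 branch of
`to_native_string`; on Python 3 the function reduces to "pass str through,
decode bytes". A minimal sketch of that behavior (the `_py3` helper name is
mine, not part of requests), assuming ASCII-safe input:

    def to_native_string_py3(string, encoding="ascii"):
        # str is already the native type; bytes get decoded
        return string if isinstance(string, str) else string.decode(encoding)

    to_native_string_py3(b"Host")  # -> 'Host'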
@@ -28,7 +19,10 @@ def to_native_string(string, encoding="ascii"): if isinstance(string, builtin_str): out = string else: - out = string.decode(encoding) + if is_py2: + out = string.encode(encoding) + else: + out = string.decode(encoding) return out @@ -42,7 +36,7 @@ def unicode_is_ascii(u_string): """ assert isinstance(u_string, str) try: - u_string.encode("ascii") + u_string.encode('ascii') return True except UnicodeEncodeError: return False diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/requests/adapters.py b/venv/lib/python3.10/site-packages/pip/_vendor/requests/adapters.py index f68f7d4..b3dfa57 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/requests/adapters.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/requests/adapters.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- + """ requests.adapters ~~~~~~~~~~~~~~~~~ @@ -7,76 +9,58 @@ """ import os.path -import socket # noqa: F401 +import socket -from pip._vendor.urllib3.exceptions import ClosedPoolError, ConnectTimeoutError -from pip._vendor.urllib3.exceptions import HTTPError as _HTTPError -from pip._vendor.urllib3.exceptions import InvalidHeader as _InvalidHeader -from pip._vendor.urllib3.exceptions import ( - LocationValueError, - MaxRetryError, - NewConnectionError, - ProtocolError, -) -from pip._vendor.urllib3.exceptions import ProxyError as _ProxyError -from pip._vendor.urllib3.exceptions import ReadTimeoutError, ResponseError -from pip._vendor.urllib3.exceptions import SSLError as _SSLError from pip._vendor.urllib3.poolmanager import PoolManager, proxy_from_url from pip._vendor.urllib3.response import HTTPResponse -from pip._vendor.urllib3.util import Timeout as TimeoutSauce from pip._vendor.urllib3.util import parse_url +from pip._vendor.urllib3.util import Timeout as TimeoutSauce from pip._vendor.urllib3.util.retry import Retry +from pip._vendor.urllib3.exceptions import ClosedPoolError +from pip._vendor.urllib3.exceptions import ConnectTimeoutError +from pip._vendor.urllib3.exceptions import HTTPError as _HTTPError +from pip._vendor.urllib3.exceptions import InvalidHeader as _InvalidHeader +from pip._vendor.urllib3.exceptions import MaxRetryError +from pip._vendor.urllib3.exceptions import NewConnectionError +from pip._vendor.urllib3.exceptions import ProxyError as _ProxyError +from pip._vendor.urllib3.exceptions import ProtocolError +from pip._vendor.urllib3.exceptions import ReadTimeoutError +from pip._vendor.urllib3.exceptions import SSLError as _SSLError +from pip._vendor.urllib3.exceptions import ResponseError +from pip._vendor.urllib3.exceptions import LocationValueError -from .auth import _basic_auth_str -from .compat import basestring, urlparse -from .cookies import extract_cookies_to_jar -from .exceptions import ( - ConnectionError, - ConnectTimeout, - InvalidHeader, - InvalidProxyURL, - InvalidSchema, - InvalidURL, - ProxyError, - ReadTimeout, - RetryError, - SSLError, -) from .models import Response +from .compat import urlparse, basestring +from .utils import (DEFAULT_CA_BUNDLE_PATH, extract_zipped_paths, + get_encoding_from_headers, prepend_scheme_if_needed, + get_auth_from_url, urldefragauth, select_proxy) from .structures import CaseInsensitiveDict -from .utils import ( - DEFAULT_CA_BUNDLE_PATH, - extract_zipped_paths, - get_auth_from_url, - get_encoding_from_headers, - prepend_scheme_if_needed, - select_proxy, - urldefragauth, -) +from .cookies import extract_cookies_to_jar +from .exceptions import (ConnectionError, ConnectTimeout, ReadTimeout, SSLError, + ProxyError, RetryError, 
InvalidSchema, InvalidProxyURL, + InvalidURL, InvalidHeader) +from .auth import _basic_auth_str try: from pip._vendor.urllib3.contrib.socks import SOCKSProxyManager except ImportError: - def SOCKSProxyManager(*args, **kwargs): raise InvalidSchema("Missing dependencies for SOCKS support.") - DEFAULT_POOLBLOCK = False DEFAULT_POOLSIZE = 10 DEFAULT_RETRIES = 0 DEFAULT_POOL_TIMEOUT = None -class BaseAdapter: +class BaseAdapter(object): """The Base Transport Adapter""" def __init__(self): - super().__init__() + super(BaseAdapter, self).__init__() - def send( - self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None - ): + def send(self, request, stream=False, timeout=None, verify=True, + cert=None, proxies=None): """Sends PreparedRequest object. Returns Response object. :param request: The :class:`PreparedRequest ` being sent. @@ -124,22 +108,12 @@ class HTTPAdapter(BaseAdapter): >>> a = requests.adapters.HTTPAdapter(max_retries=3) >>> s.mount('http://', a) """ + __attrs__ = ['max_retries', 'config', '_pool_connections', '_pool_maxsize', + '_pool_block'] - __attrs__ = [ - "max_retries", - "config", - "_pool_connections", - "_pool_maxsize", - "_pool_block", - ] - - def __init__( - self, - pool_connections=DEFAULT_POOLSIZE, - pool_maxsize=DEFAULT_POOLSIZE, - max_retries=DEFAULT_RETRIES, - pool_block=DEFAULT_POOLBLOCK, - ): + def __init__(self, pool_connections=DEFAULT_POOLSIZE, + pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES, + pool_block=DEFAULT_POOLBLOCK): if max_retries == DEFAULT_RETRIES: self.max_retries = Retry(0, read=False) else: @@ -147,7 +121,7 @@ def __init__( self.config = {} self.proxy_manager = {} - super().__init__() + super(HTTPAdapter, self).__init__() self._pool_connections = pool_connections self._pool_maxsize = pool_maxsize @@ -167,13 +141,10 @@ def __setstate__(self, state): for attr, value in state.items(): setattr(self, attr, value) - self.init_poolmanager( - self._pool_connections, self._pool_maxsize, block=self._pool_block - ) + self.init_poolmanager(self._pool_connections, self._pool_maxsize, + block=self._pool_block) - def init_poolmanager( - self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs - ): + def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs): """Initializes a urllib3 PoolManager. This method should not be called from user code, and is only @@ -190,13 +161,8 @@ def init_poolmanager( self._pool_maxsize = maxsize self._pool_block = block - self.poolmanager = PoolManager( - num_pools=connections, - maxsize=maxsize, - block=block, - strict=True, - **pool_kwargs, - ) + self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize, + block=block, strict=True, **pool_kwargs) def proxy_manager_for(self, proxy, **proxy_kwargs): """Return urllib3 ProxyManager for the given proxy. 
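Editor's aside: the HTTPAdapter hunks in this file are largely reversals of
Black formatting plus restored Python 2 compatibility; the pooling behavior
itself is unchanged. For context, a minimal usage sketch of the knobs involved
(standard requests API, example values arbitrary):

    import requests
    from requests.adapters import HTTPAdapter

    s = requests.Session()
    # cache up to 10 per-host connection pools, 10 connections per pool,
    # and retry failed connections up to 3 times
    s.mount("https://", HTTPAdapter(pool_connections=10, pool_maxsize=10,
                                    max_retries=3))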
@@ -212,7 +178,7 @@ def proxy_manager_for(self, proxy, **proxy_kwargs): """ if proxy in self.proxy_manager: manager = self.proxy_manager[proxy] - elif proxy.lower().startswith("socks"): + elif proxy.lower().startswith('socks'): username, password = get_auth_from_url(proxy) manager = self.proxy_manager[proxy] = SOCKSProxyManager( proxy, @@ -221,7 +187,7 @@ def proxy_manager_for(self, proxy, **proxy_kwargs): num_pools=self._pool_connections, maxsize=self._pool_maxsize, block=self._pool_block, - **proxy_kwargs, + **proxy_kwargs ) else: proxy_headers = self.proxy_headers(proxy) @@ -231,8 +197,7 @@ def proxy_manager_for(self, proxy, **proxy_kwargs): num_pools=self._pool_connections, maxsize=self._pool_maxsize, block=self._pool_block, - **proxy_kwargs, - ) + **proxy_kwargs) return manager @@ -248,7 +213,7 @@ def cert_verify(self, conn, url, verify, cert): to a CA bundle to use :param cert: The SSL certificate to verify. """ - if url.lower().startswith("https") and verify: + if url.lower().startswith('https') and verify: cert_loc = None @@ -260,19 +225,17 @@ def cert_verify(self, conn, url, verify, cert): cert_loc = extract_zipped_paths(DEFAULT_CA_BUNDLE_PATH) if not cert_loc or not os.path.exists(cert_loc): - raise OSError( - f"Could not find a suitable TLS CA certificate bundle, " - f"invalid path: {cert_loc}" - ) + raise IOError("Could not find a suitable TLS CA certificate bundle, " + "invalid path: {}".format(cert_loc)) - conn.cert_reqs = "CERT_REQUIRED" + conn.cert_reqs = 'CERT_REQUIRED' if not os.path.isdir(cert_loc): conn.ca_certs = cert_loc else: conn.ca_cert_dir = cert_loc else: - conn.cert_reqs = "CERT_NONE" + conn.cert_reqs = 'CERT_NONE' conn.ca_certs = None conn.ca_cert_dir = None @@ -284,14 +247,11 @@ def cert_verify(self, conn, url, verify, cert): conn.cert_file = cert conn.key_file = None if conn.cert_file and not os.path.exists(conn.cert_file): - raise OSError( - f"Could not find the TLS certificate file, " - f"invalid path: {conn.cert_file}" - ) + raise IOError("Could not find the TLS certificate file, " + "invalid path: {}".format(conn.cert_file)) if conn.key_file and not os.path.exists(conn.key_file): - raise OSError( - f"Could not find the TLS key file, invalid path: {conn.key_file}" - ) + raise IOError("Could not find the TLS key file, " + "invalid path: {}".format(conn.key_file)) def build_response(self, req, resp): """Builds a :class:`Response ` object from a urllib3 @@ -306,10 +266,10 @@ def build_response(self, req, resp): response = Response() # Fallback to None if there's no status_code, for whatever reason. - response.status_code = getattr(resp, "status", None) + response.status_code = getattr(resp, 'status', None) # Make headers case-insensitive. - response.headers = CaseInsensitiveDict(getattr(resp, "headers", {})) + response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {})) # Set encoding. response.encoding = get_encoding_from_headers(response.headers) @@ -317,7 +277,7 @@ def build_response(self, req, resp): response.reason = response.raw.reason if isinstance(req.url, bytes): - response.url = req.url.decode("utf-8") + response.url = req.url.decode('utf-8') else: response.url = req.url @@ -342,13 +302,11 @@ def get_connection(self, url, proxies=None): proxy = select_proxy(url, proxies) if proxy: - proxy = prepend_scheme_if_needed(proxy, "http") + proxy = prepend_scheme_if_needed(proxy, 'http') proxy_url = parse_url(proxy) if not proxy_url.host: - raise InvalidProxyURL( - "Please check proxy URL. It is malformed " - "and could be missing the host." 
- ) + raise InvalidProxyURL("Please check proxy URL. It is malformed" + " and could be missing the host.") proxy_manager = self.proxy_manager_for(proxy) conn = proxy_manager.connection_from_url(url) else: @@ -386,11 +344,11 @@ def request_url(self, request, proxies): proxy = select_proxy(request.url, proxies) scheme = urlparse(request.url).scheme - is_proxied_http_request = proxy and scheme != "https" + is_proxied_http_request = (proxy and scheme != 'https') using_socks_proxy = False if proxy: proxy_scheme = urlparse(proxy).scheme.lower() - using_socks_proxy = proxy_scheme.startswith("socks") + using_socks_proxy = proxy_scheme.startswith('socks') url = request.path_url if is_proxied_http_request and not using_socks_proxy: @@ -429,13 +387,12 @@ def proxy_headers(self, proxy): username, password = get_auth_from_url(proxy) if username: - headers["Proxy-Authorization"] = _basic_auth_str(username, password) + headers['Proxy-Authorization'] = _basic_auth_str(username, + password) return headers - def send( - self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None - ): + def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None): """Sends PreparedRequest object. Returns Response object. :param request: The :class:`PreparedRequest ` being sent. @@ -459,26 +416,20 @@ def send( self.cert_verify(conn, request.url, verify, cert) url = self.request_url(request, proxies) - self.add_headers( - request, - stream=stream, - timeout=timeout, - verify=verify, - cert=cert, - proxies=proxies, - ) + self.add_headers(request, stream=stream, timeout=timeout, verify=verify, cert=cert, proxies=proxies) - chunked = not (request.body is None or "Content-Length" in request.headers) + chunked = not (request.body is None or 'Content-Length' in request.headers) if isinstance(timeout, tuple): try: connect, read = timeout timeout = TimeoutSauce(connect=connect, read=read) - except ValueError: - raise ValueError( - f"Invalid timeout {timeout}. Pass a (connect, read) timeout tuple, " - f"or a single float to set both timeouts to the same value." - ) + except ValueError as e: + # this may raise a string formatting error. + err = ("Invalid timeout {}. Pass a (connect, read) " + "timeout tuple, or a single float to set " + "both timeouts to the same value".format(timeout)) + raise ValueError(err) elif isinstance(timeout, TimeoutSauce): pass else: @@ -496,24 +447,22 @@ def send( preload_content=False, decode_content=False, retries=self.max_retries, - timeout=timeout, + timeout=timeout ) # Send the request. 
else: - if hasattr(conn, "proxy_pool"): + if hasattr(conn, 'proxy_pool'): conn = conn.proxy_pool low_conn = conn._get_conn(timeout=DEFAULT_POOL_TIMEOUT) try: - skip_host = "Host" in request.headers - low_conn.putrequest( - request.method, - url, - skip_accept_encoding=True, - skip_host=skip_host, - ) + skip_host = 'Host' in request.headers + low_conn.putrequest(request.method, + url, + skip_accept_encoding=True, + skip_host=skip_host) for header, value in request.headers.items(): low_conn.putheader(header, value) @@ -521,29 +470,34 @@ def send( low_conn.endheaders() for i in request.body: - low_conn.send(hex(len(i))[2:].encode("utf-8")) - low_conn.send(b"\r\n") + low_conn.send(hex(len(i))[2:].encode('utf-8')) + low_conn.send(b'\r\n') low_conn.send(i) - low_conn.send(b"\r\n") - low_conn.send(b"0\r\n\r\n") + low_conn.send(b'\r\n') + low_conn.send(b'0\r\n\r\n') # Receive the response from the server - r = low_conn.getresponse() + try: + # For Python 2.7, use buffering of HTTP responses + r = low_conn.getresponse(buffering=True) + except TypeError: + # For compatibility with Python 3.3+ + r = low_conn.getresponse() resp = HTTPResponse.from_httplib( r, pool=conn, connection=low_conn, preload_content=False, - decode_content=False, + decode_content=False ) - except Exception: + except: # If we hit any problems here, clean up the connection. - # Then, raise so that we can handle the actual exception. + # Then, reraise so that we can handle the actual exception. low_conn.close() raise - except (ProtocolError, OSError) as err: + except (ProtocolError, socket.error) as err: raise ConnectionError(err, request=request) except MaxRetryError as e: diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/requests/api.py b/venv/lib/python3.10/site-packages/pip/_vendor/requests/api.py index 2f71aae..4cba90e 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/requests/api.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/requests/api.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- + """ requests.api ~~~~~~~~~~~~ @@ -70,7 +72,7 @@ def get(url, params=None, **kwargs): :rtype: requests.Response """ - return request("get", url, params=params, **kwargs) + return request('get', url, params=params, **kwargs) def options(url, **kwargs): @@ -82,7 +84,7 @@ def options(url, **kwargs): :rtype: requests.Response """ - return request("options", url, **kwargs) + return request('options', url, **kwargs) def head(url, **kwargs): @@ -96,8 +98,8 @@ def head(url, **kwargs): :rtype: requests.Response """ - kwargs.setdefault("allow_redirects", False) - return request("head", url, **kwargs) + kwargs.setdefault('allow_redirects', False) + return request('head', url, **kwargs) def post(url, data=None, json=None, **kwargs): @@ -112,7 +114,7 @@ def post(url, data=None, json=None, **kwargs): :rtype: requests.Response """ - return request("post", url, data=data, json=json, **kwargs) + return request('post', url, data=data, json=json, **kwargs) def put(url, data=None, **kwargs): @@ -127,7 +129,7 @@ def put(url, data=None, **kwargs): :rtype: requests.Response """ - return request("put", url, data=data, **kwargs) + return request('put', url, data=data, **kwargs) def patch(url, data=None, **kwargs): @@ -142,7 +144,7 @@ def patch(url, data=None, **kwargs): :rtype: requests.Response """ - return request("patch", url, data=data, **kwargs) + return request('patch', url, data=data, **kwargs) def delete(url, **kwargs): @@ -154,4 +156,4 @@ def delete(url, **kwargs): :rtype: requests.Response """ - return request("delete", url, **kwargs) 
+ return request('delete', url, **kwargs) diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/requests/auth.py b/venv/lib/python3.10/site-packages/pip/_vendor/requests/auth.py index 9733686..eeface3 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/requests/auth.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/requests/auth.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- + """ requests.auth ~~~~~~~~~~~~~ @@ -5,21 +7,22 @@ This module contains the authentication handlers for Requests. """ -import hashlib import os import re -import threading import time +import hashlib +import threading import warnings + from base64 import b64encode -from ._internal_utils import to_native_string -from .compat import basestring, str, urlparse +from .compat import urlparse, str, basestring from .cookies import extract_cookies_to_jar +from ._internal_utils import to_native_string from .utils import parse_dict_header -CONTENT_TYPE_FORM_URLENCODED = "application/x-www-form-urlencoded" -CONTENT_TYPE_MULTI_PART = "multipart/form-data" +CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded' +CONTENT_TYPE_MULTI_PART = 'multipart/form-data' def _basic_auth_str(username, password): @@ -54,23 +57,23 @@ def _basic_auth_str(username, password): # -- End Removal -- if isinstance(username, str): - username = username.encode("latin1") + username = username.encode('latin1') if isinstance(password, str): - password = password.encode("latin1") + password = password.encode('latin1') - authstr = "Basic " + to_native_string( - b64encode(b":".join((username, password))).strip() + authstr = 'Basic ' + to_native_string( + b64encode(b':'.join((username, password))).strip() ) return authstr -class AuthBase: +class AuthBase(object): """Base class that all auth implementations derive from""" def __call__(self, r): - raise NotImplementedError("Auth hooks must be callable.") + raise NotImplementedError('Auth hooks must be callable.') class HTTPBasicAuth(AuthBase): @@ -81,18 +84,16 @@ def __init__(self, username, password): self.password = password def __eq__(self, other): - return all( - [ - self.username == getattr(other, "username", None), - self.password == getattr(other, "password", None), - ] - ) + return all([ + self.username == getattr(other, 'username', None), + self.password == getattr(other, 'password', None) + ]) def __ne__(self, other): return not self == other def __call__(self, r): - r.headers["Authorization"] = _basic_auth_str(self.username, self.password) + r.headers['Authorization'] = _basic_auth_str(self.username, self.password) return r @@ -100,7 +101,7 @@ class HTTPProxyAuth(HTTPBasicAuth): """Attaches HTTP Proxy Authentication to a given Request object.""" def __call__(self, r): - r.headers["Proxy-Authorization"] = _basic_auth_str(self.username, self.password) + r.headers['Proxy-Authorization'] = _basic_auth_str(self.username, self.password) return r @@ -115,9 +116,9 @@ def __init__(self, username, password): def init_per_thread_state(self): # Ensure state is initialized just once per-thread - if not hasattr(self._thread_local, "init"): + if not hasattr(self._thread_local, 'init'): self._thread_local.init = True - self._thread_local.last_nonce = "" + self._thread_local.last_nonce = '' self._thread_local.nonce_count = 0 self._thread_local.chal = {} self._thread_local.pos = None @@ -128,52 +129,44 @@ def build_digest_header(self, method, url): :rtype: str """ - realm = self._thread_local.chal["realm"] - nonce = self._thread_local.chal["nonce"] - qop = self._thread_local.chal.get("qop") - 
algorithm = self._thread_local.chal.get("algorithm") - opaque = self._thread_local.chal.get("opaque") + realm = self._thread_local.chal['realm'] + nonce = self._thread_local.chal['nonce'] + qop = self._thread_local.chal.get('qop') + algorithm = self._thread_local.chal.get('algorithm') + opaque = self._thread_local.chal.get('opaque') hash_utf8 = None if algorithm is None: - _algorithm = "MD5" + _algorithm = 'MD5' else: _algorithm = algorithm.upper() # lambdas assume digest modules are imported at the top level - if _algorithm == "MD5" or _algorithm == "MD5-SESS": - + if _algorithm == 'MD5' or _algorithm == 'MD5-SESS': def md5_utf8(x): if isinstance(x, str): - x = x.encode("utf-8") + x = x.encode('utf-8') return hashlib.md5(x).hexdigest() - hash_utf8 = md5_utf8 - elif _algorithm == "SHA": - + elif _algorithm == 'SHA': def sha_utf8(x): if isinstance(x, str): - x = x.encode("utf-8") + x = x.encode('utf-8') return hashlib.sha1(x).hexdigest() - hash_utf8 = sha_utf8 - elif _algorithm == "SHA-256": - + elif _algorithm == 'SHA-256': def sha256_utf8(x): if isinstance(x, str): - x = x.encode("utf-8") + x = x.encode('utf-8') return hashlib.sha256(x).hexdigest() - hash_utf8 = sha256_utf8 - elif _algorithm == "SHA-512": - + elif _algorithm == 'SHA-512': def sha512_utf8(x): if isinstance(x, str): - x = x.encode("utf-8") + x = x.encode('utf-8') return hashlib.sha512(x).hexdigest() - hash_utf8 = sha512_utf8 - KD = lambda s, d: hash_utf8(f"{s}:{d}") # noqa:E731 + KD = lambda s, d: hash_utf8("%s:%s" % (s, d)) if hash_utf8 is None: return None @@ -184,10 +177,10 @@ def sha512_utf8(x): #: path is request-uri defined in RFC 2616 which should not be empty path = p_parsed.path or "/" if p_parsed.query: - path += f"?{p_parsed.query}" + path += '?' + p_parsed.query - A1 = f"{self.username}:{realm}:{self.password}" - A2 = f"{method}:{path}" + A1 = '%s:%s:%s' % (self.username, realm, self.password) + A2 = '%s:%s' % (method, path) HA1 = hash_utf8(A1) HA2 = hash_utf8(A2) @@ -196,20 +189,22 @@ def sha512_utf8(x): self._thread_local.nonce_count += 1 else: self._thread_local.nonce_count = 1 - ncvalue = f"{self._thread_local.nonce_count:08x}" - s = str(self._thread_local.nonce_count).encode("utf-8") - s += nonce.encode("utf-8") - s += time.ctime().encode("utf-8") + ncvalue = '%08x' % self._thread_local.nonce_count + s = str(self._thread_local.nonce_count).encode('utf-8') + s += nonce.encode('utf-8') + s += time.ctime().encode('utf-8') s += os.urandom(8) - cnonce = hashlib.sha1(s).hexdigest()[:16] - if _algorithm == "MD5-SESS": - HA1 = hash_utf8(f"{HA1}:{nonce}:{cnonce}") + cnonce = (hashlib.sha1(s).hexdigest()[:16]) + if _algorithm == 'MD5-SESS': + HA1 = hash_utf8('%s:%s:%s' % (HA1, nonce, cnonce)) if not qop: - respdig = KD(HA1, f"{nonce}:{HA2}") - elif qop == "auth" or "auth" in qop.split(","): - noncebit = f"{nonce}:{ncvalue}:{cnonce}:auth:{HA2}" + respdig = KD(HA1, "%s:%s" % (nonce, HA2)) + elif qop == 'auth' or 'auth' in qop.split(','): + noncebit = "%s:%s:%s:%s:%s" % ( + nonce, ncvalue, cnonce, 'auth', HA2 + ) respdig = KD(HA1, noncebit) else: # XXX handle auth-int. @@ -218,20 +213,18 @@ def sha512_utf8(x): self._thread_local.last_nonce = nonce # XXX should the partial digests be encoded too? 
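        # Editor's note (aside): for the common qop="auth" case, the value
        # assembled below is the RFC 2617 digest response, schematically:
        #
        #   respdig = KD(HA1, "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce,
        #                                         "auth", HA2))
        #
        # i.e. hash_utf8("HA1:nonce:nc:cnonce:auth:HA2"), carried in the
        # header's response="..." field.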
- base = ( - f'username="{self.username}", realm="{realm}", nonce="{nonce}", ' - f'uri="{path}", response="{respdig}"' - ) + base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \ + 'response="%s"' % (self.username, realm, nonce, path, respdig) if opaque: - base += f', opaque="{opaque}"' + base += ', opaque="%s"' % opaque if algorithm: - base += f', algorithm="{algorithm}"' + base += ', algorithm="%s"' % algorithm if entdig: - base += f', digest="{entdig}"' + base += ', digest="%s"' % entdig if qop: - base += f', qop="auth", nc={ncvalue}, cnonce="{cnonce}"' + base += ', qop="auth", nc=%s, cnonce="%s"' % (ncvalue, cnonce) - return f"Digest {base}" + return 'Digest %s' % (base) def handle_redirect(self, r, **kwargs): """Reset num_401_calls counter on redirects.""" @@ -255,13 +248,13 @@ def handle_401(self, r, **kwargs): # Rewind the file position indicator of the body to where # it was to resend the request. r.request.body.seek(self._thread_local.pos) - s_auth = r.headers.get("www-authenticate", "") + s_auth = r.headers.get('www-authenticate', '') - if "digest" in s_auth.lower() and self._thread_local.num_401_calls < 2: + if 'digest' in s_auth.lower() and self._thread_local.num_401_calls < 2: self._thread_local.num_401_calls += 1 - pat = re.compile(r"digest ", flags=re.IGNORECASE) - self._thread_local.chal = parse_dict_header(pat.sub("", s_auth, count=1)) + pat = re.compile(r'digest ', flags=re.IGNORECASE) + self._thread_local.chal = parse_dict_header(pat.sub('', s_auth, count=1)) # Consume content and release the original connection # to allow our new request to reuse the same one. @@ -271,9 +264,8 @@ def handle_401(self, r, **kwargs): extract_cookies_to_jar(prep._cookies, r.request, r.raw) prep.prepare_cookies(prep._cookies) - prep.headers["Authorization"] = self.build_digest_header( - prep.method, prep.url - ) + prep.headers['Authorization'] = self.build_digest_header( + prep.method, prep.url) _r = r.connection.send(prep, **kwargs) _r.history.append(r) _r.request = prep @@ -288,7 +280,7 @@ def __call__(self, r): self.init_per_thread_state() # If we have a saved nonce, skip the 401 if self._thread_local.last_nonce: - r.headers["Authorization"] = self.build_digest_header(r.method, r.url) + r.headers['Authorization'] = self.build_digest_header(r.method, r.url) try: self._thread_local.pos = r.body.tell() except AttributeError: @@ -297,19 +289,17 @@ def __call__(self, r): # file position of the previous body. Ensure it's set to # None. 
self._thread_local.pos = None - r.register_hook("response", self.handle_401) - r.register_hook("response", self.handle_redirect) + r.register_hook('response', self.handle_401) + r.register_hook('response', self.handle_redirect) self._thread_local.num_401_calls = 1 return r def __eq__(self, other): - return all( - [ - self.username == getattr(other, "username", None), - self.password == getattr(other, "password", None), - ] - ) + return all([ + self.username == getattr(other, 'username', None), + self.password == getattr(other, 'password', None) + ]) def __ne__(self, other): return not self == other diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/requests/certs.py b/venv/lib/python3.10/site-packages/pip/_vendor/requests/certs.py index 38696a1..06a594e 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/requests/certs.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/requests/certs.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +# -*- coding: utf-8 -*- """ requests.certs @@ -11,14 +12,7 @@ environment, you can change the definition of where() to return a separately packaged CA bundle. """ +from pip._vendor.certifi import where -import os - -if "_PIP_STANDALONE_CERT" not in os.environ: - from pip._vendor.certifi import where -else: - def where(): - return os.environ["_PIP_STANDALONE_CERT"] - -if __name__ == "__main__": +if __name__ == '__main__': print(where()) diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/requests/compat.py b/venv/lib/python3.10/site-packages/pip/_vendor/requests/compat.py index 9ab2bb4..f98cc91 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/requests/compat.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/requests/compat.py @@ -1,10 +1,11 @@ +# -*- coding: utf-8 -*- + """ requests.compat ~~~~~~~~~~~~~~~ -This module previously handled import compatibility issues -between Python 2 and Python 3. It remains for backwards -compatibility until the next major version. +This module handles import compatibility issues between Python 2 and +Python 3. """ from pip._vendor import chardet @@ -19,49 +20,58 @@ _ver = sys.version_info #: Python 2.x? -is_py2 = _ver[0] == 2 +is_py2 = (_ver[0] == 2) #: Python 3.x? -is_py3 = _ver[0] == 3 +is_py3 = (_ver[0] == 3) # Note: We've patched out simplejson support in pip because it prevents # upgrading simplejson on Windows. +# try: +# import simplejson as json +# except (ImportError, SyntaxError): +# # simplejson does not support Python 3.2, it throws a SyntaxError +# # because of u'...' Unicode literals. import json -from json import JSONDecodeError -# Keep OrderedDict for backwards compatibility. -from collections import OrderedDict -from collections.abc import Callable, Mapping, MutableMapping -from http import cookiejar as cookielib -from http.cookies import Morsel -from io import StringIO +# --------- +# Specifics +# --------- + +if is_py2: + from urllib import ( + quote, unquote, quote_plus, unquote_plus, urlencode, getproxies, + proxy_bypass, proxy_bypass_environment, getproxies_environment) + from urlparse import urlparse, urlunparse, urljoin, urlsplit, urldefrag + from urllib2 import parse_http_list + import cookielib + from Cookie import Morsel + from StringIO import StringIO + # Keep OrderedDict for backwards compatibility. 
+ from collections import Callable, Mapping, MutableMapping, OrderedDict + + builtin_str = str + bytes = str + str = unicode + basestring = basestring + numeric_types = (int, long, float) + integer_types = (int, long) + JSONDecodeError = ValueError -# -------------- -# Legacy Imports -# -------------- -from urllib.parse import ( - quote, - quote_plus, - unquote, - unquote_plus, - urldefrag, - urlencode, - urljoin, - urlparse, - urlsplit, - urlunparse, -) -from urllib.request import ( - getproxies, - getproxies_environment, - parse_http_list, - proxy_bypass, - proxy_bypass_environment, -) +elif is_py3: + from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, quote_plus, unquote_plus, urldefrag + from urllib.request import parse_http_list, getproxies, proxy_bypass, proxy_bypass_environment, getproxies_environment + from http import cookiejar as cookielib + from http.cookies import Morsel + from io import StringIO + # Keep OrderedDict for backwards compatibility. + from collections import OrderedDict + from collections.abc import Callable, Mapping, MutableMapping + from json import JSONDecodeError -builtin_str = str -str = str -bytes = bytes -basestring = (str, bytes) -numeric_types = (int, float) -integer_types = (int,) + builtin_str = str + str = str + bytes = bytes + basestring = (str, bytes) + numeric_types = (int, float) + integer_types = (int,) diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/requests/cookies.py b/venv/lib/python3.10/site-packages/pip/_vendor/requests/cookies.py index bf54ab2..56fccd9 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/requests/cookies.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/requests/cookies.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- + """ requests.cookies ~~~~~~~~~~~~~~~~ @@ -7,12 +9,12 @@ requests.utils imports from here, so be careful with imports. """ -import calendar import copy import time +import calendar from ._internal_utils import to_native_string -from .compat import Morsel, MutableMapping, cookielib, urlparse, urlunparse +from .compat import cookielib, urlparse, urlunparse, Morsel, MutableMapping try: import threading @@ -20,7 +22,7 @@ import dummy_threading as threading -class MockRequest: +class MockRequest(object): """Wraps a `requests.Request` to mimic a `urllib2.Request`. 
The code in `cookielib.CookieJar` expects this interface in order to correctly @@ -49,22 +51,16 @@ def get_origin_req_host(self): def get_full_url(self): # Only return the response's URL if the user hadn't set the Host # header - if not self._r.headers.get("Host"): + if not self._r.headers.get('Host'): return self._r.url # If they did set it, retrieve it and reconstruct the expected domain - host = to_native_string(self._r.headers["Host"], encoding="utf-8") + host = to_native_string(self._r.headers['Host'], encoding='utf-8') parsed = urlparse(self._r.url) # Reconstruct the URL as we expect it - return urlunparse( - [ - parsed.scheme, - host, - parsed.path, - parsed.params, - parsed.query, - parsed.fragment, - ] - ) + return urlunparse([ + parsed.scheme, host, parsed.path, parsed.params, parsed.query, + parsed.fragment + ]) def is_unverifiable(self): return True @@ -77,9 +73,7 @@ def get_header(self, name, default=None): def add_header(self, key, val): """cookielib has no legitimate use for this method; add it back if you find one.""" - raise NotImplementedError( - "Cookie headers should be added with add_unredirected_header()" - ) + raise NotImplementedError("Cookie headers should be added with add_unredirected_header()") def add_unredirected_header(self, name, value): self._new_headers[name] = value @@ -100,7 +94,7 @@ def host(self): return self.get_host() -class MockResponse: +class MockResponse(object): """Wraps a `httplib.HTTPMessage` to mimic a `urllib.addinfourl`. ...what? Basically, expose the parsed HTTP headers from the server response @@ -128,7 +122,8 @@ def extract_cookies_to_jar(jar, request, response): :param request: our own requests.Request object :param response: urllib3.HTTPResponse object """ - if not (hasattr(response, "_original_response") and response._original_response): + if not (hasattr(response, '_original_response') and + response._original_response): return # the _original_response field is the wrapped httplib.HTTPResponse object, req = MockRequest(request) @@ -145,7 +140,7 @@ def get_cookie_header(jar, request): """ r = MockRequest(request) jar.add_cookie_header(r) - return r.get_new_headers().get("Cookie") + return r.get_new_headers().get('Cookie') def remove_cookie_by_name(cookiejar, name, domain=None, path=None): @@ -210,9 +205,7 @@ def set(self, name, value, **kwargs): """ # support client code that unsets cookies by assignment of a None value: if value is None: - remove_cookie_by_name( - self, name, domain=kwargs.get("domain"), path=kwargs.get("path") - ) + remove_cookie_by_name(self, name, domain=kwargs.get('domain'), path=kwargs.get('path')) return if isinstance(value, Morsel): @@ -312,15 +305,16 @@ def get_dict(self, domain=None, path=None): """ dictionary = {} for cookie in iter(self): - if (domain is None or cookie.domain == domain) and ( - path is None or cookie.path == path + if ( + (domain is None or cookie.domain == domain) and + (path is None or cookie.path == path) ): dictionary[cookie.name] = cookie.value return dictionary def __contains__(self, name): try: - return super().__contains__(name) + return super(RequestsCookieJar, self).__contains__(name) except CookieConflictError: return True @@ -347,13 +341,9 @@ def __delitem__(self, name): remove_cookie_by_name(self, name) def set_cookie(self, cookie, *args, **kwargs): - if ( - hasattr(cookie.value, "startswith") - and cookie.value.startswith('"') - and cookie.value.endswith('"') - ): - cookie.value = cookie.value.replace('\\"', "") - return super().set_cookie(cookie, *args, **kwargs) + if 
hasattr(cookie.value, 'startswith') and cookie.value.startswith('"') and cookie.value.endswith('"'): + cookie.value = cookie.value.replace('\\"', '') + return super(RequestsCookieJar, self).set_cookie(cookie, *args, **kwargs) def update(self, other): """Updates this jar with cookies from another CookieJar or dict-like""" @@ -361,7 +351,7 @@ def update(self, other): for cookie in other: self.set_cookie(copy.copy(cookie)) else: - super().update(other) + super(RequestsCookieJar, self).update(other) def _find(self, name, domain=None, path=None): """Requests uses this method internally to get cookie values. @@ -381,7 +371,7 @@ def _find(self, name, domain=None, path=None): if path is None or cookie.path == path: return cookie.value - raise KeyError(f"name={name!r}, domain={domain!r}, path={path!r}") + raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path)) def _find_no_duplicates(self, name, domain=None, path=None): """Both ``__get_item__`` and ``get`` call this function: it's never @@ -400,29 +390,25 @@ def _find_no_duplicates(self, name, domain=None, path=None): if cookie.name == name: if domain is None or cookie.domain == domain: if path is None or cookie.path == path: - if toReturn is not None: - # if there are multiple cookies that meet passed in criteria - raise CookieConflictError( - f"There are multiple cookies with name, {name!r}" - ) - # we will eventually return this as long as no cookie conflict - toReturn = cookie.value + if toReturn is not None: # if there are multiple cookies that meet passed in criteria + raise CookieConflictError('There are multiple cookies with name, %r' % (name)) + toReturn = cookie.value # we will eventually return this as long as no cookie conflict if toReturn: return toReturn - raise KeyError(f"name={name!r}, domain={domain!r}, path={path!r}") + raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path)) def __getstate__(self): """Unlike a normal CookieJar, this class is pickleable.""" state = self.__dict__.copy() # remove the unpickleable RLock object - state.pop("_cookies_lock") + state.pop('_cookies_lock') return state def __setstate__(self, state): """Unlike a normal CookieJar, this class is pickleable.""" self.__dict__.update(state) - if "_cookies_lock" not in self.__dict__: + if '_cookies_lock' not in self.__dict__: self._cookies_lock = threading.RLock() def copy(self): @@ -441,7 +427,7 @@ def _copy_cookie_jar(jar): if jar is None: return None - if hasattr(jar, "copy"): + if hasattr(jar, 'copy'): # We're dealing with an instance of RequestsCookieJar return jar.copy() # We're dealing with a generic CookieJar instance @@ -459,32 +445,31 @@ def create_cookie(name, value, **kwargs): and sent on every request (this is sometimes called a "supercookie"). 
""" result = { - "version": 0, - "name": name, - "value": value, - "port": None, - "domain": "", - "path": "/", - "secure": False, - "expires": None, - "discard": True, - "comment": None, - "comment_url": None, - "rest": {"HttpOnly": None}, - "rfc2109": False, + 'version': 0, + 'name': name, + 'value': value, + 'port': None, + 'domain': '', + 'path': '/', + 'secure': False, + 'expires': None, + 'discard': True, + 'comment': None, + 'comment_url': None, + 'rest': {'HttpOnly': None}, + 'rfc2109': False, } badargs = set(kwargs) - set(result) if badargs: - raise TypeError( - f"create_cookie() got unexpected keyword arguments: {list(badargs)}" - ) + err = 'create_cookie() got unexpected keyword arguments: %s' + raise TypeError(err % list(badargs)) result.update(kwargs) - result["port_specified"] = bool(result["port"]) - result["domain_specified"] = bool(result["domain"]) - result["domain_initial_dot"] = result["domain"].startswith(".") - result["path_specified"] = bool(result["path"]) + result['port_specified'] = bool(result['port']) + result['domain_specified'] = bool(result['domain']) + result['domain_initial_dot'] = result['domain'].startswith('.') + result['path_specified'] = bool(result['path']) return cookielib.Cookie(**result) @@ -493,28 +478,30 @@ def morsel_to_cookie(morsel): """Convert a Morsel object into a Cookie containing the one k/v pair.""" expires = None - if morsel["max-age"]: + if morsel['max-age']: try: - expires = int(time.time() + int(morsel["max-age"])) + expires = int(time.time() + int(morsel['max-age'])) except ValueError: - raise TypeError(f"max-age: {morsel['max-age']} must be integer") - elif morsel["expires"]: - time_template = "%a, %d-%b-%Y %H:%M:%S GMT" - expires = calendar.timegm(time.strptime(morsel["expires"], time_template)) + raise TypeError('max-age: %s must be integer' % morsel['max-age']) + elif morsel['expires']: + time_template = '%a, %d-%b-%Y %H:%M:%S GMT' + expires = calendar.timegm( + time.strptime(morsel['expires'], time_template) + ) return create_cookie( - comment=morsel["comment"], - comment_url=bool(morsel["comment"]), + comment=morsel['comment'], + comment_url=bool(morsel['comment']), discard=False, - domain=morsel["domain"], + domain=morsel['domain'], expires=expires, name=morsel.key, - path=morsel["path"], + path=morsel['path'], port=None, - rest={"HttpOnly": morsel["httponly"]}, + rest={'HttpOnly': morsel['httponly']}, rfc2109=False, - secure=bool(morsel["secure"]), + secure=bool(morsel['secure']), value=morsel.value, - version=morsel["version"] or 0, + version=morsel['version'] or 0, ) @@ -547,10 +534,11 @@ def merge_cookies(cookiejar, cookies): :rtype: CookieJar """ if not isinstance(cookiejar, cookielib.CookieJar): - raise ValueError("You can only merge into CookieJar") + raise ValueError('You can only merge into CookieJar') if isinstance(cookies, dict): - cookiejar = cookiejar_from_dict(cookies, cookiejar=cookiejar, overwrite=False) + cookiejar = cookiejar_from_dict( + cookies, cookiejar=cookiejar, overwrite=False) elif isinstance(cookies, cookielib.CookieJar): try: cookiejar.update(cookies) diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/requests/exceptions.py b/venv/lib/python3.10/site-packages/pip/_vendor/requests/exceptions.py index 168d073..83b9232 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/requests/exceptions.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/requests/exceptions.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- + """ requests.exceptions ~~~~~~~~~~~~~~~~~~~ @@ -16,12 +18,13 @@ class 
RequestException(IOError): def __init__(self, *args, **kwargs): """Initialize RequestException with `request` and `response` objects.""" - response = kwargs.pop("response", None) + response = kwargs.pop('response', None) self.response = response - self.request = kwargs.pop("request", None) - if response is not None and not self.request and hasattr(response, "request"): + self.request = kwargs.pop('request', None) + if (response is not None and not self.request and + hasattr(response, 'request')): self.request = self.response.request - super().__init__(*args, **kwargs) + super(RequestException, self).__init__(*args, **kwargs) class InvalidJSONError(RequestException): @@ -31,16 +34,6 @@ class InvalidJSONError(RequestException): class JSONDecodeError(InvalidJSONError, CompatJSONDecodeError): """Couldn't decode the text into json""" - def __init__(self, *args, **kwargs): - """ - Construct the JSONDecodeError instance first with all - args. Then use it's args to construct the IOError so that - the json specific args aren't used as IOError specific args - and the error message from JSONDecodeError is preserved. - """ - CompatJSONDecodeError.__init__(self, *args) - InvalidJSONError.__init__(self, *self.args, **kwargs) - class HTTPError(RequestException): """An HTTP error occurred.""" @@ -125,7 +118,6 @@ class RetryError(RequestException): class UnrewindableBodyError(RequestException): """Requests encountered an error when trying to rewind a body.""" - # Warnings diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/requests/help.py b/venv/lib/python3.10/site-packages/pip/_vendor/requests/help.py index 2d292c2..745f0d7 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/requests/help.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/requests/help.py @@ -1,9 +1,10 @@ """Module containing bug report helper(s).""" +from __future__ import print_function import json import platform -import ssl import sys +import ssl from pip._vendor import idna from pip._vendor import urllib3 @@ -24,16 +25,16 @@ OpenSSL = None cryptography = None else: - import cryptography import OpenSSL + import cryptography def _implementation(): """Return a dict with the Python implementation and version. Provide both the name and the version of the Python implementation - currently running. For example, on CPython 3.10.3 it will return - {'name': 'CPython', 'version': '3.10.3'}. + currently running. For example, on CPython 2.7.5 it will return + {'name': 'CPython', 'version': '2.7.5'}. This function works best on CPython and PyPy: in particular, it probably doesn't work for Jython or IronPython. 
Future investigation should be done @@ -41,83 +42,83 @@ def _implementation(): """ implementation = platform.python_implementation() - if implementation == "CPython": + if implementation == 'CPython': implementation_version = platform.python_version() - elif implementation == "PyPy": - implementation_version = "{}.{}.{}".format( - sys.pypy_version_info.major, - sys.pypy_version_info.minor, - sys.pypy_version_info.micro, - ) - if sys.pypy_version_info.releaselevel != "final": - implementation_version = "".join( - [implementation_version, sys.pypy_version_info.releaselevel] - ) - elif implementation == "Jython": + elif implementation == 'PyPy': + implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major, + sys.pypy_version_info.minor, + sys.pypy_version_info.micro) + if sys.pypy_version_info.releaselevel != 'final': + implementation_version = ''.join([ + implementation_version, sys.pypy_version_info.releaselevel + ]) + elif implementation == 'Jython': implementation_version = platform.python_version() # Complete Guess - elif implementation == "IronPython": + elif implementation == 'IronPython': implementation_version = platform.python_version() # Complete Guess else: - implementation_version = "Unknown" + implementation_version = 'Unknown' - return {"name": implementation, "version": implementation_version} + return {'name': implementation, 'version': implementation_version} def info(): """Generate information for a bug report.""" try: platform_info = { - "system": platform.system(), - "release": platform.release(), + 'system': platform.system(), + 'release': platform.release(), } - except OSError: + except IOError: platform_info = { - "system": "Unknown", - "release": "Unknown", + 'system': 'Unknown', + 'release': 'Unknown', } implementation_info = _implementation() - urllib3_info = {"version": urllib3.__version__} - charset_normalizer_info = {"version": None} - chardet_info = {"version": None} + urllib3_info = {'version': urllib3.__version__} + charset_normalizer_info = {'version': None} + chardet_info = {'version': None} if charset_normalizer: - charset_normalizer_info = {"version": charset_normalizer.__version__} + charset_normalizer_info = {'version': charset_normalizer.__version__} if chardet: - chardet_info = {"version": chardet.__version__} + chardet_info = {'version': chardet.__version__} pyopenssl_info = { - "version": None, - "openssl_version": "", + 'version': None, + 'openssl_version': '', } if OpenSSL: pyopenssl_info = { - "version": OpenSSL.__version__, - "openssl_version": f"{OpenSSL.SSL.OPENSSL_VERSION_NUMBER:x}", + 'version': OpenSSL.__version__, + 'openssl_version': '%x' % OpenSSL.SSL.OPENSSL_VERSION_NUMBER, } cryptography_info = { - "version": getattr(cryptography, "__version__", ""), + 'version': getattr(cryptography, '__version__', ''), } idna_info = { - "version": getattr(idna, "__version__", ""), + 'version': getattr(idna, '__version__', ''), } system_ssl = ssl.OPENSSL_VERSION_NUMBER - system_ssl_info = {"version": f"{system_ssl:x}" if system_ssl is not None else ""} + system_ssl_info = { + 'version': '%x' % system_ssl if system_ssl is not None else '' + } return { - "platform": platform_info, - "implementation": implementation_info, - "system_ssl": system_ssl_info, - "using_pyopenssl": pyopenssl is not None, - "using_charset_normalizer": chardet is None, - "pyOpenSSL": pyopenssl_info, - "urllib3": urllib3_info, - "chardet": chardet_info, - "charset_normalizer": charset_normalizer_info, - "cryptography": cryptography_info, - "idna": idna_info, - "requests": { 
- "version": requests_version, + 'platform': platform_info, + 'implementation': implementation_info, + 'system_ssl': system_ssl_info, + 'using_pyopenssl': pyopenssl is not None, + 'using_charset_normalizer': chardet is None, + 'pyOpenSSL': pyopenssl_info, + 'urllib3': urllib3_info, + 'chardet': chardet_info, + 'charset_normalizer': charset_normalizer_info, + 'cryptography': cryptography_info, + 'idna': idna_info, + 'requests': { + 'version': requests_version, }, } @@ -127,5 +128,5 @@ def main(): print(json.dumps(info(), sort_keys=True, indent=2)) -if __name__ == "__main__": +if __name__ == '__main__': main() diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/requests/hooks.py b/venv/lib/python3.10/site-packages/pip/_vendor/requests/hooks.py index d181ba2..7a51f21 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/requests/hooks.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/requests/hooks.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- + """ requests.hooks ~~~~~~~~~~~~~~ @@ -9,13 +11,12 @@ ``response``: The response generated from a Request. """ -HOOKS = ["response"] +HOOKS = ['response'] def default_hooks(): return {event: [] for event in HOOKS} - # TODO: response is the only one @@ -24,7 +25,7 @@ def dispatch_hook(key, hooks, hook_data, **kwargs): hooks = hooks or {} hooks = hooks.get(key) if hooks: - if hasattr(hooks, "__call__"): + if hasattr(hooks, '__call__'): hooks = [hooks] for hook in hooks: _hook_data = hook(hook_data, **kwargs) diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/requests/models.py b/venv/lib/python3.10/site-packages/pip/_vendor/requests/models.py index b45e810..f538c10 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/requests/models.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/requests/models.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- + """ requests.models ~~~~~~~~~~~~~~~ @@ -6,72 +8,48 @@ """ import datetime +import sys # Import encoding now, to avoid implicit import later. # Implicit import within threads may cause LookupError when standard library is in a ZIP, # such as in Embedded Python. See https://github.com/psf/requests/issues/3578. 
-import encodings.idna # noqa: F401 -from io import UnsupportedOperation +import encodings.idna -from pip._vendor.urllib3.exceptions import ( - DecodeError, - LocationParseError, - ProtocolError, - ReadTimeoutError, - SSLError, -) from pip._vendor.urllib3.fields import RequestField from pip._vendor.urllib3.filepost import encode_multipart_formdata from pip._vendor.urllib3.util import parse_url +from pip._vendor.urllib3.exceptions import ( + DecodeError, ReadTimeoutError, ProtocolError, LocationParseError) + +from io import UnsupportedOperation +from .hooks import default_hooks +from .structures import CaseInsensitiveDict -from ._internal_utils import to_native_string, unicode_is_ascii from .auth import HTTPBasicAuth -from .compat import ( - Callable, - JSONDecodeError, - Mapping, - basestring, - builtin_str, - chardet, - cookielib, -) -from .compat import json as complexjson -from .compat import urlencode, urlsplit, urlunparse -from .cookies import _copy_cookie_jar, cookiejar_from_dict, get_cookie_header +from .cookies import cookiejar_from_dict, get_cookie_header, _copy_cookie_jar from .exceptions import ( - ChunkedEncodingError, - ConnectionError, - ContentDecodingError, - HTTPError, - InvalidJSONError, - InvalidURL, -) + HTTPError, MissingSchema, InvalidURL, ChunkedEncodingError, + ContentDecodingError, ConnectionError, StreamConsumedError, + InvalidJSONError) from .exceptions import JSONDecodeError as RequestsJSONDecodeError -from .exceptions import MissingSchema -from .exceptions import SSLError as RequestsSSLError -from .exceptions import StreamConsumedError -from .hooks import default_hooks -from .status_codes import codes -from .structures import CaseInsensitiveDict +from ._internal_utils import to_native_string, unicode_is_ascii from .utils import ( - check_header_validity, - get_auth_from_url, - guess_filename, - guess_json_utf, - iter_slices, - parse_header_links, - requote_uri, - stream_decode_response_unicode, - super_len, - to_key_val_list, -) + guess_filename, get_auth_from_url, requote_uri, + stream_decode_response_unicode, to_key_val_list, parse_header_links, + iter_slices, guess_json_utf, super_len, check_header_validity) +from .compat import ( + Callable, Mapping, + cookielib, urlunparse, urlsplit, urlencode, str, bytes, + is_py2, chardet, builtin_str, basestring, JSONDecodeError) +from .compat import json as complexjson +from .status_codes import codes #: The set of HTTP status codes that indicate an automatically #: processable redirect. 
REDIRECT_STATI = ( - codes.moved, # 301 - codes.found, # 302 - codes.other, # 303 + codes.moved, # 301 + codes.found, # 302 + codes.other, # 303 codes.temporary_redirect, # 307 codes.permanent_redirect, # 308 ) @@ -81,7 +59,7 @@ ITER_CHUNK_SIZE = 512 -class RequestEncodingMixin: +class RequestEncodingMixin(object): @property def path_url(self): """Build the path URL to use.""" @@ -92,16 +70,16 @@ def path_url(self): path = p.path if not path: - path = "/" + path = '/' url.append(path) query = p.query if query: - url.append("?") + url.append('?') url.append(query) - return "".join(url) + return ''.join(url) @staticmethod def _encode_params(data): @@ -114,21 +92,18 @@ def _encode_params(data): if isinstance(data, (str, bytes)): return data - elif hasattr(data, "read"): + elif hasattr(data, 'read'): return data - elif hasattr(data, "__iter__"): + elif hasattr(data, '__iter__'): result = [] for k, vs in to_key_val_list(data): - if isinstance(vs, basestring) or not hasattr(vs, "__iter__"): + if isinstance(vs, basestring) or not hasattr(vs, '__iter__'): vs = [vs] for v in vs: if v is not None: result.append( - ( - k.encode("utf-8") if isinstance(k, str) else k, - v.encode("utf-8") if isinstance(v, str) else v, - ) - ) + (k.encode('utf-8') if isinstance(k, str) else k, + v.encode('utf-8') if isinstance(v, str) else v)) return urlencode(result, doseq=True) else: return data @@ -143,7 +118,7 @@ def _encode_files(files, data): The tuples may be 2-tuples (filename, fileobj), 3-tuples (filename, fileobj, contentype) or 4-tuples (filename, fileobj, contentype, custom_headers). """ - if not files: + if (not files): raise ValueError("Files must be provided.") elif isinstance(data, basestring): raise ValueError("Data must not be a string.") @@ -153,7 +128,7 @@ def _encode_files(files, data): files = to_key_val_list(files or {}) for field, val in fields: - if isinstance(val, basestring) or not hasattr(val, "__iter__"): + if isinstance(val, basestring) or not hasattr(val, '__iter__'): val = [val] for v in val: if v is not None: @@ -162,13 +137,8 @@ def _encode_files(files, data): v = str(v) new_fields.append( - ( - field.decode("utf-8") - if isinstance(field, bytes) - else field, - v.encode("utf-8") if isinstance(v, str) else v, - ) - ) + (field.decode('utf-8') if isinstance(field, bytes) else field, + v.encode('utf-8') if isinstance(v, str) else v)) for (k, v) in files: # support for explicit filename @@ -187,7 +157,7 @@ def _encode_files(files, data): if isinstance(fp, (str, bytes, bytearray)): fdata = fp - elif hasattr(fp, "read"): + elif hasattr(fp, 'read'): fdata = fp.read() elif fp is None: continue @@ -203,16 +173,16 @@ def _encode_files(files, data): return body, content_type -class RequestHooksMixin: +class RequestHooksMixin(object): def register_hook(self, event, hook): """Properly register a hook.""" if event not in self.hooks: - raise ValueError(f'Unsupported event specified, with event name "{event}"') + raise ValueError('Unsupported event specified, with event name "%s"' % (event)) if isinstance(hook, Callable): self.hooks[event].append(hook) - elif hasattr(hook, "__iter__"): + elif hasattr(hook, '__iter__'): self.hooks[event].extend(h for h in hook if isinstance(h, Callable)) def deregister_hook(self, event, hook): @@ -255,19 +225,9 @@ class Request(RequestHooksMixin): """ - def __init__( - self, - method=None, - url=None, - headers=None, - files=None, - data=None, - params=None, - auth=None, - cookies=None, - hooks=None, - json=None, - ): + def __init__(self, + method=None, url=None, 
headers=None, files=None, data=None, + params=None, auth=None, cookies=None, hooks=None, json=None): # Default empty dicts for dict params. data = [] if data is None else data @@ -291,7 +251,7 @@ def __init__( self.cookies = cookies def __repr__(self): - return f"" + return '' % (self.method) def prepare(self): """Constructs a :class:`PreparedRequest ` for transmission and returns it.""" @@ -349,19 +309,9 @@ def __init__(self): #: integer denoting starting position of a readable file-like body. self._body_position = None - def prepare( - self, - method=None, - url=None, - headers=None, - files=None, - data=None, - params=None, - auth=None, - cookies=None, - hooks=None, - json=None, - ): + def prepare(self, + method=None, url=None, headers=None, files=None, data=None, + params=None, auth=None, cookies=None, hooks=None, json=None): """Prepares the entire request with the given parameters.""" self.prepare_method(method) @@ -378,7 +328,7 @@ def prepare( self.prepare_hooks(hooks) def __repr__(self): - return f"" + return '' % (self.method) def copy(self): p = PreparedRequest() @@ -402,7 +352,7 @@ def _get_idna_encoded_host(host): from pip._vendor import idna try: - host = idna.encode(host, uts46=True).decode("utf-8") + host = idna.encode(host, uts46=True).decode('utf-8') except idna.IDNAError: raise UnicodeError return host @@ -415,9 +365,9 @@ def prepare_url(self, url, params): #: on python 3.x. #: https://github.com/psf/requests/pull/2238 if isinstance(url, bytes): - url = url.decode("utf8") + url = url.decode('utf8') else: - url = str(url) + url = unicode(url) if is_py2 else str(url) # Remove leading whitespaces from url url = url.lstrip() @@ -425,7 +375,7 @@ def prepare_url(self, url, params): # Don't do any URL preparation for non-HTTP schemes like `mailto`, # `data` etc to work around exceptions from `url_parse`, which # handles RFC 3986 only. - if ":" in url and not url.lower().startswith("http"): + if ':' in url and not url.lower().startswith('http'): self.url = url return @@ -436,13 +386,13 @@ def prepare_url(self, url, params): raise InvalidURL(*e.args) if not scheme: - raise MissingSchema( - f"Invalid URL {url!r}: No scheme supplied. " - f"Perhaps you meant http://{url}?" - ) + error = ("Invalid URL {0!r}: No scheme supplied. Perhaps you meant http://{0}?") + error = error.format(to_native_string(url, 'utf8')) + + raise MissingSchema(error) if not host: - raise InvalidURL(f"Invalid URL {url!r}: No host supplied") + raise InvalidURL("Invalid URL %r: No host supplied" % url) # In general, we want to try IDNA encoding the hostname if the string contains # non-ASCII characters. This allows users to automatically get the correct IDNA @@ -452,21 +402,33 @@ def prepare_url(self, url, params): try: host = self._get_idna_encoded_host(host) except UnicodeError: - raise InvalidURL("URL has an invalid label.") - elif host.startswith(("*", ".")): - raise InvalidURL("URL has an invalid label.") + raise InvalidURL('URL has an invalid label.') + elif host.startswith((u'*', u'.')): + raise InvalidURL('URL has an invalid label.') # Carefully reconstruct the network location - netloc = auth or "" + netloc = auth or '' if netloc: - netloc += "@" + netloc += '@' netloc += host if port: - netloc += f":{port}" + netloc += ':' + str(port) # Bare domains aren't valid URLs. 
if not path: - path = "/" + path = '/' + + if is_py2: + if isinstance(scheme, str): + scheme = scheme.encode('utf-8') + if isinstance(netloc, str): + netloc = netloc.encode('utf-8') + if isinstance(path, str): + path = path.encode('utf-8') + if isinstance(query, str): + query = query.encode('utf-8') + if isinstance(fragment, str): + fragment = fragment.encode('utf-8') if isinstance(params, (str, bytes)): params = to_native_string(params) @@ -474,7 +436,7 @@ def prepare_url(self, url, params): enc_params = self._encode_params(params) if enc_params: if query: - query = f"{query}&{enc_params}" + query = '%s&%s' % (query, enc_params) else: query = enc_params @@ -505,7 +467,7 @@ def prepare_body(self, data, files, json=None): if not data and json is not None: # urllib3 requires a bytes-like body. Python 2's json.dumps # provides this natively, but Python 3 gives a Unicode string. - content_type = "application/json" + content_type = 'application/json' try: body = complexjson.dumps(json, allow_nan=False) @@ -513,14 +475,12 @@ def prepare_body(self, data, files, json=None): raise InvalidJSONError(ve, request=self) if not isinstance(body, bytes): - body = body.encode("utf-8") + body = body.encode('utf-8') - is_stream = all( - [ - hasattr(data, "__iter__"), - not isinstance(data, (basestring, list, tuple, Mapping)), - ] - ) + is_stream = all([ + hasattr(data, '__iter__'), + not isinstance(data, (basestring, list, tuple, Mapping)) + ]) if is_stream: try: @@ -530,26 +490,24 @@ def prepare_body(self, data, files, json=None): body = data - if getattr(body, "tell", None) is not None: + if getattr(body, 'tell', None) is not None: # Record the current file position before reading. # This will allow us to rewind a file in the event # of a redirect. try: self._body_position = body.tell() - except OSError: + except (IOError, OSError): # This differentiates from None, allowing us to catch # a failed `tell()` later when trying to rewind the body self._body_position = object() if files: - raise NotImplementedError( - "Streamed bodies and files are mutually exclusive." - ) + raise NotImplementedError('Streamed bodies and files are mutually exclusive.') if length: - self.headers["Content-Length"] = builtin_str(length) + self.headers['Content-Length'] = builtin_str(length) else: - self.headers["Transfer-Encoding"] = "chunked" + self.headers['Transfer-Encoding'] = 'chunked' else: # Multi-part file uploads. if files: @@ -557,16 +515,16 @@ def prepare_body(self, data, files, json=None): else: if data: body = self._encode_params(data) - if isinstance(data, basestring) or hasattr(data, "read"): + if isinstance(data, basestring) or hasattr(data, 'read'): content_type = None else: - content_type = "application/x-www-form-urlencoded" + content_type = 'application/x-www-form-urlencoded' self.prepare_content_length(body) # Add content-type if it wasn't explicitly provided. - if content_type and ("content-type" not in self.headers): - self.headers["Content-Type"] = content_type + if content_type and ('content-type' not in self.headers): + self.headers['Content-Type'] = content_type self.body = body @@ -577,16 +535,13 @@ def prepare_content_length(self, body): if length: # If length exists, set it. Otherwise, we fallback # to Transfer-Encoding: chunked. 
- self.headers["Content-Length"] = builtin_str(length) - elif ( - self.method not in ("GET", "HEAD") - and self.headers.get("Content-Length") is None - ): + self.headers['Content-Length'] = builtin_str(length) + elif self.method not in ('GET', 'HEAD') and self.headers.get('Content-Length') is None: # Set Content-Length to 0 for methods that can have a body # but don't provide one. (i.e. not GET or HEAD) - self.headers["Content-Length"] = "0" + self.headers['Content-Length'] = '0' - def prepare_auth(self, auth, url=""): + def prepare_auth(self, auth, url=''): """Prepares the given HTTP auth data.""" # If no Auth is explicitly provided, extract it from the URL first. @@ -626,7 +581,7 @@ def prepare_cookies(self, cookies): cookie_header = get_cookie_header(self._cookies, self) if cookie_header is not None: - self.headers["Cookie"] = cookie_header + self.headers['Cookie'] = cookie_header def prepare_hooks(self, hooks): """Prepares the given hooks.""" @@ -638,22 +593,14 @@ def prepare_hooks(self, hooks): self.register_hook(event, hooks[event]) -class Response: +class Response(object): """The :class:`Response ` object, which contains a server's response to an HTTP request. """ __attrs__ = [ - "_content", - "status_code", - "headers", - "url", - "history", - "encoding", - "reason", - "cookies", - "elapsed", - "request", + '_content', 'status_code', 'headers', 'url', 'history', + 'encoding', 'reason', 'cookies', 'elapsed', 'request' ] def __init__(self): @@ -722,11 +669,11 @@ def __setstate__(self, state): setattr(self, name, value) # pickled objects do not have .raw - setattr(self, "_content_consumed", True) - setattr(self, "raw", None) + setattr(self, '_content_consumed', True) + setattr(self, 'raw', None) def __repr__(self): - return f"" + return '' % (self.status_code) def __bool__(self): """Returns True if :attr:`status_code` is less than 400. @@ -772,15 +719,12 @@ def is_redirect(self): """True if this Response is a well-formed HTTP redirect that could have been processed automatically (by :meth:`Session.resolve_redirects`). """ - return "location" in self.headers and self.status_code in REDIRECT_STATI + return ('location' in self.headers and self.status_code in REDIRECT_STATI) @property def is_permanent_redirect(self): """True if this Response one of the permanent versions of redirect.""" - return "location" in self.headers and self.status_code in ( - codes.moved_permanently, - codes.permanent_redirect, - ) + return ('location' in self.headers and self.status_code in (codes.moved_permanently, codes.permanent_redirect)) @property def next(self): @@ -790,7 +734,7 @@ def next(self): @property def apparent_encoding(self): """The apparent encoding, provided by the charset_normalizer or chardet libraries.""" - return chardet.detect(self.content)["encoding"] + return chardet.detect(self.content)['encoding'] def iter_content(self, chunk_size=1, decode_unicode=False): """Iterates over the response data. When stream=True is set on the @@ -811,17 +755,16 @@ def iter_content(self, chunk_size=1, decode_unicode=False): def generate(): # Special case for urllib3. 
- if hasattr(self.raw, "stream"): + if hasattr(self.raw, 'stream'): try: - yield from self.raw.stream(chunk_size, decode_content=True) + for chunk in self.raw.stream(chunk_size, decode_content=True): + yield chunk except ProtocolError as e: raise ChunkedEncodingError(e) except DecodeError as e: raise ContentDecodingError(e) except ReadTimeoutError as e: raise ConnectionError(e) - except SSLError as e: - raise RequestsSSLError(e) else: # Standard file-like object. while True: @@ -835,9 +778,7 @@ def generate(): if self._content_consumed and isinstance(self._content, bool): raise StreamConsumedError() elif chunk_size is not None and not isinstance(chunk_size, int): - raise TypeError( - f"chunk_size must be an int, it is instead a {type(chunk_size)}." - ) + raise TypeError("chunk_size must be an int, it is instead a %s." % type(chunk_size)) # simulate reading small chunks of the content reused_chunks = iter_slices(self._content, chunk_size) @@ -850,9 +791,7 @@ def generate(): return chunks - def iter_lines( - self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=False, delimiter=None - ): + def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=False, delimiter=None): """Iterates over the response data, one line at a time. When stream=True is set on the request, this avoids reading the content at once into memory for large responses. @@ -862,9 +801,7 @@ def iter_lines( pending = None - for chunk in self.iter_content( - chunk_size=chunk_size, decode_unicode=decode_unicode - ): + for chunk in self.iter_content(chunk_size=chunk_size, decode_unicode=decode_unicode): if pending is not None: chunk = pending + chunk @@ -879,7 +816,8 @@ def iter_lines( else: pending = None - yield from lines + for line in lines: + yield line if pending is not None: yield pending @@ -891,12 +829,13 @@ def content(self): if self._content is False: # Read the contents. if self._content_consumed: - raise RuntimeError("The content for this response was already consumed") + raise RuntimeError( + 'The content for this response was already consumed') if self.status_code == 0 or self.raw is None: self._content = None else: - self._content = b"".join(self.iter_content(CONTENT_CHUNK_SIZE)) or b"" + self._content = b''.join(self.iter_content(CONTENT_CHUNK_SIZE)) or b'' self._content_consumed = True # don't need to release the connection; that's been handled by urllib3 @@ -921,7 +860,7 @@ def text(self): encoding = self.encoding if not self.content: - return "" + return str('') # Fallback to auto-detected encoding. if self.encoding is None: @@ -929,7 +868,7 @@ def text(self): # Decode unicode from given encoding. try: - content = str(self.content, encoding, errors="replace") + content = str(self.content, encoding, errors='replace') except (LookupError, TypeError): # A LookupError is raised if the encoding was not found which could # indicate a misspelling or similar mistake. @@ -937,7 +876,7 @@ def text(self): # A TypeError can be raised if encoding is None # # So we try blindly encoding. - content = str(self.content, errors="replace") + content = str(self.content, errors='replace') return content @@ -957,65 +896,65 @@ def json(self, **kwargs): encoding = guess_json_utf(self.content) if encoding is not None: try: - return complexjson.loads(self.content.decode(encoding), **kwargs) + return complexjson.loads( + self.content.decode(encoding), **kwargs + ) except UnicodeDecodeError: # Wrong UTF codec detected; usually because it's not UTF-8 # but some other 8-bit codec. 
This is an RFC violation, # and the server didn't bother to tell us what codec *was* # used. pass - except JSONDecodeError as e: - raise RequestsJSONDecodeError(e.msg, e.doc, e.pos) try: return complexjson.loads(self.text, **kwargs) except JSONDecodeError as e: # Catch JSON-related errors and raise as requests.JSONDecodeError # This aliases json.JSONDecodeError and simplejson.JSONDecodeError - raise RequestsJSONDecodeError(e.msg, e.doc, e.pos) + if is_py2: # e is a ValueError + raise RequestsJSONDecodeError(e.message) + else: + raise RequestsJSONDecodeError(e.msg, e.doc, e.pos) @property def links(self): """Returns the parsed header links of the response, if any.""" - header = self.headers.get("link") + header = self.headers.get('link') - resolved_links = {} + # l = MultiDict() + l = {} if header: links = parse_header_links(header) for link in links: - key = link.get("rel") or link.get("url") - resolved_links[key] = link + key = link.get('rel') or link.get('url') + l[key] = link - return resolved_links + return l def raise_for_status(self): """Raises :class:`HTTPError`, if one occurred.""" - http_error_msg = "" + http_error_msg = '' if isinstance(self.reason, bytes): # We attempt to decode utf-8 first because some servers # choose to localize their reason strings. If the string # isn't utf-8, we fall back to iso-8859-1 for all other # encodings. (See PR #3538) try: - reason = self.reason.decode("utf-8") + reason = self.reason.decode('utf-8') except UnicodeDecodeError: - reason = self.reason.decode("iso-8859-1") + reason = self.reason.decode('iso-8859-1') else: reason = self.reason if 400 <= self.status_code < 500: - http_error_msg = ( - f"{self.status_code} Client Error: {reason} for url: {self.url}" - ) + http_error_msg = u'%s Client Error: %s for url: %s' % (self.status_code, reason, self.url) elif 500 <= self.status_code < 600: - http_error_msg = ( - f"{self.status_code} Server Error: {reason} for url: {self.url}" - ) + http_error_msg = u'%s Server Error: %s for url: %s' % (self.status_code, reason, self.url) if http_error_msg: raise HTTPError(http_error_msg, response=self) @@ -1029,6 +968,6 @@ def close(self): if not self._content_consumed: self.raw.close() - release_conn = getattr(self.raw, "release_conn", None) + release_conn = getattr(self.raw, 'release_conn', None) if release_conn is not None: release_conn() diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/requests/sessions.py b/venv/lib/python3.10/site-packages/pip/_vendor/requests/sessions.py index 6cb3b4d..3f59cab 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/requests/sessions.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/requests/sessions.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- + """ requests.sessions ~~~~~~~~~~~~~~~~~ @@ -8,52 +10,39 @@ import os import sys import time -from collections import OrderedDict from datetime import timedelta +from collections import OrderedDict -from ._internal_utils import to_native_string -from .adapters import HTTPAdapter from .auth import _basic_auth_str -from .compat import Mapping, cookielib, urljoin, urlparse +from .compat import cookielib, is_py3, urljoin, urlparse, Mapping from .cookies import ( - RequestsCookieJar, - cookiejar_from_dict, - extract_cookies_to_jar, - merge_cookies, -) -from .exceptions import ( - ChunkedEncodingError, - ContentDecodingError, - InvalidSchema, - TooManyRedirects, -) + cookiejar_from_dict, extract_cookies_to_jar, RequestsCookieJar, merge_cookies) +from .models import Request, PreparedRequest, DEFAULT_REDIRECT_LIMIT from 
.hooks import default_hooks, dispatch_hook +from ._internal_utils import to_native_string +from .utils import to_key_val_list, default_headers, DEFAULT_PORTS +from .exceptions import ( + TooManyRedirects, InvalidSchema, ChunkedEncodingError, ContentDecodingError) -# formerly defined here, reexposed here for backward compatibility -from .models import ( # noqa: F401 - DEFAULT_REDIRECT_LIMIT, - REDIRECT_STATI, - PreparedRequest, - Request, -) -from .status_codes import codes from .structures import CaseInsensitiveDict -from .utils import ( # noqa: F401 - DEFAULT_PORTS, - default_headers, - get_auth_from_url, - get_environ_proxies, - get_netrc_auth, - requote_uri, - resolve_proxies, - rewind_body, - should_bypass_proxies, - to_key_val_list, +from .adapters import HTTPAdapter + +from .utils import ( + requote_uri, get_environ_proxies, get_netrc_auth, should_bypass_proxies, + get_auth_from_url, rewind_body, resolve_proxies ) +from .status_codes import codes + +# formerly defined here, reexposed here for backward compatibility +from .models import REDIRECT_STATI + # Preferred clock, based on which one is more accurate on a given system. -if sys.platform == "win32": - preferred_clock = time.perf_counter +if sys.platform == 'win32': + try: # Python 3.4+ + preferred_clock = time.perf_counter + except AttributeError: # Earlier than Python 3. + preferred_clock = time.clock else: preferred_clock = time.time @@ -72,7 +61,8 @@ def merge_setting(request_setting, session_setting, dict_class=OrderedDict): # Bypass if not a dictionary (e.g. verify) if not ( - isinstance(session_setting, Mapping) and isinstance(request_setting, Mapping) + isinstance(session_setting, Mapping) and + isinstance(request_setting, Mapping) ): return request_setting @@ -94,16 +84,17 @@ def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict): This is necessary because when request_hooks == {'response': []}, the merge breaks Session hooks entirely. """ - if session_hooks is None or session_hooks.get("response") == []: + if session_hooks is None or session_hooks.get('response') == []: return request_hooks - if request_hooks is None or request_hooks.get("response") == []: + if request_hooks is None or request_hooks.get('response') == []: return session_hooks return merge_setting(request_hooks, session_hooks, dict_class) -class SessionRedirectMixin: +class SessionRedirectMixin(object): + def get_redirect_target(self, resp): """Receives a Response. Returns a redirect URI or ``None``""" # Due to the nature of how requests processes redirects this method will @@ -113,15 +104,16 @@ def get_redirect_target(self, resp): # to cache the redirect location onto the response object as a private # attribute. if resp.is_redirect: - location = resp.headers["location"] + location = resp.headers['location'] # Currently the underlying http module on py3 decode headers # in latin1, but empirical evidence suggests that latin1 is very # rarely used with non-ASCII characters in HTTP headers. # It is more likely to get UTF8 header rather than latin1. # This causes incorrect handling of UTF8 encoded location headers. # To solve this, we re-encode the location in latin1. - location = location.encode("latin1") - return to_native_string(location, "utf8") + if is_py3: + location = location.encode('latin1') + return to_native_string(location, 'utf8') return None def should_strip_auth(self, old_url, new_url): @@ -134,40 +126,23 @@ def should_strip_auth(self, old_url, new_url): # ports. 
This isn't specified by RFC 7235, but is kept to avoid # breaking backwards compatibility with older versions of requests # that allowed any redirects on the same host. - if ( - old_parsed.scheme == "http" - and old_parsed.port in (80, None) - and new_parsed.scheme == "https" - and new_parsed.port in (443, None) - ): + if (old_parsed.scheme == 'http' and old_parsed.port in (80, None) + and new_parsed.scheme == 'https' and new_parsed.port in (443, None)): return False # Handle default port usage corresponding to scheme. changed_port = old_parsed.port != new_parsed.port changed_scheme = old_parsed.scheme != new_parsed.scheme default_port = (DEFAULT_PORTS.get(old_parsed.scheme, None), None) - if ( - not changed_scheme - and old_parsed.port in default_port - and new_parsed.port in default_port - ): + if (not changed_scheme and old_parsed.port in default_port + and new_parsed.port in default_port): return False # Standard case: root URI must match return changed_port or changed_scheme - def resolve_redirects( - self, - resp, - req, - stream=False, - timeout=None, - verify=True, - cert=None, - proxies=None, - yield_requests=False, - **adapter_kwargs, - ): + def resolve_redirects(self, resp, req, stream=False, timeout=None, + verify=True, cert=None, proxies=None, yield_requests=False, **adapter_kwargs): """Receives a Response. Returns a generator of Responses or Requests.""" hist = [] # keep track of history @@ -188,21 +163,19 @@ def resolve_redirects( resp.raw.read(decode_content=False) if len(resp.history) >= self.max_redirects: - raise TooManyRedirects( - f"Exceeded {self.max_redirects} redirects.", response=resp - ) + raise TooManyRedirects('Exceeded {} redirects.'.format(self.max_redirects), response=resp) # Release the connection back into the pool. resp.close() # Handle redirection without scheme (see: RFC 1808 Section 4) - if url.startswith("//"): + if url.startswith('//'): parsed_rurl = urlparse(resp.url) - url = ":".join([to_native_string(parsed_rurl.scheme), url]) + url = ':'.join([to_native_string(parsed_rurl.scheme), url]) # Normalize url case and attach previous fragment if needed (RFC 7231 7.1.2) parsed = urlparse(url) - if parsed.fragment == "" and previous_fragment: + if parsed.fragment == '' and previous_fragment: parsed = parsed._replace(fragment=previous_fragment) elif parsed.fragment: previous_fragment = parsed.fragment @@ -221,18 +194,15 @@ def resolve_redirects( self.rebuild_method(prepared_request, resp) # https://github.com/psf/requests/issues/1084 - if resp.status_code not in ( - codes.temporary_redirect, - codes.permanent_redirect, - ): + if resp.status_code not in (codes.temporary_redirect, codes.permanent_redirect): # https://github.com/psf/requests/issues/3490 - purged_headers = ("Content-Length", "Content-Type", "Transfer-Encoding") + purged_headers = ('Content-Length', 'Content-Type', 'Transfer-Encoding') for header in purged_headers: prepared_request.headers.pop(header, None) prepared_request.body = None headers = prepared_request.headers - headers.pop("Cookie", None) + headers.pop('Cookie', None) # Extract any cookies sent on the response to the cookiejar # in the new request. Because we've mutated our copied prepared @@ -248,8 +218,9 @@ def resolve_redirects( # A failed tell() sets `_body_position` to `object()`. This non-None # value ensures `rewindable` will be True, allowing us to raise an # UnrewindableBodyError, instead of hanging the connection. 
- rewindable = prepared_request._body_position is not None and ( - "Content-Length" in headers or "Transfer-Encoding" in headers + rewindable = ( + prepared_request._body_position is not None and + ('Content-Length' in headers or 'Transfer-Encoding' in headers) ) # Attempt to rewind consumed file-like object. @@ -271,7 +242,7 @@ def resolve_redirects( cert=cert, proxies=proxies, allow_redirects=False, - **adapter_kwargs, + **adapter_kwargs ) extract_cookies_to_jar(self.cookies, prepared_request, resp.raw) @@ -288,12 +259,10 @@ def rebuild_auth(self, prepared_request, response): headers = prepared_request.headers url = prepared_request.url - if "Authorization" in headers and self.should_strip_auth( - response.request.url, url - ): + if 'Authorization' in headers and self.should_strip_auth(response.request.url, url): # If we get redirected to a new host, we should strip out any # authentication headers. - del headers["Authorization"] + del headers['Authorization'] # .netrc might have more auth for us on our new host. new_auth = get_netrc_auth(url) if self.trust_env else None @@ -316,8 +285,8 @@ def rebuild_proxies(self, prepared_request, proxies): scheme = urlparse(prepared_request.url).scheme new_proxies = resolve_proxies(prepared_request, proxies, self.trust_env) - if "Proxy-Authorization" in headers: - del headers["Proxy-Authorization"] + if 'Proxy-Authorization' in headers: + del headers['Proxy-Authorization'] try: username, password = get_auth_from_url(new_proxies[scheme]) @@ -325,7 +294,7 @@ def rebuild_proxies(self, prepared_request, proxies): username, password = None, None if username and password: - headers["Proxy-Authorization"] = _basic_auth_str(username, password) + headers['Proxy-Authorization'] = _basic_auth_str(username, password) return new_proxies @@ -336,18 +305,18 @@ def rebuild_method(self, prepared_request, response): method = prepared_request.method # https://tools.ietf.org/html/rfc7231#section-6.4.4 - if response.status_code == codes.see_other and method != "HEAD": - method = "GET" + if response.status_code == codes.see_other and method != 'HEAD': + method = 'GET' # Do what the browsers do, despite standards... # First, turn 302s into GETs. - if response.status_code == codes.found and method != "HEAD": - method = "GET" + if response.status_code == codes.found and method != 'HEAD': + method = 'GET' # Second, if a POST is responded to with a 301, turn it into a GET. # This bizarre behaviour is explained in Issue 1704. - if response.status_code == codes.moved and method == "POST": - method = "GET" + if response.status_code == codes.moved and method == 'POST': + method = 'GET' prepared_request.method = method @@ -372,18 +341,9 @@ class Session(SessionRedirectMixin): """ __attrs__ = [ - "headers", - "cookies", - "auth", - "proxies", - "hooks", - "params", - "verify", - "cert", - "adapters", - "stream", - "trust_env", - "max_redirects", + 'headers', 'cookies', 'auth', 'proxies', 'hooks', 'params', 'verify', + 'cert', 'adapters', 'stream', 'trust_env', + 'max_redirects', ] def __init__(self): @@ -445,8 +405,8 @@ def __init__(self): # Default connection adapters. 
self.adapters = OrderedDict() - self.mount("https://", HTTPAdapter()) - self.mount("http://", HTTPAdapter()) + self.mount('https://', HTTPAdapter()) + self.mount('http://', HTTPAdapter()) def __enter__(self): return self @@ -472,8 +432,7 @@ def prepare_request(self, request): # Merge with session cookies merged_cookies = merge_cookies( - merge_cookies(RequestsCookieJar(), self.cookies), cookies - ) + merge_cookies(RequestsCookieJar(), self.cookies), cookies) # Set environment's basic authentication if not explicitly set. auth = request.auth @@ -487,9 +446,7 @@ def prepare_request(self, request): files=request.files, data=request.data, json=request.json, - headers=merge_setting( - request.headers, self.headers, dict_class=CaseInsensitiveDict - ), + headers=merge_setting(request.headers, self.headers, dict_class=CaseInsensitiveDict), params=merge_setting(request.params, self.params), auth=merge_setting(auth, self.auth), cookies=merged_cookies, @@ -497,25 +454,10 @@ def prepare_request(self, request): ) return p - def request( - self, - method, - url, - params=None, - data=None, - headers=None, - cookies=None, - files=None, - auth=None, - timeout=None, - allow_redirects=True, - proxies=None, - hooks=None, - stream=None, - verify=None, - cert=None, - json=None, - ): + def request(self, method, url, + params=None, data=None, headers=None, cookies=None, files=None, + auth=None, timeout=None, allow_redirects=True, proxies=None, + hooks=None, stream=None, verify=None, cert=None, json=None): """Constructs a :class:`Request `, prepares it and sends it. Returns :class:`Response ` object. @@ -551,7 +493,7 @@ def request( ``False``, requests will accept any TLS certificate presented by the server, and will ignore hostname mismatches and/or expired certificates, which will make your application vulnerable to - man-in-the-middle (MitM) attacks. Setting verify to ``False`` + man-in-the-middle (MitM) attacks. Setting verify to ``False`` may be useful during local development or testing. :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair. @@ -580,8 +522,8 @@ def request( # Send the request. send_kwargs = { - "timeout": timeout, - "allow_redirects": allow_redirects, + 'timeout': timeout, + 'allow_redirects': allow_redirects, } send_kwargs.update(settings) resp = self.send(prep, **send_kwargs) @@ -596,8 +538,8 @@ def get(self, url, **kwargs): :rtype: requests.Response """ - kwargs.setdefault("allow_redirects", True) - return self.request("GET", url, **kwargs) + kwargs.setdefault('allow_redirects', True) + return self.request('GET', url, **kwargs) def options(self, url, **kwargs): r"""Sends a OPTIONS request. Returns :class:`Response` object. @@ -607,8 +549,8 @@ def options(self, url, **kwargs): :rtype: requests.Response """ - kwargs.setdefault("allow_redirects", True) - return self.request("OPTIONS", url, **kwargs) + kwargs.setdefault('allow_redirects', True) + return self.request('OPTIONS', url, **kwargs) def head(self, url, **kwargs): r"""Sends a HEAD request. Returns :class:`Response` object. @@ -618,8 +560,8 @@ def head(self, url, **kwargs): :rtype: requests.Response """ - kwargs.setdefault("allow_redirects", False) - return self.request("HEAD", url, **kwargs) + kwargs.setdefault('allow_redirects', False) + return self.request('HEAD', url, **kwargs) def post(self, url, data=None, json=None, **kwargs): r"""Sends a POST request. Returns :class:`Response` object. 
@@ -632,7 +574,7 @@ def post(self, url, data=None, json=None, **kwargs): :rtype: requests.Response """ - return self.request("POST", url, data=data, json=json, **kwargs) + return self.request('POST', url, data=data, json=json, **kwargs) def put(self, url, data=None, **kwargs): r"""Sends a PUT request. Returns :class:`Response` object. @@ -644,7 +586,7 @@ def put(self, url, data=None, **kwargs): :rtype: requests.Response """ - return self.request("PUT", url, data=data, **kwargs) + return self.request('PUT', url, data=data, **kwargs) def patch(self, url, data=None, **kwargs): r"""Sends a PATCH request. Returns :class:`Response` object. @@ -656,7 +598,7 @@ def patch(self, url, data=None, **kwargs): :rtype: requests.Response """ - return self.request("PATCH", url, data=data, **kwargs) + return self.request('PATCH', url, data=data, **kwargs) def delete(self, url, **kwargs): r"""Sends a DELETE request. Returns :class:`Response` object. @@ -666,7 +608,7 @@ def delete(self, url, **kwargs): :rtype: requests.Response """ - return self.request("DELETE", url, **kwargs) + return self.request('DELETE', url, **kwargs) def send(self, request, **kwargs): """Send a given PreparedRequest. @@ -675,20 +617,22 @@ def send(self, request, **kwargs): """ # Set defaults that the hooks can utilize to ensure they always have # the correct parameters to reproduce the previous request. - kwargs.setdefault("stream", self.stream) - kwargs.setdefault("verify", self.verify) - kwargs.setdefault("cert", self.cert) - if "proxies" not in kwargs: - kwargs["proxies"] = resolve_proxies(request, self.proxies, self.trust_env) + kwargs.setdefault('stream', self.stream) + kwargs.setdefault('verify', self.verify) + kwargs.setdefault('cert', self.cert) + if 'proxies' not in kwargs: + kwargs['proxies'] = resolve_proxies( + request, self.proxies, self.trust_env + ) # It's possible that users might accidentally send a Request object. # Guard against that specific failure case. if isinstance(request, Request): - raise ValueError("You can only send PreparedRequests.") + raise ValueError('You can only send PreparedRequests.') # Set up variables needed for resolve_redirects and dispatching of hooks - allow_redirects = kwargs.pop("allow_redirects", True) - stream = kwargs.get("stream") + allow_redirects = kwargs.pop('allow_redirects', True) + stream = kwargs.get('stream') hooks = request.hooks # Get the appropriate adapter to use @@ -705,7 +649,7 @@ def send(self, request, **kwargs): r.elapsed = timedelta(seconds=elapsed) # Response manipulation hooks - r = dispatch_hook("response", hooks, r, **kwargs) + r = dispatch_hook('response', hooks, r, **kwargs) # Persist cookies if r.history: @@ -735,9 +679,7 @@ def send(self, request, **kwargs): # If redirects aren't being followed, store the response on the Request for Response.next(). if not allow_redirects: try: - r._next = next( - self.resolve_redirects(r, request, yield_requests=True, **kwargs) - ) + r._next = next(self.resolve_redirects(r, request, yield_requests=True, **kwargs)) except StopIteration: pass @@ -755,19 +697,16 @@ def merge_environment_settings(self, url, proxies, stream, verify, cert): # Gather clues from the surrounding environment. if self.trust_env: # Set environment's proxies. 
- no_proxy = proxies.get("no_proxy") if proxies is not None else None + no_proxy = proxies.get('no_proxy') if proxies is not None else None env_proxies = get_environ_proxies(url, no_proxy=no_proxy) for (k, v) in env_proxies.items(): proxies.setdefault(k, v) - # Look for requests environment configuration - # and be compatible with cURL. + # Look for requests environment configuration and be compatible + # with cURL. if verify is True or verify is None: - verify = ( - os.environ.get("REQUESTS_CA_BUNDLE") - or os.environ.get("CURL_CA_BUNDLE") - or verify - ) + verify = (os.environ.get('REQUESTS_CA_BUNDLE') or + os.environ.get('CURL_CA_BUNDLE')) # Merge all the kwargs. proxies = merge_setting(proxies, self.proxies) @@ -775,7 +714,8 @@ def merge_environment_settings(self, url, proxies, stream, verify, cert): verify = merge_setting(verify, self.verify) cert = merge_setting(cert, self.cert) - return {"proxies": proxies, "stream": stream, "verify": verify, "cert": cert} + return {'verify': verify, 'proxies': proxies, 'stream': stream, + 'cert': cert} def get_adapter(self, url): """ @@ -789,7 +729,7 @@ def get_adapter(self, url): return adapter # Nothing matches :-/ - raise InvalidSchema(f"No connection adapters were found for {url!r}") + raise InvalidSchema("No connection adapters were found for {!r}".format(url)) def close(self): """Closes all adapters and as such the session""" diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/requests/status_codes.py b/venv/lib/python3.10/site-packages/pip/_vendor/requests/status_codes.py index 4bd072b..d80a7cd 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/requests/status_codes.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/requests/status_codes.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- + r""" The ``codes`` object defines a mapping from common names for HTTP statuses to their numerical codes, accessible either as attributes or as dictionary @@ -21,108 +23,101 @@ from .structures import LookupDict _codes = { + # Informational. - 100: ("continue",), - 101: ("switching_protocols",), - 102: ("processing",), - 103: ("checkpoint",), - 122: ("uri_too_long", "request_uri_too_long"), - 200: ("ok", "okay", "all_ok", "all_okay", "all_good", "\\o/", "✓"), - 201: ("created",), - 202: ("accepted",), - 203: ("non_authoritative_info", "non_authoritative_information"), - 204: ("no_content",), - 205: ("reset_content", "reset"), - 206: ("partial_content", "partial"), - 207: ("multi_status", "multiple_status", "multi_stati", "multiple_stati"), - 208: ("already_reported",), - 226: ("im_used",), + 100: ('continue',), + 101: ('switching_protocols',), + 102: ('processing',), + 103: ('checkpoint',), + 122: ('uri_too_long', 'request_uri_too_long'), + 200: ('ok', 'okay', 'all_ok', 'all_okay', 'all_good', '\\o/', '✓'), + 201: ('created',), + 202: ('accepted',), + 203: ('non_authoritative_info', 'non_authoritative_information'), + 204: ('no_content',), + 205: ('reset_content', 'reset'), + 206: ('partial_content', 'partial'), + 207: ('multi_status', 'multiple_status', 'multi_stati', 'multiple_stati'), + 208: ('already_reported',), + 226: ('im_used',), + # Redirection. 
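
# The lookup the docstring above describes, unchanged by either quoting style
# of the _codes table; shown against the top-level requests distribution
# rather than pip's vendored copy.
from requests import codes

assert codes.ok == codes["ok"] == codes.OK == 200
assert codes.temporary_redirect == 307
assert codes["\\o/"] == 200  # the celebratory aliases resolve too
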
- 300: ("multiple_choices",), - 301: ("moved_permanently", "moved", "\\o-"), - 302: ("found",), - 303: ("see_other", "other"), - 304: ("not_modified",), - 305: ("use_proxy",), - 306: ("switch_proxy",), - 307: ("temporary_redirect", "temporary_moved", "temporary"), - 308: ( - "permanent_redirect", - "resume_incomplete", - "resume", - ), # "resume" and "resume_incomplete" to be removed in 3.0 + 300: ('multiple_choices',), + 301: ('moved_permanently', 'moved', '\\o-'), + 302: ('found',), + 303: ('see_other', 'other'), + 304: ('not_modified',), + 305: ('use_proxy',), + 306: ('switch_proxy',), + 307: ('temporary_redirect', 'temporary_moved', 'temporary'), + 308: ('permanent_redirect', + 'resume_incomplete', 'resume',), # These 2 to be removed in 3.0 + # Client Error. - 400: ("bad_request", "bad"), - 401: ("unauthorized",), - 402: ("payment_required", "payment"), - 403: ("forbidden",), - 404: ("not_found", "-o-"), - 405: ("method_not_allowed", "not_allowed"), - 406: ("not_acceptable",), - 407: ("proxy_authentication_required", "proxy_auth", "proxy_authentication"), - 408: ("request_timeout", "timeout"), - 409: ("conflict",), - 410: ("gone",), - 411: ("length_required",), - 412: ("precondition_failed", "precondition"), - 413: ("request_entity_too_large",), - 414: ("request_uri_too_large",), - 415: ("unsupported_media_type", "unsupported_media", "media_type"), - 416: ( - "requested_range_not_satisfiable", - "requested_range", - "range_not_satisfiable", - ), - 417: ("expectation_failed",), - 418: ("im_a_teapot", "teapot", "i_am_a_teapot"), - 421: ("misdirected_request",), - 422: ("unprocessable_entity", "unprocessable"), - 423: ("locked",), - 424: ("failed_dependency", "dependency"), - 425: ("unordered_collection", "unordered"), - 426: ("upgrade_required", "upgrade"), - 428: ("precondition_required", "precondition"), - 429: ("too_many_requests", "too_many"), - 431: ("header_fields_too_large", "fields_too_large"), - 444: ("no_response", "none"), - 449: ("retry_with", "retry"), - 450: ("blocked_by_windows_parental_controls", "parental_controls"), - 451: ("unavailable_for_legal_reasons", "legal_reasons"), - 499: ("client_closed_request",), + 400: ('bad_request', 'bad'), + 401: ('unauthorized',), + 402: ('payment_required', 'payment'), + 403: ('forbidden',), + 404: ('not_found', '-o-'), + 405: ('method_not_allowed', 'not_allowed'), + 406: ('not_acceptable',), + 407: ('proxy_authentication_required', 'proxy_auth', 'proxy_authentication'), + 408: ('request_timeout', 'timeout'), + 409: ('conflict',), + 410: ('gone',), + 411: ('length_required',), + 412: ('precondition_failed', 'precondition'), + 413: ('request_entity_too_large',), + 414: ('request_uri_too_large',), + 415: ('unsupported_media_type', 'unsupported_media', 'media_type'), + 416: ('requested_range_not_satisfiable', 'requested_range', 'range_not_satisfiable'), + 417: ('expectation_failed',), + 418: ('im_a_teapot', 'teapot', 'i_am_a_teapot'), + 421: ('misdirected_request',), + 422: ('unprocessable_entity', 'unprocessable'), + 423: ('locked',), + 424: ('failed_dependency', 'dependency'), + 425: ('unordered_collection', 'unordered'), + 426: ('upgrade_required', 'upgrade'), + 428: ('precondition_required', 'precondition'), + 429: ('too_many_requests', 'too_many'), + 431: ('header_fields_too_large', 'fields_too_large'), + 444: ('no_response', 'none'), + 449: ('retry_with', 'retry'), + 450: ('blocked_by_windows_parental_controls', 'parental_controls'), + 451: ('unavailable_for_legal_reasons', 'legal_reasons'), + 499: ('client_closed_request',), + # 
Server Error. - 500: ("internal_server_error", "server_error", "/o\\", "✗"), - 501: ("not_implemented",), - 502: ("bad_gateway",), - 503: ("service_unavailable", "unavailable"), - 504: ("gateway_timeout",), - 505: ("http_version_not_supported", "http_version"), - 506: ("variant_also_negotiates",), - 507: ("insufficient_storage",), - 509: ("bandwidth_limit_exceeded", "bandwidth"), - 510: ("not_extended",), - 511: ("network_authentication_required", "network_auth", "network_authentication"), + 500: ('internal_server_error', 'server_error', '/o\\', '✗'), + 501: ('not_implemented',), + 502: ('bad_gateway',), + 503: ('service_unavailable', 'unavailable'), + 504: ('gateway_timeout',), + 505: ('http_version_not_supported', 'http_version'), + 506: ('variant_also_negotiates',), + 507: ('insufficient_storage',), + 509: ('bandwidth_limit_exceeded', 'bandwidth'), + 510: ('not_extended',), + 511: ('network_authentication_required', 'network_auth', 'network_authentication'), } -codes = LookupDict(name="status_codes") - +codes = LookupDict(name='status_codes') def _init(): for code, titles in _codes.items(): for title in titles: setattr(codes, title, code) - if not title.startswith(("\\", "/")): + if not title.startswith(('\\', '/')): setattr(codes, title.upper(), code) def doc(code): - names = ", ".join(f"``{n}``" for n in _codes[code]) - return "* %d: %s" % (code, names) + names = ', '.join('``%s``' % n for n in _codes[code]) + return '* %d: %s' % (code, names) global __doc__ - __doc__ = ( - __doc__ + "\n" + "\n".join(doc(code) for code in sorted(_codes)) - if __doc__ is not None - else None - ) - + __doc__ = (__doc__ + '\n' + + '\n'.join(doc(code) for code in sorted(_codes)) + if __doc__ is not None else None) _init() diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/requests/structures.py b/venv/lib/python3.10/site-packages/pip/_vendor/requests/structures.py index 188e13e..8ee0ba7 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/requests/structures.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/requests/structures.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- + """ requests.structures ~~~~~~~~~~~~~~~~~~~ @@ -62,7 +64,11 @@ def __len__(self): def lower_items(self): """Like iteritems(), but with all lowercase keys.""" - return ((lowerkey, keyval[1]) for (lowerkey, keyval) in self._store.items()) + return ( + (lowerkey, keyval[1]) + for (lowerkey, keyval) + in self._store.items() + ) def __eq__(self, other): if isinstance(other, Mapping): @@ -85,10 +91,10 @@ class LookupDict(dict): def __init__(self, name=None): self.name = name - super().__init__() + super(LookupDict, self).__init__() def __repr__(self): - return f"" + return '' % (self.name) def __getitem__(self, key): # We allow fall-through here, so values default to None diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/requests/utils.py b/venv/lib/python3.10/site-packages/pip/_vendor/requests/utils.py index 33f394d..1e5857a 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/requests/utils.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/requests/utils.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- + """ requests.utils ~~~~~~~~~~~~~~ @@ -18,46 +20,28 @@ import warnings import zipfile from collections import OrderedDict +from pip._vendor.urllib3.util import make_headers +from pip._vendor.urllib3.util import parse_url -from pip._vendor.urllib3.util import make_headers, parse_url - -from . import certs from .__version__ import __version__ - +from . 
import certs # to_native_string is unused here, but imported here for backwards compatibility -from ._internal_utils import HEADER_VALIDATORS, to_native_string # noqa: F401 -from .compat import ( - Mapping, - basestring, - bytes, - getproxies, - getproxies_environment, - integer_types, -) +from ._internal_utils import to_native_string from .compat import parse_http_list as _parse_list_header from .compat import ( - proxy_bypass, - proxy_bypass_environment, - quote, - str, - unquote, - urlparse, - urlunparse, -) + quote, urlparse, bytes, str, unquote, getproxies, + proxy_bypass, urlunparse, basestring, integer_types, is_py3, + proxy_bypass_environment, getproxies_environment, Mapping) from .cookies import cookiejar_from_dict -from .exceptions import ( - FileModeWarning, - InvalidHeader, - InvalidURL, - UnrewindableBodyError, -) from .structures import CaseInsensitiveDict +from .exceptions import ( + InvalidURL, InvalidHeader, FileModeWarning, UnrewindableBodyError) -NETRC_FILES = (".netrc", "_netrc") +NETRC_FILES = ('.netrc', '_netrc') DEFAULT_CA_BUNDLE_PATH = certs.where() -DEFAULT_PORTS = {"http": 80, "https": 443} +DEFAULT_PORTS = {'http': 80, 'https': 443} # Ensure that ', ' is used to preserve previous delimiter behavior. DEFAULT_ACCEPT_ENCODING = ", ".join( @@ -65,25 +49,28 @@ ) -if sys.platform == "win32": +if sys.platform == 'win32': # provide a proxy_bypass version on Windows without DNS lookups def proxy_bypass_registry(host): try: - import winreg + if is_py3: + import winreg + else: + import _winreg as winreg except ImportError: return False try: - internetSettings = winreg.OpenKey( - winreg.HKEY_CURRENT_USER, - r"Software\Microsoft\Windows\CurrentVersion\Internet Settings", - ) + internetSettings = winreg.OpenKey(winreg.HKEY_CURRENT_USER, + r'Software\Microsoft\Windows\CurrentVersion\Internet Settings') # ProxyEnable could be REG_SZ or REG_DWORD, normalizing it - proxyEnable = int(winreg.QueryValueEx(internetSettings, "ProxyEnable")[0]) + proxyEnable = int(winreg.QueryValueEx(internetSettings, + 'ProxyEnable')[0]) # ProxyOverride is almost always a string - proxyOverride = winreg.QueryValueEx(internetSettings, "ProxyOverride")[0] - except (OSError, ValueError): + proxyOverride = winreg.QueryValueEx(internetSettings, + 'ProxyOverride')[0] + except OSError: return False if not proxyEnable or not proxyOverride: return False @@ -91,15 +78,15 @@ def proxy_bypass_registry(host): # make a check value list from the registry entry: replace the # '' string by the localhost entry and the corresponding # canonical entry. - proxyOverride = proxyOverride.split(";") + proxyOverride = proxyOverride.split(';') # now check if we match one of the registry values. for test in proxyOverride: - if test == "": - if "." not in host: + if test == '': + if '.' 
not in host: return True - test = test.replace(".", r"\.") # mask dots - test = test.replace("*", r".*") # change glob sequence - test = test.replace("?", r".") # change glob char + test = test.replace(".", r"\.") # mask dots + test = test.replace("*", r".*") # change glob sequence + test = test.replace("?", r".") # change glob char if re.match(test, host, re.I): return True return False @@ -119,7 +106,7 @@ def proxy_bypass(host): # noqa def dict_to_sequence(d): """Returns an internal sequence dictionary update.""" - if hasattr(d, "items"): + if hasattr(d, 'items'): d = d.items() return d @@ -129,13 +116,13 @@ def super_len(o): total_length = None current_position = 0 - if hasattr(o, "__len__"): + if hasattr(o, '__len__'): total_length = len(o) - elif hasattr(o, "len"): + elif hasattr(o, 'len'): total_length = o.len - elif hasattr(o, "fileno"): + elif hasattr(o, 'fileno'): try: fileno = o.fileno() except (io.UnsupportedOperation, AttributeError): @@ -148,23 +135,21 @@ def super_len(o): # Having used fstat to determine the file length, we need to # confirm that this file was opened up in binary mode. - if "b" not in o.mode: - warnings.warn( - ( - "Requests has determined the content-length for this " - "request using the binary size of the file: however, the " - "file has been opened in text mode (i.e. without the 'b' " - "flag in the mode). This may lead to an incorrect " - "content-length. In Requests 3.0, support will be removed " - "for files in text mode." - ), - FileModeWarning, + if 'b' not in o.mode: + warnings.warn(( + "Requests has determined the content-length for this " + "request using the binary size of the file: however, the " + "file has been opened in text mode (i.e. without the 'b' " + "flag in the mode). This may lead to an incorrect " + "content-length. In Requests 3.0, support will be removed " + "for files in text mode."), + FileModeWarning ) - if hasattr(o, "tell"): + if hasattr(o, 'tell'): try: current_position = o.tell() - except OSError: + except (OSError, IOError): # This can happen in some weird situations, such as when the file # is actually a special file descriptor like stdin. In this # instance, we don't know what the length is, so set it to zero and @@ -172,7 +157,7 @@ def super_len(o): if total_length is not None: current_position = total_length else: - if hasattr(o, "seek") and total_length is None: + if hasattr(o, 'seek') and total_length is None: # StringIO and BytesIO have seek but no usable fileno try: # seek to end of file @@ -182,7 +167,7 @@ def super_len(o): # seek back to current position to support # partially read file-like objects o.seek(current_position or 0) - except OSError: + except (OSError, IOError): total_length = 0 if total_length is None: @@ -194,14 +179,14 @@ def super_len(o): def get_netrc_auth(url, raise_errors=False): """Returns the Requests tuple auth for a given url from netrc.""" - netrc_file = os.environ.get("NETRC") + netrc_file = os.environ.get('NETRC') if netrc_file is not None: netrc_locations = (netrc_file,) else: - netrc_locations = (f"~/{f}" for f in NETRC_FILES) + netrc_locations = ('~/{}'.format(f) for f in NETRC_FILES) try: - from netrc import NetrcParseError, netrc + from netrc import netrc, NetrcParseError netrc_path = None @@ -226,18 +211,18 @@ def get_netrc_auth(url, raise_errors=False): # Strip port numbers from netloc. This weird `if...encode`` dance is # used for Python 3.2, which doesn't support unicode literals. 
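
# Worked example of the ProxyOverride glob-to-regex translation above; the
# helper name is illustrative only, but the three replace() calls mirror the
# code exactly.
import re

def glob_to_regex(test: str) -> str:
    test = test.replace(".", r"\.")  # mask dots
    test = test.replace("*", r".*")  # change glob sequence
    test = test.replace("?", r".")   # change glob char
    return test

assert glob_to_regex("*.local") == r".*\.local"
assert re.match(glob_to_regex("*.local"), "server.local", re.I)
assert not re.match(glob_to_regex("*.local"), "server.example.com", re.I)
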
- splitstr = b":" + splitstr = b':' if isinstance(url, str): - splitstr = splitstr.decode("ascii") + splitstr = splitstr.decode('ascii') host = ri.netloc.split(splitstr)[0] try: _netrc = netrc(netrc_path).authenticators(host) if _netrc: # Return with login / password - login_i = 0 if _netrc[0] else 1 + login_i = (0 if _netrc[0] else 1) return (_netrc[login_i], _netrc[2]) - except (NetrcParseError, OSError): + except (NetrcParseError, IOError): # If there was a parsing error or a permissions issue reading the file, # we'll just skip netrc auth unless explicitly asked to raise errors. if raise_errors: @@ -250,8 +235,9 @@ def get_netrc_auth(url, raise_errors=False): def guess_filename(obj): """Tries to guess the filename of the given object.""" - name = getattr(obj, "name", None) - if name and isinstance(name, basestring) and name[0] != "<" and name[-1] != ">": + name = getattr(obj, 'name', None) + if (name and isinstance(name, basestring) and name[0] != '<' and + name[-1] != '>'): return os.path.basename(name) @@ -273,7 +259,7 @@ def extract_zipped_paths(path): # If we don't check for an empty prefix after the split (in other words, archive remains unchanged after the split), # we _can_ end up in an infinite loop on a rare corner case affecting a small number of users break - member = "/".join([prefix, member]) + member = '/'.join([prefix, member]) if not zipfile.is_zipfile(archive): return path @@ -284,7 +270,7 @@ def extract_zipped_paths(path): # we have a valid zip archive and a valid member of that archive tmp = tempfile.gettempdir() - extracted_path = os.path.join(tmp, member.split("/")[-1]) + extracted_path = os.path.join(tmp, member.split('/')[-1]) if not os.path.exists(extracted_path): # use read + write to avoid the creating nested folders, we only want the file, avoids mkdir racing condition with atomic_open(extracted_path) as file_handler: @@ -295,11 +281,12 @@ def extract_zipped_paths(path): @contextlib.contextmanager def atomic_open(filename): """Write a file to the disk in an atomic fashion""" + replacer = os.rename if sys.version_info[0] == 2 else os.replace tmp_descriptor, tmp_name = tempfile.mkstemp(dir=os.path.dirname(filename)) try: - with os.fdopen(tmp_descriptor, "wb") as tmp_handler: + with os.fdopen(tmp_descriptor, 'wb') as tmp_handler: yield tmp_handler - os.replace(tmp_name, filename) + replacer(tmp_name, filename) except BaseException: os.remove(tmp_name) raise @@ -327,7 +314,7 @@ def from_key_val_list(value): return None if isinstance(value, (str, bytes, bool, int)): - raise ValueError("cannot encode objects that are not 2-tuples") + raise ValueError('cannot encode objects that are not 2-tuples') return OrderedDict(value) @@ -353,7 +340,7 @@ def to_key_val_list(value): return None if isinstance(value, (str, bytes, bool, int)): - raise ValueError("cannot encode objects that are not 2-tuples") + raise ValueError('cannot encode objects that are not 2-tuples') if isinstance(value, Mapping): value = value.items() @@ -418,10 +405,10 @@ def parse_dict_header(value): """ result = {} for item in _parse_list_header(value): - if "=" not in item: + if '=' not in item: result[item] = None continue - name, value = item.split("=", 1) + name, value = item.split('=', 1) if value[:1] == value[-1:] == '"': value = unquote_header_value(value[1:-1]) result[name] = value @@ -449,8 +436,8 @@ def unquote_header_value(value, is_filename=False): # replace sequence below on a UNC path has the effect of turning # the leading double slash into a single slash and then # _fix_ie_filename() 
doesn't work correctly. See #458.
-    if not is_filename or value[:2] != "\\\\":
-        return value.replace("\\\\", "\\").replace('\\"', '"')
+    if not is_filename or value[:2] != '\\\\':
+        return value.replace('\\\\', '\\').replace('\\"', '"')
     return value
@@ -485,24 +472,19 @@ def get_encodings_from_content(content):
     :param content: bytestring to extract encodings from.
     """
-    warnings.warn(
-        (
-            "In requests 3.0, get_encodings_from_content will be removed. For "
-            "more information, please see the discussion on issue #2266. (This"
-            " warning should only appear once.)"
-        ),
-        DeprecationWarning,
-    )
+    warnings.warn((
+        'In requests 3.0, get_encodings_from_content will be removed. For '
+        'more information, please see the discussion on issue #2266. (This'
+        ' warning should only appear once.)'),
+        DeprecationWarning)

     charset_re = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I)
     pragma_re = re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I)
     xml_re = re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]')

-    return (
-        charset_re.findall(content)
-        + pragma_re.findall(content)
-        + xml_re.findall(content)
-    )
+    return (charset_re.findall(content) +
+            pragma_re.findall(content) +
+            xml_re.findall(content))


 def _parse_content_type_header(header):
@@ -513,7 +495,7 @@
     parameters
     """

-    tokens = header.split(";")
+    tokens = header.split(';')
     content_type, params = tokens[0].strip(), tokens[1:]
     params_dict = {}
     items_to_strip = "\"' "
@@ -525,7 +507,7 @@
         index_of_equals = param.find("=")
         if index_of_equals != -1:
             key = param[:index_of_equals].strip(items_to_strip)
-            value = param[index_of_equals + 1 :].strip(items_to_strip)
+            value = param[index_of_equals + 1:].strip(items_to_strip)
             params_dict[key.lower()] = value
     return content_type, params_dict
@@ -537,37 +519,38 @@ def get_encoding_from_headers(headers):
     :rtype: str
     """

-    content_type = headers.get("content-type")
+    content_type = headers.get('content-type')

     if not content_type:
         return None

     content_type, params = _parse_content_type_header(content_type)

-    if "charset" in params:
-        return params["charset"].strip("'\"")
+    if 'charset' in params:
+        return params['charset'].strip("'\"")

-    if "text" in content_type:
-        return "ISO-8859-1"
+    if 'text' in content_type:
+        return 'ISO-8859-1'

-    if "application/json" in content_type:
+    if 'application/json' in content_type:
         # Assume UTF-8 based on RFC 4627: https://www.ietf.org/rfc/rfc4627.txt since the charset was unset
-        return "utf-8"
+        return 'utf-8'


 def stream_decode_response_unicode(iterator, r):
-    """Stream decodes an iterator."""
+    """Stream decodes a iterator."""

     if r.encoding is None:
-        yield from iterator
+        for item in iterator:
+            yield item
         return

-    decoder = codecs.getincrementaldecoder(r.encoding)(errors="replace")
+    decoder = codecs.getincrementaldecoder(r.encoding)(errors='replace')
     for chunk in iterator:
         rv = decoder.decode(chunk)
         if rv:
             yield rv
-    rv = decoder.decode(b"", final=True)
+    rv = decoder.decode(b'', final=True)
     if rv:
         yield rv
@@ -578,7 +561,7 @@ def iter_slices(string, slice_length):
     if slice_length is None or slice_length <= 0:
         slice_length = len(string)
     while pos < len(string):
-        yield string[pos : pos + slice_length]
+        yield string[pos:pos + slice_length]
         pos += slice_length
@@ -594,14 +577,11 @@ def get_unicode_from_response(r):
     :rtype: str
     """
-    warnings.warn(
-        (
-            "In requests 3.0, get_unicode_from_response will be removed. For "
-            "more information, please see the discussion on issue #2266. 
(This" - " warning should only appear once.)" - ), - DeprecationWarning, - ) + warnings.warn(( + 'In requests 3.0, get_unicode_from_response will be removed. For ' + 'more information, please see the discussion on issue #2266. (This' + ' warning should only appear once.)'), + DeprecationWarning) tried_encodings = [] @@ -616,15 +596,14 @@ def get_unicode_from_response(r): # Fall back: try: - return str(r.content, encoding, errors="replace") + return str(r.content, encoding, errors='replace') except TypeError: return r.content # The unreserved URI characters (RFC 3986) UNRESERVED_SET = frozenset( - "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + "0123456789-._~" -) + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + "0123456789-._~") def unquote_unreserved(uri): @@ -633,22 +612,22 @@ def unquote_unreserved(uri): :rtype: str """ - parts = uri.split("%") + parts = uri.split('%') for i in range(1, len(parts)): h = parts[i][0:2] if len(h) == 2 and h.isalnum(): try: c = chr(int(h, 16)) except ValueError: - raise InvalidURL(f"Invalid percent-escape sequence: '{h}'") + raise InvalidURL("Invalid percent-escape sequence: '%s'" % h) if c in UNRESERVED_SET: parts[i] = c + parts[i][2:] else: - parts[i] = f"%{parts[i]}" + parts[i] = '%' + parts[i] else: - parts[i] = f"%{parts[i]}" - return "".join(parts) + parts[i] = '%' + parts[i] + return ''.join(parts) def requote_uri(uri): @@ -681,10 +660,10 @@ def address_in_network(ip, net): :rtype: bool """ - ipaddr = struct.unpack("=L", socket.inet_aton(ip))[0] - netaddr, bits = net.split("/") - netmask = struct.unpack("=L", socket.inet_aton(dotted_netmask(int(bits))))[0] - network = struct.unpack("=L", socket.inet_aton(netaddr))[0] & netmask + ipaddr = struct.unpack('=L', socket.inet_aton(ip))[0] + netaddr, bits = net.split('/') + netmask = struct.unpack('=L', socket.inet_aton(dotted_netmask(int(bits))))[0] + network = struct.unpack('=L', socket.inet_aton(netaddr))[0] & netmask return (ipaddr & netmask) == (network & netmask) @@ -695,8 +674,8 @@ def dotted_netmask(mask): :rtype: str """ - bits = 0xFFFFFFFF ^ (1 << 32 - mask) - 1 - return socket.inet_ntoa(struct.pack(">I", bits)) + bits = 0xffffffff ^ (1 << 32 - mask) - 1 + return socket.inet_ntoa(struct.pack('>I', bits)) def is_ipv4_address(string_ip): @@ -705,7 +684,7 @@ def is_ipv4_address(string_ip): """ try: socket.inet_aton(string_ip) - except OSError: + except socket.error: return False return True @@ -716,9 +695,9 @@ def is_valid_cidr(string_network): :rtype: bool """ - if string_network.count("/") == 1: + if string_network.count('/') == 1: try: - mask = int(string_network.split("/")[1]) + mask = int(string_network.split('/')[1]) except ValueError: return False @@ -726,8 +705,8 @@ def is_valid_cidr(string_network): return False try: - socket.inet_aton(string_network.split("/")[0]) - except OSError: + socket.inet_aton(string_network.split('/')[0]) + except socket.error: return False else: return False @@ -764,14 +743,13 @@ def should_bypass_proxies(url, no_proxy): """ # Prioritize lowercase environment variables over uppercase # to keep a consistent behaviour with other http projects (curl, wget). - def get_proxy(key): - return os.environ.get(key) or os.environ.get(key.upper()) + get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper()) # First check whether no_proxy is defined. If it is, check that the URL # we're getting isn't in the no_proxy list. 
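
# Worked example of the netmask arithmetic above, restated standalone so the
# asserts can run: a /24 mask keeps the top 24 bits, so 192.168.1.1 falls
# inside 192.168.1.0/24 while 10.0.0.1 does not.
import socket
import struct

def dotted_netmask(mask: int) -> str:
    bits = 0xFFFFFFFF ^ (1 << 32 - mask) - 1
    return socket.inet_ntoa(struct.pack(">I", bits))

def address_in_network(ip: str, net: str) -> bool:
    ipaddr = struct.unpack("=L", socket.inet_aton(ip))[0]
    netaddr, bits = net.split("/")
    netmask = struct.unpack("=L", socket.inet_aton(dotted_netmask(int(bits))))[0]
    network = struct.unpack("=L", socket.inet_aton(netaddr))[0] & netmask
    return (ipaddr & netmask) == (network & netmask)

assert dotted_netmask(24) == "255.255.255.0"
assert address_in_network("192.168.1.1", "192.168.1.0/24")
assert not address_in_network("10.0.0.1", "192.168.1.0/24")
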
no_proxy_arg = no_proxy if no_proxy is None: - no_proxy = get_proxy("no_proxy") + no_proxy = get_proxy('no_proxy') parsed = urlparse(url) if parsed.hostname is None: @@ -781,7 +759,9 @@ def get_proxy(key): if no_proxy: # We need to check whether we match here. We need to see if we match # the end of the hostname, both with and without the port. - no_proxy = (host for host in no_proxy.replace(" ", "").split(",") if host) + no_proxy = ( + host for host in no_proxy.replace(' ', '').split(',') if host + ) if is_ipv4_address(parsed.hostname): for proxy_ip in no_proxy: @@ -795,7 +775,7 @@ def get_proxy(key): else: host_with_port = parsed.hostname if parsed.port: - host_with_port += f":{parsed.port}" + host_with_port += ':{}'.format(parsed.port) for host in no_proxy: if parsed.hostname.endswith(host) or host_with_port.endswith(host): @@ -803,7 +783,7 @@ def get_proxy(key): # to apply the proxies on this URL. return True - with set_environ("no_proxy", no_proxy_arg): + with set_environ('no_proxy', no_proxy_arg): # parsed.hostname can be `None` in cases such as a file URI. try: bypass = proxy_bypass(parsed.hostname) @@ -837,13 +817,13 @@ def select_proxy(url, proxies): proxies = proxies or {} urlparts = urlparse(url) if urlparts.hostname is None: - return proxies.get(urlparts.scheme, proxies.get("all")) + return proxies.get(urlparts.scheme, proxies.get('all')) proxy_keys = [ - urlparts.scheme + "://" + urlparts.hostname, + urlparts.scheme + '://' + urlparts.hostname, urlparts.scheme, - "all://" + urlparts.hostname, - "all", + 'all://' + urlparts.hostname, + 'all', ] proxy = None for proxy_key in proxy_keys: @@ -868,13 +848,13 @@ def resolve_proxies(request, proxies, trust_env=True): proxies = proxies if proxies is not None else {} url = request.url scheme = urlparse(url).scheme - no_proxy = proxies.get("no_proxy") + no_proxy = proxies.get('no_proxy') new_proxies = proxies.copy() if trust_env and not should_bypass_proxies(url, no_proxy=no_proxy): environ_proxies = get_environ_proxies(url, no_proxy=no_proxy) - proxy = environ_proxies.get(scheme, environ_proxies.get("all")) + proxy = environ_proxies.get(scheme, environ_proxies.get('all')) if proxy: new_proxies.setdefault(scheme, proxy) @@ -887,21 +867,19 @@ def default_user_agent(name="python-requests"): :rtype: str """ - return f"{name}/{__version__}" + return '%s/%s' % (name, __version__) def default_headers(): """ :rtype: requests.structures.CaseInsensitiveDict """ - return CaseInsensitiveDict( - { - "User-Agent": default_user_agent(), - "Accept-Encoding": DEFAULT_ACCEPT_ENCODING, - "Accept": "*/*", - "Connection": "keep-alive", - } - ) + return CaseInsensitiveDict({ + 'User-Agent': default_user_agent(), + 'Accept-Encoding': DEFAULT_ACCEPT_ENCODING, + 'Accept': '*/*', + 'Connection': 'keep-alive', + }) def parse_header_links(value): @@ -914,23 +892,23 @@ def parse_header_links(value): links = [] - replace_chars = " '\"" + replace_chars = ' \'"' value = value.strip(replace_chars) if not value: return links - for val in re.split(", *<", value): + for val in re.split(', *<', value): try: - url, params = val.split(";", 1) + url, params = val.split(';', 1) except ValueError: - url, params = val, "" + url, params = val, '' - link = {"url": url.strip("<> '\"")} + link = {'url': url.strip('<> \'"')} - for param in params.split(";"): + for param in params.split(';'): try: - key, value = param.split("=") + key, value = param.split('=') except ValueError: break @@ -942,7 +920,7 @@ def parse_header_links(value): # Null bytes; no need to recreate these on each 
call to guess_json_utf -_null = "\x00".encode("ascii") # encoding to ASCII for Python 3 +_null = '\x00'.encode('ascii') # encoding to ASCII for Python 3 _null2 = _null * 2 _null3 = _null * 3 @@ -956,25 +934,25 @@ def guess_json_utf(data): # determine the encoding. Also detect a BOM, if present. sample = data[:4] if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE): - return "utf-32" # BOM included + return 'utf-32' # BOM included if sample[:3] == codecs.BOM_UTF8: - return "utf-8-sig" # BOM included, MS style (discouraged) + return 'utf-8-sig' # BOM included, MS style (discouraged) if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE): - return "utf-16" # BOM included + return 'utf-16' # BOM included nullcount = sample.count(_null) if nullcount == 0: - return "utf-8" + return 'utf-8' if nullcount == 2: - if sample[::2] == _null2: # 1st and 3rd are null - return "utf-16-be" + if sample[::2] == _null2: # 1st and 3rd are null + return 'utf-16-be' if sample[1::2] == _null2: # 2nd and 4th are null - return "utf-16-le" + return 'utf-16-le' # Did not detect 2 valid UTF-16 ascii-range characters if nullcount == 3: if sample[:3] == _null3: - return "utf-32-be" + return 'utf-32-be' if sample[1:] == _null3: - return "utf-32-le" + return 'utf-32-le' # Did not detect a valid UTF-32 ascii-range character return None @@ -999,13 +977,13 @@ def prepend_scheme_if_needed(url, new_scheme): if auth: # parse_url doesn't provide the netloc with auth # so we'll add it ourselves. - netloc = "@".join([auth, netloc]) + netloc = '@'.join([auth, netloc]) if scheme is None: scheme = new_scheme if path is None: - path = "" + path = '' - return urlunparse((scheme, netloc, path, "", query, fragment)) + return urlunparse((scheme, netloc, path, '', query, fragment)) def get_auth_from_url(url): @@ -1019,36 +997,35 @@ def get_auth_from_url(url): try: auth = (unquote(parsed.username), unquote(parsed.password)) except (AttributeError, TypeError): - auth = ("", "") + auth = ('', '') return auth +# Moved outside of function to avoid recompile every call +_CLEAN_HEADER_REGEX_BYTE = re.compile(b'^\\S[^\\r\\n]*$|^$') +_CLEAN_HEADER_REGEX_STR = re.compile(r'^\S[^\r\n]*$|^$') + + def check_header_validity(header): - """Verifies that header parts don't contain leading whitespace - reserved characters, or return characters. + """Verifies that header value is a string which doesn't contain + leading whitespace or return characters. This prevents unintended + header injection. :param header: tuple, in the format (name, value). 
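
# Example of the BOM/null-pattern detection implemented above: encode a small
# JSON payload in different UTF flavours and let the function deduce the codec
# (shown against the top-level requests distribution, not pip's vendored copy).
from requests.utils import guess_json_utf

payload = '{"key": "value"}'
assert guess_json_utf(payload.encode("utf-8")) == "utf-8"
assert guess_json_utf(payload.encode("utf-16")) == "utf-16"        # BOM included
assert guess_json_utf(payload.encode("utf-32-be")) == "utf-32-be"  # null pattern
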
""" name, value = header - for part in header: - if type(part) not in HEADER_VALIDATORS: - raise InvalidHeader( - f"Header part ({part!r}) from {{{name!r}: {value!r}}} must be " - f"of type str or bytes, not {type(part)}" - ) - - _validate_header_part(name, "name", HEADER_VALIDATORS[type(name)][0]) - _validate_header_part(value, "value", HEADER_VALIDATORS[type(value)][1]) - - -def _validate_header_part(header_part, header_kind, validator): - if not validator.match(header_part): - raise InvalidHeader( - f"Invalid leading whitespace, reserved character(s), or return" - f"character(s) in header {header_kind}: {header_part!r}" - ) + if isinstance(value, bytes): + pat = _CLEAN_HEADER_REGEX_BYTE + else: + pat = _CLEAN_HEADER_REGEX_STR + try: + if not pat.match(value): + raise InvalidHeader("Invalid return character or leading space in header: %s" % name) + except TypeError: + raise InvalidHeader("Value for header {%s: %s} must be of type str or " + "bytes, not %s" % (name, value, type(value))) def urldefragauth(url): @@ -1063,24 +1040,21 @@ def urldefragauth(url): if not netloc: netloc, path = path, netloc - netloc = netloc.rsplit("@", 1)[-1] + netloc = netloc.rsplit('@', 1)[-1] - return urlunparse((scheme, netloc, path, params, query, "")) + return urlunparse((scheme, netloc, path, params, query, '')) def rewind_body(prepared_request): """Move file pointer back to its recorded starting position so it can be read again on redirect. """ - body_seek = getattr(prepared_request.body, "seek", None) - if body_seek is not None and isinstance( - prepared_request._body_position, integer_types - ): + body_seek = getattr(prepared_request.body, 'seek', None) + if body_seek is not None and isinstance(prepared_request._body_position, integer_types): try: body_seek(prepared_request._body_position) - except OSError: - raise UnrewindableBodyError( - "An error occurred when rewinding request body for redirect." 
- ) + except (IOError, OSError): + raise UnrewindableBodyError("An error occurred when rewinding request " + "body for redirect.") else: raise UnrewindableBodyError("Unable to rewind request body for redirect.") diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/rich/__init__.py b/venv/lib/python3.10/site-packages/pip/_vendor/rich/__init__.py index d35875d..50f3815 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/rich/__init__.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/rich/__init__.py @@ -1,9 +1,9 @@ """Rich text and beautiful formatting in the terminal.""" import os -from typing import IO, TYPE_CHECKING, Any, Callable, Optional, Union +from typing import Callable, IO, TYPE_CHECKING, Any, Optional -from ._extension import load_ipython_extension # noqa: F401 +from ._extension import load_ipython_extension __all__ = ["get_console", "reconfigure", "print", "inspect"] @@ -13,11 +13,7 @@ # Global console used by alternative print _console: Optional["Console"] = None -try: - _IMPORT_CWD = os.path.abspath(os.getcwd()) -except FileNotFoundError: - # Can happen if the cwd has been deleted - _IMPORT_CWD = "" +_IMPORT_CWD = os.path.abspath(os.getcwd()) def get_console() -> "Console": @@ -77,7 +73,7 @@ def print_json( json: Optional[str] = None, *, data: Any = None, - indent: Union[None, int, str] = 2, + indent: int = 2, highlight: bool = True, skip_keys: bool = False, ensure_ascii: bool = True, diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/rich/__main__.py b/venv/lib/python3.10/site-packages/pip/_vendor/rich/__main__.py index 54e6d5e..8692d37 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/rich/__main__.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/rich/__main__.py @@ -51,6 +51,7 @@ def make_test_card() -> Table: pad_edge=False, ) color_table.add_row( + # "[bold yellow]256[/] colors or [bold green]16.7 million[/] colors [blue](if supported by your terminal)[/].", ( "✓ [bold green]4-bit color[/]\n" "✓ [bold blue]8-bit color[/]\n" @@ -225,12 +226,10 @@ def iter_last(values: Iterable[T]) -> Iterable[Tuple[bool, T]]: console.print(test_card) taken = round((process_time() - start) * 1000.0, 1) - c = Console(record=True) - c.print(test_card) - # c.save_svg( - # path="/Users/darrenburns/Library/Application Support/JetBrains/PyCharm2021.3/scratches/svg_export.svg", - # title="Rich can export to SVG", - # ) + text = console.file.getvalue() + # https://bugs.python.org/issue37871 + for line in text.splitlines(True): + print(line, end="") print(f"rendered in {pre_cache_taken}ms (cold cache)") print(f"rendered in {taken}ms (warm cache)") @@ -243,10 +242,6 @@ def iter_last(values: Iterable[T]) -> Iterable[Tuple[bool, T]]: sponsor_message.add_column(style="green", justify="right") sponsor_message.add_column(no_wrap=True) - sponsor_message.add_row( - "Textualize", - "[u blue link=https://github.com/textualize]https://github.com/textualize", - ) sponsor_message.add_row( "Buy devs a :coffee:", "[u blue link=https://ko-fi.com/textualize]https://ko-fi.com/textualize", @@ -255,12 +250,15 @@ def iter_last(values: Iterable[T]) -> Iterable[Tuple[bool, T]]: "Twitter", "[u blue link=https://twitter.com/willmcgugan]https://twitter.com/willmcgugan", ) + sponsor_message.add_row( + "Blog", "[u blue link=https://www.willmcgugan.com]https://www.willmcgugan.com" + ) intro_message = Text.from_markup( """\ We hope you enjoy using Rich! 
-Rich is maintained with [red]:heart:[/] by [link=https://www.textualize.io]Textualize.io[/]
+Rich is maintained with :heart: by [link=https://www.textualize.io]Textualize.io[/]

 - Will McGugan"""
     )

diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/rich/_export_format.py b/venv/lib/python3.10/site-packages/pip/_vendor/rich/_export_format.py
deleted file mode 100644
index b79c130..0000000
--- a/venv/lib/python3.10/site-packages/pip/_vendor/rich/_export_format.py
+++ /dev/null
@@ -1,78 +0,0 @@
-CONSOLE_HTML_FORMAT = """\
-[template body lost in extraction: the HTML markup of this template was stripped; only its {code} placeholder survived]
-"""
-
-CONSOLE_SVG_FORMAT = """\
-[template body lost in extraction: the SVG markup was stripped; only the {lines}, {chrome}, {backgrounds} and {matrix} placeholders survived]
-"""
-
-_SVG_FONT_FAMILY = "Rich Fira Code"
-_SVG_CLASSES_PREFIX = "rich-svg"
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/rich/_inspect.py b/venv/lib/python3.10/site-packages/pip/_vendor/rich/_inspect.py
index 30446ce..262695b 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/rich/_inspect.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/rich/_inspect.py
@@ -1,11 +1,9 @@
 from __future__ import absolute_import

-import inspect
 from inspect import cleandoc, getdoc, getfile, isclass, ismodule, signature
-from typing import Any, Collection, Iterable, Optional, Tuple, Type, Union
+from typing import Any, Iterable, Optional, Tuple

-from .console import Group, RenderableType
-from .control import escape_control_codes
+from .console import RenderableType, Group
 from .highlighter import ReprHighlighter
 from .jupyter import JupyterMixin
 from .panel import Panel
@@ -20,6 +18,12 @@ def _first_paragraph(doc: str) -> str:
     return paragraph


+def _reformat_doc(doc: str) -> str:
+    """Reformat docstring."""
+    doc = cleandoc(doc).strip()
+    return doc
+
+
 class Inspect(JupyterMixin):
     """A renderable to inspect any Python Object.

@@ -93,8 +97,7 @@ def _get_signature(self, name: str, obj: Any) -> Optional[Text]:
         source_filename: Optional[str] = None
         try:
             source_filename = getfile(obj)
-        except (OSError, TypeError):
-            # OSError is raised if obj has no source file, e.g. when defined in REPL.
+        except TypeError:
             pass

         callable_name = Text(name, style="inspect.callable")
@@ -103,19 +106,8 @@ def _get_signature(self, name: str, obj: Any) -> Optional[Text]:
         signature_text = self.highlighter(_signature)

         qualname = name or getattr(obj, "__qualname__", name)
-
-        # If obj is a module, there may be classes (which are callable) to display
-        if inspect.isclass(obj):
-            prefix = "class"
-        elif inspect.iscoroutinefunction(obj):
-            prefix = "async def"
-        else:
-            prefix = "def"
-
         qual_signature = Text.assemble(
-            (f"{prefix} ", f"inspect.{prefix.replace(' ', '_')}"),
-            (qualname, "inspect.callable"),
-            signature_text,
+            ("def ", "inspect.def"), (qualname, "inspect.callable"), signature_text
         )

         return qual_signature
@@ -158,9 +150,11 @@ def safe_getattr(attr_name: str) -> Tuple[Any, Any]:
             yield ""

         if self.docs:
-            _doc = self._get_formatted_doc(obj)
+            _doc = getdoc(obj)
             if _doc is not None:
-                doc_text = Text(_doc, style="inspect.help")
+                if not self.help:
+                    _doc = _first_paragraph(_doc)
+                doc_text = Text(_reformat_doc(_doc), style="inspect.help")
                 doc_text = highlighter(doc_text)
                 yield doc_text
             yield ""
@@ -195,10 +189,13 @@ def safe_getattr(attr_name: str) -> Tuple[Any, Any]:
                     add_row(key_text, Pretty(value, highlighter=highlighter))
             else:
                 if self.docs:
-                    docs = self._get_formatted_doc(value)
+                    docs = getdoc(value)
                     if docs is not None:
-                        _signature_text.append("\n" if "\n" in docs else " ")
-                        doc = highlighter(docs)
+                        _doc = _reformat_doc(str(docs))
+                        if not self.help:
+                            _doc = _first_paragraph(_doc)
+                        _signature_text.append("\n" if "\n" in _doc else " ")
+                        doc = highlighter(_doc)
                         doc.stylize("inspect.doc")
                         _signature_text.append(doc)

@@ -207,64 +204,7 @@ def safe_getattr(attr_name: str) -> Tuple[Any, Any]:
                     add_row(key_text, Pretty(value, highlighter=highlighter))
         if items_table.row_count:
             yield items_table
-        elif not_shown_count:
+        else:
             yield Text.from_markup(
-                f"[b cyan]{not_shown_count}[/][i] attribute(s) not shown.[/i] "
-                f"Run [b][magenta]inspect[/]([not b]inspect[/])[/b] for options."
+ f"[b cyan]{not_shown_count}[/][i] attribute(s) not shown.[/i] Run [b][magenta]inspect[/]([not b]inspect[/])[/b] for options." ) - - def _get_formatted_doc(self, object_: Any) -> Optional[str]: - """ - Extract the docstring of an object, process it and returns it. - The processing consists in cleaning up the doctring's indentation, - taking only its 1st paragraph if `self.help` is not True, - and escape its control codes. - - Args: - object_ (Any): the object to get the docstring from. - - Returns: - Optional[str]: the processed docstring, or None if no docstring was found. - """ - docs = getdoc(object_) - if docs is None: - return None - docs = cleandoc(docs).strip() - if not self.help: - docs = _first_paragraph(docs) - return escape_control_codes(docs) - - -def get_object_types_mro(obj: Union[object, Type[Any]]) -> Tuple[type, ...]: - """Returns the MRO of an object's class, or of the object itself if it's a class.""" - if not hasattr(obj, "__mro__"): - # N.B. we cannot use `if type(obj) is type` here because it doesn't work with - # some types of classes, such as the ones that use abc.ABCMeta. - obj = type(obj) - return getattr(obj, "__mro__", ()) - - -def get_object_types_mro_as_strings(obj: object) -> Collection[str]: - """ - Returns the MRO of an object's class as full qualified names, or of the object itself if it's a class. - - Examples: - `object_types_mro_as_strings(JSONDecoder)` will return `['json.decoder.JSONDecoder', 'builtins.object']` - """ - return [ - f'{getattr(type_, "__module__", "")}.{getattr(type_, "__qualname__", "")}' - for type_ in get_object_types_mro(obj) - ] - - -def is_object_one_of_types( - obj: object, fully_qualified_types_names: Collection[str] -) -> bool: - """ - Returns `True` if the given object's class (or the object itself, if it's a class) has one of the - fully qualified names in its MRO. 
- """ - for type_name in get_object_types_mro_as_strings(obj): - if type_name in fully_qualified_types_names: - return True - return False diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/rich/_spinners.py b/venv/lib/python3.10/site-packages/pip/_vendor/rich/_spinners.py index d0bb1fe..dc1db07 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/rich/_spinners.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/rich/_spinners.py @@ -22,36 +22,149 @@ SPINNERS = { "dots": { "interval": 80, - "frames": "⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏", + "frames": ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"], }, - "dots2": {"interval": 80, "frames": "⣾⣽⣻⢿⡿⣟⣯⣷"}, + "dots2": {"interval": 80, "frames": ["⣾", "⣽", "⣻", "⢿", "⡿", "⣟", "⣯", "⣷"]}, "dots3": { "interval": 80, - "frames": "⠋⠙⠚⠞⠖⠦⠴⠲⠳⠓", + "frames": ["⠋", "⠙", "⠚", "⠞", "⠖", "⠦", "⠴", "⠲", "⠳", "⠓"], }, "dots4": { "interval": 80, - "frames": "⠄⠆⠇⠋⠙⠸⠰⠠⠰⠸⠙⠋⠇⠆", + "frames": [ + "⠄", + "⠆", + "⠇", + "⠋", + "⠙", + "⠸", + "⠰", + "⠠", + "⠰", + "⠸", + "⠙", + "⠋", + "⠇", + "⠆", + ], }, "dots5": { "interval": 80, - "frames": "⠋⠙⠚⠒⠂⠂⠒⠲⠴⠦⠖⠒⠐⠐⠒⠓⠋", + "frames": [ + "⠋", + "⠙", + "⠚", + "⠒", + "⠂", + "⠂", + "⠒", + "⠲", + "⠴", + "⠦", + "⠖", + "⠒", + "⠐", + "⠐", + "⠒", + "⠓", + "⠋", + ], }, "dots6": { "interval": 80, - "frames": "⠁⠉⠙⠚⠒⠂⠂⠒⠲⠴⠤⠄⠄⠤⠴⠲⠒⠂⠂⠒⠚⠙⠉⠁", + "frames": [ + "⠁", + "⠉", + "⠙", + "⠚", + "⠒", + "⠂", + "⠂", + "⠒", + "⠲", + "⠴", + "⠤", + "⠄", + "⠄", + "⠤", + "⠴", + "⠲", + "⠒", + "⠂", + "⠂", + "⠒", + "⠚", + "⠙", + "⠉", + "⠁", + ], }, "dots7": { "interval": 80, - "frames": "⠈⠉⠋⠓⠒⠐⠐⠒⠖⠦⠤⠠⠠⠤⠦⠖⠒⠐⠐⠒⠓⠋⠉⠈", + "frames": [ + "⠈", + "⠉", + "⠋", + "⠓", + "⠒", + "⠐", + "⠐", + "⠒", + "⠖", + "⠦", + "⠤", + "⠠", + "⠠", + "⠤", + "⠦", + "⠖", + "⠒", + "⠐", + "⠐", + "⠒", + "⠓", + "⠋", + "⠉", + "⠈", + ], }, "dots8": { "interval": 80, - "frames": "⠁⠁⠉⠙⠚⠒⠂⠂⠒⠲⠴⠤⠄⠄⠤⠠⠠⠤⠦⠖⠒⠐⠐⠒⠓⠋⠉⠈⠈", + "frames": [ + "⠁", + "⠁", + "⠉", + "⠙", + "⠚", + "⠒", + "⠂", + "⠂", + "⠒", + "⠲", + "⠴", + "⠤", + "⠄", + "⠄", + "⠤", + "⠠", + "⠠", + "⠤", + "⠦", + "⠖", + "⠒", + "⠐", + "⠐", + "⠒", + "⠓", + "⠋", + "⠉", + "⠈", + "⠈", + ], }, - "dots9": {"interval": 80, "frames": "⢹⢺⢼⣸⣇⡧⡗⡏"}, - "dots10": {"interval": 80, "frames": "⢄⢂⢁⡁⡈⡐⡠"}, - "dots11": {"interval": 100, "frames": "⠁⠂⠄⡀⢀⠠⠐⠈"}, + "dots9": {"interval": 80, "frames": ["⢹", "⢺", "⢼", "⣸", "⣇", "⡧", "⡗", "⡏"]}, + "dots10": {"interval": 80, "frames": ["⢄", "⢂", "⢁", "⡁", "⡈", "⡐", "⡠"]}, + "dots11": {"interval": 100, "frames": ["⠁", "⠂", "⠄", "⡀", "⢀", "⠠", "⠐", "⠈"]}, "dots12": { "interval": 80, "frames": [ @@ -115,62 +228,315 @@ }, "dots8Bit": { "interval": 80, - "frames": "⠀⠁⠂⠃⠄⠅⠆⠇⡀⡁⡂⡃⡄⡅⡆⡇⠈⠉⠊⠋⠌⠍⠎⠏⡈⡉⡊⡋⡌⡍⡎⡏⠐⠑⠒⠓⠔⠕⠖⠗⡐⡑⡒⡓⡔⡕⡖⡗⠘⠙⠚⠛⠜⠝⠞⠟⡘⡙" - "⡚⡛⡜⡝⡞⡟⠠⠡⠢⠣⠤⠥⠦⠧⡠⡡⡢⡣⡤⡥⡦⡧⠨⠩⠪⠫⠬⠭⠮⠯⡨⡩⡪⡫⡬⡭⡮⡯⠰⠱⠲⠳⠴⠵⠶⠷⡰⡱⡲⡳⡴⡵⡶⡷⠸⠹⠺⠻" - "⠼⠽⠾⠿⡸⡹⡺⡻⡼⡽⡾⡿⢀⢁⢂⢃⢄⢅⢆⢇⣀⣁⣂⣃⣄⣅⣆⣇⢈⢉⢊⢋⢌⢍⢎⢏⣈⣉⣊⣋⣌⣍⣎⣏⢐⢑⢒⢓⢔⢕⢖⢗⣐⣑⣒⣓⣔⣕" - "⣖⣗⢘⢙⢚⢛⢜⢝⢞⢟⣘⣙⣚⣛⣜⣝⣞⣟⢠⢡⢢⢣⢤⢥⢦⢧⣠⣡⣢⣣⣤⣥⣦⣧⢨⢩⢪⢫⢬⢭⢮⢯⣨⣩⣪⣫⣬⣭⣮⣯⢰⢱⢲⢳⢴⢵⢶⢷" - "⣰⣱⣲⣳⣴⣵⣶⣷⢸⢹⢺⢻⢼⢽⢾⢿⣸⣹⣺⣻⣼⣽⣾⣿", + "frames": [ + "⠀", + "⠁", + "⠂", + "⠃", + "⠄", + "⠅", + "⠆", + "⠇", + "⡀", + "⡁", + "⡂", + "⡃", + "⡄", + "⡅", + "⡆", + "⡇", + "⠈", + "⠉", + "⠊", + "⠋", + "⠌", + "⠍", + "⠎", + "⠏", + "⡈", + "⡉", + "⡊", + "⡋", + "⡌", + "⡍", + "⡎", + "⡏", + "⠐", + "⠑", + "⠒", + "⠓", + "⠔", + "⠕", + "⠖", + "⠗", + "⡐", + "⡑", + "⡒", + "⡓", + "⡔", + "⡕", + "⡖", + "⡗", + "⠘", + "⠙", + "⠚", + "⠛", + "⠜", + "⠝", + "⠞", + "⠟", + "⡘", + "⡙", + "⡚", + "⡛", + "⡜", + "⡝", + "⡞", + "⡟", + "⠠", + "⠡", + "⠢", + "⠣", + "⠤", + "⠥", + "⠦", + "⠧", + "⡠", + "⡡", + "⡢", + "⡣", + "⡤", + "⡥", + "⡦", + "⡧", + "⠨", + "⠩", + "⠪", + "⠫", + "⠬", + "⠭", + "⠮", + "⠯", + "⡨", + "⡩", + "⡪", + "⡫", + "⡬", + "⡭", + "⡮", + "⡯", + "⠰", + "⠱", + "⠲", + "⠳", + "⠴", + 
"⠵", + "⠶", + "⠷", + "⡰", + "⡱", + "⡲", + "⡳", + "⡴", + "⡵", + "⡶", + "⡷", + "⠸", + "⠹", + "⠺", + "⠻", + "⠼", + "⠽", + "⠾", + "⠿", + "⡸", + "⡹", + "⡺", + "⡻", + "⡼", + "⡽", + "⡾", + "⡿", + "⢀", + "⢁", + "⢂", + "⢃", + "⢄", + "⢅", + "⢆", + "⢇", + "⣀", + "⣁", + "⣂", + "⣃", + "⣄", + "⣅", + "⣆", + "⣇", + "⢈", + "⢉", + "⢊", + "⢋", + "⢌", + "⢍", + "⢎", + "⢏", + "⣈", + "⣉", + "⣊", + "⣋", + "⣌", + "⣍", + "⣎", + "⣏", + "⢐", + "⢑", + "⢒", + "⢓", + "⢔", + "⢕", + "⢖", + "⢗", + "⣐", + "⣑", + "⣒", + "⣓", + "⣔", + "⣕", + "⣖", + "⣗", + "⢘", + "⢙", + "⢚", + "⢛", + "⢜", + "⢝", + "⢞", + "⢟", + "⣘", + "⣙", + "⣚", + "⣛", + "⣜", + "⣝", + "⣞", + "⣟", + "⢠", + "⢡", + "⢢", + "⢣", + "⢤", + "⢥", + "⢦", + "⢧", + "⣠", + "⣡", + "⣢", + "⣣", + "⣤", + "⣥", + "⣦", + "⣧", + "⢨", + "⢩", + "⢪", + "⢫", + "⢬", + "⢭", + "⢮", + "⢯", + "⣨", + "⣩", + "⣪", + "⣫", + "⣬", + "⣭", + "⣮", + "⣯", + "⢰", + "⢱", + "⢲", + "⢳", + "⢴", + "⢵", + "⢶", + "⢷", + "⣰", + "⣱", + "⣲", + "⣳", + "⣴", + "⣵", + "⣶", + "⣷", + "⢸", + "⢹", + "⢺", + "⢻", + "⢼", + "⢽", + "⢾", + "⢿", + "⣸", + "⣹", + "⣺", + "⣻", + "⣼", + "⣽", + "⣾", + "⣿", + ], }, "line": {"interval": 130, "frames": ["-", "\\", "|", "/"]}, - "line2": {"interval": 100, "frames": "⠂-–—–-"}, - "pipe": {"interval": 100, "frames": "┤┘┴└├┌┬┐"}, + "line2": {"interval": 100, "frames": ["⠂", "-", "–", "—", "–", "-"]}, + "pipe": {"interval": 100, "frames": ["┤", "┘", "┴", "└", "├", "┌", "┬", "┐"]}, "simpleDots": {"interval": 400, "frames": [". ", ".. ", "...", " "]}, "simpleDotsScrolling": { "interval": 200, "frames": [". ", ".. ", "...", " ..", " .", " "], }, - "star": {"interval": 70, "frames": "✶✸✹✺✹✷"}, - "star2": {"interval": 80, "frames": "+x*"}, + "star": {"interval": 70, "frames": ["✶", "✸", "✹", "✺", "✹", "✷"]}, + "star2": {"interval": 80, "frames": ["+", "x", "*"]}, "flip": { "interval": 70, - "frames": "___-``'´-___", + "frames": ["_", "_", "_", "-", "`", "`", "'", "´", "-", "_", "_", "_"], }, - "hamburger": {"interval": 100, "frames": "☱☲☴"}, + "hamburger": {"interval": 100, "frames": ["☱", "☲", "☴"]}, "growVertical": { "interval": 120, - "frames": "▁▃▄▅▆▇▆▅▄▃", + "frames": ["▁", "▃", "▄", "▅", "▆", "▇", "▆", "▅", "▄", "▃"], }, "growHorizontal": { "interval": 120, - "frames": "▏▎▍▌▋▊▉▊▋▌▍▎", + "frames": ["▏", "▎", "▍", "▌", "▋", "▊", "▉", "▊", "▋", "▌", "▍", "▎"], }, - "balloon": {"interval": 140, "frames": " .oO@* "}, - "balloon2": {"interval": 120, "frames": ".oO°Oo."}, - "noise": {"interval": 100, "frames": "▓▒░"}, - "bounce": {"interval": 120, "frames": "⠁⠂⠄⠂"}, - "boxBounce": {"interval": 120, "frames": "▖▘▝▗"}, - "boxBounce2": {"interval": 100, "frames": "▌▀▐▄"}, - "triangle": {"interval": 50, "frames": "◢◣◤◥"}, - "arc": {"interval": 100, "frames": "◜◠◝◞◡◟"}, - "circle": {"interval": 120, "frames": "◡⊙◠"}, - "squareCorners": {"interval": 180, "frames": "◰◳◲◱"}, - "circleQuarters": {"interval": 120, "frames": "◴◷◶◵"}, - "circleHalves": {"interval": 50, "frames": "◐◓◑◒"}, - "squish": {"interval": 100, "frames": "╫╪"}, - "toggle": {"interval": 250, "frames": "⊶⊷"}, - "toggle2": {"interval": 80, "frames": "▫▪"}, - "toggle3": {"interval": 120, "frames": "□■"}, - "toggle4": {"interval": 100, "frames": "■□▪▫"}, - "toggle5": {"interval": 100, "frames": "▮▯"}, - "toggle6": {"interval": 300, "frames": "ဝ၀"}, - "toggle7": {"interval": 80, "frames": "⦾⦿"}, - "toggle8": {"interval": 100, "frames": "◍◌"}, - "toggle9": {"interval": 100, "frames": "◉◎"}, - "toggle10": {"interval": 100, "frames": "㊂㊀㊁"}, - "toggle11": {"interval": 50, "frames": "⧇⧆"}, - "toggle12": {"interval": 120, "frames": "☗☖"}, - 
"toggle13": {"interval": 80, "frames": "=*-"}, - "arrow": {"interval": 100, "frames": "←↖↑↗→↘↓↙"}, + "balloon": {"interval": 140, "frames": [" ", ".", "o", "O", "@", "*", " "]}, + "balloon2": {"interval": 120, "frames": [".", "o", "O", "°", "O", "o", "."]}, + "noise": {"interval": 100, "frames": ["▓", "▒", "░"]}, + "bounce": {"interval": 120, "frames": ["⠁", "⠂", "⠄", "⠂"]}, + "boxBounce": {"interval": 120, "frames": ["▖", "▘", "▝", "▗"]}, + "boxBounce2": {"interval": 100, "frames": ["▌", "▀", "▐", "▄"]}, + "triangle": {"interval": 50, "frames": ["◢", "◣", "◤", "◥"]}, + "arc": {"interval": 100, "frames": ["◜", "◠", "◝", "◞", "◡", "◟"]}, + "circle": {"interval": 120, "frames": ["◡", "⊙", "◠"]}, + "squareCorners": {"interval": 180, "frames": ["◰", "◳", "◲", "◱"]}, + "circleQuarters": {"interval": 120, "frames": ["◴", "◷", "◶", "◵"]}, + "circleHalves": {"interval": 50, "frames": ["◐", "◓", "◑", "◒"]}, + "squish": {"interval": 100, "frames": ["╫", "╪"]}, + "toggle": {"interval": 250, "frames": ["⊶", "⊷"]}, + "toggle2": {"interval": 80, "frames": ["▫", "▪"]}, + "toggle3": {"interval": 120, "frames": ["□", "■"]}, + "toggle4": {"interval": 100, "frames": ["■", "□", "▪", "▫"]}, + "toggle5": {"interval": 100, "frames": ["▮", "▯"]}, + "toggle6": {"interval": 300, "frames": ["ဝ", "၀"]}, + "toggle7": {"interval": 80, "frames": ["⦾", "⦿"]}, + "toggle8": {"interval": 100, "frames": ["◍", "◌"]}, + "toggle9": {"interval": 100, "frames": ["◉", "◎"]}, + "toggle10": {"interval": 100, "frames": ["㊂", "㊀", "㊁"]}, + "toggle11": {"interval": 50, "frames": ["⧇", "⧆"]}, + "toggle12": {"interval": 120, "frames": ["☗", "☖"]}, + "toggle13": {"interval": 80, "frames": ["=", "*", "-"]}, + "arrow": {"interval": 100, "frames": ["←", "↖", "↑", "↗", "→", "↘", "↓", "↙"]}, "arrow2": { "interval": 80, "frames": ["⬆️ ", "↗️ ", "➡️ ", "↘️ ", "⬇️ ", "↙️ ", "⬅️ ", "↖️ "], @@ -403,7 +769,7 @@ "▐/|____________▌", ], }, - "dqpb": {"interval": 100, "frames": "dqpb"}, + "dqpb": {"interval": 100, "frames": ["d", "q", "p", "b"]}, "weather": { "interval": 100, "frames": [ @@ -432,7 +798,7 @@ "☀️ ", ], }, - "christmas": {"interval": 400, "frames": "🌲🎄"}, + "christmas": {"interval": 400, "frames": ["🌲", "🎄"]}, "grenade": { "interval": 80, "frames": [ @@ -453,7 +819,7 @@ ], }, "point": {"interval": 125, "frames": ["∙∙∙", "●∙∙", "∙●∙", "∙∙●", "∙∙∙"]}, - "layer": {"interval": 150, "frames": "-=≡"}, + "layer": {"interval": 150, "frames": ["-", "=", "≡"]}, "betaWave": { "interval": 80, "frames": [ diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/rich/_win32_console.py b/venv/lib/python3.10/site-packages/pip/_vendor/rich/_win32_console.py deleted file mode 100644 index 81b1082..0000000 --- a/venv/lib/python3.10/site-packages/pip/_vendor/rich/_win32_console.py +++ /dev/null @@ -1,662 +0,0 @@ -"""Light wrapper around the Win32 Console API - this module should only be imported on Windows - -The API that this module wraps is documented at https://docs.microsoft.com/en-us/windows/console/console-functions -""" -import ctypes -import sys -from typing import Any - -windll: Any = None -if sys.platform == "win32": - windll = ctypes.LibraryLoader(ctypes.WinDLL) -else: - raise ImportError(f"{__name__} can only be imported on Windows") - -import time -from ctypes import Structure, byref, wintypes -from typing import IO, NamedTuple, Type, cast - -from pip._vendor.rich.color import ColorSystem -from pip._vendor.rich.style import Style - -STDOUT = -11 -ENABLE_VIRTUAL_TERMINAL_PROCESSING = 4 - -COORD = wintypes._COORD - - -class 
LegacyWindowsError(Exception): - pass - - -class WindowsCoordinates(NamedTuple): - """Coordinates in the Windows Console API are (y, x), not (x, y). - This class is intended to prevent that confusion. - Rows and columns are indexed from 0. - This class can be used in place of wintypes._COORD in arguments and argtypes. - """ - - row: int - col: int - - @classmethod - def from_param(cls, value: "WindowsCoordinates") -> COORD: - """Converts a WindowsCoordinates into a wintypes _COORD structure. - This classmethod is internally called by ctypes to perform the conversion. - - Args: - value (WindowsCoordinates): The input coordinates to convert. - - Returns: - wintypes._COORD: The converted coordinates struct. - """ - return COORD(value.col, value.row) - - -class CONSOLE_SCREEN_BUFFER_INFO(Structure): - _fields_ = [ - ("dwSize", COORD), - ("dwCursorPosition", COORD), - ("wAttributes", wintypes.WORD), - ("srWindow", wintypes.SMALL_RECT), - ("dwMaximumWindowSize", COORD), - ] - - -class CONSOLE_CURSOR_INFO(ctypes.Structure): - _fields_ = [("dwSize", wintypes.DWORD), ("bVisible", wintypes.BOOL)] - - -_GetStdHandle = windll.kernel32.GetStdHandle -_GetStdHandle.argtypes = [ - wintypes.DWORD, -] -_GetStdHandle.restype = wintypes.HANDLE - - -def GetStdHandle(handle: int = STDOUT) -> wintypes.HANDLE: - """Retrieves a handle to the specified standard device (standard input, standard output, or standard error). - - Args: - handle (int): Integer identifier for the handle. Defaults to -11 (stdout). - - Returns: - wintypes.HANDLE: The handle - """ - return cast(wintypes.HANDLE, _GetStdHandle(handle)) - - -_GetConsoleMode = windll.kernel32.GetConsoleMode -_GetConsoleMode.argtypes = [wintypes.HANDLE, wintypes.LPDWORD] -_GetConsoleMode.restype = wintypes.BOOL - - -def GetConsoleMode(std_handle: wintypes.HANDLE) -> int: - """Retrieves the current input mode of a console's input buffer - or the current output mode of a console screen buffer. - - Args: - std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer. - - Raises: - LegacyWindowsError: If any error occurs while calling the Windows console API. - - Returns: - int: Value representing the current console mode as documented at - https://docs.microsoft.com/en-us/windows/console/getconsolemode#parameters - """ - - console_mode = wintypes.DWORD() - success = bool(_GetConsoleMode(std_handle, console_mode)) - if not success: - raise LegacyWindowsError("Unable to get legacy Windows Console Mode") - return console_mode.value - - -_FillConsoleOutputCharacterW = windll.kernel32.FillConsoleOutputCharacterW -_FillConsoleOutputCharacterW.argtypes = [ - wintypes.HANDLE, - ctypes.c_char, - wintypes.DWORD, - cast(Type[COORD], WindowsCoordinates), - ctypes.POINTER(wintypes.DWORD), -] -_FillConsoleOutputCharacterW.restype = wintypes.BOOL - - -def FillConsoleOutputCharacter( - std_handle: wintypes.HANDLE, - char: str, - length: int, - start: WindowsCoordinates, -) -> int: - """Writes a character to the console screen buffer a specified number of times, beginning at the specified coordinates. - - Args: - std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer. - char (str): The character to write. Must be a string of length 1. - length (int): The number of times to write the character. - start (WindowsCoordinates): The coordinates to start writing at. - - Returns: - int: The number of characters written. 
- """ - character = ctypes.c_char(char.encode()) - num_characters = wintypes.DWORD(length) - num_written = wintypes.DWORD(0) - _FillConsoleOutputCharacterW( - std_handle, - character, - num_characters, - start, - byref(num_written), - ) - return num_written.value - - -_FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute -_FillConsoleOutputAttribute.argtypes = [ - wintypes.HANDLE, - wintypes.WORD, - wintypes.DWORD, - cast(Type[COORD], WindowsCoordinates), - ctypes.POINTER(wintypes.DWORD), -] -_FillConsoleOutputAttribute.restype = wintypes.BOOL - - -def FillConsoleOutputAttribute( - std_handle: wintypes.HANDLE, - attributes: int, - length: int, - start: WindowsCoordinates, -) -> int: - """Sets the character attributes for a specified number of character cells, - beginning at the specified coordinates in a screen buffer. - - Args: - std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer. - attributes (int): Integer value representing the foreground and background colours of the cells. - length (int): The number of cells to set the output attribute of. - start (WindowsCoordinates): The coordinates of the first cell whose attributes are to be set. - - Returns: - int: The number of cells whose attributes were actually set. - """ - num_cells = wintypes.DWORD(length) - style_attrs = wintypes.WORD(attributes) - num_written = wintypes.DWORD(0) - _FillConsoleOutputAttribute( - std_handle, style_attrs, num_cells, start, byref(num_written) - ) - return num_written.value - - -_SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute -_SetConsoleTextAttribute.argtypes = [ - wintypes.HANDLE, - wintypes.WORD, -] -_SetConsoleTextAttribute.restype = wintypes.BOOL - - -def SetConsoleTextAttribute( - std_handle: wintypes.HANDLE, attributes: wintypes.WORD -) -> bool: - """Set the colour attributes for all text written after this function is called. - - Args: - std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer. - attributes (int): Integer value representing the foreground and background colours. - - - Returns: - bool: True if the attribute was set successfully, otherwise False. - """ - return bool(_SetConsoleTextAttribute(std_handle, attributes)) - - -_GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo -_GetConsoleScreenBufferInfo.argtypes = [ - wintypes.HANDLE, - ctypes.POINTER(CONSOLE_SCREEN_BUFFER_INFO), -] -_GetConsoleScreenBufferInfo.restype = wintypes.BOOL - - -def GetConsoleScreenBufferInfo( - std_handle: wintypes.HANDLE, -) -> CONSOLE_SCREEN_BUFFER_INFO: - """Retrieves information about the specified console screen buffer. - - Args: - std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer. 
- - Returns: - CONSOLE_SCREEN_BUFFER_INFO: A CONSOLE_SCREEN_BUFFER_INFO ctype struct contain information about - screen size, cursor position, colour attributes, and more.""" - console_screen_buffer_info = CONSOLE_SCREEN_BUFFER_INFO() - _GetConsoleScreenBufferInfo(std_handle, byref(console_screen_buffer_info)) - return console_screen_buffer_info - - -_SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition -_SetConsoleCursorPosition.argtypes = [ - wintypes.HANDLE, - cast(Type[COORD], WindowsCoordinates), -] -_SetConsoleCursorPosition.restype = wintypes.BOOL - - -def SetConsoleCursorPosition( - std_handle: wintypes.HANDLE, coords: WindowsCoordinates -) -> bool: - """Set the position of the cursor in the console screen - - Args: - std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer. - coords (WindowsCoordinates): The coordinates to move the cursor to. - - Returns: - bool: True if the function succeeds, otherwise False. - """ - return bool(_SetConsoleCursorPosition(std_handle, coords)) - - -_GetConsoleCursorInfo = windll.kernel32.GetConsoleCursorInfo -_GetConsoleCursorInfo.argtypes = [ - wintypes.HANDLE, - ctypes.POINTER(CONSOLE_CURSOR_INFO), -] -_GetConsoleCursorInfo.restype = wintypes.BOOL - - -def GetConsoleCursorInfo( - std_handle: wintypes.HANDLE, cursor_info: CONSOLE_CURSOR_INFO -) -> bool: - """Get the cursor info - used to get cursor visibility and width - - Args: - std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer. - cursor_info (CONSOLE_CURSOR_INFO): CONSOLE_CURSOR_INFO ctype struct that receives information - about the console's cursor. - - Returns: - bool: True if the function succeeds, otherwise False. - """ - return bool(_GetConsoleCursorInfo(std_handle, byref(cursor_info))) - - -_SetConsoleCursorInfo = windll.kernel32.SetConsoleCursorInfo -_SetConsoleCursorInfo.argtypes = [ - wintypes.HANDLE, - ctypes.POINTER(CONSOLE_CURSOR_INFO), -] -_SetConsoleCursorInfo.restype = wintypes.BOOL - - -def SetConsoleCursorInfo( - std_handle: wintypes.HANDLE, cursor_info: CONSOLE_CURSOR_INFO -) -> bool: - """Set the cursor info - used for adjusting cursor visibility and width - - Args: - std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer. - cursor_info (CONSOLE_CURSOR_INFO): CONSOLE_CURSOR_INFO ctype struct containing the new cursor info. - - Returns: - bool: True if the function succeeds, otherwise False. - """ - return bool(_SetConsoleCursorInfo(std_handle, byref(cursor_info))) - - -_SetConsoleTitle = windll.kernel32.SetConsoleTitleW -_SetConsoleTitle.argtypes = [wintypes.LPCWSTR] -_SetConsoleTitle.restype = wintypes.BOOL - - -def SetConsoleTitle(title: str) -> bool: - """Sets the title of the current console window - - Args: - title (str): The new title of the console window. - - Returns: - bool: True if the function succeeds, otherwise False. - """ - return bool(_SetConsoleTitle(title)) - - -class LegacyWindowsTerm: - """This class allows interaction with the legacy Windows Console API. It should only be used in the context - of environments where virtual terminal processing is not available. However, if it is used in a Windows environment, - the entire API should work. - - Args: - file (IO[str]): The file which the Windows Console API HANDLE is retrieved from, defaults to sys.stdout. 
- """ - - BRIGHT_BIT = 8 - - # Indices are ANSI color numbers, values are the corresponding Windows Console API color numbers - ANSI_TO_WINDOWS = [ - 0, # black The Windows colours are defined in wincon.h as follows: - 4, # red define FOREGROUND_BLUE 0x0001 -- 0000 0001 - 2, # green define FOREGROUND_GREEN 0x0002 -- 0000 0010 - 6, # yellow define FOREGROUND_RED 0x0004 -- 0000 0100 - 1, # blue define FOREGROUND_INTENSITY 0x0008 -- 0000 1000 - 5, # magenta define BACKGROUND_BLUE 0x0010 -- 0001 0000 - 3, # cyan define BACKGROUND_GREEN 0x0020 -- 0010 0000 - 7, # white define BACKGROUND_RED 0x0040 -- 0100 0000 - 8, # bright black (grey) define BACKGROUND_INTENSITY 0x0080 -- 1000 0000 - 12, # bright red - 10, # bright green - 14, # bright yellow - 9, # bright blue - 13, # bright magenta - 11, # bright cyan - 15, # bright white - ] - - def __init__(self, file: "IO[str]") -> None: - handle = GetStdHandle(STDOUT) - self._handle = handle - default_text = GetConsoleScreenBufferInfo(handle).wAttributes - self._default_text = default_text - - self._default_fore = default_text & 7 - self._default_back = (default_text >> 4) & 7 - self._default_attrs = self._default_fore | (self._default_back << 4) - - self._file = file - self.write = file.write - self.flush = file.flush - - @property - def cursor_position(self) -> WindowsCoordinates: - """Returns the current position of the cursor (0-based) - - Returns: - WindowsCoordinates: The current cursor position. - """ - coord: COORD = GetConsoleScreenBufferInfo(self._handle).dwCursorPosition - return WindowsCoordinates(row=cast(int, coord.Y), col=cast(int, coord.X)) - - @property - def screen_size(self) -> WindowsCoordinates: - """Returns the current size of the console screen buffer, in character columns and rows - - Returns: - WindowsCoordinates: The width and height of the screen as WindowsCoordinates. - """ - screen_size: COORD = GetConsoleScreenBufferInfo(self._handle).dwSize - return WindowsCoordinates( - row=cast(int, screen_size.Y), col=cast(int, screen_size.X) - ) - - def write_text(self, text: str) -> None: - """Write text directly to the terminal without any modification of styles - - Args: - text (str): The text to write to the console - """ - self.write(text) - self.flush() - - def write_styled(self, text: str, style: Style) -> None: - """Write styled text to the terminal. - - Args: - text (str): The text to write - style (Style): The style of the text - """ - color = style.color - bgcolor = style.bgcolor - if style.reverse: - color, bgcolor = bgcolor, color - - if color: - fore = color.downgrade(ColorSystem.WINDOWS).number - fore = fore if fore is not None else 7 # Default to ANSI 7: White - if style.bold: - fore = fore | self.BRIGHT_BIT - if style.dim: - fore = fore & ~self.BRIGHT_BIT - fore = self.ANSI_TO_WINDOWS[fore] - else: - fore = self._default_fore - - if bgcolor: - back = bgcolor.downgrade(ColorSystem.WINDOWS).number - back = back if back is not None else 0 # Default to ANSI 0: Black - back = self.ANSI_TO_WINDOWS[back] - else: - back = self._default_back - - assert fore is not None - assert back is not None - - SetConsoleTextAttribute( - self._handle, attributes=ctypes.c_ushort(fore | (back << 4)) - ) - self.write_text(text) - SetConsoleTextAttribute(self._handle, attributes=self._default_text) - - def move_cursor_to(self, new_position: WindowsCoordinates) -> None: - """Set the position of the cursor - - Args: - new_position (WindowsCoordinates): The WindowsCoordinates representing the new position of the cursor. 
- """ - if new_position.col < 0 or new_position.row < 0: - return - SetConsoleCursorPosition(self._handle, coords=new_position) - - def erase_line(self) -> None: - """Erase all content on the line the cursor is currently located at""" - screen_size = self.screen_size - cursor_position = self.cursor_position - cells_to_erase = screen_size.col - start_coordinates = WindowsCoordinates(row=cursor_position.row, col=0) - FillConsoleOutputCharacter( - self._handle, " ", length=cells_to_erase, start=start_coordinates - ) - FillConsoleOutputAttribute( - self._handle, - self._default_attrs, - length=cells_to_erase, - start=start_coordinates, - ) - - def erase_end_of_line(self) -> None: - """Erase all content from the cursor position to the end of that line""" - cursor_position = self.cursor_position - cells_to_erase = self.screen_size.col - cursor_position.col - FillConsoleOutputCharacter( - self._handle, " ", length=cells_to_erase, start=cursor_position - ) - FillConsoleOutputAttribute( - self._handle, - self._default_attrs, - length=cells_to_erase, - start=cursor_position, - ) - - def erase_start_of_line(self) -> None: - """Erase all content from the cursor position to the start of that line""" - row, col = self.cursor_position - start = WindowsCoordinates(row, 0) - FillConsoleOutputCharacter(self._handle, " ", length=col, start=start) - FillConsoleOutputAttribute( - self._handle, self._default_attrs, length=col, start=start - ) - - def move_cursor_up(self) -> None: - """Move the cursor up a single cell""" - cursor_position = self.cursor_position - SetConsoleCursorPosition( - self._handle, - coords=WindowsCoordinates( - row=cursor_position.row - 1, col=cursor_position.col - ), - ) - - def move_cursor_down(self) -> None: - """Move the cursor down a single cell""" - cursor_position = self.cursor_position - SetConsoleCursorPosition( - self._handle, - coords=WindowsCoordinates( - row=cursor_position.row + 1, - col=cursor_position.col, - ), - ) - - def move_cursor_forward(self) -> None: - """Move the cursor forward a single cell. Wrap to the next line if required.""" - row, col = self.cursor_position - if col == self.screen_size.col - 1: - row += 1 - col = 0 - else: - col += 1 - SetConsoleCursorPosition( - self._handle, coords=WindowsCoordinates(row=row, col=col) - ) - - def move_cursor_to_column(self, column: int) -> None: - """Move cursor to the column specified by the zero-based column index, staying on the same row - - Args: - column (int): The zero-based column index to move the cursor to. - """ - row, _ = self.cursor_position - SetConsoleCursorPosition(self._handle, coords=WindowsCoordinates(row, column)) - - def move_cursor_backward(self) -> None: - """Move the cursor backward a single cell. 
Wrap to the previous line if required.""" - row, col = self.cursor_position - if col == 0: - row -= 1 - col = self.screen_size.col - 1 - else: - col -= 1 - SetConsoleCursorPosition( - self._handle, coords=WindowsCoordinates(row=row, col=col) - ) - - def hide_cursor(self) -> None: - """Hide the cursor""" - current_cursor_size = self._get_cursor_size() - invisible_cursor = CONSOLE_CURSOR_INFO(dwSize=current_cursor_size, bVisible=0) - SetConsoleCursorInfo(self._handle, cursor_info=invisible_cursor) - - def show_cursor(self) -> None: - """Show the cursor""" - current_cursor_size = self._get_cursor_size() - visible_cursor = CONSOLE_CURSOR_INFO(dwSize=current_cursor_size, bVisible=1) - SetConsoleCursorInfo(self._handle, cursor_info=visible_cursor) - - def set_title(self, title: str) -> None: - """Set the title of the terminal window - - Args: - title (str): The new title of the console window - """ - assert len(title) < 255, "Console title must be less than 255 characters" - SetConsoleTitle(title) - - def _get_cursor_size(self) -> int: - """Get the percentage of the character cell that is filled by the cursor""" - cursor_info = CONSOLE_CURSOR_INFO() - GetConsoleCursorInfo(self._handle, cursor_info=cursor_info) - return int(cursor_info.dwSize) - - -if __name__ == "__main__": - handle = GetStdHandle() - - from pip._vendor.rich.console import Console - - console = Console() - - term = LegacyWindowsTerm(sys.stdout) - term.set_title("Win32 Console Examples") - - style = Style(color="black", bgcolor="red") - - heading = Style.parse("black on green") - - # Check colour output - console.rule("Checking colour output") - console.print("[on red]on red!") - console.print("[blue]blue!") - console.print("[yellow]yellow!") - console.print("[bold yellow]bold yellow!") - console.print("[bright_yellow]bright_yellow!") - console.print("[dim bright_yellow]dim bright_yellow!") - console.print("[italic cyan]italic cyan!") - console.print("[bold white on blue]bold white on blue!") - console.print("[reverse bold white on blue]reverse bold white on blue!") - console.print("[bold black on cyan]bold black on cyan!") - console.print("[black on green]black on green!") - console.print("[blue on green]blue on green!") - console.print("[white on black]white on black!") - console.print("[black on white]black on white!") - console.print("[#1BB152 on #DA812D]#1BB152 on #DA812D!") - - # Check cursor movement - console.rule("Checking cursor movement") - console.print() - term.move_cursor_backward() - term.move_cursor_backward() - term.write_text("went back and wrapped to prev line") - time.sleep(1) - term.move_cursor_up() - term.write_text("we go up") - time.sleep(1) - term.move_cursor_down() - term.write_text("and down") - time.sleep(1) - term.move_cursor_up() - term.move_cursor_backward() - term.move_cursor_backward() - term.write_text("we went up and back 2") - time.sleep(1) - term.move_cursor_down() - term.move_cursor_backward() - term.move_cursor_backward() - term.write_text("we went down and back 2") - time.sleep(1) - - # Check erasing of lines - term.hide_cursor() - console.print() - console.rule("Checking line erasing") - console.print("\n...Deleting to the start of the line...") - term.write_text("The red arrow shows the cursor location, and direction of erase") - time.sleep(1) - term.move_cursor_to_column(16) - term.write_styled("<", Style.parse("black on red")) - term.move_cursor_backward() - time.sleep(1) - term.erase_start_of_line() - time.sleep(1) - - console.print("\n\n...And to the end of the line...") - 
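[Aside: move_cursor_forward and move_cursor_backward above share one wrap rule, mirrored. Pulled out as a pure function it can be tested without a console; step_cursor is a hypothetical helper, not part of the module.]

from typing import Tuple

def step_cursor(row: int, col: int, width: int, forward: bool) -> Tuple[int, int]:
    """Move one cell left or right, wrapping at line edges like the methods above."""
    if forward:
        return (row + 1, 0) if col == width - 1 else (row, col + 1)
    return (row - 1, width - 1) if col == 0 else (row, col - 1)

assert step_cursor(0, 79, 80, forward=True) == (1, 0)   # wrap to next line
assert step_cursor(1, 0, 80, forward=False) == (0, 79)  # wrap to previous line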
term.write_text("The red arrow shows the cursor location, and direction of erase") - time.sleep(1) - - term.move_cursor_to_column(16) - term.write_styled(">", Style.parse("black on red")) - time.sleep(1) - term.erase_end_of_line() - time.sleep(1) - - console.print("\n\n...Now the whole line will be erased...") - term.write_styled("I'm going to disappear!", style=Style.parse("black on cyan")) - time.sleep(1) - term.erase_line() - - term.show_cursor() - print("\n") diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/rich/_windows.py b/venv/lib/python3.10/site-packages/pip/_vendor/rich/_windows.py index 10fc0d7..ca3a680 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/rich/_windows.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/rich/_windows.py @@ -14,21 +14,13 @@ class WindowsConsoleFeatures: try: import ctypes - from ctypes import LibraryLoader + from ctypes import LibraryLoader, wintypes if sys.platform == "win32": windll = LibraryLoader(ctypes.WinDLL) else: windll = None raise ImportError("Not windows") - - from pip._vendor.rich._win32_console import ( - ENABLE_VIRTUAL_TERMINAL_PROCESSING, - GetConsoleMode, - GetStdHandle, - LegacyWindowsError, - ) - except (AttributeError, ImportError, ValueError): # Fallback if we can't load the Windows DLL @@ -38,20 +30,28 @@ def get_windows_console_features() -> WindowsConsoleFeatures: else: + STDOUT = -11 + ENABLE_VIRTUAL_TERMINAL_PROCESSING = 4 + _GetConsoleMode = windll.kernel32.GetConsoleMode + _GetConsoleMode.argtypes = [wintypes.HANDLE, wintypes.LPDWORD] + _GetConsoleMode.restype = wintypes.BOOL + + _GetStdHandle = windll.kernel32.GetStdHandle + _GetStdHandle.argtypes = [ + wintypes.DWORD, + ] + _GetStdHandle.restype = wintypes.HANDLE + def get_windows_console_features() -> WindowsConsoleFeatures: """Get windows console features. Returns: WindowsConsoleFeatures: An instance of WindowsConsoleFeatures. """ - handle = GetStdHandle() - try: - console_mode = GetConsoleMode(handle) - success = True - except LegacyWindowsError: - console_mode = 0 - success = False - vt = bool(success and console_mode & ENABLE_VIRTUAL_TERMINAL_PROCESSING) + handle = _GetStdHandle(STDOUT) + console_mode = wintypes.DWORD() + result = _GetConsoleMode(handle, console_mode) + vt = bool(result and console_mode.value & ENABLE_VIRTUAL_TERMINAL_PROCESSING) truecolor = False if vt: win_version = sys.getwindowsversion() diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/rich/_windows_renderer.py b/venv/lib/python3.10/site-packages/pip/_vendor/rich/_windows_renderer.py deleted file mode 100644 index 5ece056..0000000 --- a/venv/lib/python3.10/site-packages/pip/_vendor/rich/_windows_renderer.py +++ /dev/null @@ -1,56 +0,0 @@ -from typing import Iterable, Sequence, Tuple, cast - -from pip._vendor.rich._win32_console import LegacyWindowsTerm, WindowsCoordinates -from pip._vendor.rich.segment import ControlCode, ControlType, Segment - - -def legacy_windows_render(buffer: Iterable[Segment], term: LegacyWindowsTerm) -> None: - """Makes appropriate Windows Console API calls based on the segments in the buffer. - - Args: - buffer (Iterable[Segment]): Iterable of Segments to convert to Win32 API calls. - term (LegacyWindowsTerm): Used to call the Windows Console API. 
- """ - for text, style, control in buffer: - if not control: - if style: - term.write_styled(text, style) - else: - term.write_text(text) - else: - control_codes: Sequence[ControlCode] = control - for control_code in control_codes: - control_type = control_code[0] - if control_type == ControlType.CURSOR_MOVE_TO: - _, x, y = cast(Tuple[ControlType, int, int], control_code) - term.move_cursor_to(WindowsCoordinates(row=y - 1, col=x - 1)) - elif control_type == ControlType.CARRIAGE_RETURN: - term.write_text("\r") - elif control_type == ControlType.HOME: - term.move_cursor_to(WindowsCoordinates(0, 0)) - elif control_type == ControlType.CURSOR_UP: - term.move_cursor_up() - elif control_type == ControlType.CURSOR_DOWN: - term.move_cursor_down() - elif control_type == ControlType.CURSOR_FORWARD: - term.move_cursor_forward() - elif control_type == ControlType.CURSOR_BACKWARD: - term.move_cursor_backward() - elif control_type == ControlType.CURSOR_MOVE_TO_COLUMN: - _, column = cast(Tuple[ControlType, int], control_code) - term.move_cursor_to_column(column - 1) - elif control_type == ControlType.HIDE_CURSOR: - term.hide_cursor() - elif control_type == ControlType.SHOW_CURSOR: - term.show_cursor() - elif control_type == ControlType.ERASE_IN_LINE: - _, mode = cast(Tuple[ControlType, int], control_code) - if mode == 0: - term.erase_end_of_line() - elif mode == 1: - term.erase_start_of_line() - elif mode == 2: - term.erase_line() - elif control_type == ControlType.SET_WINDOW_TITLE: - _, title = cast(Tuple[ControlType, str], control_code) - term.set_title(title) diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/rich/_wrap.py b/venv/lib/python3.10/site-packages/pip/_vendor/rich/_wrap.py index c45f193..b537757 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/rich/_wrap.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/rich/_wrap.py @@ -1,8 +1,8 @@ import re from typing import Iterable, List, Tuple -from ._loop import loop_last from .cells import cell_len, chop_cells +from ._loop import loop_last re_word = re.compile(r"\s*\S+\s*") @@ -27,15 +27,14 @@ def divide_line(text: str, width: int, fold: bool = True) -> List[int]: if line_position + word_length > width: if word_length > width: if fold: - chopped_words = chop_cells(word, max_size=width, position=0) - for last, line in loop_last(chopped_words): - if start: - append(start) - + for last, line in loop_last( + chop_cells(word, width, position=line_position) + ): if last: line_position = _cell_len(line) else: start += len(line) + append(start) else: if start: append(start) diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/rich/align.py b/venv/lib/python3.10/site-packages/pip/_vendor/rich/align.py index d5abb59..4344ae1 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/rich/align.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/rich/align.py @@ -18,6 +18,7 @@ AlignMethod = Literal["left", "center", "right"] VerticalAlignMethod = Literal["top", "middle", "bottom"] +AlignValues = AlignMethod # TODO: deprecate AlignValues class Align(JupyterMixin): diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/rich/ansi.py b/venv/lib/python3.10/site-packages/pip/_vendor/rich/ansi.py index d4c32ce..92e4772 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/rich/ansi.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/rich/ansi.py @@ -1,27 +1,21 @@ -import re -import sys from contextlib import suppress -from typing import Iterable, NamedTuple, Optional +import re +from typing import Iterable, NamedTuple from 
.color import Color from .style import Style from .text import Text -re_ansi = re.compile( - r""" -(?:\x1b\](.*?)\x1b\\)| -(?:\x1b([(@-Z\\-_]|\[[0-?]*[ -/]*[@-~])) -""", - re.VERBOSE, -) +re_ansi = re.compile(r"(?:\x1b\[(.*?)m)|(?:\x1b\](.*?)\x1b\\)") +re_csi = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])") class _AnsiToken(NamedTuple): """Result of ansi tokenized string.""" plain: str = "" - sgr: Optional[str] = "" - osc: Optional[str] = "" + sgr: str = "" + osc: str = "" def _ansi_tokenize(ansi_text: str) -> Iterable[_AnsiToken]: @@ -34,22 +28,20 @@ def _ansi_tokenize(ansi_text: str) -> Iterable[_AnsiToken]: AnsiToken: A named tuple of (plain, sgr, osc) """ + def remove_csi(ansi_text: str) -> str: + """Remove unknown CSI sequences.""" + return re_csi.sub("", ansi_text) + position = 0 - sgr: Optional[str] - osc: Optional[str] for match in re_ansi.finditer(ansi_text): start, end = match.span(0) - osc, sgr = match.groups() + sgr, osc = match.groups() if start > position: - yield _AnsiToken(ansi_text[position:start]) - if sgr: - if sgr.endswith("m"): - yield _AnsiToken("", sgr[1:-1], osc) - else: - yield _AnsiToken("", sgr, osc) + yield _AnsiToken(remove_csi(ansi_text[position:start])) + yield _AnsiToken("", sgr, osc) position = end if position < len(ansi_text): - yield _AnsiToken(ansi_text[position:]) + yield _AnsiToken(remove_csi(ansi_text[position:])) SGR_STYLE_MAP = { @@ -146,21 +138,20 @@ def decode_line(self, line: str) -> Text: text = Text() append = text.append line = line.rsplit("\r", 1)[-1] - for plain_text, sgr, osc in _ansi_tokenize(line): + for token in _ansi_tokenize(line): + plain_text, sgr, osc = token if plain_text: append(plain_text, self.style or None) - elif osc is not None: + elif osc: if osc.startswith("8;"): _params, semicolon, link = osc[2:].partition(";") if semicolon: self.style = self.style.update_link(link or None) - elif sgr is not None: + elif sgr: # Translate in to semi-colon separated codes # Ignore invalid codes, because we want to be lenient codes = [ - min(255, int(_code) if _code else 0) - for _code in sgr.split(";") - if _code.isdigit() or _code == "" + min(255, int(_code)) for _code in sgr.split(";") if _code.isdigit() ] iter_codes = iter(codes) for code in iter_codes: @@ -207,10 +198,10 @@ def decode_line(self, line: str) -> Text: return text -if sys.platform != "win32" and __name__ == "__main__": # pragma: no cover +if __name__ == "__main__": # pragma: no cover + import pty import io import os - import pty import sys decoder = AnsiDecoder() diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/rich/box.py b/venv/lib/python3.10/site-packages/pip/_vendor/rich/box.py index d0b07cf..aec2926 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/rich/box.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/rich/box.py @@ -88,16 +88,6 @@ def substitute(self, options: "ConsoleOptions", safe: bool = True) -> "Box": box = ASCII return box - def get_plain_headed_box(self) -> "Box": - """If this box uses special characters for the borders of the header, then - return the equivalent box that does not. - - Returns: - Box: The most similar Box that doesn't use header-specific box characters. - If the current Box already satisfies this criterion, then it's returned. - """ - return PLAIN_HEADED_SUBSTITUTIONS.get(self, self) - def get_top(self, widths: Iterable[int]) -> str: """Get the top of a simple box. 
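[Aside: the incoming ansi.py above splits ANSI handling across two regexes — re_ansi captures SGR and OSC sequences for interpretation, while re_csi matches any remaining CSI escape so _ansi_tokenize can drop it from plain text. The stripping half, runnable on its own; note this demo also eats SGR codes, which the real tokenizer extracts first.]

import re

re_csi = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])")

def remove_csi(ansi_text: str) -> str:
    """Remove CSI escape sequences, keeping only printable text."""
    return re_csi.sub("", ansi_text)

assert remove_csi("\x1b[2Khello \x1b[1;31mworld") == "hello world"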
@@ -429,20 +419,6 @@ def get_bottom(self, widths: Iterable[int]) -> str: """ ) -MARKDOWN: Box = Box( - """\ - -| || -|-|| -| || -|-|| -|-|| -| || - -""", - ascii=True, -) - # Map Boxes that don't render with raster fonts on to equivalent that do LEGACY_WINDOWS_SUBSTITUTIONS = { ROUNDED: SQUARE, @@ -453,15 +429,6 @@ def get_bottom(self, widths: Iterable[int]) -> str: HEAVY_HEAD: SQUARE, } -# Map headed boxes to their headerless equivalents -PLAIN_HEADED_SUBSTITUTIONS = { - HEAVY_HEAD: SQUARE, - SQUARE_DOUBLE_HEAD: SQUARE, - MINIMAL_DOUBLE_HEAD: MINIMAL, - MINIMAL_HEAVY_HEAD: MINIMAL, - ASCII_DOUBLE_HEAD: ASCII2, -} - if __name__ == "__main__": # pragma: no cover @@ -494,7 +461,6 @@ def get_bottom(self, widths: Iterable[int]) -> str: "HEAVY_HEAD", "DOUBLE", "DOUBLE_EDGE", - "MARKDOWN", ] console.print(Panel("[bold green]Box Constants", style="green"), justify="center") diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/rich/cells.py b/venv/lib/python3.10/site-packages/pip/_vendor/rich/cells.py index 139b949..e824ea2 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/rich/cells.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/rich/cells.py @@ -1,44 +1,34 @@ -import re from functools import lru_cache -from typing import Callable, List +import re +from typing import Dict, List from ._cell_widths import CELL_WIDTHS +from ._lru_cache import LRUCache # Regex to match sequence of the most common character ranges _is_single_cell_widths = re.compile("^[\u0020-\u006f\u00a0\u02ff\u0370-\u0482]*$").match -@lru_cache(4096) -def cached_cell_len(text: str) -> int: +def cell_len(text: str, _cache: Dict[str, int] = LRUCache(1024 * 4)) -> int: """Get the number of cells required to display text. - This method always caches, which may use up a lot of memory. It is recommended to use - `cell_len` over this method. - Args: text (str): Text to display. Returns: int: Get the number of cells required to display text. """ - _get_size = get_character_cell_size - total_size = sum(_get_size(character) for character in text) - return total_size - - -def cell_len(text: str, _cell_len: Callable[[str], int] = cached_cell_len) -> int: - """Get the number of cells required to display text. - - Args: - text (str): Text to display. - Returns: - int: Get the number of cells required to display text. - """ - if len(text) < 512: - return _cell_len(text) - _get_size = get_character_cell_size - total_size = sum(_get_size(character) for character in text) + if _is_single_cell_widths(text): + return len(text) + else: + cached_result = _cache.get(text, None) + if cached_result is not None: + return cached_result + _get_size = get_character_cell_size + total_size = sum(_get_size(character) for character in text) + if len(text) <= 64: + _cache[text] = total_size return total_size @@ -52,6 +42,9 @@ def get_character_cell_size(character: str) -> int: Returns: int: Number of cells (0, 1 or 2) occupied by that character. 
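[Aside: the restored cell_len in the cells.py hunk below memoises through a mutable default argument (rich's internal LRUCache; a plain dict stands in here) and only caches strings of 64 characters or fewer to bound memory. The caching shape in miniature, with a trivial width function standing in for get_character_cell_size:]

from typing import Dict

def char_width(character: str) -> int:
    """Stand-in for get_character_cell_size: pretend every char is one cell."""
    return 1

def cell_len(text: str, _cache: Dict[str, int] = {}) -> int:
    """Cells needed to display text, memoised for short strings."""
    cached = _cache.get(text)
    if cached is not None:
        return cached
    total = sum(char_width(ch) for ch in text)
    if len(text) <= 64:   # long keys are skipped to keep the cache bounded
        _cache[text] = total
    return total

assert cell_len("hello") == 5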
""" + if _is_single_cell_widths(character): + return 1 + return _get_codepoint_cell_size(ord(character)) @@ -93,7 +86,7 @@ def set_cell_size(text: str, total: int) -> str: return text + " " * (total - size) return text[:total] - if total <= 0: + if not total: return "" cell_size = cell_len(text) if cell_size == total: @@ -122,17 +115,18 @@ def set_cell_size(text: str, total: int) -> str: # TODO: This is inefficient # TODO: This might not work with CWJ type characters def chop_cells(text: str, max_size: int, position: int = 0) -> List[str]: - """Break text in to equal (cell) length strings, returning the characters in reverse - order""" + """Break text in to equal (cell) length strings.""" _get_character_cell_size = get_character_cell_size characters = [ (character, _get_character_cell_size(character)) for character in text - ] + ][::-1] total_size = position lines: List[List[str]] = [[]] append = lines[-1].append - for character, size in reversed(characters): + pop = characters.pop + while characters: + character, size = pop() if total_size + size > max_size: lines.append([character]) append = lines[-1].append @@ -140,7 +134,6 @@ def chop_cells(text: str, max_size: int, position: int = 0) -> List[str]: else: total_size += size append(character) - return ["".join(line) for line in lines] diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/rich/color.py b/venv/lib/python3.10/site-packages/pip/_vendor/rich/color.py index 6bca2da..f0fa026 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/rich/color.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/rich/color.py @@ -7,7 +7,7 @@ from ._palettes import EIGHT_BIT_PALETTE, STANDARD_PALETTE, WINDOWS_PALETTE from .color_triplet import ColorTriplet -from .repr import Result, rich_repr +from .repr import rich_repr, Result from .terminal_theme import DEFAULT_TERMINAL_THEME if TYPE_CHECKING: # pragma: no cover @@ -61,7 +61,6 @@ def __repr__(self) -> str: "bright_cyan": 14, "bright_white": 15, "grey0": 16, - "gray0": 16, "navy_blue": 17, "dark_blue": 18, "blue3": 20, @@ -97,7 +96,6 @@ def __repr__(self) -> str: "blue_violet": 57, "orange4": 94, "grey37": 59, - "gray37": 59, "medium_purple4": 60, "slate_blue3": 62, "royal_blue1": 63, @@ -130,9 +128,7 @@ def __repr__(self) -> str: "yellow4": 106, "wheat4": 101, "grey53": 102, - "gray53": 102, "light_slate_grey": 103, - "light_slate_gray": 103, "medium_purple": 104, "light_slate_blue": 105, "dark_olive_green3": 149, @@ -159,13 +155,11 @@ def __repr__(self) -> str: "light_salmon3": 173, "rosy_brown": 138, "grey63": 139, - "gray63": 139, "medium_purple1": 141, "gold3": 178, "dark_khaki": 143, "navajo_white3": 144, "grey69": 145, - "gray69": 145, "light_steel_blue3": 146, "light_steel_blue": 147, "yellow3": 184, @@ -195,7 +189,6 @@ def __repr__(self) -> str: "light_goldenrod2": 222, "light_yellow3": 187, "grey84": 188, - "gray84": 188, "light_steel_blue1": 189, "yellow2": 190, "dark_olive_green1": 192, @@ -230,55 +223,30 @@ def __repr__(self) -> str: "wheat1": 229, "cornsilk1": 230, "grey100": 231, - "gray100": 231, "grey3": 232, - "gray3": 232, "grey7": 233, - "gray7": 233, "grey11": 234, - "gray11": 234, "grey15": 235, - "gray15": 235, "grey19": 236, - "gray19": 236, "grey23": 237, - "gray23": 237, "grey27": 238, - "gray27": 238, "grey30": 239, - "gray30": 239, "grey35": 240, - "gray35": 240, "grey39": 241, - "gray39": 241, "grey42": 242, - "gray42": 242, "grey46": 243, - "gray46": 243, "grey50": 244, - "gray50": 244, "grey54": 245, - "gray54": 245, "grey58": 246, - "gray58": 246, 
"grey62": 247, - "gray62": 247, "grey66": 248, - "gray66": 248, "grey70": 249, - "gray70": 249, "grey74": 250, - "gray74": 250, "grey78": 251, - "gray78": 251, "grey82": 252, - "gray82": 252, "grey85": 253, - "gray85": 253, "grey89": 254, - "gray89": 254, "grey93": 255, - "gray93": 255, } @@ -311,8 +279,8 @@ class Color(NamedTuple): def __rich__(self) -> "Text": """Dispays the actual color if Rich printed.""" - from .style import Style from .text import Text + from .style import Style return Text.assemble( f" + + + + + + + +
[extraction damage: the rest of the color.py hunk, the "diff --git .../rich/console.py" header, and the HTML markup of the CONSOLE_HTML_FORMAT template string were stripped from this span, leaving only bare "+" markers on the surrounding lines and the template's "{code}" placeholder; the string closes at the """ below]
+ + +""" _TERM_COLORS = {"256color": ColorSystem.EIGHT_BIT, "16color": ColorSystem.STANDARD} @@ -224,16 +224,6 @@ def update_height(self, height: int) -> "ConsoleOptions": options.max_height = options.height = height return options - def reset_height(self) -> "ConsoleOptions": - """Return a copy of the options with height set to ``None``. - - Returns: - ~ConsoleOptions: New console options instance. - """ - options = self.copy() - options.height = None - return options - def update_dimensions(self, width: int, height: int) -> "ConsoleOptions": """Update the width and height, and return a copy. @@ -254,9 +244,7 @@ def update_dimensions(self, width: int, height: int) -> "ConsoleOptions": class RichCast(Protocol): """An object that may be 'cast' to a console renderable.""" - def __rich__( - self, - ) -> Union["ConsoleRenderable", "RichCast", str]: # pragma: no cover + def __rich__(self) -> Union["ConsoleRenderable", str]: # pragma: no cover ... @@ -273,9 +261,11 @@ def __rich_console__( # A type that may be rendered by Console. RenderableType = Union[ConsoleRenderable, RichCast, str] + # The result of calling a __rich_console__ method. RenderResult = Iterable[Union[RenderableType, Segment]] + _null_highlighter = NullHighlighter() @@ -511,10 +501,10 @@ def _replace(*args: Any, **kwargs: Any) -> Group: def _is_jupyter() -> bool: # pragma: no cover """Check if we're running in a Jupyter notebook.""" try: - get_ipython # type: ignore[name-defined] + get_ipython # type: ignore except NameError: return False - ipython = get_ipython() # type: ignore[name-defined] + ipython = get_ipython() # type: ignore shell = ipython.__class__.__name__ if "google.colab" in str(ipython.__class__) or shell == "ZMQInteractiveShell": return True # Jupyter notebook or qtconsole @@ -531,6 +521,7 @@ def _is_jupyter() -> bool: # pragma: no cover "windows": ColorSystem.WINDOWS, } + _COLOR_SYSTEMS_NAMES = {system: name for name, system in COLOR_SYSTEMS.items()} @@ -580,6 +571,12 @@ def detect_legacy_windows() -> bool: return WINDOWS and not get_windows_console_features().vt +if detect_legacy_windows(): # pragma: no cover + from pip._vendor.colorama import init + + init(strip=False) + + class Console: """A high level console interface. @@ -600,7 +597,7 @@ class Console: no_color (Optional[bool], optional): Enabled no color mode, or None to auto detect. Defaults to None. tab_size (int, optional): Number of spaces used to replace a tab character. Defaults to 8. record (bool, optional): Boolean to enable recording of terminal output, - required to call :meth:`export_html`, :meth:`export_svg`, and :meth:`export_text`. Defaults to False. + required to call :meth:`export_html` and :meth:`export_text`. Defaults to False. markup (bool, optional): Boolean to enable :ref:`console_markup`. Defaults to True. emoji (bool, optional): Enable emoji code. Defaults to True. emoji_variant (str, optional): Optional emoji variant, either "text" or "emoji". Defaults to None. 
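[Aside: the RichCast protocol above narrows __rich__ back to returning a renderable or plain str. Any object can opt in by defining that one method; the Temperature class below is a hypothetical example, not from rich.]

class Temperature:
    """Casts itself to console markup when printed by a rich Console."""

    def __init__(self, celsius: float) -> None:
        self.celsius = celsius

    def __rich__(self) -> str:
        colour = "red" if self.celsius > 30.0 else "cyan"
        return f"[bold {colour}]{self.celsius:.1f}C[/]"

# e.g. Console().print(Temperature(35.0)) would render a bold red "35.0C"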
@@ -658,19 +655,12 @@ def __init__( self.is_jupyter = _is_jupyter() if force_jupyter is None else force_jupyter if self.is_jupyter: - if width is None: - jupyter_columns = self._environ.get("JUPYTER_COLUMNS") - if jupyter_columns is not None and jupyter_columns.isdigit(): - width = int(jupyter_columns) - else: - width = JUPYTER_DEFAULT_COLUMNS - if height is None: - jupyter_lines = self._environ.get("JUPYTER_LINES") - if jupyter_lines is not None and jupyter_lines.isdigit(): - height = int(jupyter_lines) - else: - height = JUPYTER_DEFAULT_LINES + width = width or 93 + height = height or 100 + self.soft_wrap = soft_wrap + self._width = width + self._height = height self.tab_size = tab_size self.record = record self._markup = markup @@ -682,7 +672,6 @@ def __init__( if legacy_windows is None else legacy_windows ) - if width is None: columns = self._environ.get("COLUMNS") if columns is not None and columns.isdigit(): @@ -921,13 +910,6 @@ def is_terminal(self) -> bool: """ if self._force_terminal is not None: return self._force_terminal - - if hasattr(sys.stdin, "__module__") and sys.stdin.__module__.startswith( - "idlelib" - ): - # Return False for Idle which claims to be a tty but can't handle ansi codes - return False - isatty: Optional[Callable[[], bool]] = getattr(self.file, "isatty", None) try: return False if isatty is None else isatty() @@ -982,16 +964,16 @@ def size(self) -> ConsoleDimensions: if WINDOWS: # pragma: no cover try: width, height = os.get_terminal_size() - except (AttributeError, ValueError, OSError): # Probably not a terminal + except OSError: # Probably not a terminal pass else: - for file_descriptor in _STD_STREAMS: + try: + width, height = os.get_terminal_size(sys.__stdin__.fileno()) + except (AttributeError, ValueError, OSError): try: - width, height = os.get_terminal_size(file_descriptor) + width, height = os.get_terminal_size(sys.__stdout__.fileno()) except (AttributeError, ValueError, OSError): pass - else: - break columns = self._environ.get("COLUMNS") if columns is not None and columns.isdigit(): @@ -1159,7 +1141,7 @@ def show_cursor(self, show: bool = True) -> bool: Args: show (bool, optional): Set visibility of the cursor. """ - if self.is_terminal: + if self.is_terminal and not self.legacy_windows: self.control(Control.show_cursor(show)) return True return False @@ -1194,38 +1176,6 @@ def is_alt_screen(self) -> bool: """ return self._is_alt_screen - def set_window_title(self, title: str) -> bool: - """Set the title of the console terminal window. - - Warning: There is no means within Rich of "resetting" the window title to its - previous value, meaning the title you set will persist even after your application - exits. - - ``fish`` shell resets the window title before and after each command by default, - negating this issue. Windows Terminal and command prompt will also reset the title for you. - Most other shells and terminals, however, do not do this. - - Some terminals may require configuration changes before you can set the title. - Some terminals may not support setting the title at all. - - Other software (including the terminal itself, the shell, custom prompts, plugins, etc.) - may also set the terminal window title. This could result in whatever value you write - using this method being overwritten. - - Args: - title (str): The new title of the terminal window. - - Returns: - bool: True if the control code to change the terminal title was - written, otherwise False. 
Note that a return value of True - does not guarantee that the window title has actually changed, - since the feature may be unsupported/disabled in some terminals. - """ - if self.is_terminal: - self.control(Control.title(title)) - return True - return False - def screen( self, hide_cursor: bool = True, style: Optional[StyleType] = None ) -> "ScreenContext": @@ -1282,7 +1232,7 @@ def render( renderable = rich_cast(renderable) if hasattr(renderable, "__rich_console__") and not isclass(renderable): - render_iterable = renderable.__rich_console__(self, _options) # type: ignore[union-attr] + render_iterable = renderable.__rich_console__(self, _options) # type: ignore elif isinstance(renderable, str): text_renderable = self.render_str( renderable, highlight=_options.highlight, markup=_options.markup @@ -1301,7 +1251,6 @@ def render( f"object {render_iterable!r} is not renderable" ) _Segment = Segment - _options = _options.reset_height() for render_output in iter_render: if isinstance(render_output, _Segment): yield render_output @@ -1337,11 +1286,6 @@ def render_lines( _rendered = self.render(renderable, render_options) if style: _rendered = Segment.apply_style(_rendered, style) - - render_height = render_options.height - if render_height is not None: - render_height = max(0, render_height) - lines = list( islice( Segment.split_and_crop_lines( @@ -1349,10 +1293,9 @@ def render_lines( render_options.max_width, include_new_lines=new_lines, pad=pad, - style=style, ), None, - render_height, + render_options.height, ) ) if render_options.height is not None: @@ -1379,7 +1322,7 @@ def render_str( highlight: Optional[bool] = None, highlighter: Optional[HighlighterType] = None, ) -> "Text": - """Convert a string to a Text instance. This is called automatically if + """Convert a string to a Text instance. This is is called automatically if you print or log a string. Args: @@ -1429,7 +1372,7 @@ def render_str( def get_style( self, name: Union[str, Style], *, default: Optional[Union[Style, str]] = None ) -> Style: - """Get a Style instance by its theme name or parse a definition. + """Get a Style instance by it's theme name or parse a definition. Args: name (str): The name of a style or a style definition. @@ -1822,7 +1765,7 @@ def print_exception( """Prints a rich render of the last exception and traceback. Args: - width (Optional[int], optional): Number of characters used to render code. Defaults to 100. + width (Optional[int], optional): Number of characters used to render code. Defaults to 88. extra_lines (int, optional): Additional lines of code to render. Defaults to 3. theme (str, optional): Override pygments theme used in traceback word_wrap (bool, optional): Enable word wrapping of long lines. Defaults to False. @@ -1868,7 +1811,7 @@ def _caller_frame_info( frame = currentframe() if frame is not None: # Use the faster currentframe where implemented - while offset and frame is not None: + while offset and frame: frame = frame.f_back offset -= 1 assert frame is not None @@ -1961,72 +1904,43 @@ def log( buffer_extend(line) def _check_buffer(self) -> None: - """Check if the buffer may be rendered. Render it if it can (e.g. Console.quiet is False) - Rendering is supported on Windows, Unix and Jupyter environments. For - legacy Windows consoles, the win32 API is called directly. - This method will also record what it renders if recording is enabled via Console.record. 
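[Aside: _caller_frame_info above walks f_back a fixed number of times to find the caller. The walk in isolation, as an illustrative helper built on sys._getframe:]

import sys
from types import FrameType
from typing import Optional

def caller_frame(offset: int) -> Optional[FrameType]:
    """Return the frame `offset` levels above this one, or None if exhausted."""
    frame: Optional[FrameType] = sys._getframe(0)
    while offset and frame is not None:
        frame = frame.f_back
        offset -= 1
    return frame

def outer() -> str:
    def inner() -> str:
        frame = caller_frame(2)  # skip caller_frame itself, then inner
        return frame.f_code.co_name if frame else "?"
    return inner()

assert outer() == "outer"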
- """ + """Check if the buffer may be rendered.""" if self.quiet: del self._buffer[:] return with self._lock: - if self.record: - with self._record_buffer_lock: - self._record_buffer.extend(self._buffer[:]) - if self._buffer_index == 0: - if self.is_jupyter: # pragma: no cover from .jupyter import display display(self._buffer, self._render_buffer(self._buffer[:])) del self._buffer[:] else: - if WINDOWS: - use_legacy_windows_render = False - if self.legacy_windows: - try: - use_legacy_windows_render = ( - self.file.fileno() in _STD_STREAMS_OUTPUT - ) - except (ValueError, io.UnsupportedOperation): - pass - - if use_legacy_windows_render: - from pip._vendor.rich._win32_console import LegacyWindowsTerm - from pip._vendor.rich._windows_renderer import legacy_windows_render - - legacy_windows_render( - self._buffer[:], LegacyWindowsTerm(self.file) - ) - else: - # Either a non-std stream on legacy Windows, or modern Windows. - text = self._render_buffer(self._buffer[:]) - # https://bugs.python.org/issue37871 - write = self.file.write - for line in text.splitlines(True): - try: - write(line) - except UnicodeEncodeError as error: - error.reason = f"{error.reason}\n*** You may need to add PYTHONIOENCODING=utf-8 to your environment ***" - raise - else: - text = self._render_buffer(self._buffer[:]) + text = self._render_buffer(self._buffer[:]) + del self._buffer[:] + if text: try: - self.file.write(text) + if WINDOWS: # pragma: no cover + # https://bugs.python.org/issue37871 + write = self.file.write + for line in text.splitlines(True): + write(line) + else: + self.file.write(text) + self.file.flush() except UnicodeEncodeError as error: error.reason = f"{error.reason}\n*** You may need to add PYTHONIOENCODING=utf-8 to your environment ***" raise - self.file.flush() - del self._buffer[:] - def _render_buffer(self, buffer: Iterable[Segment]) -> str: """Render buffered output, and clear buffer.""" output: List[str] = [] append = output.append color_system = self._color_system legacy_windows = self.legacy_windows + if self.record: + with self._record_buffer_lock: + self._record_buffer.extend(buffer) not_terminal = not self.is_terminal if self.no_color and color_system: buffer = Segment.remove_color(buffer) @@ -2068,15 +1982,23 @@ def input( Returns: str: Text read from stdin. """ + prompt_str = "" if prompt: - self.print(prompt, markup=markup, emoji=emoji, end="") + with self.capture() as capture: + self.print(prompt, markup=markup, emoji=emoji, end="") + prompt_str = capture.get() + if self.legacy_windows: + # Legacy windows doesn't like ANSI codes in getpass or input (colorama bug)? + self.file.write(prompt_str) + prompt_str = "" if password: - result = getpass("", stream=stream) + result = getpass(prompt_str, stream=stream) else: if stream: + self.file.write(prompt_str) result = stream.readline() else: - result = input() + result = input(prompt_str) return result def export_text(self, *, clear: bool = True, styles: bool = False) -> str: @@ -2138,8 +2060,8 @@ def export_html( Args: theme (TerminalTheme, optional): TerminalTheme object containing console colors. clear (bool, optional): Clear record buffer after exporting. Defaults to ``True``. - code_format (str, optional): Format string to render HTML. In addition to '{foreground}', - '{background}', and '{code}', should contain '{stylesheet}' if inline_styles is ``False``. + code_format (str, optional): Format string to render HTML, should contain {foreground} + {background} and {code}. 
inline_styles (bool, optional): If ``True`` styles will be inlined in to spans, which makes files larger but easier to cut and paste markup. If ``False``, styles will be embedded in a style tag. Defaults to False. @@ -2215,8 +2137,8 @@ def save_html( path (str): Path to write html file. theme (TerminalTheme, optional): TerminalTheme object containing console colors. clear (bool, optional): Clear record buffer after exporting. Defaults to ``True``. - code_format (str, optional): Format string to render HTML. In addition to '{foreground}', - '{background}', and '{code}', should contain '{stylesheet}' if inline_styles is ``False``. + code_format (str, optional): Format string to render HTML, should contain {foreground} + {background} and {code}. inline_styles (bool, optional): If ``True`` styles will be inlined in to spans, which makes files larger but easier to cut and paste markup. If ``False``, styles will be embedded in a style tag. Defaults to False. @@ -2231,293 +2153,9 @@ def save_html( with open(path, "wt", encoding="utf-8") as write_file: write_file.write(html) - def export_svg( - self, - *, - title: str = "Rich", - theme: Optional[TerminalTheme] = None, - clear: bool = True, - code_format: str = CONSOLE_SVG_FORMAT, - ) -> str: - """ - Generate an SVG from the console contents (requires record=True in Console constructor). - - Args: - path (str): The path to write the SVG to. - title (str): The title of the tab in the output image - theme (TerminalTheme, optional): The ``TerminalTheme`` object to use to style the terminal - clear (bool, optional): Clear record buffer after exporting. Defaults to ``True`` - code_format (str): Format string used to generate the SVG. Rich will inject a number of variables - into the string in order to form the final SVG output. The default template used and the variables - injected by Rich can be found by inspecting the ``console.CONSOLE_SVG_FORMAT`` variable. 
- """ - - from pip._vendor.rich.cells import cell_len - - style_cache: Dict[Style, str] = {} - - def get_svg_style(style: Style) -> str: - """Convert a Style to CSS rules for SVG.""" - if style in style_cache: - return style_cache[style] - css_rules = [] - color = ( - _theme.foreground_color - if (style.color is None or style.color.is_default) - else style.color.get_truecolor(_theme) - ) - bgcolor = ( - _theme.background_color - if (style.bgcolor is None or style.bgcolor.is_default) - else style.bgcolor.get_truecolor(_theme) - ) - if style.reverse: - color, bgcolor = bgcolor, color - if style.dim: - color = blend_rgb(color, bgcolor, 0.4) - css_rules.append(f"fill: {color.hex}") - if style.bold: - css_rules.append("font-weight: bold") - if style.italic: - css_rules.append("font-style: italic;") - if style.underline: - css_rules.append("text-decoration: underline;") - if style.strike: - css_rules.append("text-decoration: line-through;") - - css = ";".join(css_rules) - style_cache[style] = css - return css - - _theme = theme or SVG_EXPORT_THEME - - width = self.width - char_height = 20 - char_width = char_height * 0.61 - line_height = char_height * 1.22 - - margin_top = 1 - margin_right = 1 - margin_bottom = 1 - margin_left = 1 - - padding_top = 40 - padding_right = 8 - padding_bottom = 8 - padding_left = 8 - - padding_width = padding_left + padding_right - padding_height = padding_top + padding_bottom - margin_width = margin_left + margin_right - margin_height = margin_top + margin_bottom - - text_backgrounds: List[str] = [] - text_group: List[str] = [] - classes: Dict[str, int] = {} - style_no = 1 - - def escape_text(text: str) -> str: - """HTML escape text and replace spaces with nbsp.""" - return escape(text).replace(" ", " ") - - def make_tag( - name: str, content: Optional[str] = None, **attribs: object - ) -> str: - """Make a tag from name, content, and attributes.""" - - def stringify(value: object) -> str: - if isinstance(value, (float)): - return format(value, "g") - return str(value) - - tag_attribs = " ".join( - f'{k.lstrip("_").replace("_", "-")}="{stringify(v)}"' - for k, v in attribs.items() - ) - return ( - f"<{name} {tag_attribs}>{content}" - if content - else f"<{name} {tag_attribs}/>" - ) - - with self._record_buffer_lock: - segments = list(Segment.filter_control(self._record_buffer)) - if clear: - self._record_buffer.clear() - - unique_id = "terminal-" + str( - zlib.adler32( - ("".join(segment.text for segment in segments)).encode( - "utf-8", "ignore" - ) - + title.encode("utf-8", "ignore") - ) - ) - y = 0 - for y, line in enumerate(Segment.split_and_crop_lines(segments, length=width)): - x = 0 - for text, style, _control in line: - style = style or Style() - rules = get_svg_style(style) - if rules not in classes: - classes[rules] = style_no - style_no += 1 - class_name = f"r{classes[rules]}" - - if style.reverse: - has_background = True - background = ( - _theme.foreground_color.hex - if style.color is None - else style.color.get_truecolor(_theme).hex - ) - else: - bgcolor = style.bgcolor - has_background = bgcolor is not None and not bgcolor.is_default - background = ( - _theme.background_color.hex - if style.bgcolor is None - else style.bgcolor.get_truecolor(_theme).hex - ) - - text_length = cell_len(text) - if has_background: - text_backgrounds.append( - make_tag( - "rect", - fill=background, - x=x * char_width, - y=y * line_height + 1.5, - width=char_width * text_length, - height=line_height + 0.25, - shape_rendering="crispEdges", - ) - ) - - if text != " " * 
len(text): - text_group.append( - make_tag( - "text", - escape_text(text), - _class=f"{unique_id}-{class_name}", - x=x * char_width, - y=y * line_height + char_height, - textLength=char_width * len(text), - clip_path=f"url(#{unique_id}-line-{y})", - ) - ) - x += cell_len(text) - - line_offsets = [line_no * line_height + 1.5 for line_no in range(y)] - lines = "\n".join( - f""" - {make_tag("rect", x=0, y=offset, width=char_width * width, height=line_height + 0.25)} - """ - for line_no, offset in enumerate(line_offsets) - ) - - styles = "\n".join( - f".{unique_id}-r{rule_no} {{ {css} }}" for css, rule_no in classes.items() - ) - backgrounds = "".join(text_backgrounds) - matrix = "".join(text_group) - - terminal_width = ceil(width * char_width + padding_width) - terminal_height = (y + 1) * line_height + padding_height - chrome = make_tag( - "rect", - fill=_theme.background_color.hex, - stroke="rgba(255,255,255,0.35)", - stroke_width="1", - x=margin_left, - y=margin_top, - width=terminal_width, - height=terminal_height, - rx=8, - ) - - title_color = _theme.foreground_color.hex - if title: - chrome += make_tag( - "text", - escape_text(title), - _class=f"{unique_id}-title", - fill=title_color, - text_anchor="middle", - x=terminal_width // 2, - y=margin_top + char_height + 6, - ) - chrome += f""" - - - - - - """ - - svg = code_format.format( - unique_id=unique_id, - char_width=char_width, - char_height=char_height, - line_height=line_height, - terminal_width=char_width * width - 1, - terminal_height=(y + 1) * line_height - 1, - width=terminal_width + margin_width, - height=terminal_height + margin_height, - terminal_x=margin_left + padding_left, - terminal_y=margin_top + padding_top, - styles=styles, - chrome=chrome, - backgrounds=backgrounds, - matrix=matrix, - lines=lines, - ) - return svg - - def save_svg( - self, - path: str, - *, - title: str = "Rich", - theme: Optional[TerminalTheme] = None, - clear: bool = True, - code_format: str = CONSOLE_SVG_FORMAT, - ) -> None: - """Generate an SVG file from the console contents (requires record=True in Console constructor). - - Args: - path (str): The path to write the SVG to. - title (str): The title of the tab in the output image - theme (TerminalTheme, optional): The ``TerminalTheme`` object to use to style the terminal - clear (bool, optional): Clear record buffer after exporting. Defaults to ``True`` - code_format (str): Format string used to generate the SVG. Rich will inject a number of variables - into the string in order to form the final SVG output. The default template used and the variables - injected by Rich can be found by inspecting the ``console.CONSOLE_SVG_FORMAT`` variable. - """ - svg = self.export_svg( - title=title, - theme=theme, - clear=clear, - code_format=code_format, - ) - with open(path, "wt", encoding="utf-8") as write_file: - write_file.write(svg) - - -def _svg_hash(svg_main_code: str) -> str: - """Returns a unique hash for the given SVG main code. - - Args: - svg_main_code (str): The content we're going to inject in the SVG envelope. 
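[Aside: _svg_hash, removed above, is nothing more than zlib.adler32 over the UTF-8 bytes, stringified so it can be interpolated into the SVG's CSS ids. Reproduced standalone:]

import zlib

def svg_hash(svg_main_code: str) -> str:
    """Cheap, stable content hash used to namespace ids in the exported SVG."""
    return str(zlib.adler32(svg_main_code.encode()))

assert svg_hash("abc") == str(zlib.adler32(b"abc"))  # deterministic per content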
- - Returns: - str: a hash of the given content - """ - return str(zlib.adler32(svg_main_code.encode())) - if __name__ == "__main__": # pragma: no cover - console = Console(record=True) + console = Console() console.log( "JSONRPC [i]request[/i]", @@ -2570,3 +2208,4 @@ def _svg_hash(svg_main_code: str) -> str: }, } ) + console.log("foo") diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/rich/control.py b/venv/lib/python3.10/site-packages/pip/_vendor/rich/control.py index 88fcb92..c98d0d7 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/rich/control.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/rich/control.py @@ -1,35 +1,18 @@ -import sys -import time -from typing import TYPE_CHECKING, Callable, Dict, Iterable, List, Union - -if sys.version_info >= (3, 8): - from typing import Final -else: - from pip._vendor.typing_extensions import Final # pragma: no cover +from typing import Any, Callable, Dict, Iterable, List, TYPE_CHECKING, Union from .segment import ControlCode, ControlType, Segment if TYPE_CHECKING: from .console import Console, ConsoleOptions, RenderResult -STRIP_CONTROL_CODES: Final = [ - 7, # Bell +STRIP_CONTROL_CODES = [ 8, # Backspace 11, # Vertical tab 12, # Form feed 13, # Carriage return ] -_CONTROL_STRIP_TRANSLATE: Final = { - _codepoint: None for _codepoint in STRIP_CONTROL_CODES -} +_CONTROL_TRANSLATE = {_codepoint: None for _codepoint in STRIP_CONTROL_CODES} -CONTROL_ESCAPE: Final = { - 7: "\\a", - 8: "\\b", - 11: "\\v", - 12: "\\f", - 13: "\\r", -} CONTROL_CODES_FORMAT: Dict[int, Callable[..., str]] = { ControlType.BELL: lambda: "\x07", @@ -47,7 +30,6 @@ ControlType.CURSOR_MOVE_TO_COLUMN: lambda param: f"\x1b[{param+1}G", ControlType.ERASE_IN_LINE: lambda param: f"\x1b[{param}K", ControlType.CURSOR_MOVE_TO: lambda x, y: f"\x1b[{y+1};{x+1}H", - ControlType.SET_WINDOW_TITLE: lambda title: f"\x1b]0;{title}\x07", } @@ -165,15 +147,6 @@ def alt_screen(cls, enable: bool) -> "Control": else: return cls(ControlType.DISABLE_ALT_SCREEN) - @classmethod - def title(cls, title: str) -> "Control": - """Set the terminal window title - - Args: - title (str): The new terminal window title - """ - return cls((ControlType.SET_WINDOW_TITLE, title)) - def __str__(self) -> str: return self.segment.text @@ -185,7 +158,7 @@ def __rich_console__( def strip_control_codes( - text: str, _translate_table: Dict[int, None] = _CONTROL_STRIP_TRANSLATE + text: str, _translate_table: Dict[int, None] = _CONTROL_TRANSLATE ) -> str: """Remove control codes from text. @@ -198,28 +171,5 @@ def strip_control_codes( return text.translate(_translate_table) -def escape_control_codes( - text: str, - _translate_table: Dict[int, str] = CONTROL_ESCAPE, -) -> str: - """Replace control codes with their "escaped" equivalent in the given text. - (e.g. "\b" becomes "\\b") - - Args: - text (str): A string possibly containing control codes. - - Returns: - str: String with control codes replaced with their escaped version. - """ - return text.translate(_translate_table) - - if __name__ == "__main__": # pragma: no cover - from pip._vendor.rich.console import Console - - console = Console() - console.print("Look at the title of your terminal window ^") - # console.print(Control((ControlType.SET_WINDOW_TITLE, "Hello, world!"))) - for i in range(10): - console.set_window_title("🚀 Loading" + "." 
* i) - time.sleep(0.5) + print(strip_control_codes("hello\rWorld")) diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/rich/default_styles.py b/venv/lib/python3.10/site-packages/pip/_vendor/rich/default_styles.py index 46e9ea5..91ab232 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/rich/default_styles.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/rich/default_styles.py @@ -2,6 +2,7 @@ from .style import Style + DEFAULT_STYLES: Dict[str, Style] = { "none": Style.null(), "reset": Style( @@ -39,9 +40,7 @@ "inspect.attr": Style(color="yellow", italic=True), "inspect.attr.dunder": Style(color="yellow", italic=True, dim=True), "inspect.callable": Style(bold=True, color="red"), - "inspect.async_def": Style(italic=True, color="bright_cyan"), "inspect.def": Style(italic=True, color="bright_cyan"), - "inspect.class": Style(italic=True, color="bright_cyan"), "inspect.error": Style(bold=True, color="red"), "inspect.equals": Style(), "inspect.help": Style(color="cyan"), @@ -79,7 +78,6 @@ "repr.attrib_equal": Style(bold=True), "repr.attrib_value": Style(color="magenta", italic=False), "repr.number": Style(color="cyan", bold=True, italic=False), - "repr.number_complex": Style(color="cyan", bold=True, italic=False), # same "repr.bool_true": Style(color="bright_green", italic=True), "repr.bool_false": Style(color="bright_red", italic=True), "repr.none": Style(color="magenta", italic=True), @@ -158,9 +156,6 @@ "markdown.h7": Style(italic=True, dim=True), "markdown.link": Style(color="bright_blue"), "markdown.link_url": Style(color="blue"), - "iso8601.date": Style(color="blue"), - "iso8601.time": Style(color="magenta"), - "iso8601.timezone": Style(color="yellow"), } diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/rich/diagnose.py b/venv/lib/python3.10/site-packages/pip/_vendor/rich/diagnose.py index ad36183..38728da 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/rich/diagnose.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/rich/diagnose.py @@ -1,37 +1,6 @@ -import os -import platform - -from pip._vendor.rich import inspect -from pip._vendor.rich.console import Console, get_windows_console_features -from pip._vendor.rich.panel import Panel -from pip._vendor.rich.pretty import Pretty - +if __name__ == "__main__": # pragma: no cover + from pip._vendor.rich.console import Console + from pip._vendor.rich import inspect -def report() -> None: # pragma: no cover - """Print a report to the terminal with debugging information""" console = Console() inspect(console) - features = get_windows_console_features() - inspect(features) - - env_names = ( - "TERM", - "COLORTERM", - "CLICOLOR", - "NO_COLOR", - "TERM_PROGRAM", - "COLUMNS", - "LINES", - "JUPYTER_COLUMNS", - "JUPYTER_LINES", - "JPY_PARENT_PID", - "VSCODE_VERBOSE_LOGGING", - ) - env = {name: os.getenv(name) for name in env_names} - console.print(Panel.fit((Pretty(env)), title="[b]Environment Variables")) - - console.print(f'platform="{platform.system()}"') - - -if __name__ == "__main__": # pragma: no cover - report() diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/rich/file_proxy.py b/venv/lib/python3.10/site-packages/pip/_vendor/rich/file_proxy.py index cc69f22..3ec593a 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/rich/file_proxy.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/rich/file_proxy.py @@ -1,5 +1,5 @@ import io -from typing import IO, TYPE_CHECKING, Any, List +from typing import List, Any, IO, TYPE_CHECKING from .ansi import AnsiDecoder from .text import Text @@ 
-48,7 +48,7 @@ def write(self, text: str) -> int: return len(text) def flush(self) -> None: - output = "".join(self.__buffer) - if output: - self.__console.print(output) - del self.__buffer[:] + buffer = self.__buffer + if buffer: + self.__console.print("".join(buffer)) + del buffer[:] diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/rich/filesize.py b/venv/lib/python3.10/site-packages/pip/_vendor/rich/filesize.py index 61be475..b3a0996 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/rich/filesize.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/rich/filesize.py @@ -13,7 +13,7 @@ __all__ = ["decimal"] -from typing import Iterable, List, Optional, Tuple +from typing import Iterable, List, Tuple, Optional def _to_str( @@ -30,7 +30,7 @@ def _to_str( return "{:,} bytes".format(size) for i, suffix in enumerate(suffixes, 2): # noqa: B007 - unit = base**i + unit = base ** i if size < unit: break return "{:,.{precision}f}{separator}{}".format( @@ -44,7 +44,7 @@ def _to_str( def pick_unit_and_suffix(size: int, suffixes: List[str], base: int) -> Tuple[int, str]: """Pick a suffix and base for the given size.""" for i, suffix in enumerate(suffixes): - unit = base**i + unit = base ** i if size < unit * base: break return unit, suffix diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/rich/highlighter.py b/venv/lib/python3.10/site-packages/pip/_vendor/rich/highlighter.py index 82293df..8afdd01 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/rich/highlighter.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/rich/highlighter.py @@ -1,8 +1,7 @@ -import re from abc import ABC, abstractmethod from typing import List, Union -from .text import Span, Text +from .text import Text def _combine_regex(*regexes: str) -> str: @@ -82,23 +81,22 @@ class ReprHighlighter(RegexHighlighter): base_style = "repr." highlights = [ - r"(?P<)(?P[-\w.:|]*)(?P[\w\W]*?)(?P>)", - r'(?P[\w_]{1,50})=(?P"?[\w_]+"?)?', - r"(?P[][{}()])", + r"(?P\<)(?P[\w\-\.\:]*)(?P[\w\W]*?)(?P\>)", + r"(?P[\w_]{1,50})=(?P\"?[\w_]+\"?)?", + r"(?P[\{\[\(\)\]\}])", _combine_regex( r"(?P[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3})", r"(?P([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4})", r"(?P(?:[0-9A-Fa-f]{1,2}-){7}[0-9A-Fa-f]{1,2}|(?:[0-9A-Fa-f]{1,2}:){7}[0-9A-Fa-f]{1,2}|(?:[0-9A-Fa-f]{4}\.){3}[0-9A-Fa-f]{4})", r"(?P(?:[0-9A-Fa-f]{1,2}-){5}[0-9A-Fa-f]{1,2}|(?:[0-9A-Fa-f]{1,2}:){5}[0-9A-Fa-f]{1,2}|(?:[0-9A-Fa-f]{4}\.){2}[0-9A-Fa-f]{4})", - r"(?P[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12})", - r"(?P[\w.]*?)\(", + r"(?P[\w\.]*?)\(", r"\b(?PTrue)\b|\b(?PFalse)\b|\b(?PNone)\b", r"(?P\.\.\.)", - r"(?P(?(?\B(/[-\w._+]+)*\/)(?P[-\w._+]*)?", - r"(?b?'''.*?(?(file|https|http|ws|wss)://[-0-9a-zA-Z$_+!`(),.?/;:&=%#]*)", + r"(?P(?\B(\/[\w\.\-\_\+]+)*\/)(?P[\w\.\-\_\+]*)?", + r"(?b?\'\'\'.*?(?[a-fA-F0-9]{8}\-[a-fA-F0-9]{4}\-[a-fA-F0-9]{4}\-[a-fA-F0-9]{4}\-[a-fA-F0-9]{12})", + r"(?P(file|https|http|ws|wss):\/\/[0-9a-zA-Z\$\-\_\+\!`\(\)\,\.\?\/\;\:\&\=\%\#]*)", ), ] @@ -106,95 +104,15 @@ class ReprHighlighter(RegexHighlighter): class JSONHighlighter(RegexHighlighter): """Highlights JSON""" - # Captures the start and end of JSON strings, handling escaped quotes - JSON_STR = r"(?b?\".*?(?[\{\[\(\)\]\}])", r"\b(?Ptrue)\b|\b(?Pfalse)\b|\b(?Pnull)\b", r"(?P(?b?\".*?(? 
None: - super().highlight(text) - - # Additional work to handle highlighting JSON keys - plain = text.plain - append = text.spans.append - whitespace = self.JSON_WHITESPACE - for match in re.finditer(self.JSON_STR, plain): - start, end = match.span() - cursor = end - while cursor < len(plain): - char = plain[cursor] - cursor += 1 - if char == ":": - append(Span(start, end, "json.key")) - elif char in whitespace: - continue - break - - -class ISO8601Highlighter(RegexHighlighter): - """Highlights the ISO8601 date time strings. - Regex reference: https://www.oreilly.com/library/view/regular-expressions-cookbook/9781449327453/ch04s07.html - """ - - base_style = "iso8601." - highlights = [ - # - # Dates - # - # Calendar month (e.g. 2008-08). The hyphen is required - r"^(?P[0-9]{4})-(?P1[0-2]|0[1-9])$", - # Calendar date w/o hyphens (e.g. 20080830) - r"^(?P(?P[0-9]{4})(?P1[0-2]|0[1-9])(?P3[01]|0[1-9]|[12][0-9]))$", - # Ordinal date (e.g. 2008-243). The hyphen is optional - r"^(?P(?P[0-9]{4})-?(?P36[0-6]|3[0-5][0-9]|[12][0-9]{2}|0[1-9][0-9]|00[1-9]))$", - # - # Weeks - # - # Week of the year (e.g., 2008-W35). The hyphen is optional - r"^(?P(?P[0-9]{4})-?W(?P5[0-3]|[1-4][0-9]|0[1-9]))$", - # Week date (e.g., 2008-W35-6). The hyphens are optional - r"^(?P(?P[0-9]{4})-?W(?P5[0-3]|[1-4][0-9]|0[1-9])-?(?P[1-7]))$", - # - # Times - # - # Hours and minutes (e.g., 17:21). The colon is optional - r"^(?P
""" @@ -23,7 +17,7 @@ def __init__(self, html: str, text: str) -> None: self.text = text def _repr_mimebundle_( - self, include: Sequence[str], exclude: Sequence[str], **kwargs: Any + self, include: Iterable[str], exclude: Iterable[str], **kwargs: Any ) -> Dict[str, str]: data = {"text/plain": self.text, "text/html": self.html} if include: @@ -39,13 +33,10 @@ class JupyterMixin: __slots__ = () def _repr_mimebundle_( - self: "ConsoleRenderable", - include: Sequence[str], - exclude: Sequence[str], - **kwargs: Any, + self, include: Iterable[str], exclude: Iterable[str], **kwargs: Any ) -> Dict[str, str]: console = get_console() - segments = list(console.render(self, console.options)) + segments = list(console.render(self, console.options)) # type: ignore html = _render_segments(segments) text = console._render_buffer(segments) data = {"text/plain": text, "text/html": html} @@ -72,7 +63,7 @@ def escape(text: str) -> str: rule = style.get_html_style(theme) text = f'{text}' if rule else text if style.link: - text = f'{text}' + text = f'{text}' append_fragment(text) code = "".join(fragments) diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/rich/layout.py b/venv/lib/python3.10/site-packages/pip/_vendor/rich/layout.py index 1d70465..22a4c54 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/rich/layout.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/rich/layout.py @@ -73,7 +73,6 @@ def __rich_console__( style=self.style, title=self.highlighter(title), border_style="blue", - height=height, ) @@ -300,7 +299,7 @@ def add_split(self, *layouts: Union["Layout", RenderableType]) -> None: self._children.extend(_layouts) def split_row(self, *layouts: Union["Layout", RenderableType]) -> None: - """Split the layout in to a row (layouts side by side). + """Split the layout in tow a row (Layouts side by side). Args: *layouts (Layout): Positional arguments should be (sub) Layout instances. diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/rich/live.py b/venv/lib/python3.10/site-packages/pip/_vendor/rich/live.py index e635fe5..6db5b60 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/rich/live.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/rich/live.py @@ -118,15 +118,7 @@ def start(self, refresh: bool = False) -> None: self._enable_redirect_io() self.console.push_render_hook(self) if refresh: - try: - self.refresh() - except Exception: - # If refresh fails, we want to stop the redirection of sys.stderr, - # so the error stacktrace is properly displayed in the terminal. - # (or, if the code that calls Rich captures the exception and wants to display something, - # let this be displayed in the terminal). - self.stop() - raise + self.refresh() if self.auto_refresh: self._refresh_thread = _RefreshThread(self, self.refresh_per_second) self._refresh_thread.start() diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/rich/logging.py b/venv/lib/python3.10/site-packages/pip/_vendor/rich/logging.py index 58188fd..002f1f7 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/rich/logging.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/rich/logging.py @@ -2,8 +2,7 @@ from datetime import datetime from logging import Handler, LogRecord from pathlib import Path -from types import ModuleType -from typing import ClassVar, List, Optional, Iterable, Type, Union +from typing import ClassVar, List, Optional, Type, Union from . 
import get_console from ._log_render import LogRender, FormatTimeCallable @@ -38,12 +37,10 @@ class RichHandler(Handler): tracebacks_theme (str, optional): Override pygments theme used in traceback. tracebacks_word_wrap (bool, optional): Enable word wrapping of long tracebacks lines. Defaults to True. tracebacks_show_locals (bool, optional): Enable display of locals in tracebacks. Defaults to False. - tracebacks_suppress (Sequence[Union[str, ModuleType]]): Optional sequence of modules or paths to exclude from traceback. locals_max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation. Defaults to 10. locals_max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to 80. log_time_format (Union[str, TimeFormatterCallable], optional): If ``log_time`` is enabled, either string for strftime or callable that formats the time. Defaults to "[%x %X] ". - keywords (List[str], optional): List of words to highlight instead of ``RichHandler.KEYWORDS``. """ KEYWORDS: ClassVar[Optional[List[str]]] = [ @@ -76,11 +73,9 @@ def __init__( tracebacks_theme: Optional[str] = None, tracebacks_word_wrap: bool = True, tracebacks_show_locals: bool = False, - tracebacks_suppress: Iterable[Union[str, ModuleType]] = (), locals_max_length: int = 10, locals_max_string: int = 80, log_time_format: Union[str, FormatTimeCallable] = "[%x %X]", - keywords: Optional[List[str]] = None, ) -> None: super().__init__(level=level) self.console = console or get_console() @@ -101,10 +96,8 @@ def __init__( self.tracebacks_theme = tracebacks_theme self.tracebacks_word_wrap = tracebacks_word_wrap self.tracebacks_show_locals = tracebacks_show_locals - self.tracebacks_suppress = tracebacks_suppress self.locals_max_length = locals_max_length self.locals_max_string = locals_max_string - self.keywords = keywords def get_level_text(self, record: LogRecord) -> Text: """Get the level name from the record. 
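The logging.py removals above mean the vendored RichHandler always highlights its class-level KEYWORDS and no longer accepts keywords or tracebacks_suppress. Day-to-day wiring is identical on both sides of the diff; a small sketch (the logger name and message are made up):

import logging

from pip._vendor.rich.logging import RichHandler

logging.basicConfig(
    level="INFO",
    format="%(message)s",
    datefmt="[%X]",
    handlers=[RichHandler()],  # rich-formatted log lines and tracebacks
)
logging.getLogger("demo").info("GET /health 200 OK")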
@@ -144,7 +137,6 @@ def emit(self, record: LogRecord) -> None: show_locals=self.tracebacks_show_locals, locals_max_length=self.locals_max_length, locals_max_string=self.locals_max_string, - suppress=self.tracebacks_suppress, ) message = record.getMessage() if self.formatter: @@ -179,12 +171,8 @@ def render_message(self, record: LogRecord, message: str) -> "ConsoleRenderable" if highlighter: message_text = highlighter(message_text) - if self.keywords is None: - self.keywords = self.KEYWORDS - - if self.keywords: - message_text.highlight_words(self.keywords, "logging.keyword") - + if self.KEYWORDS: + message_text.highlight_words(self.KEYWORDS, "logging.keyword") return message_text def render( diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/rich/markup.py b/venv/lib/python3.10/site-packages/pip/_vendor/rich/markup.py index fd80d8c..6195402 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/rich/markup.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/rich/markup.py @@ -1,20 +1,21 @@ -import re from ast import literal_eval from operator import attrgetter +import re from typing import Callable, Iterable, List, Match, NamedTuple, Optional, Tuple, Union -from ._emoji_replace import _emoji_replace -from .emoji import EmojiVariant from .errors import MarkupError from .style import Style from .text import Span, Text +from .emoji import EmojiVariant +from ._emoji_replace import _emoji_replace + RE_TAGS = re.compile( - r"""((\\*)\[([a-z#/@][^[]*?)])""", + r"""((\\*)\[([a-z#\/@].*?)\])""", re.VERBOSE, ) -RE_HANDLER = re.compile(r"^([\w.]*?)(\(.*?\))?$") +RE_HANDLER = re.compile(r"^([\w\.]*?)(\(.*?\))?$") class Tag(NamedTuple): @@ -46,8 +47,7 @@ def markup(self) -> str: def escape( - markup: str, - _escape: _EscapeSubMethod = re.compile(r"(\\*)(\[[a-z#/@][^[]*?])").sub, + markup: str, _escape: _EscapeSubMethod = re.compile(r"(\\*)(\[[a-z#\/@].*?\])").sub ) -> str: """Escapes text so that it won't be interpreted as markup. @@ -146,8 +146,6 @@ def pop_style(style_name: str) -> Tuple[int, Tag]: for position, plain_text, tag in _parse(markup): if plain_text is not None: - # Handle open brace escapes, where the brace is not part of a tag. - plain_text = plain_text.replace("\\[", "[") append(emoji_replace(plain_text) if emoji else plain_text) elif tag is not None: if tag.name.startswith("/"): # Closing tag @@ -235,8 +233,8 @@ def pop_style(style_name: str) -> Tuple[int, Tag]: ":warning-emoji: [bold red blink] DANGER![/]", ] - from pip._vendor.rich import print from pip._vendor.rich.table import Table + from pip._vendor.rich import print grid = Table("Markup", "Result", padding=(0, 1)) diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/rich/measure.py b/venv/lib/python3.10/site-packages/pip/_vendor/rich/measure.py index a508ffa..aea238d 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/rich/measure.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/rich/measure.py @@ -1,5 +1,5 @@ from operator import itemgetter -from typing import TYPE_CHECKING, Callable, NamedTuple, Optional, Sequence +from typing import Callable, Iterable, NamedTuple, Optional, TYPE_CHECKING from . 
import errors from .protocol import is_renderable, rich_cast @@ -96,9 +96,7 @@ def get( if _max_width < 1: return Measurement(0, 0) if isinstance(renderable, str): - renderable = console.render_str( - renderable, markup=options.markup, highlight=False - ) + renderable = console.render_str(renderable, markup=options.markup) renderable = rich_cast(renderable) if is_renderable(renderable): get_console_width: Optional[ @@ -125,7 +123,7 @@ def get( def measure_renderables( console: "Console", options: "ConsoleOptions", - renderables: Sequence["RenderableType"], + renderables: Iterable["RenderableType"], ) -> "Measurement": """Get a measurement that would fit a number of renderables. diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/rich/pager.py b/venv/lib/python3.10/site-packages/pip/_vendor/rich/pager.py index a3f7aa6..dbfb973 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/rich/pager.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/rich/pager.py @@ -1,5 +1,5 @@ from abc import ABC, abstractmethod -from typing import Any +from typing import Any, Callable class Pager(ABC): diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/rich/panel.py b/venv/lib/python3.10/site-packages/pip/_vendor/rich/panel.py index fc2807c..151fe5f 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/rich/panel.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/rich/panel.py @@ -1,13 +1,14 @@ -from typing import TYPE_CHECKING, Optional +from typing import Optional, TYPE_CHECKING + +from .box import Box, ROUNDED from .align import AlignMethod -from .box import ROUNDED, Box from .jupyter import JupyterMixin from .measure import Measurement, measure_renderables from .padding import Padding, PaddingDimensions -from .segment import Segment from .style import StyleType from .text import Text, TextType +from .segment import Segment if TYPE_CHECKING: from .console import Console, ConsoleOptions, RenderableType, RenderResult @@ -182,7 +183,7 @@ def __rich_console__( else: title_text.align(self.title_align, width - 4, character=box.top) yield Segment(box.top_left + box.top, border_style) - yield from console.render(title_text, child_options.update_width(width - 4)) + yield from console.render(title_text) yield Segment(box.top + box.top_right, border_style) yield new_line @@ -201,9 +202,7 @@ def __rich_console__( else: subtitle_text.align(self.subtitle_align, width - 4, character=box.bottom) yield Segment(box.bottom_left + box.bottom, border_style) - yield from console.render( - subtitle_text, child_options.update_width(width - 4) - ) + yield from console.render(subtitle_text) yield Segment(box.bottom + box.bottom_right, border_style) yield new_line @@ -236,8 +235,8 @@ def __rich_measure__( c = Console() - from .box import DOUBLE, ROUNDED from .padding import Padding + from .box import ROUNDED, DOUBLE p = Panel( "Hello, World!", diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/rich/pretty.py b/venv/lib/python3.10/site-packages/pip/_vendor/rich/pretty.py index 4a5ddaa..606ee33 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/rich/pretty.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/rich/pretty.py @@ -1,8 +1,8 @@ import builtins -import collections import dataclasses import inspect import os +import re import sys from array import array from collections import Counter, UserDict, UserList, defaultdict, deque @@ -19,7 +19,6 @@ Iterable, List, Optional, - Sequence, Set, Tuple, Union, @@ -29,10 +28,9 @@ try: import attr as _attr_module - - _has_attrs = True except 
ImportError: # pragma: no cover - _has_attrs = False + _attr_module = None # type: ignore + from . import get_console from ._loop import loop_last @@ -55,21 +53,14 @@ ) -JUPYTER_CLASSES_TO_NOT_RENDER = { - # Matplotlib "Artists" manage their own rendering in a Jupyter notebook, and we should not try to render them too. - # "Typically, all [Matplotlib] visible elements in a figure are subclasses of Artist." - "matplotlib.artist.Artist", -} - - def _is_attr_object(obj: Any) -> bool: """Check if an object was created with attrs module.""" - return _has_attrs and _attr_module.has(type(obj)) + return _attr_module is not None and _attr_module.has(type(obj)) -def _get_attr_fields(obj: Any) -> Sequence["_attr_module.Attribute[Any]"]: +def _get_attr_fields(obj: Any) -> Iterable["_attr_module.Attribute[Any]"]: """Get fields for an attrs object.""" - return _attr_module.fields(type(obj)) if _has_attrs else [] + return _attr_module.fields(type(obj)) if _attr_module is not None else [] def _is_dataclass_repr(obj: object) -> bool: @@ -89,29 +80,6 @@ def _is_dataclass_repr(obj: object) -> bool: return False -_dummy_namedtuple = collections.namedtuple("_dummy_namedtuple", []) - - -def _has_default_namedtuple_repr(obj: object) -> bool: - """Check if an instance of namedtuple contains the default repr - - Args: - obj (object): A namedtuple - - Returns: - bool: True if the default repr is used, False if there's a custom repr. - """ - obj_file = None - try: - obj_file = inspect.getfile(obj.__repr__) - except (OSError, TypeError): - # OSError handles case where object is defined in __main__ scope, e.g. REPL - no filename available. - # TypeError trapped defensively, in case of object without filename slips through. - pass - default_repr_file = inspect.getfile(_dummy_namedtuple.__repr__) - return obj_file == default_repr_file - - def _ipy_display_hook( value: Any, console: Optional["Console"] = None, @@ -122,12 +90,10 @@ def _ipy_display_hook( max_string: Optional[int] = None, expand_all: bool = False, ) -> None: - # needed here to prevent circular import: - from ._inspect import is_object_one_of_types - from .console import ConsoleRenderable + from .console import ConsoleRenderable # needed here to prevent circular import # always skip rich generated jupyter renderables or None values - if _safe_isinstance(value, JupyterRenderable) or value is None: + if isinstance(value, JupyterRenderable) or value is None: return console = console or get_console() @@ -157,20 +123,13 @@ def _ipy_display_hook( if repr_result is not None: return # Delegate rendering to IPython - # When in a Jupyter notebook let's avoid the display of some specific classes, - # as they result in the rendering of useless and noisy lines such as `
`. - # What does this do? - # --> if the class has "matplotlib.artist.Artist" in its hierarchy for example, we don't render it. - if is_object_one_of_types(value, JUPYTER_CLASSES_TO_NOT_RENDER): - return - # certain renderables should start on a new line - if _safe_isinstance(value, ConsoleRenderable): + if isinstance(value, ConsoleRenderable): console.line() console.print( value - if _safe_isinstance(value, RichRenderable) + if isinstance(value, RichRenderable) else Pretty( value, overflow=overflow, @@ -185,16 +144,6 @@ def _ipy_display_hook( ) -def _safe_isinstance( - obj: object, class_or_tuple: Union[type, Tuple[type, ...]] -) -> bool: - """isinstance can fail in rare cases, for example types with no __class__""" - try: - return isinstance(obj, class_or_tuple) - except Exception: - return False - - def install( console: Optional["Console"] = None, overflow: "OverflowMethod" = "ignore", @@ -226,10 +175,10 @@ def display_hook(value: Any) -> None: """Replacement sys.displayhook which prettifies objects with Rich.""" if value is not None: assert console is not None - builtins._ = None # type: ignore[attr-defined] + builtins._ = None # type: ignore console.print( value - if _safe_isinstance(value, RichRenderable) + if isinstance(value, RichRenderable) else Pretty( value, overflow=overflow, @@ -240,13 +189,13 @@ def display_hook(value: Any) -> None: ), crop=crop, ) - builtins._ = value # type: ignore[attr-defined] + builtins._ = value # type: ignore try: # pragma: no cover - ip = get_ipython() # type: ignore[name-defined] + ip = get_ipython() # type: ignore from IPython.core.formatters import BaseFormatter - class RichFormatter(BaseFormatter): # type: ignore[misc] + class RichFormatter(BaseFormatter): # type: ignore pprint: bool = True def __call__(self, value: Any) -> Any: @@ -365,7 +314,6 @@ def __rich_measure__( indent_size=self.indent_size, max_length=self.max_length, max_string=self.max_string, - expand_all=self.expand_all, ) text_width = ( max(cell_len(line) for line in pretty_str.splitlines()) if pretty_str else 0 @@ -382,7 +330,7 @@ def _get_braces_for_defaultdict(_object: DefaultDict[Any, Any]) -> Tuple[str, st def _get_braces_for_array(_object: "array[Any]") -> Tuple[str, str, str]: - return (f"array({_object.typecode!r}, [", "])", f"array({_object.typecode!r})") + return (f"array({_object.typecode!r}, [", "])", "array({_object.typecode!r})") _BRACES: Dict[type, Callable[[Any], Tuple[str, str, str]]] = { @@ -407,7 +355,7 @@ def _get_braces_for_array(_object: "array[Any]") -> Tuple[str, str, str]: def is_expandable(obj: Any) -> bool: """Check if an object may be expanded by pretty print.""" return ( - _safe_isinstance(obj, _CONTAINERS) + isinstance(obj, _CONTAINERS) or (is_dataclass(obj)) or (hasattr(obj, "__rich_repr__")) or _is_attr_object(obj) @@ -425,7 +373,6 @@ class Node: empty: str = "" last: bool = False is_tuple: bool = False - is_namedtuple: bool = False children: Optional[List["Node"]] = None key_separator = ": " separator: str = ", " @@ -440,7 +387,7 @@ def iter_tokens(self) -> Iterable[str]: elif self.children is not None: if self.children: yield self.open_brace - if self.is_tuple and not self.is_namedtuple and len(self.children) == 1: + if self.is_tuple and len(self.children) == 1: yield from self.children[0].iter_tokens() yield "," else: @@ -567,25 +514,6 @@ def __str__(self) -> str: ) -def _is_namedtuple(obj: Any) -> bool: - """Checks if an object is most likely a namedtuple. 
It is possible - to craft an object that passes this check and isn't a namedtuple, but - there is only a minuscule chance of this happening unintentionally. - - Args: - obj (Any): The object to test - - Returns: - bool: True if the object is a namedtuple. False otherwise. - """ - try: - fields = getattr(obj, "_fields", None) - except Exception: - # Being very defensive - if we cannot get the attr then its not a namedtuple - return False - return isinstance(obj, tuple) and isinstance(fields, tuple) - - def traverse( _object: Any, max_length: Optional[int] = None, @@ -611,7 +539,7 @@ def to_repr(obj: Any) -> str: """Get repr string for an object, but catch errors.""" if ( max_string is not None - and _safe_isinstance(obj, (bytes, str)) + and isinstance(obj, (bytes, str)) and len(obj) > max_string ): truncated = len(obj) - max_string @@ -637,7 +565,7 @@ def _traverse(obj: Any, root: bool = False, depth: int = 0) -> Node: def iter_rich_args(rich_args: Any) -> Iterable[Union[Any, Tuple[str, Any]]]: for arg in rich_args: - if _safe_isinstance(arg, tuple): + if isinstance(arg, tuple): if len(arg) == 3: key, child, default = arg if default == child: @@ -694,7 +622,7 @@ def iter_rich_args(rich_args: Any) -> Iterable[Union[Any, Tuple[str, Any]]]: last=root, ) for last, arg in loop_last(args): - if _safe_isinstance(arg, tuple): + if isinstance(arg, tuple): key, child = arg child_node = _traverse(child, depth=depth + 1) child_node.last = last @@ -761,7 +689,7 @@ def iter_attrs() -> Iterable[ elif ( is_dataclass(obj) - and not _safe_isinstance(obj, type) + and not isinstance(obj, type) and not fake_attributes and (_is_dataclass_repr(obj) or py_version == (3, 6)) ): @@ -793,28 +721,10 @@ def iter_attrs() -> Iterable[ append(child_node) pop_visited(obj_id) - elif _is_namedtuple(obj) and _has_default_namedtuple_repr(obj): - if reached_max_depth: - node = Node(value_repr="...") - else: - children = [] - class_name = obj.__class__.__name__ - node = Node( - open_brace=f"{class_name}(", - close_brace=")", - children=children, - empty=f"{class_name}()", - ) - append = children.append - for last, (key, value) in loop_last(obj._asdict().items()): - child_node = _traverse(value, depth=depth + 1) - child_node.key_repr = key - child_node.last = last - child_node.key_separator = "=" - append(child_node) - elif _safe_isinstance(obj, _CONTAINERS): + + elif isinstance(obj, _CONTAINERS): for container_type in _CONTAINERS: - if _safe_isinstance(obj, container_type): + if isinstance(obj, container_type): obj_type = container_type break @@ -842,7 +752,7 @@ def iter_attrs() -> Iterable[ num_items = len(obj) last_item_index = num_items - 1 - if _safe_isinstance(obj, _MAPPING_CONTAINERS): + if isinstance(obj, _MAPPING_CONTAINERS): iter_items = iter(obj.items()) if max_length is not None: iter_items = islice(iter_items, max_length) @@ -860,15 +770,14 @@ def iter_attrs() -> Iterable[ child_node.last = index == last_item_index append(child_node) if max_length is not None and num_items > max_length: - append(Node(value_repr=f"... +{num_items - max_length}", last=True)) + append(Node(value_repr=f"... 
+{num_items-max_length}", last=True)) else: node = Node(empty=empty, children=[], last=root) pop_visited(obj_id) else: node = Node(value_repr=to_repr(obj), last=root) - node.is_tuple = _safe_isinstance(obj, tuple) - node.is_namedtuple = _is_namedtuple(obj) + node.is_tuple = isinstance(obj, tuple) return node node = _traverse(_object, root=True) @@ -903,13 +812,13 @@ def pretty_repr( str: A possibly multi-line representation of the object. """ - if _safe_isinstance(_object, Node): + if isinstance(_object, Node): node = _object else: node = traverse( _object, max_length=max_length, max_string=max_string, max_depth=max_depth ) - repr_str: str = node.render( + repr_str = node.render( max_width=max_width, indent_size=indent_size, expand_all=expand_all ) return repr_str @@ -959,15 +868,6 @@ def __repr__(self) -> str: 1 / 0 return "this will fail" - from typing import NamedTuple - - class StockKeepingUnit(NamedTuple): - name: str - description: str - price: float - category: str - reviews: List[str] - d = defaultdict(int) d["foo"] = 5 data = { @@ -994,16 +894,9 @@ class StockKeepingUnit(NamedTuple): ] ), "atomic": (False, True, None), - "namedtuple": StockKeepingUnit( - "Sparkling British Spring Water", - "Carbonated spring water", - 0.9, - "water", - ["its amazing!", "its terrible!"], - ), "Broken": BrokenRepr(), } - data["foo"].append(data) # type: ignore[attr-defined] + data["foo"].append(data) # type: ignore from pip._vendor.rich import print diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/rich/progress.py b/venv/lib/python3.10/site-packages/pip/_vendor/rich/progress.py index 92cfa80..1f670db 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/rich/progress.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/rich/progress.py @@ -1,46 +1,30 @@ -import io -import sys -import typing -import warnings from abc import ABC, abstractmethod from collections import deque from collections.abc import Sized from dataclasses import dataclass, field from datetime import timedelta -from io import RawIOBase, UnsupportedOperation from math import ceil -from mmap import mmap -from os import PathLike, stat from threading import Event, RLock, Thread from types import TracebackType from typing import ( Any, - BinaryIO, Callable, - ContextManager, Deque, Dict, - Generic, Iterable, List, NamedTuple, NewType, Optional, Sequence, - TextIO, Tuple, Type, TypeVar, Union, ) -if sys.version_info >= (3, 8): - from typing import Literal -else: - from pip._vendor.typing_extensions import Literal # pragma: no cover - from . import filesize, get_console -from .console import Console, Group, JustifyMethod, RenderableType +from .console import Console, JustifyMethod, RenderableType, Group from .highlighter import Highlighter from .jupyter import JupyterMixin from .live import Live @@ -57,9 +41,6 @@ GetTimeCallable = Callable[[], float] -_I = typing.TypeVar("_I", TextIO, BinaryIO) - - class _TrackThread(Thread): """A thread to periodically update progress.""" @@ -115,7 +96,6 @@ def track( pulse_style: StyleType = "bar.pulse", update_period: float = 0.1, disable: bool = False, - show_speed: bool = True, ) -> Iterable[ProgressType]: """Track progress by iterating over a sequence. @@ -133,7 +113,6 @@ def track( pulse_style (StyleType, optional): Style for pulsing bars. Defaults to "bar.pulse". update_period (float, optional): Minimum time (in seconds) between calls to update(). Defaults to 0.1. disable (bool, optional): Disable display of progress. - show_speed (bool, optional): Show speed if total isn't known. 
Defaults to True. Returns: Iterable[ProgressType]: An iterable of the values in the sequence. @@ -150,7 +129,7 @@ def track( finished_style=finished_style, pulse_style=pulse_style, ), - TaskProgressColumn(show_speed=show_speed), + TextColumn("[progress.percentage]{task.percentage:>3.0f}%"), TimeRemainingColumn(), ) ) @@ -170,324 +149,6 @@ def track( ) -class _Reader(RawIOBase, BinaryIO): - """A reader that tracks progress while it's being read from.""" - - def __init__( - self, - handle: BinaryIO, - progress: "Progress", - task: TaskID, - close_handle: bool = True, - ) -> None: - self.handle = handle - self.progress = progress - self.task = task - self.close_handle = close_handle - self._closed = False - - def __enter__(self) -> "_Reader": - self.handle.__enter__() - return self - - def __exit__( - self, - exc_type: Optional[Type[BaseException]], - exc_val: Optional[BaseException], - exc_tb: Optional[TracebackType], - ) -> None: - self.close() - - def __iter__(self) -> BinaryIO: - return self - - def __next__(self) -> bytes: - line = next(self.handle) - self.progress.advance(self.task, advance=len(line)) - return line - - @property - def closed(self) -> bool: - return self._closed - - def fileno(self) -> int: - return self.handle.fileno() - - def isatty(self) -> bool: - return self.handle.isatty() - - @property - def name(self) -> str: - return self.handle.name - - def readable(self) -> bool: - return self.handle.readable() - - def seekable(self) -> bool: - return self.handle.seekable() - - def writable(self) -> bool: - return False - - def read(self, size: int = -1) -> bytes: - block = self.handle.read(size) - self.progress.advance(self.task, advance=len(block)) - return block - - def readinto(self, b: Union[bytearray, memoryview, mmap]): # type: ignore[no-untyped-def, override] - n = self.handle.readinto(b) # type: ignore[attr-defined] - self.progress.advance(self.task, advance=n) - return n - - def readline(self, size: int = -1) -> bytes: # type: ignore[override] - line = self.handle.readline(size) - self.progress.advance(self.task, advance=len(line)) - return line - - def readlines(self, hint: int = -1) -> List[bytes]: - lines = self.handle.readlines(hint) - self.progress.advance(self.task, advance=sum(map(len, lines))) - return lines - - def close(self) -> None: - if self.close_handle: - self.handle.close() - self._closed = True - - def seek(self, offset: int, whence: int = 0) -> int: - pos = self.handle.seek(offset, whence) - self.progress.update(self.task, completed=pos) - return pos - - def tell(self) -> int: - return self.handle.tell() - - def write(self, s: Any) -> int: - raise UnsupportedOperation("write") - - -class _ReadContext(ContextManager[_I], Generic[_I]): - """A utility class to handle a context for both a reader and a progress.""" - - def __init__(self, progress: "Progress", reader: _I) -> None: - self.progress = progress - self.reader: _I = reader - - def __enter__(self) -> _I: - self.progress.start() - return self.reader.__enter__() - - def __exit__( - self, - exc_type: Optional[Type[BaseException]], - exc_val: Optional[BaseException], - exc_tb: Optional[TracebackType], - ) -> None: - self.progress.stop() - self.reader.__exit__(exc_type, exc_val, exc_tb) - - -def wrap_file( - file: BinaryIO, - total: int, - *, - description: str = "Reading...", - auto_refresh: bool = True, - console: Optional[Console] = None, - transient: bool = False, - get_time: Optional[Callable[[], float]] = None, - refresh_per_second: float = 10, - style: StyleType = "bar.back", - complete_style: 
StyleType = "bar.complete", - finished_style: StyleType = "bar.finished", - pulse_style: StyleType = "bar.pulse", - disable: bool = False, -) -> ContextManager[BinaryIO]: - """Read bytes from a file while tracking progress. - - Args: - file (Union[str, PathLike[str], BinaryIO]): The path to the file to read, or a file-like object in binary mode. - total (int): Total number of bytes to read. - description (str, optional): Description of task show next to progress bar. Defaults to "Reading". - auto_refresh (bool, optional): Automatic refresh, disable to force a refresh after each iteration. Default is True. - transient: (bool, optional): Clear the progress on exit. Defaults to False. - console (Console, optional): Console to write to. Default creates internal Console instance. - refresh_per_second (float): Number of times per second to refresh the progress information. Defaults to 10. - style (StyleType, optional): Style for the bar background. Defaults to "bar.back". - complete_style (StyleType, optional): Style for the completed bar. Defaults to "bar.complete". - finished_style (StyleType, optional): Style for a finished bar. Defaults to "bar.done". - pulse_style (StyleType, optional): Style for pulsing bars. Defaults to "bar.pulse". - disable (bool, optional): Disable display of progress. - Returns: - ContextManager[BinaryIO]: A context manager yielding a progress reader. - - """ - - columns: List["ProgressColumn"] = ( - [TextColumn("[progress.description]{task.description}")] if description else [] - ) - columns.extend( - ( - BarColumn( - style=style, - complete_style=complete_style, - finished_style=finished_style, - pulse_style=pulse_style, - ), - DownloadColumn(), - TimeRemainingColumn(), - ) - ) - progress = Progress( - *columns, - auto_refresh=auto_refresh, - console=console, - transient=transient, - get_time=get_time, - refresh_per_second=refresh_per_second or 10, - disable=disable, - ) - - reader = progress.wrap_file(file, total=total, description=description) - return _ReadContext(progress, reader) - - -@typing.overload -def open( - file: Union[str, "PathLike[str]", bytes], - mode: Union[Literal["rt"], Literal["r"]], - buffering: int = -1, - encoding: Optional[str] = None, - errors: Optional[str] = None, - newline: Optional[str] = None, - *, - total: Optional[int] = None, - description: str = "Reading...", - auto_refresh: bool = True, - console: Optional[Console] = None, - transient: bool = False, - get_time: Optional[Callable[[], float]] = None, - refresh_per_second: float = 10, - style: StyleType = "bar.back", - complete_style: StyleType = "bar.complete", - finished_style: StyleType = "bar.finished", - pulse_style: StyleType = "bar.pulse", - disable: bool = False, -) -> ContextManager[TextIO]: - pass - - -@typing.overload -def open( - file: Union[str, "PathLike[str]", bytes], - mode: Literal["rb"], - buffering: int = -1, - encoding: Optional[str] = None, - errors: Optional[str] = None, - newline: Optional[str] = None, - *, - total: Optional[int] = None, - description: str = "Reading...", - auto_refresh: bool = True, - console: Optional[Console] = None, - transient: bool = False, - get_time: Optional[Callable[[], float]] = None, - refresh_per_second: float = 10, - style: StyleType = "bar.back", - complete_style: StyleType = "bar.complete", - finished_style: StyleType = "bar.finished", - pulse_style: StyleType = "bar.pulse", - disable: bool = False, -) -> ContextManager[BinaryIO]: - pass - - -def open( - file: Union[str, "PathLike[str]", bytes], - mode: Union[Literal["rb"], 
Literal["rt"], Literal["r"]] = "r", - buffering: int = -1, - encoding: Optional[str] = None, - errors: Optional[str] = None, - newline: Optional[str] = None, - *, - total: Optional[int] = None, - description: str = "Reading...", - auto_refresh: bool = True, - console: Optional[Console] = None, - transient: bool = False, - get_time: Optional[Callable[[], float]] = None, - refresh_per_second: float = 10, - style: StyleType = "bar.back", - complete_style: StyleType = "bar.complete", - finished_style: StyleType = "bar.finished", - pulse_style: StyleType = "bar.pulse", - disable: bool = False, -) -> Union[ContextManager[BinaryIO], ContextManager[TextIO]]: - """Read bytes from a file while tracking progress. - - Args: - path (Union[str, PathLike[str], BinaryIO]): The path to the file to read, or a file-like object in binary mode. - mode (str): The mode to use to open the file. Only supports "r", "rb" or "rt". - buffering (int): The buffering strategy to use, see :func:`io.open`. - encoding (str, optional): The encoding to use when reading in text mode, see :func:`io.open`. - errors (str, optional): The error handling strategy for decoding errors, see :func:`io.open`. - newline (str, optional): The strategy for handling newlines in text mode, see :func:`io.open` - total: (int, optional): Total number of bytes to read. Must be provided if reading from a file handle. Default for a path is os.stat(file).st_size. - description (str, optional): Description of task show next to progress bar. Defaults to "Reading". - auto_refresh (bool, optional): Automatic refresh, disable to force a refresh after each iteration. Default is True. - transient: (bool, optional): Clear the progress on exit. Defaults to False. - console (Console, optional): Console to write to. Default creates internal Console instance. - refresh_per_second (float): Number of times per second to refresh the progress information. Defaults to 10. - style (StyleType, optional): Style for the bar background. Defaults to "bar.back". - complete_style (StyleType, optional): Style for the completed bar. Defaults to "bar.complete". - finished_style (StyleType, optional): Style for a finished bar. Defaults to "bar.done". - pulse_style (StyleType, optional): Style for pulsing bars. Defaults to "bar.pulse". - disable (bool, optional): Disable display of progress. - encoding (str, optional): The encoding to use when reading in text mode. - - Returns: - ContextManager[BinaryIO]: A context manager yielding a progress reader. 
- - """ - - columns: List["ProgressColumn"] = ( - [TextColumn("[progress.description]{task.description}")] if description else [] - ) - columns.extend( - ( - BarColumn( - style=style, - complete_style=complete_style, - finished_style=finished_style, - pulse_style=pulse_style, - ), - DownloadColumn(), - TimeRemainingColumn(), - ) - ) - progress = Progress( - *columns, - auto_refresh=auto_refresh, - console=console, - transient=transient, - get_time=get_time, - refresh_per_second=refresh_per_second or 10, - disable=disable, - ) - - reader = progress.open( - file, - mode=mode, - buffering=buffering, - encoding=encoding, - errors=errors, - newline=newline, - total=total, - description=description, - ) - return _ReadContext(progress, reader) # type: ignore[return-value, type-var] - - class ProgressColumn(ABC): """Base class for a widget to use in progress display.""" @@ -657,7 +318,7 @@ def __init__( def render(self, task: "Task") -> ProgressBar: """Gets a progress bar widget for a task.""" return ProgressBar( - total=max(0, task.total) if task.total is not None else None, + total=max(0, task.total), completed=max(0, task.completed), width=None if self.bar_width is None else max(1, self.bar_width), pulse=not task.started, @@ -681,125 +342,19 @@ def render(self, task: "Task") -> Text: return Text(str(delta), style="progress.elapsed") -class TaskProgressColumn(TextColumn): - """Show task progress as a percentage. - - Args: - text_format (str, optional): Format for percentage display. Defaults to "[progress.percentage]{task.percentage:>3.0f}%". - text_format_no_percentage (str, optional): Format if percentage is unknown. Defaults to "". - style (StyleType, optional): Style of output. Defaults to "none". - justify (JustifyMethod, optional): Text justification. Defaults to "left". - markup (bool, optional): Enable markup. Defaults to True. - highlighter (Optional[Highlighter], optional): Highlighter to apply to output. Defaults to None. - table_column (Optional[Column], optional): Table Column to use. Defaults to None. - show_speed (bool, optional): Show speed if total is unknown. Defaults to False. - """ - - def __init__( - self, - text_format: str = "[progress.percentage]{task.percentage:>3.0f}%", - text_format_no_percentage: str = "", - style: StyleType = "none", - justify: JustifyMethod = "left", - markup: bool = True, - highlighter: Optional[Highlighter] = None, - table_column: Optional[Column] = None, - show_speed: bool = False, - ) -> None: - - self.text_format_no_percentage = text_format_no_percentage - self.show_speed = show_speed - super().__init__( - text_format=text_format, - style=style, - justify=justify, - markup=markup, - highlighter=highlighter, - table_column=table_column, - ) - - @classmethod - def render_speed(cls, speed: Optional[float]) -> Text: - """Render the speed in iterations per second. - - Args: - task (Task): A Task object. - - Returns: - Text: Text object containing the task speed. 
- """ - if speed is None: - return Text("", style="progress.percentage") - unit, suffix = filesize.pick_unit_and_suffix( - int(speed), - ["", "×10³", "×10⁶", "×10⁹", "×10¹²"], - 1000, - ) - data_speed = speed / unit - return Text(f"{data_speed:.1f}{suffix} it/s", style="progress.percentage") - - def render(self, task: "Task") -> Text: - if task.total is None and self.show_speed: - return self.render_speed(task.finished_speed or task.speed) - text_format = ( - self.text_format_no_percentage if task.total is None else self.text_format - ) - _text = text_format.format(task=task) - if self.markup: - text = Text.from_markup(_text, style=self.style, justify=self.justify) - else: - text = Text(_text, style=self.style, justify=self.justify) - if self.highlighter: - self.highlighter.highlight(text) - return text - - class TimeRemainingColumn(ProgressColumn): - """Renders estimated time remaining. - - Args: - compact (bool, optional): Render MM:SS when time remaining is less than an hour. Defaults to False. - elapsed_when_finished (bool, optional): Render time elapsed when the task is finished. Defaults to False. - """ + """Renders estimated time remaining.""" # Only refresh twice a second to prevent jitter max_refresh = 0.5 - def __init__( - self, - compact: bool = False, - elapsed_when_finished: bool = False, - table_column: Optional[Column] = None, - ): - self.compact = compact - self.elapsed_when_finished = elapsed_when_finished - super().__init__(table_column=table_column) - def render(self, task: "Task") -> Text: """Show time remaining.""" - if self.elapsed_when_finished and task.finished: - task_time = task.finished_time - style = "progress.elapsed" - else: - task_time = task.time_remaining - style = "progress.remaining" - - if task.total is None: - return Text("", style=style) - - if task_time is None: - return Text("--:--" if self.compact else "-:--:--", style=style) - - # Based on https://github.com/tqdm/tqdm/blob/master/tqdm/std.py - minutes, seconds = divmod(int(task_time), 60) - hours, minutes = divmod(minutes, 60) - - if self.compact and not hours: - formatted = f"{minutes:02d}:{seconds:02d}" - else: - formatted = f"{hours:d}:{minutes:02d}:{seconds:02d}" - - return Text(formatted, style=style) + remaining = task.time_remaining + if remaining is None: + return Text("-:--:--", style="progress.remaining") + remaining_delta = timedelta(seconds=int(remaining)) + return Text(str(remaining_delta), style="progress.remaining") class FileSizeColumn(ProgressColumn): @@ -816,37 +371,10 @@ class TotalFileSizeColumn(ProgressColumn): def render(self, task: "Task") -> Text: """Show data completed.""" - data_size = filesize.decimal(int(task.total)) if task.total is not None else "" + data_size = filesize.decimal(int(task.total)) return Text(data_size, style="progress.filesize.total") -class MofNCompleteColumn(ProgressColumn): - """Renders completed count/total, e.g. ' 10/1000'. - - Best for bounded tasks with int quantities. - - Space pads the completed count so that progress length does not change as task progresses - past powers of 10. - - Args: - separator (str, optional): Text to separate completed and total values. Defaults to "/". - """ - - def __init__(self, separator: str = "/", table_column: Optional[Column] = None): - self.separator = separator - super().__init__(table_column=table_column) - - def render(self, task: "Task") -> Text: - """Show completed/total.""" - completed = int(task.completed) - total = int(task.total) if task.total is not None else "?" 
- total_width = len(str(total)) - return Text( - f"{completed:{total_width}d}{self.separator}{total}", - style="progress.download", - ) - - class DownloadColumn(ProgressColumn): """Renders file size downloaded and total, e.g. '0.5/2.3 GB'. @@ -863,34 +391,22 @@ def __init__( def render(self, task: "Task") -> Text: """Calculate common unit for completed and total.""" completed = int(task.completed) - - unit_and_suffix_calculation_base = ( - int(task.total) if task.total is not None else completed - ) + total = int(task.total) if self.binary_units: unit, suffix = filesize.pick_unit_and_suffix( - unit_and_suffix_calculation_base, + total, ["bytes", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"], 1024, ) else: unit, suffix = filesize.pick_unit_and_suffix( - unit_and_suffix_calculation_base, - ["bytes", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"], - 1000, + total, ["bytes", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"], 1000 ) - precision = 0 if unit == 1 else 1 - completed_ratio = completed / unit + total_ratio = total / unit + precision = 0 if unit == 1 else 1 completed_str = f"{completed_ratio:,.{precision}f}" - - if task.total is not None: - total = int(task.total) - total_ratio = total / unit - total_str = f"{total_ratio:,.{precision}f}" - else: - total_str = "?" - + total_str = f"{total_ratio:,.{precision}f}" download_status = f"{completed_str}/{total_str} {suffix}" download_text = Text(download_status, style="progress.download") return download_text @@ -931,8 +447,8 @@ class Task: description: str """str: Description of the task.""" - total: Optional[float] - """Optional[float]: Total number of steps in this task.""" + total: float + """str: Total number of steps in this task.""" completed: float """float: Number of steps completed""" @@ -959,7 +475,7 @@ class Task: """Optional[float]: The last speed for a finished task.""" _progress: Deque[ProgressSample] = field( - default_factory=lambda: deque(maxlen=1000), init=False, repr=False + default_factory=deque, init=False, repr=False ) _lock: RLock = field(repr=False, default_factory=RLock) @@ -975,10 +491,8 @@ def started(self) -> bool: return self.start_time is not None @property - def remaining(self) -> Optional[float]: - """Optional[float]: Get the number of steps remaining, if a non-None total was set.""" - if self.total is None: - return None + def remaining(self) -> float: + """float: Get the number of steps remaining.""" return self.total - self.completed @property @@ -997,7 +511,7 @@ def finished(self) -> bool: @property def percentage(self) -> float: - """float: Get progress of task as a percentage. 
If a None total was set, returns 0""" + """float: Get progress of task as a percentage.""" if not self.total: return 0.0 completed = (self.completed / self.total) * 100.0 @@ -1030,10 +544,7 @@ def time_remaining(self) -> Optional[float]: speed = self.speed if not speed: return None - remaining = self.remaining - if remaining is None: - return None - estimate = ceil(remaining / speed) + estimate = ceil(self.remaining / speed) return estimate def _reset(self) -> None: @@ -1073,9 +584,16 @@ def __init__( disable: bool = False, expand: bool = False, ) -> None: - assert refresh_per_second > 0, "refresh_per_second must be > 0" + assert ( + refresh_per_second is None or refresh_per_second > 0 + ), "refresh_per_second must be > 0" self._lock = RLock() - self.columns = columns or self.get_default_columns() + self.columns = columns or ( + TextColumn("[progress.description]{task.description}"), + BarColumn(), + TextColumn("[progress.percentage]{task.percentage:>3.0f}%"), + TimeRemainingColumn(), + ) self.speed_estimate_period = speed_estimate_period self.disable = disable @@ -1095,37 +613,6 @@ def __init__( self.print = self.console.print self.log = self.console.log - @classmethod - def get_default_columns(cls) -> Tuple[ProgressColumn, ...]: - """Get the default columns used for a new Progress instance: - - a text column for the description (TextColumn) - - the bar itself (BarColumn) - - a text column showing completion percentage (TextColumn) - - an estimated-time-remaining column (TimeRemainingColumn) - If the Progress instance is created without passing a columns argument, - the default columns defined here will be used. - - You can also create a Progress instance using custom columns before - and/or after the defaults, as in this example: - - progress = Progress( - SpinnerColumn(), - *Progress.default_columns(), - "Elapsed:", - TimeElapsedColumn(), - ) - - This code shows the creation of a Progress display, containing - a spinner to the left, the default columns, and a labeled elapsed - time column. - """ - return ( - TextColumn("[progress.description]{task.description}"), - BarColumn(), - TaskProgressColumn(), - TimeRemainingColumn(), - ) - @property def console(self) -> Console: return self.live.console @@ -1194,10 +681,13 @@ def track( Iterable[ProgressType]: An iterable of values taken from the provided sequence. """ - task_total: Optional[float] = None if total is None: if isinstance(sequence, Sized): task_total = float(len(sequence)) + else: + raise ValueError( + f"unable to get size of {sequence!r}, please specify 'total'" + ) else: task_total = total @@ -1219,157 +709,6 @@ def track( advance(task_id, 1) refresh() - def wrap_file( - self, - file: BinaryIO, - total: Optional[int] = None, - *, - task_id: Optional[TaskID] = None, - description: str = "Reading...", - ) -> BinaryIO: - """Track progress file reading from a binary file. - - Args: - file (BinaryIO): A file-like object opened in binary mode. - total (int, optional): Total number of bytes to read. This must be provided unless a task with a total is also given. - task_id (TaskID): Task to track. Default is new task. - description (str, optional): Description of task, if new task is created. - - Returns: - BinaryIO: A readable file-like object in binary mode. - - Raises: - ValueError: When no total value can be extracted from the arguments or the task. 
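The Task changes in this region share one theme: with total narrowed back to a plain float, remaining, percentage, and time_remaining may assume a known endpoint instead of guarding against None. The estimate itself is plain rate arithmetic; the sample numbers below are made up:

from math import ceil

completed, total, speed = 300.0, 1000.0, 25.0  # steps done, steps total, steps/sec
remaining = total - completed                  # 700.0
percentage = (completed / total) * 100.0       # 30.0
eta_seconds = ceil(remaining / speed)          # 28

which is exactly the computation Task.time_remaining performs once a speed has been estimated from the progress sample deque.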
- """ - # attempt to recover the total from the task - total_bytes: Optional[float] = None - if total is not None: - total_bytes = total - elif task_id is not None: - with self._lock: - total_bytes = self._tasks[task_id].total - if total_bytes is None: - raise ValueError( - f"unable to get the total number of bytes, please specify 'total'" - ) - - # update total of task or create new task - if task_id is None: - task_id = self.add_task(description, total=total_bytes) - else: - self.update(task_id, total=total_bytes) - - return _Reader(file, self, task_id, close_handle=False) - - @typing.overload - def open( - self, - file: Union[str, "PathLike[str]", bytes], - mode: Literal["rb"], - buffering: int = -1, - encoding: Optional[str] = None, - errors: Optional[str] = None, - newline: Optional[str] = None, - *, - total: Optional[int] = None, - task_id: Optional[TaskID] = None, - description: str = "Reading...", - ) -> BinaryIO: - pass - - @typing.overload - def open( - self, - file: Union[str, "PathLike[str]", bytes], - mode: Union[Literal["r"], Literal["rt"]], - buffering: int = -1, - encoding: Optional[str] = None, - errors: Optional[str] = None, - newline: Optional[str] = None, - *, - total: Optional[int] = None, - task_id: Optional[TaskID] = None, - description: str = "Reading...", - ) -> TextIO: - pass - - def open( - self, - file: Union[str, "PathLike[str]", bytes], - mode: Union[Literal["rb"], Literal["rt"], Literal["r"]] = "r", - buffering: int = -1, - encoding: Optional[str] = None, - errors: Optional[str] = None, - newline: Optional[str] = None, - *, - total: Optional[int] = None, - task_id: Optional[TaskID] = None, - description: str = "Reading...", - ) -> Union[BinaryIO, TextIO]: - """Track progress while reading from a binary file. - - Args: - path (Union[str, PathLike[str]]): The path to the file to read. - mode (str): The mode to use to open the file. Only supports "r", "rb" or "rt". - buffering (int): The buffering strategy to use, see :func:`io.open`. - encoding (str, optional): The encoding to use when reading in text mode, see :func:`io.open`. - errors (str, optional): The error handling strategy for decoding errors, see :func:`io.open`. - newline (str, optional): The strategy for handling newlines in text mode, see :func:`io.open`. - total (int, optional): Total number of bytes to read. If none given, os.stat(path).st_size is used. - task_id (TaskID): Task to track. Default is new task. - description (str, optional): Description of task, if new task is created. - - Returns: - BinaryIO: A readable file-like object in binary mode. - - Raises: - ValueError: When an invalid mode is given. 
- """ - # normalize the mode (always rb, rt) - _mode = "".join(sorted(mode, reverse=False)) - if _mode not in ("br", "rt", "r"): - raise ValueError("invalid mode {!r}".format(mode)) - - # patch buffering to provide the same behaviour as the builtin `open` - line_buffering = buffering == 1 - if _mode == "br" and buffering == 1: - warnings.warn( - "line buffering (buffering=1) isn't supported in binary mode, the default buffer size will be used", - RuntimeWarning, - ) - buffering = -1 - elif _mode == "rt" or _mode == "r": - if buffering == 0: - raise ValueError("can't have unbuffered text I/O") - elif buffering == 1: - buffering = -1 - - # attempt to get the total with `os.stat` - if total is None: - total = stat(file).st_size - - # update total of task or create new task - if task_id is None: - task_id = self.add_task(description, total=total) - else: - self.update(task_id, total=total) - - # open the file in binary mode, - handle = io.open(file, "rb", buffering=buffering) - reader = _Reader(handle, self, task_id, close_handle=True) - - # wrap the reader in a `TextIOWrapper` if text mode - if mode == "r" or mode == "rt": - return io.TextIOWrapper( - reader, - encoding=encoding, - errors=errors, - newline=newline, - line_buffering=line_buffering, - ) - - return reader - def start_task(self, task_id: TaskID) -> None: """Start a task. @@ -1448,13 +787,11 @@ def update( popleft = _progress.popleft while _progress and _progress[0].timestamp < old_sample_time: popleft() + while len(_progress) > 1000: + popleft() if update_completed > 0: _progress.append(ProgressSample(current_time, update_completed)) - if ( - task.total is not None - and task.completed >= task.total - and task.finished_time is None - ): + if task.completed >= task.total and task.finished_time is None: task.finished_time = task.elapsed if refresh: @@ -1478,8 +815,6 @@ def reset( start (bool, optional): Start the task after reset. Defaults to True. total (float, optional): New total steps in task, or None to use current total. Defaults to None. completed (int, optional): Number of steps completed. Defaults to 0. - visible (bool, optional): Enable display of the task. Defaults to True. - description (str, optional): Change task description if not None. Defaults to None. **fields (str): Additional data fields required for rendering. """ current_time = self.get_time() @@ -1521,11 +856,7 @@ def advance(self, task_id: TaskID, advance: float = 1) -> None: while len(_progress) > 1000: popleft() _progress.append(ProgressSample(current_time, update_completed)) - if ( - task.total is not None - and task.completed >= task.total - and task.finished_time is None - ): + if task.completed >= task.total and task.finished_time is None: task.finished_time = task.elapsed task.finished_speed = task.speed @@ -1586,7 +917,7 @@ def add_task( self, description: str, start: bool = True, - total: Optional[float] = 100.0, + total: float = 100.0, completed: int = 0, visible: bool = True, **fields: Any, @@ -1597,9 +928,8 @@ def add_task( description (str): A description of the task. start (bool, optional): Start the task immediately (to calculate elapsed time). If set to False, you will need to call `start` manually. Defaults to True. - total (float, optional): Number of total steps in the progress if known. - Set to None to render a pulsing animation. Defaults to 100. - completed (int, optional): Number of steps completed so far. Defaults to 0. + total (float, optional): Number of total steps in the progress if know. Defaults to 100. 
+ completed (int, optional): Number of steps completed so far. Defaults to 0. visible (bool, optional): Enable display of the task. Defaults to True. **fields (str): Additional data fields required for rendering. @@ -1685,15 +1015,18 @@ def remove_task(self, task_id: TaskID) -> None: with Progress( SpinnerColumn(), - *Progress.get_default_columns(), + TextColumn("[progress.description]{task.description}"), + BarColumn(), + TextColumn("[progress.percentage]{task.percentage:>3.0f}%"), + TimeRemainingColumn(), TimeElapsedColumn(), console=console, - transient=False, + transient=True, ) as progress: task1 = progress.add_task("[red]Downloading", total=1000) task2 = progress.add_task("[green]Processing", total=1000) - task3 = progress.add_task("[yellow]Thinking", total=None) + task3 = progress.add_task("[yellow]Thinking", total=1000, start=False) while not progress.finished: progress.update(task1, advance=0.5) diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/rich/progress_bar.py b/venv/lib/python3.10/site-packages/pip/_vendor/rich/progress_bar.py index 9c3a4f2..1797b5f 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/rich/progress_bar.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/rich/progress_bar.py @@ -19,10 +19,10 @@ class ProgressBar(JupyterMixin): """Renders a (progress) bar. Used by rich.progress. Args: - total (float, optional): Number of steps in the bar. Defaults to 100. Set to None to render a pulsing animation. + total (float, optional): Number of steps in the bar. Defaults to 100. completed (float, optional): Number of steps completed. Defaults to 0. width (int, optional): Width of the bar, or ``None`` for maximum width. Defaults to None. - pulse (bool, optional): Enable pulse effect. Defaults to False. Will pulse if a None total was passed. + pulse (bool, optional): Enable pulse effect. Defaults to False. style (StyleType, optional): Style for the bar background. Defaults to "bar.back". complete_style (StyleType, optional): Style for the completed bar. Defaults to "bar.complete". finished_style (StyleType, optional): Style for a finished bar. Defaults to "bar.done".
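For the ProgressBar renderable documented above, a short sketch rendering a bar directly, outside of a full Progress display:

from pip._vendor.rich.console import Console
from pip._vendor.rich.progress_bar import ProgressBar

console = Console()
# A bare 40-cell bar showing 30 of 100 steps complete.
console.print(ProgressBar(total=100, completed=30, width=40))
# pulse=True selects the animated indeterminate rendering instead.
console.print(ProgressBar(total=100, completed=0, width=40, pulse=True))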
@@ -32,7 +32,7 @@ class ProgressBar(JupyterMixin): def __init__( self, - total: Optional[float] = 100.0, + total: float = 100.0, completed: float = 0, width: Optional[int] = None, pulse: bool = False, @@ -58,10 +58,8 @@ def __repr__(self) -> str: return f"<Bar {self.completed!r} of {self.total!r}>" @property - def percentage_completed(self) -> Optional[float]: + def percentage_completed(self) -> float: """Calculate percentage complete.""" - if self.total is None: - return None completed = (self.completed / self.total) * 100.0 completed = min(100, max(0.0, completed)) return completed @@ -159,29 +157,23 @@ def __rich_console__( width = min(self.width or options.max_width, options.max_width) ascii = options.legacy_windows or options.ascii_only - should_pulse = self.pulse or self.total is None - if should_pulse: + if self.pulse: yield from self._render_pulse(console, width, ascii=ascii) return - completed: Optional[float] = ( - min(self.total, max(0, self.completed)) if self.total is not None else None - ) + completed = min(self.total, max(0, self.completed)) bar = "-" if ascii else "━" half_bar_right = " " if ascii else "╸" half_bar_left = " " if ascii else "╺" complete_halves = ( - int(width * 2 * completed / self.total) - if self.total and completed is not None - else width * 2 + int(width * 2 * completed / self.total) if self.total else width * 2 ) bar_count = complete_halves // 2 half_bar_count = complete_halves % 2 style = console.get_style(self.style) - is_finished = self.total is None or self.completed >= self.total complete_style = console.get_style( - self.finished_style if is_finished else self.complete_style + self.complete_style if self.completed < self.total else self.finished_style ) _Segment = Segment if bar_count: diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/rich/prompt.py b/venv/lib/python3.10/site-packages/pip/_vendor/rich/prompt.py index 2bd0a77..b2cea2b 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/rich/prompt.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/rich/prompt.py @@ -228,14 +228,14 @@ def process_response(self, value: str) -> PromptType: """ value = value.strip() try: - return_value: PromptType = self.response_type(value) + return_value = self.response_type(value) except ValueError: raise InvalidResponse(self.validate_error_message) if self.choices is not None and not self.check_choice(value): raise InvalidResponse(self.illegal_choice_message) - return return_value + return return_value # type: ignore def on_validate_error(self, value: str, error: InvalidResponse) -> None: """Called to handle validation error.
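process_response(), patched above, is the hook that converts and validates raw input; the typed prompt subclasses rely on it, as in this sketch:

from pip._vendor.rich.prompt import IntPrompt, Prompt

# IntPrompt.process_response() runs int(value) and re-asks on ValueError.
age = IntPrompt.ask("Enter your age")
# With choices set, check_choice() rejects input outside the list.
color = Prompt.ask("Pick a color", choices=["red", "green", "blue"], default="red")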
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/rich/protocol.py b/venv/lib/python3.10/site-packages/pip/_vendor/rich/protocol.py index 12ab237..6248052 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/rich/protocol.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/rich/protocol.py @@ -1,4 +1,4 @@ -from typing import Any, cast, Set, TYPE_CHECKING +from typing import Any, Callable, cast, Set, TYPE_CHECKING from inspect import isclass if TYPE_CHECKING: diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/rich/repr.py b/venv/lib/python3.10/site-packages/pip/_vendor/rich/repr.py index 36966e7..17147fd 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/rich/repr.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/rich/repr.py @@ -1,6 +1,5 @@ from functools import partial import inspect -import sys from typing import ( Any, @@ -28,28 +27,28 @@ class ReprError(Exception): @overload -def auto(cls: Optional[Type[T]]) -> Type[T]: +def auto(cls: Optional[T]) -> T: ... @overload -def auto(*, angular: bool = False) -> Callable[[Type[T]], Type[T]]: +def auto(*, angular: bool = False) -> Callable[[T], T]: ... def auto( - cls: Optional[Type[T]] = None, *, angular: Optional[bool] = None -) -> Union[Type[T], Callable[[Type[T]], Type[T]]]: + cls: Optional[T] = None, *, angular: Optional[bool] = None +) -> Union[T, Callable[[T], T]]: """Class decorator to create __repr__ from __rich_repr__""" def do_replace(cls: Type[T], angular: Optional[bool] = None) -> Type[T]: - def auto_repr(self: T) -> str: + def auto_repr(self: Type[T]) -> str: """Create repr string from __rich_repr__""" repr_str: List[str] = [] append = repr_str.append - angular: bool = getattr(self.__rich_repr__, "angular", False) # type: ignore[attr-defined] - for arg in self.__rich_repr__(): # type: ignore[attr-defined] + angular = getattr(self.__rich_repr__, "angular", False) # type: ignore + for arg in self.__rich_repr__(): # type: ignore if isinstance(arg, tuple): if len(arg) == 1: append(repr(arg[0])) @@ -71,7 +70,7 @@ def auto_repr(self: T) -> str: def auto_rich_repr(self: Type[T]) -> Result: """Auto generate __rich_repr__ from signature of __init__""" try: - signature = inspect.signature(self.__init__) + signature = inspect.signature(self.__init__) ## type: ignore for name, param in signature.parameters.items(): if param.kind == param.POSITIONAL_ONLY: yield getattr(self, name) @@ -90,33 +89,33 @@ def auto_rich_repr(self: Type[T]) -> Result: if not hasattr(cls, "__rich_repr__"): auto_rich_repr.__doc__ = "Build a rich repr" - cls.__rich_repr__ = auto_rich_repr # type: ignore[attr-defined] + cls.__rich_repr__ = auto_rich_repr # type: ignore auto_repr.__doc__ = "Return repr(self)" - cls.__repr__ = auto_repr # type: ignore[assignment] + cls.__repr__ = auto_repr # type: ignore if angular is not None: - cls.__rich_repr__.angular = angular # type: ignore[attr-defined] + cls.__rich_repr__.angular = angular # type: ignore return cls if cls is None: - return partial(do_replace, angular=angular) + return partial(do_replace, angular=angular) # type: ignore else: - return do_replace(cls, angular=angular) + return do_replace(cls, angular=angular) # type: ignore @overload -def rich_repr(cls: Optional[Type[T]]) -> Type[T]: +def rich_repr(cls: Optional[T]) -> T: ... @overload -def rich_repr(*, angular: bool = False) -> Callable[[Type[T]], Type[T]]: +def rich_repr(*, angular: bool = False) -> Callable[[T], T]: ...
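The auto()/rich_repr() decorators patched above derive __repr__ (and, if missing, __rich_repr__) from the signature of __init__; a minimal sketch:

from pip._vendor.rich import repr as rich_repr

@rich_repr.auto
class Point:
    def __init__(self, x: int, y: int, label: str = "origin") -> None:
        self.x = x
        self.y = y
        self.label = label

# Prints Point(1, 2, label='home'); keyword arguments that still equal
# their defaults are omitted, so Point(1, 2) reprs as Point(1, 2).
print(Point(1, 2, label="home"))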
def rich_repr( - cls: Optional[Type[T]] = None, *, angular: bool = False -) -> Union[Type[T], Callable[[Type[T]], Type[T]]]: + cls: Optional[T] = None, *, angular: bool = False +) -> Union[T, Callable[[T], T]]: if cls is None: return auto(angular=angular) else: @@ -144,7 +143,7 @@ def __rich_repr__(self) -> Result: console.print(foo, width=30) console.rule("Angular repr") - Foo.__rich_repr__.angular = True # type: ignore[attr-defined] + Foo.__rich_repr__.angular = True # type: ignore console.print(foo) diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/rich/rule.py b/venv/lib/python3.10/site-packages/pip/_vendor/rich/rule.py index 0b78f7a..ce4754f 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/rich/rule.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/rich/rule.py @@ -4,7 +4,6 @@ from .cells import cell_len, set_cell_size from .console import Console, ConsoleOptions, RenderResult from .jupyter import JupyterMixin -from .measure import Measurement from .style import Style from .text import Text @@ -63,7 +62,10 @@ def __rich_console__( chars_len = cell_len(characters) if not self.title: - yield self._rule_line(chars_len, width) + rule_text = Text(characters * ((width // chars_len) + 1), self.style) + rule_text.truncate(width) + rule_text.plain = set_cell_size(rule_text.plain, width) + yield rule_text return if isinstance(self.title, Text): @@ -73,16 +75,10 @@ def __rich_console__( title_text.plain = title_text.plain.replace("\n", " ") title_text.expand_tabs() - - required_space = 4 if self.align == "center" else 2 - truncate_width = max(0, width - required_space) - if not truncate_width: - yield self._rule_line(chars_len, width) - return - rule_text = Text(end=self.end) + if self.align == "center": - title_text.truncate(truncate_width, overflow="ellipsis") + title_text.truncate(width - 4, overflow="ellipsis") side_width = (width - cell_len(title_text.plain)) // 2 left = Text(characters * (side_width // chars_len + 1)) left.truncate(side_width - 1) @@ -93,12 +89,12 @@ def __rich_console__( rule_text.append(title_text) rule_text.append(" " + right.plain, self.style) elif self.align == "left": - title_text.truncate(truncate_width, overflow="ellipsis") + title_text.truncate(width - 2, overflow="ellipsis") rule_text.append(title_text) rule_text.append(" ") rule_text.append(characters * (width - rule_text.cell_len), self.style) elif self.align == "right": - title_text.truncate(truncate_width, overflow="ellipsis") + title_text.truncate(width - 2, overflow="ellipsis") rule_text.append(characters * (width - title_text.cell_len - 1), self.style) rule_text.append(" ") rule_text.append(title_text) @@ -106,22 +102,10 @@ def __rich_console__( rule_text.plain = set_cell_size(rule_text.plain, width) yield rule_text - def _rule_line(self, chars_len: int, width: int) -> Text: - rule_text = Text(self.characters * ((width // chars_len) + 1), self.style) - rule_text.truncate(width) - rule_text.plain = set_cell_size(rule_text.plain, width) - return rule_text - - def __rich_measure__( - self, console: Console, options: ConsoleOptions - ) -> Measurement: - return Measurement(1, 1) - if __name__ == "__main__": # pragma: no cover - import sys - from pip._vendor.rich.console import Console + import sys try: text = sys.argv[1] @@ -129,6 +113,3 @@ def __rich_measure__( text = "Hello, World" console = Console() console.print(Rule(title=text)) - - console = Console() - console.print(Rule("foo"), width=4) diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/rich/segment.py 
b/venv/lib/python3.10/site-packages/pip/_vendor/rich/segment.py index 1ea5435..94ca730 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/rich/segment.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/rich/segment.py @@ -18,7 +18,6 @@ from .cells import ( _is_single_cell_widths, - cached_cell_len, cell_len, get_character_cell_size, set_cell_size, @@ -50,13 +49,10 @@ class ControlType(IntEnum): CURSOR_MOVE_TO_COLUMN = 13 CURSOR_MOVE_TO = 14 ERASE_IN_LINE = 15 - SET_WINDOW_TITLE = 16 ControlCode = Union[ - Tuple[ControlType], - Tuple[ControlType, Union[int, str]], - Tuple[ControlType, int, int], + Tuple[ControlType], Tuple[ControlType, int], Tuple[ControlType, int, int] ] @@ -68,25 +64,15 @@ class Segment(NamedTuple): Args: text (str): A piece of text. style (:class:`~rich.style.Style`, optional): An optional style to apply to the text. - control (Tuple[ControlCode], optional): Optional sequence of control codes. - - Attributes: - cell_length (int): The cell length of this Segment. + control (Tuple[ControlCode, ...], optional): Optional sequence of control codes. """ - text: str + text: str = "" + """Raw text.""" style: Optional[Style] = None + """An optional style.""" control: Optional[Sequence[ControlCode]] = None - - @property - def cell_length(self) -> int: - """The number of terminal cells required to display self.text. - - Returns: - int: A number of cells. - """ - text, _style, control = self - return 0 if control else cell_len(text) + """Optional sequence of control codes.""" def __rich_repr__(self) -> Result: yield self.text @@ -101,6 +87,11 @@ def __bool__(self) -> bool: """Check if the segment contains text.""" return bool(self.text) + @property + def cell_length(self) -> int: + """Get cell length of segment.""" + return 0 if self.control else cell_len(self.text) + @property def is_control(self) -> bool: """Check if the segment contains control codes.""" @@ -108,7 +99,7 @@ def is_control(self) -> bool: @classmethod @lru_cache(1024 * 16) - def _split_cells(cls, segment: "Segment", cut: int) -> Tuple["Segment", "Segment"]: + def _split_cells(cls, segment: "Segment", cut: int) -> Tuple["Segment", "Segment"]: # type: ignore text, style, control = segment _Segment = Segment @@ -144,8 +135,6 @@ def _split_cells(cls, segment: "Segment", cut: int) -> Tuple["Segment", "Segment _Segment(" " + text[pos:], style, control), ) - raise AssertionError("Will never reach here") - def split_cells(self, cut: int) -> Tuple["Segment", "Segment"]: """Split segment into two segments at the specified column.
@@ -291,11 +280,11 @@ def split_and_crop_lines( for segment in segments: if "\n" in segment.text and not segment.control: - text, segment_style, _ = segment + text, style, _ = segment while text: _text, new_line, text = text.partition("\n") if _text: - append(cls(_text, segment_style)) + append(cls(_text, style)) if new_line: cropped_line = adjust_line_length( line, length, style=style, pad=pad @@ -603,56 +592,44 @@ def divide( iter_cuts = iter(cuts) while True: - cut = next(iter_cuts, -1) - if cut == -1: + try: + cut = next(iter_cuts) + except StopIteration: return [] if cut != 0: break yield [] pos = 0 - segments_clear = split_segments.clear - segments_copy = split_segments.copy - - _cell_len = cached_cell_len for segment in segments: - text, _style, control = segment - while text: - end_pos = pos if control else pos + _cell_len(text) + while segment.text: + end_pos = pos + segment.cell_length if end_pos < cut: add_segment(segment) pos = end_pos break - if end_pos == cut: - add_segment(segment) - yield segments_copy() - segments_clear() - pos = end_pos - - cut = next(iter_cuts, -1) - if cut == -1: + try: + if end_pos == cut: + add_segment(segment) + yield split_segments[:] + del split_segments[:] + pos = end_pos + break + else: + before, segment = segment.split_cells(cut - pos) + add_segment(before) + yield split_segments[:] + del split_segments[:] + pos = cut + finally: + try: + cut = next(iter_cuts) + except StopIteration: if split_segments: - yield segments_copy() + yield split_segments[:] return - - break - - else: - before, segment = segment.split_cells(cut - pos) - text, _style, control = segment - add_segment(before) - yield segments_copy() - segments_clear() - pos = cut - - cut = next(iter_cuts, -1) - if cut == -1: - if split_segments: - yield segments_copy() - return - - yield segments_copy() + yield split_segments[:] class Segments: @@ -705,35 +682,39 @@ def __rich_console__( yield from line -if __name__ == "__main__": # pragma: no cover - from pip._vendor.rich.console import Console - from pip._vendor.rich.syntax import Syntax - from pip._vendor.rich.text import Text +if __name__ == "__main__": - code = """from rich.console import Console -console = Console() -text = Text.from_markup("Hello, [bold magenta]World[/]!") -console.print(text)""" + if __name__ == "__main__": # pragma: no cover + from pip._vendor.rich.console import Console + from pip._vendor.rich.syntax import Syntax + from pip._vendor.rich.text import Text + code = """from rich.console import Console + console = Console() text = Text.from_markup("Hello, [bold magenta]World[/]!") + console.print(text)""" - console = Console() + text = Text.from_markup("Hello, [bold magenta]World[/]!") - console.rule("rich.Segment") - console.print( - "A Segment is the last step in the Rich render process before generating text with ANSI codes." - ) - console.print("\nConsider the following code:\n") - console.print(Syntax(code, "python", line_numbers=True)) - console.print() - console.print( - "When you call [b]print()[/b], Rich [i]renders[/i] the object in to the the following:\n" - ) - fragments = list(console.render(text)) - console.print(fragments) - console.print() - console.print("The Segments are then processed to produce the following output:\n") - console.print(text) - console.print( - "\nYou will only need to know this if you are implementing your own Rich renderables." 
- ) + console = Console() + + console.rule("rich.Segment") + console.print( + "A Segment is the last step in the Rich render process before generating text with ANSI codes." + ) + console.print("\nConsider the following code:\n") + console.print(Syntax(code, "python", line_numbers=True)) + console.print() + console.print( + "When you call [b]print()[/b], Rich [i]renders[/i] the object in to the the following:\n" + ) + fragments = list(console.render(text)) + console.print(fragments) + console.print() + console.print( + "The Segments are then processed to produce the following output:\n" + ) + console.print(text) + console.print( + "\nYou will only need to know this if you are implementing your own Rich renderables." + ) diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/rich/spinner.py b/venv/lib/python3.10/site-packages/pip/_vendor/rich/spinner.py index 0879088..5b13b1e 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/rich/spinner.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/rich/spinner.py @@ -1,4 +1,4 @@ -from typing import cast, List, Optional, TYPE_CHECKING, Union +from typing import cast, List, Optional, TYPE_CHECKING from ._spinners import SPINNERS from .measure import Measurement @@ -34,9 +34,7 @@ def __init__( spinner = SPINNERS[name] except KeyError: raise KeyError(f"no spinner called {name!r}") - self.text: "Union[RenderableType, Text]" = ( - Text.from_markup(text) if isinstance(text, str) else text - ) + self.text = Text.from_markup(text) if isinstance(text, str) else text self.frames = cast(List[str], spinner["frames"])[:] self.interval = cast(float, spinner["interval"]) self.start_time: Optional[float] = None diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/rich/style.py b/venv/lib/python3.10/site-packages/pip/_vendor/rich/style.py index b2e8aff..0787c33 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/rich/style.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/rich/style.py @@ -1,14 +1,15 @@ import sys from functools import lru_cache -from marshal import dumps, loads +from marshal import loads, dumps from random import randint -from typing import Any, Dict, Iterable, List, Optional, Type, Union, cast +from typing import Any, cast, Dict, Iterable, List, Optional, Type, Union from . 
import errors from .color import Color, ColorParseError, ColorSystem, blend_rgb -from .repr import Result, rich_repr +from .repr import rich_repr, Result from .terminal_theme import DEFAULT_TERMINAL_THEME, TerminalTheme + # Style instances and style definitions are often interchangeable StyleType = Union[str, "Style"] @@ -59,7 +60,7 @@ class Style: _bgcolor: Optional[Color] _attributes: int _set_attributes: int - _hash: Optional[int] + _hash: int _null: bool _meta: Optional[bytes] @@ -190,7 +191,16 @@ def _make_color(color: Union[Color, str]) -> Color: self._link = link self._link_id = f"{randint(0, 999999)}" if link else "" self._meta = None if meta is None else dumps(meta) - self._hash: Optional[int] = None + self._hash = hash( + ( + self._color, + self._bgcolor, + self._attributes, + self._set_attributes, + link, + self._meta, + ) + ) self._null = not (self._set_attributes or color or bgcolor or link or meta) @classmethod @@ -218,8 +228,17 @@ def from_color( style._link = None style._link_id = "" style._meta = None + style._hash = hash( + ( + color, + bgcolor, + None, + None, + None, + None, + ) + ) style._null = not (color or bgcolor) - style._hash = None return style @classmethod @@ -239,7 +258,16 @@ def from_meta(cls, meta: Optional[Dict[str, Any]]) -> "Style": style._link = None style._link_id = "" style._meta = dumps(meta) - style._hash = None + style._hash = hash( + ( + None, + None, + None, + None, + None, + style._meta, + ) + ) style._null = not (meta) return style @@ -251,7 +279,7 @@ def on(cls, meta: Optional[Dict[str, Any]] = None, **handlers: Any) -> "Style": style = Style.on(click=self.on_click) Args: - meta (Optional[Dict[str, Any]], optional): An optional dict of meta information. + meta (Optional[Dict[str, Any]], optional): An optional dict of meta information. **handlers (Any): Keyword arguments are translated into handlers. Returns: @@ -339,7 +367,6 @@ def _make_ansi_codes(self, color_system: ColorSystem) -> str: Returns: str: String containing codes.
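The style.py changes above swap lazy hashing for eager hashing in __init__ and the factory methods; either way, the public behaviour of combining styles is the same, as in this sketch:

from pip._vendor.rich.style import Style

bold_red = Style(color="red", bold=True)
underline = Style(underline=True)

# __add__ merges the two; attributes set on the right-hand side win.
combined = bold_red + underline
assert combined.bold and combined.underline
assert combined.color is not None and combined.color.name == "red"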
""" - if self._ansi is None: sgr: List[str] = [] append = sgr.append @@ -420,26 +447,16 @@ def __rich_repr__(self) -> Result: def __eq__(self, other: Any) -> bool: if not isinstance(other, Style): return NotImplemented - return self.__hash__() == other.__hash__() - - def __ne__(self, other: Any) -> bool: - if not isinstance(other, Style): - return NotImplemented - return self.__hash__() != other.__hash__() + return ( + self._color == other._color + and self._bgcolor == other._bgcolor + and self._set_attributes == other._set_attributes + and self._attributes == other._attributes + and self._link == other._link + and self._meta == other._meta + ) def __hash__(self) -> int: - if self._hash is not None: - return self._hash - self._hash = hash( - ( - self._color, - self._bgcolor, - self._attributes, - self._set_attributes, - self._link, - self._meta, - ) - ) return self._hash @property @@ -486,9 +503,9 @@ def without_color(self) -> "Style": style._set_attributes = self._set_attributes style._link = self._link style._link_id = f"{randint(0, 999999)}" if self._link else "" + style._hash = self._hash style._null = False style._meta = None - style._hash = None return style @classmethod @@ -661,7 +678,7 @@ def update_link(self, link: Optional[str] = None) -> "Style": style._set_attributes = self._set_attributes style._link = link style._link_id = f"{randint(0, 999999)}" if link else "" - style._hash = None + style._hash = self._hash style._null = False style._meta = self._meta return style @@ -684,7 +701,7 @@ def render( """ if not text or color_system is None: return text - attrs = self._ansi or self._make_ansi_codes(color_system) + attrs = self._make_ansi_codes(color_system) rendered = f"\x1b[{attrs}m{text}\x1b[0m" if attrs else text if self._link and not legacy_windows: rendered = ( @@ -704,8 +721,9 @@ def test(self, text: Optional[str] = None) -> None: text = text or str(self) sys.stdout.write(f"{self.render(text)}\n") - @lru_cache(maxsize=1024) - def _add(self, style: Optional["Style"]) -> "Style": + def __add__(self, style: Optional["Style"]) -> "Style": + if not (isinstance(style, Style) or style is None): + return NotImplemented if style is None or style._null: return self if self._null: @@ -721,18 +739,14 @@ def _add(self, style: Optional["Style"]) -> "Style": new_style._set_attributes = self._set_attributes | style._set_attributes new_style._link = style._link or self._link new_style._link_id = style._link_id or self._link_id - new_style._null = style._null + new_style._hash = style._hash + new_style._null = self._null or style._null if self._meta and style._meta: new_style._meta = dumps({**self.meta, **style.meta}) else: new_style._meta = self._meta or style._meta - new_style._hash = None return new_style - def __add__(self, style: Optional["Style"]) -> "Style": - combined_style = self._add(style) - return combined_style.copy() if combined_style.link else combined_style - NULL_STYLE = Style() diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/rich/syntax.py b/venv/lib/python3.10/site-packages/pip/_vendor/rich/syntax.py index dace718..58cc103 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/rich/syntax.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/rich/syntax.py @@ -1,22 +1,9 @@ import os.path import platform -import re -import sys +from pip._vendor.rich.containers import Lines import textwrap from abc import ABC, abstractmethod -from typing import ( - Any, - Dict, - Iterable, - List, - NamedTuple, - Optional, - Sequence, - Set, - Tuple, - Type, - Union, -) +from 
typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Type, Union from pip._vendor.pygments.lexer import Lexer from pip._vendor.pygments.lexers import get_lexer_by_name, guess_lexer_for_filename @@ -36,16 +23,13 @@ ) from pip._vendor.pygments.util import ClassNotFound -from pip._vendor.rich.containers import Lines -from pip._vendor.rich.padding import Padding, PaddingDimensions - from ._loop import loop_first from .color import Color, blend_rgb from .console import Console, ConsoleOptions, JustifyMethod, RenderResult from .jupyter import JupyterMixin from .measure import Measurement -from .segment import Segment, Segments -from .style import Style, StyleType +from .segment import Segment +from .style import Style from .text import Text TokenType = Tuple[str, ...] @@ -115,7 +99,6 @@ } RICH_SYNTAX_THEMES = {"ansi_light": ANSI_LIGHT, "ansi_dark": ANSI_DARK} -NUMBERS_COLUMN_DEFAULT_PADDING = 2 class SyntaxTheme(ABC): @@ -207,21 +190,6 @@ def get_background_style(self) -> Style: return self._background_style -SyntaxPosition = Tuple[int, int] - - -class _SyntaxHighlightRange(NamedTuple): - """ - A range to highlight in a Syntax object. - `start` and `end` are 2-integers tuples, where the first integer is the line number - (starting from 1) and the second integer is the column index (starting from 0). - """ - - style: StyleType - start: SyntaxPosition - end: SyntaxPosition - - class Syntax(JupyterMixin): """Construct a Syntax object to render syntax highlighted code. @@ -232,15 +200,13 @@ class Syntax(JupyterMixin): dedent (bool, optional): Enable stripping of initial whitespace. Defaults to False. line_numbers (bool, optional): Enable rendering of line numbers. Defaults to False. start_line (int, optional): Starting number for line numbers. Defaults to 1. - line_range (Tuple[int | None, int | None], optional): If given should be a tuple of the start and end line to render. - A value of None in the tuple indicates the range is open in that direction. + line_range (Tuple[int, int], optional): If given should be a tuple of the start and end line to render. highlight_lines (Set[int]): A set of line numbers to highlight. code_width: Width of code to render (not including line numbers), or ``None`` to use all available width. tab_size (int, optional): Size of tabs. Defaults to 4. word_wrap (bool, optional): Enable word wrapping. background_color (str, optional): Optional background color, or None to use theme color. Defaults to None. indent_guides (bool, optional): Show indent guides. Defaults to False. - padding (PaddingDimensions): Padding to apply around the syntax. Defaults to 0 (no padding). 
""" _pygments_style_class: Type[PygmentsStyle] @@ -267,14 +233,13 @@ def __init__( dedent: bool = False, line_numbers: bool = False, start_line: int = 1, - line_range: Optional[Tuple[Optional[int], Optional[int]]] = None, + line_range: Optional[Tuple[int, int]] = None, highlight_lines: Optional[Set[int]] = None, code_width: Optional[int] = None, tab_size: int = 4, word_wrap: bool = False, background_color: Optional[str] = None, indent_guides: bool = False, - padding: PaddingDimensions = 0, ) -> None: self.code = code self._lexer = lexer @@ -291,17 +256,14 @@ def __init__( Style(bgcolor=background_color) if background_color else Style() ) self.indent_guides = indent_guides - self.padding = padding self._theme = self.get_theme(theme) - self._stylized_ranges: List[_SyntaxHighlightRange] = [] @classmethod def from_path( cls, path: str, encoding: str = "utf-8", - lexer: Optional[Union[Lexer, str]] = None, theme: Union[str, SyntaxTheme] = DEFAULT_THEME, dedent: bool = False, line_numbers: bool = False, @@ -313,14 +275,12 @@ def from_path( word_wrap: bool = False, background_color: Optional[str] = None, indent_guides: bool = False, - padding: PaddingDimensions = 0, ) -> "Syntax": """Construct a Syntax object from a file. Args: path (str): Path to file to highlight. encoding (str): Encoding of file. - lexer (str | Lexer, optional): Lexer to use. If None, lexer will be auto-detected from path/file content. theme (str, optional): Color theme, aka Pygments style (see https://pygments.org/docs/styles/#getting-a-list-of-available-styles). Defaults to "emacs". dedent (bool, optional): Enable stripping of initial whitespace. Defaults to True. line_numbers (bool, optional): Enable rendering of line numbers. Defaults to False. @@ -332,7 +292,6 @@ def from_path( word_wrap (bool, optional): Enable word wrapping of code. background_color (str, optional): Optional background color, or None to use theme color. Defaults to None. indent_guides (bool, optional): Show indent guides. Defaults to False. - padding (PaddingDimensions): Padding to apply around the syntax. Defaults to 0 (no padding). Returns: [Syntax]: A Syntax object that may be printed to the console @@ -340,12 +299,26 @@ def from_path( with open(path, "rt", encoding=encoding) as code_file: code = code_file.read() - if not lexer: - lexer = cls.guess_lexer(path, code=code) + lexer = None + lexer_name = "default" + try: + _, ext = os.path.splitext(path) + if ext: + extension = ext.lstrip(".").lower() + lexer = get_lexer_by_name(extension) + lexer_name = lexer.name + except ClassNotFound: + pass + + if lexer is None: + try: + lexer_name = guess_lexer_for_filename(path, code).name + except ClassNotFound: + pass return cls( code, - lexer, + lexer_name, theme=theme, dedent=dedent, line_numbers=line_numbers, @@ -357,51 +330,8 @@ def from_path( word_wrap=word_wrap, background_color=background_color, indent_guides=indent_guides, - padding=padding, ) - @classmethod - def guess_lexer(cls, path: str, code: Optional[str] = None) -> str: - """Guess the alias of the Pygments lexer to use based on a path and an optional string of code. - If code is supplied, it will use a combination of the code and the filename to determine the - best lexer to use. For example, if the file is ``index.html`` and the file contains Django - templating syntax, then "html+django" will be returned. If the file is ``index.html``, and no - templating language is used, the "html" lexer will be used. If no string of code - is supplied, the lexer will be chosen based on the file extension.. 
- - Args: - path (AnyStr): The path to the file containing the code you wish to know the lexer for. - code (str, optional): Optional string of code that will be used as a fallback if no lexer - is found for the supplied path. - - Returns: - str: The name of the Pygments lexer that best matches the supplied path/code. - """ - lexer: Optional[Lexer] = None - lexer_name = "default" - if code: - try: - lexer = guess_lexer_for_filename(path, code) - except ClassNotFound: - pass - - if not lexer: - try: - _, ext = os.path.splitext(path) - if ext: - extension = ext.lstrip(".").lower() - lexer = get_lexer_by_name(extension) - except ClassNotFound: - pass - - if lexer: - if lexer.aliases: - lexer_name = lexer.aliases[0] - else: - lexer_name = lexer.name - - return lexer_name - def _get_base_style(self) -> Style: """Get the base style.""" default_style = self._theme.get_background_style() + self.background_style @@ -439,9 +369,7 @@ def lexer(self) -> Optional[Lexer]: return None def highlight( - self, - code: str, - line_range: Optional[Tuple[Optional[int], Optional[int]]] = None, + self, code: str, line_range: Optional[Tuple[int, int]] = None ) -> Text: """Highlight code and return a Text instance. @@ -478,7 +406,7 @@ def highlight( def line_tokenize() -> Iterable[Tuple[Any, str]]: """Split tokens to one per line.""" - assert lexer # required to make MyPy happy - we know lexer is not None at this point + assert lexer for token_type, token in lexer.get_tokens(code): while token: @@ -489,7 +417,7 @@ def tokens_to_spans() -> Iterable[Tuple[str, Optional[Style]]]: """Convert tokens to spans.""" tokens = iter(line_tokenize()) line_no = 0 - _line_start = line_start - 1 if line_start else 0 + _line_start = line_start - 1 # Skip over tokens until line start while line_no < _line_start: @@ -502,7 +430,7 @@ def tokens_to_spans() -> Iterable[Tuple[str, Optional[Style]]]: yield (token, _get_theme_style(token_type)) if token.endswith("\n"): line_no += 1 - if line_end and line_no >= line_end: + if line_no >= line_end: break text.append_tokens(tokens_to_spans()) @@ -514,26 +442,8 @@ def tokens_to_spans() -> Iterable[Tuple[str, Optional[Style]]]: ) if self.background_color is not None: text.stylize(f"on {self.background_color}") - - if self._stylized_ranges: - self._apply_stylized_ranges(text) - return text - def stylize_range( - self, style: StyleType, start: SyntaxPosition, end: SyntaxPosition - ) -> None: - """ - Adds a custom style on a part of the code, that will be applied to the syntax display when it's rendered. - Line numbers are 1-based, while column indexes are 0-based. - - Args: - style (StyleType): The style to apply. - start (Tuple[int, int]): The start of the range, in the form `[line number, column index]`. - end (Tuple[int, int]): The end of the range, in the form `[line number, column index]`. 
- """ - self._stylized_ranges.append(_SyntaxHighlightRange(style, start, end)) - def _get_line_numbers_color(self, blend: float = 0.3) -> Color: background_style = self._theme.get_background_style() + self.background_style background_color = background_style.bgcolor @@ -554,10 +464,7 @@ def _numbers_column_width(self) -> int: """Get the number of characters used to render the numbers column.""" column_width = 0 if self.line_numbers: - column_width = ( - len(str(self.start_line + self.code.count("\n"))) - + NUMBERS_COLUMN_DEFAULT_PADDING - ) + column_width = len(str(self.start_line + self.code.count("\n"))) + 2 return column_width def _get_number_styles(self, console: Console) -> Tuple[Style, Style, Style]: @@ -586,31 +493,15 @@ def _get_number_styles(self, console: Console) -> Tuple[Style, Style, Style]: def __rich_measure__( self, console: "Console", options: "ConsoleOptions" ) -> "Measurement": - _, right, _, left = Padding.unpack(self.padding) if self.code_width is not None: - width = self.code_width + self._numbers_column_width + right + left + width = self.code_width + self._numbers_column_width return Measurement(self._numbers_column_width, width) return Measurement(self._numbers_column_width, options.max_width) def __rich_console__( self, console: Console, options: ConsoleOptions ) -> RenderResult: - segments = Segments(self._get_syntax(console, options)) - if self.padding: - yield Padding( - segments, style=self._theme.get_background_style(), pad=self.padding - ) - else: - yield segments - def _get_syntax( - self, - console: Console, - options: ConsoleOptions, - ) -> Iterable[Segment]: - """ - Get the Segments for the Syntax object, excluding any vertical/horizontal padding - """ transparent_background = self._get_base_style().transparent_background code_width = ( ( @@ -622,8 +513,22 @@ def _get_syntax( else self.code_width ) - ends_on_nl, processed_code = self._process_code(self.code) - text = self.highlight(processed_code, self.line_range) + line_offset = 0 + if self.line_range: + start_line, end_line = self.line_range + line_offset = max(0, start_line - 1) + + ends_on_nl = self.code.endswith("\n") + code = self.code if ends_on_nl else self.code + "\n" + code = textwrap.dedent(code) if self.dedent else code + code = code.expandtabs(self.tab_size) + text = self.highlight(code, self.line_range) + + ( + background_style, + number_style, + highlight_number_style, + ) = self._get_number_styles(console) if not self.line_numbers and not self.word_wrap and not self.line_range: if not ends_on_nl: @@ -645,7 +550,7 @@ def _get_syntax( else: syntax_lines = console.render_lines( text, - options.update(width=code_width, height=None, justify="left"), + options.update(width=code_width, height=None), style=self.background_style, pad=True, new_lines=True, @@ -654,10 +559,6 @@ def _get_syntax( yield from syntax_line return - start_line, end_line = self.line_range or (None, None) - line_offset = 0 - if start_line: - line_offset = max(0, start_line - 1) lines: Union[List[Text], Lines] = text.split("\n", allow_blank=ends_on_nl) if self.line_range: lines = lines[line_offset:end_line] @@ -681,24 +582,20 @@ def _get_syntax( highlight_line = self.highlight_lines.__contains__ _Segment = Segment + padding = _Segment(" " * numbers_column_width + " ", background_style) new_line = _Segment("\n") line_pointer = "> " if options.legacy_windows else "❱ " - ( - background_style, - number_style, - highlight_number_style, - ) = self._get_number_styles(console) - for line_no, line in enumerate(lines, self.start_line + 
line_offset): if self.word_wrap: wrapped_lines = console.render_lines( line, - render_options.update(height=None, justify="left"), + render_options.update(height=None), style=background_style, pad=not transparent_background, ) + else: segments = list(line.render(console, end="")) if options.no_wrap: @@ -712,11 +609,7 @@ def _get_syntax( pad=not transparent_background, ) ] - if self.line_numbers: - wrapped_line_left_pad = _Segment( - " " * numbers_column_width + " ", background_style - ) for first, wrapped_line in loop_first(wrapped_lines): if first: line_column = str(line_no).rjust(numbers_column_width - 2) + " " @@ -727,7 +620,7 @@ def _get_syntax( yield _Segment(" ", highlight_number_style) yield _Segment(line_column, number_style) else: - yield wrapped_line_left_pad + yield padding yield from wrapped_line yield new_line else: @@ -735,83 +628,6 @@ def _get_syntax( yield from wrapped_line yield new_line - def _apply_stylized_ranges(self, text: Text) -> None: - """ - Apply stylized ranges to a text instance, - using the given code to determine the right portion to apply the style to. - - Args: - text (Text): Text instance to apply the style to. - """ - code = text.plain - newlines_offsets = [ - # Let's add outer boundaries at each side of the list: - 0, - # N.B. using "\n" here is much faster than using metacharacters such as "^" or "\Z": - *[ - match.start() + 1 - for match in re.finditer("\n", code, flags=re.MULTILINE) - ], - len(code) + 1, - ] - - for stylized_range in self._stylized_ranges: - start = _get_code_index_for_syntax_position( - newlines_offsets, stylized_range.start - ) - end = _get_code_index_for_syntax_position( - newlines_offsets, stylized_range.end - ) - if start is not None and end is not None: - text.stylize(stylized_range.style, start, end) - - def _process_code(self, code: str) -> Tuple[bool, str]: - """ - Applies various processing to a raw code string - (normalises it so it always ends with a line return, dedents it if necessary, etc.) - - Args: - code (str): The raw code string to process - - Returns: - Tuple[bool, str]: the boolean indicates whether the raw code ends with a line return, - while the string is the processed code. - """ - ends_on_nl = code.endswith("\n") - processed_code = code if ends_on_nl else code + "\n" - processed_code = ( - textwrap.dedent(processed_code) if self.dedent else processed_code - ) - processed_code = processed_code.expandtabs(self.tab_size) - return ends_on_nl, processed_code - - -def _get_code_index_for_syntax_position( - newlines_offsets: Sequence[int], position: SyntaxPosition -) -> Optional[int]: - """ - Returns the index of the code string for the given positions. - - Args: - newlines_offsets (Sequence[int]): The offset of each newline character found in the code snippet. - position (SyntaxPosition): The position to search for. 
- - Returns: - Optional[int]: The index of the code string for this position, or `None` - if the given position's line number is out of range (if it's the column that is out of range - we silently clamp its value so that it reaches the end of the line) - """ - lines_count = len(newlines_offsets) - - line_number, column_index = position - if line_number > lines_count or len(newlines_offsets) < (line_number + 1): - return None # `line_number` is out of range - line_index = line_number - 1 - line_length = newlines_offsets[line_index + 1] - newlines_offsets[line_index] - 1 - # If `column_index` is out of range: let's silently clamp it: - column_index = min(line_length, column_index) - return newlines_offsets[line_index] + column_index - if __name__ == "__main__": # pragma: no cover @@ -886,20 +702,10 @@ def _get_code_index_for_syntax_position( parser.add_argument( "-x", "--lexer", - default=None, + default="default", dest="lexer_name", help="Lexer name", ) - parser.add_argument( - "-p", "--padding", type=int, default=0, dest="padding", help="Padding" - ) - parser.add_argument( - "--highlight-line", - type=int, - default=None, - dest="highlight_line", - help="The line number (not index!) to highlight", - ) args = parser.parse_args() from pip._vendor.rich.console import Console @@ -916,19 +722,14 @@ def _get_code_index_for_syntax_position( theme=args.theme, background_color=args.background_color, indent_guides=args.indent_guides, - padding=args.padding, - highlight_lines={args.highlight_line}, ) else: syntax = Syntax.from_path( args.path, - lexer=args.lexer_name, line_numbers=args.line_numbers, word_wrap=args.word_wrap, theme=args.theme, background_color=args.background_color, indent_guides=args.indent_guides, - padding=args.padding, - highlight_lines={args.highlight_line}, ) console.print(syntax, soft_wrap=args.soft_wrap) diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/rich/table.py b/venv/lib/python3.10/site-packages/pip/_vendor/rich/table.py index 8fc28ef..da43860 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/rich/table.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/rich/table.py @@ -37,35 +37,7 @@ @dataclass class Column: - """Defines a column within a ~Table. - - Args: - title (Union[str, Text], optional): The title of the table rendered at the top. Defaults to None. - caption (Union[str, Text], optional): The table caption rendered below. Defaults to None. - width (int, optional): The width in characters of the table, or ``None`` to automatically fit. Defaults to None. - min_width (Optional[int], optional): The minimum width of the table, or ``None`` for no minimum. Defaults to None. - box (box.Box, optional): One of the constants in box.py used to draw the edges (see :ref:`appendix_box`), or ``None`` for no box lines. Defaults to box.HEAVY_HEAD. - safe_box (Optional[bool], optional): Disable box characters that don't display on windows legacy terminal with *raster* fonts. Defaults to True. - padding (PaddingDimensions, optional): Padding for cells (top, right, bottom, left). Defaults to (0, 1). - collapse_padding (bool, optional): Enable collapsing of padding around cells. Defaults to False. - pad_edge (bool, optional): Enable padding of edge cells. Defaults to True. - expand (bool, optional): Expand the table to fit the available space if ``True``, otherwise the table width will be auto-calculated. Defaults to False. - show_header (bool, optional): Show a header row. Defaults to True. - show_footer (bool, optional): Show a footer row. Defaults to False. 
- show_edge (bool, optional): Draw a box around the outside of the table. Defaults to True. - show_lines (bool, optional): Draw lines between every row. Defaults to False. - leading (bool, optional): Number of blank lines between rows (precludes ``show_lines``). Defaults to 0. - style (Union[str, Style], optional): Default style for the table. Defaults to "none". - row_styles (List[Union, str], optional): Optional list of row styles, if more than one style is given then the styles will alternate. Defaults to None. - header_style (Union[str, Style], optional): Style of the header. Defaults to "table.header". - footer_style (Union[str, Style], optional): Style of the footer. Defaults to "table.footer". - border_style (Union[str, Style], optional): Style of the border. Defaults to None. - title_style (Union[str, Style], optional): Style of the title. Defaults to None. - caption_style (Union[str, Style], optional): Style of the caption. Defaults to None. - title_justify (str, optional): Justify method for title. Defaults to "center". - caption_justify (str, optional): Justify method for caption. Defaults to "center". - highlight (bool, optional): Highlight cell contents (if str). Defaults to False. - """ + """Defines a column in a table.""" header: "RenderableType" = "" """RenderableType: Renderable for the header (typically a string)""" @@ -752,8 +724,8 @@ def _render( if self.box else None ) - _box = _box.get_plain_headed_box() if _box and not self.show_header else _box + # _box = self.box new_line = Segment.line() columns = self.columns diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/rich/terminal_theme.py b/venv/lib/python3.10/site-packages/pip/_vendor/rich/terminal_theme.py index 565e9d9..801ac0b 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/rich/terminal_theme.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/rich/terminal_theme.py @@ -53,101 +53,3 @@ def __init__( (255, 255, 255), ], ) - -MONOKAI = TerminalTheme( - (12, 12, 12), - (217, 217, 217), - [ - (26, 26, 26), - (244, 0, 95), - (152, 224, 36), - (253, 151, 31), - (157, 101, 255), - (244, 0, 95), - (88, 209, 235), - (196, 197, 181), - (98, 94, 76), - ], - [ - (244, 0, 95), - (152, 224, 36), - (224, 213, 97), - (157, 101, 255), - (244, 0, 95), - (88, 209, 235), - (246, 246, 239), - ], -) -DIMMED_MONOKAI = TerminalTheme( - (25, 25, 25), - (185, 188, 186), - [ - (58, 61, 67), - (190, 63, 72), - (135, 154, 59), - (197, 166, 53), - (79, 118, 161), - (133, 92, 141), - (87, 143, 164), - (185, 188, 186), - (136, 137, 135), - ], - [ - (251, 0, 31), - (15, 114, 47), - (196, 112, 51), - (24, 109, 227), - (251, 0, 103), - (46, 112, 109), - (253, 255, 185), - ], -) -NIGHT_OWLISH = TerminalTheme( - (255, 255, 255), - (64, 63, 83), - [ - (1, 22, 39), - (211, 66, 62), - (42, 162, 152), - (218, 170, 1), - (72, 118, 214), - (64, 63, 83), - (8, 145, 106), - (122, 129, 129), - (122, 129, 129), - ], - [ - (247, 110, 110), - (73, 208, 197), - (218, 194, 107), - (92, 167, 228), - (105, 112, 152), - (0, 201, 144), - (152, 159, 177), - ], -) - -SVG_EXPORT_THEME = TerminalTheme( - (41, 41, 41), - (197, 200, 198), - [ - (75, 78, 85), - (204, 85, 90), - (152, 168, 75), - (208, 179, 68), - (96, 138, 177), - (152, 114, 159), - (104, 160, 179), - (197, 200, 198), - (154, 155, 153), - ], - [ - (255, 38, 39), - (0, 130, 61), - (208, 132, 66), - (25, 132, 233), - (255, 44, 122), - (57, 130, 128), - (253, 253, 197), - ], -) diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/rich/text.py 
b/venv/lib/python3.10/site-packages/pip/_vendor/rich/text.py index 12037d0..ea12c09 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/rich/text.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/rich/text.py @@ -2,6 +2,7 @@ from functools import partial, reduce from math import gcd from operator import itemgetter +from pip._vendor.rich.emoji import EmojiVariant from typing import ( TYPE_CHECKING, Any, @@ -140,8 +141,7 @@ def __init__( tab_size: Optional[int] = 8, spans: Optional[List[Span]] = None, ) -> None: - sanitized_text = strip_control_codes(text) - self._text = [sanitized_text] + self._text = [strip_control_codes(text)] self.style = style self.justify: Optional["JustifyMethod"] = justify self.overflow: Optional["OverflowMethod"] = overflow @@ -149,7 +149,7 @@ def __init__( self.end = end self.tab_size = tab_size self._spans: List[Span] = spans or [] - self._length: int = len(sanitized_text) + self._length: int = len(text) def __len__(self) -> int: return self._length @@ -253,7 +253,6 @@ def from_markup( emoji_variant: Optional[EmojiVariant] = None, justify: Optional["JustifyMethod"] = None, overflow: Optional["OverflowMethod"] = None, - end: str = "\n", ) -> "Text": """Create Text instance from markup. @@ -262,7 +261,6 @@ def from_markup( emoji (bool, optional): Also render emoji code. Defaults to True. justify (str, optional): Justify method: "left", "center", "full", "right". Defaults to None. overflow (str, optional): Overflow method: "crop", "fold", "ellipsis". Defaults to None. - end (str, optional): Character to end text with. Defaults to "\\\\n". Returns: Text: A Text instance with markup rendered. @@ -272,7 +270,6 @@ def from_markup( rendered_text = render(text, style, emoji=emoji, emoji_variant=emoji_variant) rendered_text.justify = justify rendered_text.overflow = overflow - rendered_text.end = end return rendered_text @classmethod @@ -394,10 +391,9 @@ def plain(self) -> str: def plain(self, new_text: str) -> None: """Set the text to a new value.""" if new_text != self.plain: - sanitized_text = strip_control_codes(new_text) - self._text[:] = [sanitized_text] + self._text[:] = [new_text] old_length = self._length - self._length = len(sanitized_text) + self._length = len(new_text) if old_length > self._length: self._trim_spans() @@ -907,10 +903,10 @@ def append( if len(text): if isinstance(text, str): - sanitized_text = strip_control_codes(text) - self._text.append(sanitized_text) + text = strip_control_codes(text) + self._text.append(text) offset = len(self) - text_length = len(sanitized_text) + text_length = len(text) if style is not None: self._spans.append(Span(offset, offset + text_length, style)) self._length += text_length diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/rich/traceback.py b/venv/lib/python3.10/site-packages/pip/_vendor/rich/traceback.py index e5023c7..66a39eb 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/rich/traceback.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/rich/traceback.py @@ -12,10 +12,9 @@ from pip._vendor.pygments.token import Comment, Keyword, Name, Number, Operator, String from pip._vendor.pygments.token import Text as TextToken from pip._vendor.pygments.token import Token -from pip._vendor.pygments.util import ClassNotFound from . 
import pretty -from ._loop import loop_last +from ._loop import loop_first, loop_last from .columns import Columns from .console import Console, ConsoleOptions, ConsoleRenderable, RenderResult, group from .constrain import Constrain @@ -131,7 +130,7 @@ def ipy_display_traceback( try: # pragma: no cover # if within ipython, use customized traceback - ip = get_ipython() # type: ignore[name-defined] + ip = get_ipython() # type: ignore ipy_excepthook_closure(ip) return sys.excepthook except Exception: @@ -367,8 +366,6 @@ def safe_str(_object: Any) -> str: if filename and not filename.startswith("<"): if not os.path.isabs(filename): filename = os.path.join(_IMPORT_CWD, filename) - if frame_summary.f_locals.get("_rich_traceback_omit", False): - continue frame = Frame( filename=filename or "?", lineno=line_no, @@ -385,7 +382,7 @@ def safe_str(_object: Any) -> str: else None, ) append(frame) - if frame_summary.f_locals.get("_rich_traceback_guard", False): + if "_rich_traceback_guard" in frame_summary.f_locals: del stack.frames[:] cause = getattr(exc_value, "__cause__", None) @@ -393,8 +390,9 @@ def safe_str(_object: Any) -> str: exc_type = cause.__class__ exc_value = cause traceback = cause.__traceback__ - is_cause = True - continue + if traceback: + is_cause = True + continue cause = exc_value.__context__ if ( @@ -405,8 +403,9 @@ def safe_str(_object: Any) -> str: exc_type = cause.__class__ exc_value = cause traceback = cause.__traceback__ - is_cause = False - continue + if traceback: + is_cause = False + continue # No cover, code is reached but coverage doesn't recognize it. break # pragma: no cover @@ -524,10 +523,10 @@ def _guess_lexer(cls, filename: str, code: str) -> str: first_line = code[:new_line_index] if new_line_index != -1 else code if first_line.startswith("#!") and "python" in first_line.lower(): return "python" - try: - return cls.LEXERS.get(ext) or guess_lexer_for_filename(filename, code).name - except ClassNotFound: - return "text" + lexer_name = ( + cls.LEXERS.get(ext) or guess_lexer_for_filename(filename, code).name + ) + return lexer_name @group() def _render_stack(self, stack: Stack) -> RenderResult: @@ -586,7 +585,7 @@ def render_locals(frame: Frame) -> Iterable[ConsoleRenderable]: ) excluded = False - first = frame_index == 0 + first = frame_index == 1 frame_filename = frame.filename suppressed = any(frame_filename.startswith(path) for path in self.suppress) @@ -672,7 +671,7 @@ def error() -> None: try: foo(0) except: - slfkjsldkfj # type: ignore[name-defined] + slfkjsldkfj # type: ignore except: console.print_exception(show_locals=True) diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/rich/tree.py b/venv/lib/python3.10/site-packages/pip/_vendor/rich/tree.py index afe8da1..c5ec27d 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/rich/tree.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/rich/tree.py @@ -45,7 +45,7 @@ def add( style: Optional[StyleType] = None, guide_style: Optional[StyleType] = None, expanded: bool = True, - highlight: Optional[bool] = False, + highlight: bool = False, ) -> "Tree": """Add a child tree. 
@@ -136,7 +136,6 @@ def make_guide(index: int, style: Style) -> Segment: highlight=self.highlight, height=None, ), - pad=options.justify is not None, ) if not (depth == 0 and self.hide_root): @@ -215,9 +214,9 @@ def __rich_measure__( code = """\ class Segment(NamedTuple): - text: str = "" - style: Optional[Style] = None - is_control: bool = False + text: str = "" + style: Optional[Style] = None + is_control: bool = False """ syntax = Syntax(code, "python", theme="monokai", line_numbers=True) @@ -225,7 +224,7 @@ class Segment(NamedTuple): """\ ### example.md > Hello, World! -> +> > Markdown _all_ the things """ ) @@ -247,5 +246,4 @@ class Segment(NamedTuple): containers_node.add(Group("📄 [b magenta]Table", table)) console = Console() - console.print(root) diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/tenacity/__init__.py b/venv/lib/python3.10/site-packages/pip/_vendor/tenacity/__init__.py index ab3be3b..086ad46 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/tenacity/__init__.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/tenacity/__init__.py @@ -33,7 +33,6 @@ from .retry import retry_any # noqa from .retry import retry_if_exception # noqa from .retry import retry_if_exception_type # noqa -from .retry import retry_if_exception_cause_type # noqa from .retry import retry_if_not_exception_type # noqa from .retry import retry_if_not_result # noqa from .retry import retry_if_result # noqa @@ -64,7 +63,6 @@ from .wait import wait_random # noqa from .wait import wait_random_exponential # noqa from .wait import wait_random_exponential as wait_full_jitter # noqa -from .wait import wait_exponential_jitter # noqa # Import all built-in before strategies for easier usage. from .before import before_log # noqa diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/tenacity/retry.py b/venv/lib/python3.10/site-packages/pip/_vendor/tenacity/retry.py index 9ebeb62..1d727e9 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/tenacity/retry.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/tenacity/retry.py @@ -117,33 +117,6 @@ def __call__(self, retry_state: "RetryCallState") -> bool: return self.predicate(retry_state.outcome.exception()) -class retry_if_exception_cause_type(retry_base): - """Retries if any of the causes of the raised exception is of one or more types. 
- - The check on the type of the cause of the exception is done recursively (until finding - an exception in the chain that has no `__cause__`) - """ - - def __init__( - self, - exception_types: typing.Union[ - typing.Type[BaseException], - typing.Tuple[typing.Type[BaseException], ...], - ] = Exception, - ) -> None: - self.exception_cause_types = exception_types - - def __call__(self, retry_state: "RetryCallState") -> bool: - if retry_state.outcome.failed: - exc = retry_state.outcome.exception() - while exc is not None: - if isinstance(exc.__cause__, self.exception_cause_types): - return True - exc = exc.__cause__ - - return False - - class retry_if_result(retry_base): """Retries if the result verifies a predicate.""" diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/tenacity/wait.py b/venv/lib/python3.10/site-packages/pip/_vendor/tenacity/wait.py index 8fdfc8f..6ed97a7 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/tenacity/wait.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/tenacity/wait.py @@ -17,19 +17,12 @@ import abc import random import typing -from datetime import timedelta from pip._vendor.tenacity import _utils if typing.TYPE_CHECKING: from pip._vendor.tenacity import RetryCallState -wait_unit_type = typing.Union[int, float, timedelta] - - -def to_seconds(wait_unit: wait_unit_type) -> float: - return float(wait_unit.total_seconds() if isinstance(wait_unit, timedelta) else wait_unit) - class wait_base(abc.ABC): """Abstract base class for wait strategies.""" @@ -51,8 +44,8 @@ def __radd__(self, other: "wait_base") -> typing.Union["wait_combine", "wait_bas class wait_fixed(wait_base): """Wait strategy that waits a fixed amount of time between each retry.""" - def __init__(self, wait: wait_unit_type) -> None: - self.wait_fixed = to_seconds(wait) + def __init__(self, wait: float) -> None: + self.wait_fixed = wait def __call__(self, retry_state: "RetryCallState") -> float: return self.wait_fixed @@ -68,9 +61,9 @@ def __init__(self) -> None: class wait_random(wait_base): """Wait strategy that waits a random amount of time between min/max.""" - def __init__(self, min: wait_unit_type = 0, max: wait_unit_type = 1) -> None: # noqa - self.wait_random_min = to_seconds(min) - self.wait_random_max = to_seconds(max) + def __init__(self, min: typing.Union[int, float] = 0, max: typing.Union[int, float] = 1) -> None: # noqa + self.wait_random_min = min + self.wait_random_max = max def __call__(self, retry_state: "RetryCallState") -> float: return self.wait_random_min + (random.random() * (self.wait_random_max - self.wait_random_min)) @@ -120,13 +113,13 @@ class wait_incrementing(wait_base): def __init__( self, - start: wait_unit_type = 0, - increment: wait_unit_type = 100, - max: wait_unit_type = _utils.MAX_WAIT, # noqa + start: typing.Union[int, float] = 0, + increment: typing.Union[int, float] = 100, + max: typing.Union[int, float] = _utils.MAX_WAIT, # noqa ) -> None: - self.start = to_seconds(start) - self.increment = to_seconds(increment) - self.max = to_seconds(max) + self.start = start + self.increment = increment + self.max = max def __call__(self, retry_state: "RetryCallState") -> float: result = self.start + (self.increment * (retry_state.attempt_number - 1)) @@ -149,13 +142,13 @@ class wait_exponential(wait_base): def __init__( self, multiplier: typing.Union[int, float] = 1, - max: wait_unit_type = _utils.MAX_WAIT, # noqa + max: typing.Union[int, float] = _utils.MAX_WAIT, # noqa exp_base: typing.Union[int, float] = 2, - min: wait_unit_type = 0, # noqa + min: 
typing.Union[int, float] = 0, # noqa ) -> None: self.multiplier = multiplier - self.min = to_seconds(min) - self.max = to_seconds(max) + self.min = min + self.max = max self.exp_base = exp_base def __call__(self, retry_state: "RetryCallState") -> float: @@ -196,37 +189,3 @@ class wait_random_exponential(wait_exponential): def __call__(self, retry_state: "RetryCallState") -> float: high = super().__call__(retry_state=retry_state) return random.uniform(0, high) - - -class wait_exponential_jitter(wait_base): - """Wait strategy that applies exponential backoff and jitter. - - It allows for a customized initial wait, maximum wait and jitter. - - This implements the strategy described here: - https://cloud.google.com/storage/docs/retry-strategy - - The wait time is min(initial * (2**n + random.uniform(0, jitter)), maximum) - where n is the retry count. - """ - - def __init__( - self, - initial: float = 1, - max: float = _utils.MAX_WAIT, # noqa - exp_base: float = 2, - jitter: float = 1, - ) -> None: - self.initial = initial - self.max = max - self.exp_base = exp_base - self.jitter = jitter - - def __call__(self, retry_state: "RetryCallState") -> float: - jitter = random.uniform(0, self.jitter) - try: - exp = self.exp_base ** (retry_state.attempt_number - 1) - result = self.initial * exp + jitter - except OverflowError: - result = self.max - return max(0, min(result, self.max)) diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/tomli/__init__.py b/venv/lib/python3.10/site-packages/pip/_vendor/tomli/__init__.py index 4c6ec97..1cd8e07 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/tomli/__init__.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/tomli/__init__.py @@ -1,11 +1,6 @@ -# SPDX-License-Identifier: MIT -# SPDX-FileCopyrightText: 2021 Taneli Hukkinen -# Licensed to PSF under a Contributor Agreement. +"""A lil' TOML parser.""" __all__ = ("loads", "load", "TOMLDecodeError") -__version__ = "2.0.1" # DO NOT EDIT THIS LINE MANUALLY. LET bump2version UTILITY DO IT +__version__ = "1.0.3" # DO NOT EDIT THIS LINE MANUALLY. LET bump2version UTILITY DO IT -from ._parser import TOMLDecodeError, load, loads - -# Pretend this exception was created here. -TOMLDecodeError.__module__ = __name__ +from pip._vendor.tomli._parser import TOMLDecodeError, load, loads diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/tomli/_parser.py b/venv/lib/python3.10/site-packages/pip/_vendor/tomli/_parser.py index f1bb0aa..730a746 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/tomli/_parser.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/tomli/_parser.py @@ -1,33 +1,42 @@ -# SPDX-License-Identifier: MIT -# SPDX-FileCopyrightText: 2021 Taneli Hukkinen -# Licensed to PSF under a Contributor Agreement. - -from __future__ import annotations - -from collections.abc import Iterable import string from types import MappingProxyType -from typing import Any, BinaryIO, NamedTuple +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Dict, + FrozenSet, + Iterable, + Optional, + TextIO, + Tuple, +) -from ._re import ( +from pip._vendor.tomli._re import ( + RE_BIN, RE_DATETIME, + RE_HEX, RE_LOCALTIME, RE_NUMBER, + RE_OCT, match_to_datetime, match_to_localtime, match_to_number, ) -from ._types import Key, ParseFloat, Pos + +if TYPE_CHECKING: + from re import Pattern + ASCII_CTRL = frozenset(chr(i) for i in range(32)) | frozenset(chr(127)) # Neither of these sets include quotation mark or backslash. They are # currently handled as separate cases in the parser functions. 
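# === Editor's aside (illustrative Python, not part of the vendored diff) ===
# The tenacity wait.py hunk above removes wait_exponential_jitter. A minimal
# sketch of the backoff it computed (per its own removed code: initial times
# exp_base**(n-1), plus a uniform jitter, clamped to [0, max]):
import random

def exponential_jitter_wait(attempt_number, initial=1.0, maximum=60.0,
                            exp_base=2.0, jitter=1.0):
    # attempt_number is 1-based, matching RetryCallState.attempt_number
    try:
        exp = exp_base ** (attempt_number - 1)
        result = initial * exp + random.uniform(0, jitter)
    except OverflowError:
        result = maximum
    return max(0, min(result, maximum))

# e.g. waits of roughly 1, 2, 4, 8, ... seconds plus jitter, capped at 60
# === end aside ===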
ILLEGAL_BASIC_STR_CHARS = ASCII_CTRL - frozenset("\t") -ILLEGAL_MULTILINE_BASIC_STR_CHARS = ASCII_CTRL - frozenset("\t\n") +ILLEGAL_MULTILINE_BASIC_STR_CHARS = ASCII_CTRL - frozenset("\t\n\r") ILLEGAL_LITERAL_STR_CHARS = ILLEGAL_BASIC_STR_CHARS -ILLEGAL_MULTILINE_LITERAL_STR_CHARS = ILLEGAL_MULTILINE_BASIC_STR_CHARS +ILLEGAL_MULTILINE_LITERAL_STR_CHARS = ASCII_CTRL - frozenset("\t\n") ILLEGAL_COMMENT_CHARS = ILLEGAL_BASIC_STR_CHARS @@ -35,7 +44,6 @@ TOML_WS_AND_NEWLINE = TOML_WS | frozenset("\n") BARE_KEY_CHARS = frozenset(string.ascii_letters + string.digits + "-_") KEY_INITIAL_CHARS = BARE_KEY_CHARS | frozenset("\"'") -HEXDIGIT_CHARS = frozenset(string.hexdigits) BASIC_STR_ESCAPE_REPLACEMENTS = MappingProxyType( { @@ -49,33 +57,30 @@ } ) +# Type annotations +ParseFloat = Callable[[str], Any] +Key = Tuple[str, ...] +Pos = int + class TOMLDecodeError(ValueError): """An error raised if a document is not valid TOML.""" -def load(__fp: BinaryIO, *, parse_float: ParseFloat = float) -> dict[str, Any]: - """Parse TOML from a binary file object.""" - b = __fp.read() - try: - s = b.decode() - except AttributeError: - raise TypeError( - "File must be opened in binary mode, e.g. use `open('foo.toml', 'rb')`" - ) from None +def load(fp: TextIO, *, parse_float: ParseFloat = float) -> Dict[str, Any]: + """Parse TOML from a file object.""" + s = fp.read() return loads(s, parse_float=parse_float) -def loads(__s: str, *, parse_float: ParseFloat = float) -> dict[str, Any]: # noqa: C901 +def loads(s: str, *, parse_float: ParseFloat = float) -> Dict[str, Any]: # noqa: C901 """Parse TOML from a string.""" # The spec allows converting "\r\n" to "\n", even in string # literals. Let's do so to simplify parsing. - src = __s.replace("\r\n", "\n") + src = s.replace("\r\n", "\n") pos = 0 - out = Output(NestedDict(), Flags()) - header: Key = () - parse_float = make_safe_parse_float(parse_float) + state = State() # Parse one statement at a time # (typically means one line in TOML source) @@ -99,18 +104,17 @@ def loads(__s: str, *, parse_float: ParseFloat = float) -> dict[str, Any]: # no pos += 1 continue if char in KEY_INITIAL_CHARS: - pos = key_value_rule(src, pos, out, header, parse_float) + pos = key_value_rule(src, pos, state, parse_float) pos = skip_chars(src, pos, TOML_WS) elif char == "[": try: - second_char: str | None = src[pos + 1] + second_char: Optional[str] = src[pos + 1] except IndexError: second_char = None - out.flags.finalize_pending() if second_char == "[": - pos, header = create_list_rule(src, pos, out) + pos = create_list_rule(src, pos, state) else: - pos, header = create_dict_rule(src, pos, out) + pos = create_dict_rule(src, pos, state) pos = skip_chars(src, pos, TOML_WS) elif char != "#": raise suffixed_err(src, pos, "Invalid statement") @@ -129,7 +133,17 @@ def loads(__s: str, *, parse_float: ParseFloat = float) -> dict[str, Any]: # no ) pos += 1 - return out.data.dict + return state.out.dict + + +class State: + def __init__(self) -> None: + # Mutable, read-only + self.out = NestedDict() + self.flags = Flags() + + # Immutable, read and write + self.header_namespace: Key = () class Flags: @@ -142,16 +156,7 @@ class Flags: EXPLICIT_NEST = 1 def __init__(self) -> None: - self._flags: dict[str, dict] = {} - self._pending_flags: set[tuple[Key, int]] = set() - - def add_pending(self, key: Key, flag: int) -> None: - self._pending_flags.add((key, flag)) - - def finalize_pending(self) -> None: - for key, flag in self._pending_flags: - self.set(key, flag, recursive=False) - self._pending_flags.clear() + 
self._flags: Dict[str, dict] = {} def unset_all(self, key: Key) -> None: cont = self._flags @@ -161,6 +166,19 @@ def unset_all(self, key: Key) -> None: cont = cont[k]["nested"] cont.pop(key[-1], None) + def set_for_relative_key(self, head_key: Key, rel_key: Key, flag: int) -> None: + cont = self._flags + for k in head_key: + if k not in cont: + cont[k] = {"flags": set(), "recursive_flags": set(), "nested": {}} + cont = cont[k]["nested"] + for k in rel_key: + if k in cont: + cont[k]["flags"].add(flag) + else: + cont[k] = {"flags": {flag}, "recursive_flags": set(), "nested": {}} + cont = cont[k]["nested"] + def set(self, key: Key, flag: int, *, recursive: bool) -> None: # noqa: A003 cont = self._flags key_parent, key_stem = key[:-1], key[-1] @@ -193,7 +211,7 @@ def is_(self, key: Key, flag: int) -> bool: class NestedDict: def __init__(self) -> None: # The parsed content of the TOML document - self.dict: dict[str, Any] = {} + self.dict: Dict[str, Any] = {} def get_or_create_nest( self, @@ -224,11 +242,6 @@ def append_nest_to_list(self, key: Key) -> None: cont[last_key] = [{}] -class Output(NamedTuple): - data: NestedDict - flags: Flags - - def skip_chars(src: str, pos: Pos, chars: Iterable[str]) -> Pos: try: while src[pos] in chars: @@ -243,7 +256,7 @@ def skip_until( pos: Pos, expect: str, *, - error_on: frozenset[str], + error_on: FrozenSet[str], error_on_eof: bool, ) -> Pos: try: @@ -251,18 +264,19 @@ def skip_until( except ValueError: new_pos = len(src) if error_on_eof: - raise suffixed_err(src, new_pos, f"Expected {expect!r}") from None + raise suffixed_err(src, new_pos, f'Expected "{expect!r}"') - if not error_on.isdisjoint(src[pos:new_pos]): - while src[pos] not in error_on: - pos += 1 - raise suffixed_err(src, pos, f"Found invalid character {src[pos]!r}") + bad_chars = error_on.intersection(src[pos:new_pos]) + if bad_chars: + bad_char = next(iter(bad_chars)) + bad_pos = src.index(bad_char, pos) + raise suffixed_err(src, bad_pos, f'Found invalid character "{bad_char!r}"') return new_pos def skip_comment(src: str, pos: Pos) -> Pos: try: - char: str | None = src[pos] + char: Optional[str] = src[pos] except IndexError: char = None if char == "#": @@ -281,116 +295,115 @@ def skip_comments_and_array_ws(src: str, pos: Pos) -> Pos: return pos -def create_dict_rule(src: str, pos: Pos, out: Output) -> tuple[Pos, Key]: +def create_dict_rule(src: str, pos: Pos, state: State) -> Pos: pos += 1 # Skip "[" pos = skip_chars(src, pos, TOML_WS) pos, key = parse_key(src, pos) - if out.flags.is_(key, Flags.EXPLICIT_NEST) or out.flags.is_(key, Flags.FROZEN): - raise suffixed_err(src, pos, f"Cannot declare {key} twice") - out.flags.set(key, Flags.EXPLICIT_NEST, recursive=False) + if state.flags.is_(key, Flags.EXPLICIT_NEST) or state.flags.is_(key, Flags.FROZEN): + raise suffixed_err(src, pos, f"Can not declare {key} twice") + state.flags.set(key, Flags.EXPLICIT_NEST, recursive=False) try: - out.data.get_or_create_nest(key) + state.out.get_or_create_nest(key) except KeyError: - raise suffixed_err(src, pos, "Cannot overwrite a value") from None + raise suffixed_err(src, pos, "Can not overwrite a value") + state.header_namespace = key - if not src.startswith("]", pos): - raise suffixed_err(src, pos, "Expected ']' at the end of a table declaration") - return pos + 1, key + if src[pos : pos + 1] != "]": + raise suffixed_err(src, pos, 'Expected "]" at the end of a table declaration') + return pos + 1 -def create_list_rule(src: str, pos: Pos, out: Output) -> tuple[Pos, Key]: +def create_list_rule(src: str, pos: 
Pos, state: State) -> Pos: pos += 2 # Skip "[[" pos = skip_chars(src, pos, TOML_WS) pos, key = parse_key(src, pos) - if out.flags.is_(key, Flags.FROZEN): - raise suffixed_err(src, pos, f"Cannot mutate immutable namespace {key}") + if state.flags.is_(key, Flags.FROZEN): + raise suffixed_err(src, pos, f"Can not mutate immutable namespace {key}") # Free the namespace now that it points to another empty list item... - out.flags.unset_all(key) + state.flags.unset_all(key) # ...but this key precisely is still prohibited from table declaration - out.flags.set(key, Flags.EXPLICIT_NEST, recursive=False) + state.flags.set(key, Flags.EXPLICIT_NEST, recursive=False) try: - out.data.append_nest_to_list(key) + state.out.append_nest_to_list(key) except KeyError: - raise suffixed_err(src, pos, "Cannot overwrite a value") from None + raise suffixed_err(src, pos, "Can not overwrite a value") + state.header_namespace = key - if not src.startswith("]]", pos): - raise suffixed_err(src, pos, "Expected ']]' at the end of an array declaration") - return pos + 2, key + end_marker = src[pos : pos + 2] + if end_marker != "]]": + raise suffixed_err( + src, + pos, + f'Found "{end_marker!r}" at the end of an array declaration.' + ' Expected "]]"', + ) + return pos + 2 -def key_value_rule( - src: str, pos: Pos, out: Output, header: Key, parse_float: ParseFloat -) -> Pos: +def key_value_rule(src: str, pos: Pos, state: State, parse_float: ParseFloat) -> Pos: pos, key, value = parse_key_value_pair(src, pos, parse_float) key_parent, key_stem = key[:-1], key[-1] - abs_key_parent = header + key_parent - - relative_path_cont_keys = (header + key[:i] for i in range(1, len(key))) - for cont_key in relative_path_cont_keys: - # Check that dotted key syntax does not redefine an existing table - if out.flags.is_(cont_key, Flags.EXPLICIT_NEST): - raise suffixed_err(src, pos, f"Cannot redefine namespace {cont_key}") - # Containers in the relative path can't be opened with the table syntax or - # dotted key/value syntax in following table sections. 
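# === Editor's aside (illustrative TOML in Python strings, not part of the
# vendored diff) === The EXPLICIT_NEST flagging above enforces a TOML rule:
# once a dotted key opens a sub-table, a later [table] header may not reopen
# that namespace. For example:
VALID_TOML = """
[tool]
poetry.name = "demo"
"""
INVALID_TOML = """
[tool]
poetry.name = "demo"

[tool.poetry]  # invalid: namespace already defined via the dotted key above
version = "1.0"
"""
# === end aside ===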
- out.flags.add_pending(cont_key, Flags.EXPLICIT_NEST) - - if out.flags.is_(abs_key_parent, Flags.FROZEN): + abs_key_parent = state.header_namespace + key_parent + + if state.flags.is_(abs_key_parent, Flags.FROZEN): raise suffixed_err( - src, pos, f"Cannot mutate immutable namespace {abs_key_parent}" + src, pos, f"Can not mutate immutable namespace {abs_key_parent}" ) - + # Containers in the relative path can't be opened with the table syntax after this + state.flags.set_for_relative_key(state.header_namespace, key, Flags.EXPLICIT_NEST) try: - nest = out.data.get_or_create_nest(abs_key_parent) + nest = state.out.get_or_create_nest(abs_key_parent) except KeyError: - raise suffixed_err(src, pos, "Cannot overwrite a value") from None + raise suffixed_err(src, pos, "Can not overwrite a value") if key_stem in nest: - raise suffixed_err(src, pos, "Cannot overwrite a value") + raise suffixed_err(src, pos, "Can not overwrite a value") # Mark inline table and array namespaces recursively immutable if isinstance(value, (dict, list)): - out.flags.set(header + key, Flags.FROZEN, recursive=True) + abs_key = state.header_namespace + key + state.flags.set(abs_key, Flags.FROZEN, recursive=True) nest[key_stem] = value return pos def parse_key_value_pair( src: str, pos: Pos, parse_float: ParseFloat -) -> tuple[Pos, Key, Any]: +) -> Tuple[Pos, Key, Any]: pos, key = parse_key(src, pos) try: - char: str | None = src[pos] + char: Optional[str] = src[pos] except IndexError: char = None if char != "=": - raise suffixed_err(src, pos, "Expected '=' after a key in a key/value pair") + raise suffixed_err(src, pos, 'Expected "=" after a key in a key/value pair') pos += 1 pos = skip_chars(src, pos, TOML_WS) pos, value = parse_value(src, pos, parse_float) return pos, key, value -def parse_key(src: str, pos: Pos) -> tuple[Pos, Key]: +def parse_key(src: str, pos: Pos) -> Tuple[Pos, Key]: pos, key_part = parse_key_part(src, pos) - key: Key = (key_part,) + key = [key_part] pos = skip_chars(src, pos, TOML_WS) while True: try: - char: str | None = src[pos] + char: Optional[str] = src[pos] except IndexError: char = None if char != ".": - return pos, key + return pos, tuple(key) pos += 1 pos = skip_chars(src, pos, TOML_WS) pos, key_part = parse_key_part(src, pos) - key += (key_part,) + key.append(key_part) pos = skip_chars(src, pos, TOML_WS) -def parse_key_part(src: str, pos: Pos) -> tuple[Pos, str]: +def parse_key_part(src: str, pos: Pos) -> Tuple[Pos, str]: try: - char: str | None = src[pos] + char: Optional[str] = src[pos] except IndexError: char = None if char in BARE_KEY_CHARS: @@ -404,17 +417,17 @@ def parse_key_part(src: str, pos: Pos) -> tuple[Pos, str]: raise suffixed_err(src, pos, "Invalid initial character for a key part") -def parse_one_line_basic_str(src: str, pos: Pos) -> tuple[Pos, str]: +def parse_one_line_basic_str(src: str, pos: Pos) -> Tuple[Pos, str]: pos += 1 return parse_basic_str(src, pos, multiline=False) -def parse_array(src: str, pos: Pos, parse_float: ParseFloat) -> tuple[Pos, list]: +def parse_array(src: str, pos: Pos, parse_float: ParseFloat) -> Tuple[Pos, list]: pos += 1 array: list = [] pos = skip_comments_and_array_ws(src, pos) - if src.startswith("]", pos): + if src[pos : pos + 1] == "]": return pos + 1, array while True: pos, val = parse_value(src, pos, parse_float) @@ -429,29 +442,29 @@ def parse_array(src: str, pos: Pos, parse_float: ParseFloat) -> tuple[Pos, list] pos += 1 pos = skip_comments_and_array_ws(src, pos) - if src.startswith("]", pos): + if src[pos : pos + 1] == "]": return pos + 
1, array -def parse_inline_table(src: str, pos: Pos, parse_float: ParseFloat) -> tuple[Pos, dict]: +def parse_inline_table(src: str, pos: Pos, parse_float: ParseFloat) -> Tuple[Pos, dict]: pos += 1 nested_dict = NestedDict() flags = Flags() pos = skip_chars(src, pos, TOML_WS) - if src.startswith("}", pos): + if src[pos : pos + 1] == "}": return pos + 1, nested_dict.dict while True: pos, key, value = parse_key_value_pair(src, pos, parse_float) key_parent, key_stem = key[:-1], key[-1] if flags.is_(key, Flags.FROZEN): - raise suffixed_err(src, pos, f"Cannot mutate immutable namespace {key}") + raise suffixed_err(src, pos, f"Can not mutate immutable namespace {key}") try: nest = nested_dict.get_or_create_nest(key_parent, access_lists=False) except KeyError: - raise suffixed_err(src, pos, "Cannot overwrite a value") from None + raise suffixed_err(src, pos, "Can not overwrite a value") if key_stem in nest: - raise suffixed_err(src, pos, f"Duplicate inline table key {key_stem!r}") + raise suffixed_err(src, pos, f'Duplicate inline table key "{key_stem}"') nest[key_stem] = value pos = skip_chars(src, pos, TOML_WS) c = src[pos : pos + 1] @@ -467,7 +480,7 @@ def parse_inline_table(src: str, pos: Pos, parse_float: ParseFloat) -> tuple[Pos def parse_basic_str_escape( src: str, pos: Pos, *, multiline: bool = False -) -> tuple[Pos, str]: +) -> Tuple[Pos, str]: escape_id = src[pos : pos + 2] pos += 2 if multiline and escape_id in {"\\ ", "\\\t", "\\\n"}: @@ -475,12 +488,11 @@ def parse_basic_str_escape( # the doc. Error if non-whitespace is found before newline. if escape_id != "\\\n": pos = skip_chars(src, pos, TOML_WS) - try: - char = src[pos] - except IndexError: + char = src[pos : pos + 1] + if not char: return pos, "" if char != "\n": - raise suffixed_err(src, pos, "Unescaped '\\' in a string") + raise suffixed_err(src, pos, 'Unescaped "\\" in a string') pos += 1 pos = skip_chars(src, pos, TOML_WS_AND_NEWLINE) return pos, "" @@ -491,16 +503,18 @@ def parse_basic_str_escape( try: return pos, BASIC_STR_ESCAPE_REPLACEMENTS[escape_id] except KeyError: - raise suffixed_err(src, pos, "Unescaped '\\' in a string") from None + if len(escape_id) != 2: + raise suffixed_err(src, pos, "Unterminated string") + raise suffixed_err(src, pos, 'Unescaped "\\" in a string') -def parse_basic_str_escape_multiline(src: str, pos: Pos) -> tuple[Pos, str]: +def parse_basic_str_escape_multiline(src: str, pos: Pos) -> Tuple[Pos, str]: return parse_basic_str_escape(src, pos, multiline=True) -def parse_hex_char(src: str, pos: Pos, hex_len: int) -> tuple[Pos, str]: +def parse_hex_char(src: str, pos: Pos, hex_len: int) -> Tuple[Pos, str]: hex_str = src[pos : pos + hex_len] - if len(hex_str) != hex_len or not HEXDIGIT_CHARS.issuperset(hex_str): + if len(hex_str) != hex_len or any(c not in string.hexdigits for c in hex_str): raise suffixed_err(src, pos, "Invalid hex value") pos += hex_len hex_int = int(hex_str, 16) @@ -509,7 +523,7 @@ def parse_hex_char(src: str, pos: Pos, hex_len: int) -> tuple[Pos, str]: return pos, chr(hex_int) -def parse_literal_str(src: str, pos: Pos) -> tuple[Pos, str]: +def parse_literal_str(src: str, pos: Pos) -> Tuple[Pos, str]: pos += 1 # Skip starting apostrophe start_pos = pos pos = skip_until( @@ -518,9 +532,9 @@ def parse_literal_str(src: str, pos: Pos) -> tuple[Pos, str]: return pos + 1, src[start_pos:pos] # Skip ending apostrophe -def parse_multiline_str(src: str, pos: Pos, *, literal: bool) -> tuple[Pos, str]: +def parse_multiline_str(src: str, pos: Pos, *, literal: bool) -> Tuple[Pos, str]: pos += 
3 - if src.startswith("\n", pos): + if src[pos : pos + 1] == "\n": pos += 1 if literal: @@ -540,16 +554,16 @@ def parse_multiline_str(src: str, pos: Pos, *, literal: bool) -> tuple[Pos, str] # Add at maximum two extra apostrophes/quotes if the end sequence # is 4 or 5 chars long instead of just 3. - if not src.startswith(delim, pos): + if src[pos : pos + 1] != delim: return pos, result pos += 1 - if not src.startswith(delim, pos): + if src[pos : pos + 1] != delim: return pos, result + delim pos += 1 return pos, result + (delim * 2) -def parse_basic_str(src: str, pos: Pos, *, multiline: bool) -> tuple[Pos, str]: +def parse_basic_str(src: str, pos: Pos, *, multiline: bool) -> Tuple[Pos, str]: if multiline: error_on = ILLEGAL_MULTILINE_BASIC_STR_CHARS parse_escapes = parse_basic_str_escape_multiline @@ -562,11 +576,11 @@ def parse_basic_str(src: str, pos: Pos, *, multiline: bool) -> tuple[Pos, str]: try: char = src[pos] except IndexError: - raise suffixed_err(src, pos, "Unterminated string") from None + raise suffixed_err(src, pos, "Unterminated string") if char == '"': if not multiline: return pos + 1, result + src[start_pos:pos] - if src.startswith('"""', pos): + if src[pos + 1 : pos + 3] == '""': return pos + 3, result + src[start_pos:pos] pos += 1 continue @@ -577,67 +591,86 @@ def parse_basic_str(src: str, pos: Pos, *, multiline: bool) -> tuple[Pos, str]: start_pos = pos continue if char in error_on: - raise suffixed_err(src, pos, f"Illegal character {char!r}") + raise suffixed_err(src, pos, f'Illegal character "{char!r}"') pos += 1 +def parse_regex(src: str, pos: Pos, regex: "Pattern") -> Tuple[Pos, str]: + match = regex.match(src, pos) + if not match: + raise suffixed_err(src, pos, "Unexpected sequence") + return match.end(), match.group() + + def parse_value( # noqa: C901 src: str, pos: Pos, parse_float: ParseFloat -) -> tuple[Pos, Any]: +) -> Tuple[Pos, Any]: try: - char: str | None = src[pos] + char: Optional[str] = src[pos] except IndexError: char = None - # IMPORTANT: order conditions based on speed of checking and likelihood - # Basic strings if char == '"': - if src.startswith('"""', pos): + if src[pos + 1 : pos + 3] == '""': return parse_multiline_str(src, pos, literal=False) return parse_one_line_basic_str(src, pos) # Literal strings if char == "'": - if src.startswith("'''", pos): + if src[pos + 1 : pos + 3] == "''": return parse_multiline_str(src, pos, literal=True) return parse_literal_str(src, pos) # Booleans if char == "t": - if src.startswith("true", pos): + if src[pos + 1 : pos + 4] == "rue": return pos + 4, True if char == "f": - if src.startswith("false", pos): + if src[pos + 1 : pos + 5] == "alse": return pos + 5, False - # Arrays - if char == "[": - return parse_array(src, pos, parse_float) - - # Inline tables - if char == "{": - return parse_inline_table(src, pos, parse_float) - # Dates and times datetime_match = RE_DATETIME.match(src, pos) if datetime_match: try: datetime_obj = match_to_datetime(datetime_match) - except ValueError as e: - raise suffixed_err(src, pos, "Invalid date or datetime") from e + except ValueError: + raise suffixed_err(src, pos, "Invalid date or datetime") return datetime_match.end(), datetime_obj localtime_match = RE_LOCALTIME.match(src, pos) if localtime_match: return localtime_match.end(), match_to_localtime(localtime_match) - # Integers and "normal" floats. 
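# === Editor's aside (illustrative Python, not part of the vendored diff) ===
# Why parse_value above must try RE_DATETIME before RE_NUMBER: both patterns
# can match at a leading digit, and the number regex would happily consume
# just the year of a date literal:
import re
RE_NUMBER_DEMO = re.compile(r"[+-]?(?:0|[1-9](?:_?[0-9])*)")
match = RE_NUMBER_DEMO.match("1988-10-27")
assert match is not None and match.group() == "1988"  # grabs only the year
# === end aside ===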
+ # Non-decimal integers + if char == "0": + second_char = src[pos + 1 : pos + 2] + if second_char == "x": + pos, hex_str = parse_regex(src, pos + 2, RE_HEX) + return pos, int(hex_str, 16) + if second_char == "o": + pos, oct_str = parse_regex(src, pos + 2, RE_OCT) + return pos, int(oct_str, 8) + if second_char == "b": + pos, bin_str = parse_regex(src, pos + 2, RE_BIN) + return pos, int(bin_str, 2) + + # Decimal integers and "normal" floats. # The regex will greedily match any type starting with a decimal - # char, so needs to be located after handling of dates and times. + # char, so needs to be located after handling of non-decimal ints, + # and dates and times. number_match = RE_NUMBER.match(src, pos) if number_match: return number_match.end(), match_to_number(number_match, parse_float) + # Arrays + if char == "[": + return parse_array(src, pos, parse_float) + + # Inline tables + if char == "{": + return parse_inline_table(src, pos, parse_float) + # Special floats first_three = src[pos : pos + 3] if first_three in {"inf", "nan"}: @@ -668,24 +701,3 @@ def coord_repr(src: str, pos: Pos) -> str: def is_unicode_scalar_value(codepoint: int) -> bool: return (0 <= codepoint <= 55295) or (57344 <= codepoint <= 1114111) - - -def make_safe_parse_float(parse_float: ParseFloat) -> ParseFloat: - """A decorator to make `parse_float` safe. - - `parse_float` must not return dicts or lists, because these types - would be mixed with parsed TOML tables and arrays, thus confusing - the parser. The returned decorated callable raises `ValueError` - instead of returning illegal types. - """ - # The default `float` callable never returns illegal types. Optimize it. - if parse_float is float: # type: ignore[comparison-overlap] - return float - - def safe_parse_float(float_str: str) -> Any: - float_value = parse_float(float_str) - if isinstance(float_value, (dict, list)): - raise ValueError("parse_float must not return dicts or lists") - return float_value - - return safe_parse_float diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/tomli/_re.py b/venv/lib/python3.10/site-packages/pip/_vendor/tomli/_re.py index 994bb74..3883fdd 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/tomli/_re.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/tomli/_re.py @@ -1,55 +1,37 @@ -# SPDX-License-Identifier: MIT -# SPDX-FileCopyrightText: 2021 Taneli Hukkinen -# Licensed to PSF under a Contributor Agreement. - -from __future__ import annotations - from datetime import date, datetime, time, timedelta, timezone, tzinfo -from functools import lru_cache import re -from typing import Any +from typing import TYPE_CHECKING, Any, Optional, Union -from ._types import ParseFloat +if TYPE_CHECKING: + from re import Match + + from pip._vendor.tomli._parser import ParseFloat # E.g. # - 00:32:00.999999 # - 00:32:00 -_TIME_RE_STR = r"([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9])(?:\.([0-9]{1,6})[0-9]*)?" +_TIME_RE_STR = r"([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9])(\.[0-9]+)?" +RE_HEX = re.compile(r"[0-9A-Fa-f](?:_?[0-9A-Fa-f])*") +RE_BIN = re.compile(r"[01](?:_?[01])*") +RE_OCT = re.compile(r"[0-7](?:_?[0-7])*") RE_NUMBER = re.compile( - r""" -0 -(?: - x[0-9A-Fa-f](?:_?[0-9A-Fa-f])* # hex - | - b[01](?:_?[01])* # bin - | - o[0-7](?:_?[0-7])* # oct -) -| -[+-]?(?:0|[1-9](?:_?[0-9])*) # dec, integer part -(?P - (?:\.[0-9](?:_?[0-9])*)? # optional fractional part - (?:[eE][+-]?[0-9](?:_?[0-9])*)? 
# optional exponent part -) -""", - flags=re.VERBOSE, + r"[+-]?(?:0|[1-9](?:_?[0-9])*)" # integer + + r"(?:\.[0-9](?:_?[0-9])*)?" # optional fractional part + + r"(?:[eE][+-]?[0-9](?:_?[0-9])*)?" # optional exponent part ) RE_LOCALTIME = re.compile(_TIME_RE_STR) RE_DATETIME = re.compile( - rf""" -([0-9]{{4}})-(0[1-9]|1[0-2])-(0[1-9]|[12][0-9]|3[01]) # date, e.g. 1988-10-27 -(?: - [Tt ] - {_TIME_RE_STR} - (?:([Zz])|([+-])([01][0-9]|2[0-3]):([0-5][0-9]))? # optional time offset -)? -""", - flags=re.VERBOSE, + r"([0-9]{4})-(0[1-9]|1[0-2])-(0[1-9]|1[0-9]|2[0-9]|3[01])" # date, e.g. 1988-10-27 + + r"(?:" + + r"[T ]" + + _TIME_RE_STR + + r"(?:(Z)|([+-])([01][0-9]|2[0-3]):([0-5][0-9]))?" # time offset + + r")?" ) -def match_to_datetime(match: re.Match) -> datetime | date: +def match_to_datetime(match: "Match") -> Union[datetime, date]: """Convert a `RE_DATETIME` match to `datetime.datetime` or `datetime.date`. Raises ValueError if the match does not correspond to a valid date @@ -64,7 +46,7 @@ def match_to_datetime(match: re.Match) -> datetime | date: sec_str, micros_str, zulu_time, - offset_sign_str, + offset_dir_str, offset_hour_str, offset_minute_str, ) = match.groups() @@ -72,10 +54,14 @@ def match_to_datetime(match: re.Match) -> datetime | date: if hour_str is None: return date(year, month, day) hour, minute, sec = int(hour_str), int(minute_str), int(sec_str) - micros = int(micros_str.ljust(6, "0")) if micros_str else 0 - if offset_sign_str: - tz: tzinfo | None = cached_tz( - offset_hour_str, offset_minute_str, offset_sign_str + micros = int(micros_str[1:].ljust(6, "0")[:6]) if micros_str else 0 + if offset_dir_str: + offset_dir = 1 if offset_dir_str == "+" else -1 + tz: Optional[tzinfo] = timezone( + timedelta( + hours=offset_dir * int(offset_hour_str), + minutes=offset_dir * int(offset_minute_str), + ) ) elif zulu_time: tz = timezone.utc @@ -84,24 +70,14 @@ def match_to_datetime(match: re.Match) -> datetime | date: return datetime(year, month, day, hour, minute, sec, micros, tzinfo=tz) -@lru_cache(maxsize=None) -def cached_tz(hour_str: str, minute_str: str, sign_str: str) -> timezone: - sign = 1 if sign_str == "+" else -1 - return timezone( - timedelta( - hours=sign * int(hour_str), - minutes=sign * int(minute_str), - ) - ) - - -def match_to_localtime(match: re.Match) -> time: +def match_to_localtime(match: "Match") -> time: hour_str, minute_str, sec_str, micros_str = match.groups() - micros = int(micros_str.ljust(6, "0")) if micros_str else 0 + micros = int(micros_str[1:].ljust(6, "0")[:6]) if micros_str else 0 return time(int(hour_str), int(minute_str), int(sec_str), micros) -def match_to_number(match: re.Match, parse_float: ParseFloat) -> Any: - if match.group("floatpart"): - return parse_float(match.group()) - return int(match.group(), 0) +def match_to_number(match: "Match", parse_float: "ParseFloat") -> Any: + match_str = match.group() + if "." 
in match_str or "e" in match_str or "E" in match_str: + return parse_float(match_str) + return int(match_str) diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/typing_extensions.py b/venv/lib/python3.10/site-packages/pip/_vendor/typing_extensions.py index 34199c2..9f1c7aa 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/typing_extensions.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/typing_extensions.py @@ -1,28 +1,52 @@ import abc import collections import collections.abc -import functools import operator import sys -import types as _types import typing +# After PEP 560, internal typing API was substantially reworked. +# This is especially important for Protocol class which uses internal APIs +# quite extensively. +PEP_560 = sys.version_info[:3] >= (3, 7, 0) +if PEP_560: + GenericMeta = type +else: + # 3.6 + from typing import GenericMeta, _type_vars # noqa + +# The two functions below are copies of typing internal helpers. +# They are needed by _ProtocolMeta + + +def _no_slots_copy(dct): + dict_copy = dict(dct) + if '__slots__' in dict_copy: + for slot in dict_copy['__slots__']: + dict_copy.pop(slot, None) + return dict_copy + + +def _check_generic(cls, parameters): + if not cls.__parameters__: + raise TypeError(f"{cls} is not a generic class") + alen = len(parameters) + elen = len(cls.__parameters__) + if alen != elen: + raise TypeError(f"Too {'many' if alen > elen else 'few'} arguments for {cls};" + f" actual {alen}, expected {elen}") + + +# Please keep __all__ alphabetized within each category. __all__ = [ # Super-special typing primitives. - 'Any', 'ClassVar', 'Concatenate', 'Final', - 'LiteralString', 'ParamSpec', - 'ParamSpecArgs', - 'ParamSpecKwargs', 'Self', 'Type', - 'TypeVar', - 'TypeVarTuple', - 'Unpack', # ABCs (from collections.abc). 'Awaitable', @@ -38,7 +62,6 @@ 'Counter', 'Deque', 'DefaultDict', - 'NamedTuple', 'OrderedDict', 'TypedDict', @@ -47,101 +70,49 @@ # One-off things. 'Annotated', - 'assert_never', - 'assert_type', - 'clear_overloads', - 'dataclass_transform', - 'get_overloads', 'final', - 'get_args', - 'get_origin', - 'get_type_hints', 'IntVar', - 'is_typeddict', 'Literal', 'NewType', 'overload', - 'override', 'Protocol', - 'reveal_type', 'runtime', 'runtime_checkable', 'Text', 'TypeAlias', 'TypeGuard', 'TYPE_CHECKING', - 'Never', - 'NoReturn', - 'Required', - 'NotRequired', ] -# for backward compatibility -PEP_560 = True -GenericMeta = type - -# The functions below are modified copies of typing internal helpers. -# They are needed by _ProtocolMeta and they provide support for PEP 646. - -_marker = object() - - -def _check_generic(cls, parameters, elen=_marker): - """Check correct count for parameters of a generic cls (internal helper). - This gives a nice error message in case of count mismatch. 
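# === Editor's aside (illustrative Python, not part of the vendored diff) ===
# The arity check in _check_generic above mirrors what typing itself raises
# when a generic class is subscripted with the wrong parameter count:
from typing import Dict
try:
    Dict[int]  # Dict expects two parameters (key type and value type)
except TypeError as err:
    print(err)  # e.g. "Too few parameters for typing.Dict; actual 1, expected 2"
# === end aside ===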
- """ - if not elen: - raise TypeError(f"{cls} is not a generic class") - if elen is _marker: - if not hasattr(cls, "__parameters__") or not cls.__parameters__: - raise TypeError(f"{cls} is not a generic class") - elen = len(cls.__parameters__) - alen = len(parameters) - if alen != elen: - if hasattr(cls, "__parameters__"): - parameters = [p for p in cls.__parameters__ if not _is_unpack(p)] - num_tv_tuples = sum(isinstance(p, TypeVarTuple) for p in parameters) - if (num_tv_tuples > 0) and (alen >= elen - num_tv_tuples): - return - raise TypeError(f"Too {'many' if alen > elen else 'few'} parameters for {cls};" - f" actual {alen}, expected {elen}") - +if PEP_560: + __all__.extend(["get_args", "get_origin", "get_type_hints"]) -if sys.version_info >= (3, 10): - def _should_collect_from_parameters(t): - return isinstance( - t, (typing._GenericAlias, _types.GenericAlias, _types.UnionType) - ) -elif sys.version_info >= (3, 9): - def _should_collect_from_parameters(t): - return isinstance(t, (typing._GenericAlias, _types.GenericAlias)) +# 3.6.2+ +if hasattr(typing, 'NoReturn'): + NoReturn = typing.NoReturn +# 3.6.0-3.6.1 else: - def _should_collect_from_parameters(t): - return isinstance(t, typing._GenericAlias) and not t._special + class _NoReturn(typing._FinalTypingBase, _root=True): + """Special type indicating functions that never return. + Example:: + + from typing import NoReturn + def stop() -> NoReturn: + raise Exception('no way') -def _collect_type_vars(types, typevar_types=None): - """Collect all type variable contained in types in order of - first appearance (lexicographic order). For example:: + This type is invalid in other positions, e.g., ``List[NoReturn]`` + will fail in static type checkers. + """ + __slots__ = () - _collect_type_vars((T, List[S, T])) == (T, S) - """ - if typevar_types is None: - typevar_types = typing.TypeVar - tvars = [] - for t in types: - if ( - isinstance(t, typevar_types) and - t not in tvars and - not _is_unpack(t) - ): - tvars.append(t) - if _should_collect_from_parameters(t): - tvars.extend([t for t in t.__parameters__ if t not in tvars]) - return tuple(tvars) + def __instancecheck__(self, obj): + raise TypeError("NoReturn cannot be used with isinstance().") + def __subclasscheck__(self, cls): + raise TypeError("NoReturn cannot be used with issubclass().") -NoReturn = typing.NoReturn + NoReturn = _NoReturn(_root=True) # Some unconstrained type variables. These are used by the container types. # (These are not for export.) @@ -151,37 +122,6 @@ def _collect_type_vars(types, typevar_types=None): T_co = typing.TypeVar('T_co', covariant=True) # Any type covariant containers. T_contra = typing.TypeVar('T_contra', contravariant=True) # Ditto contravariant. - -if sys.version_info >= (3, 11): - from typing import Any -else: - - class _AnyMeta(type): - def __instancecheck__(self, obj): - if self is Any: - raise TypeError("typing_extensions.Any cannot be used with isinstance()") - return super().__instancecheck__(obj) - - def __repr__(self): - if self is Any: - return "typing_extensions.Any" - return super().__repr__() - - class Any(metaclass=_AnyMeta): - """Special type indicating an unconstrained type. - - Any is compatible with every type. - - Any assumed to have all methods. - - All values assumed to be instances of Any. - Note that all the above statements are true from the point of view of - static type checkers. At runtime, Any should not be used with instance - checks. 
- """ - def __new__(cls, *args, **kwargs): - if cls is Any: - raise TypeError("Any cannot be instantiated") - return super().__new__(cls, *args, **kwargs) - - ClassVar = typing.ClassVar # On older versions of typing there is an internal class named "Final". @@ -189,7 +129,7 @@ def __new__(cls, *args, **kwargs): if hasattr(typing, 'Final') and sys.version_info[:2] >= (3, 7): Final = typing.Final # 3.7 -else: +elif sys.version_info[:2] >= (3, 7): class _FinalForm(typing._SpecialForm, _root=True): def __repr__(self): @@ -197,7 +137,7 @@ def __repr__(self): def __getitem__(self, parameters): item = typing._type_check(parameters, - f'{self._name} accepts only a single type.') + f'{self._name} accepts only single type') return typing._GenericAlias(self, (item,)) Final = _FinalForm('Final', @@ -214,13 +154,67 @@ class FastConnector(Connection): TIMEOUT = 1 # Error reported by type checker There is no runtime checking of these properties.""") +# 3.6 +else: + class _Final(typing._FinalTypingBase, _root=True): + """A special typing construct to indicate that a name + cannot be re-assigned or overridden in a subclass. + For example: + + MAX_SIZE: Final = 9000 + MAX_SIZE += 1 # Error reported by type checker + + class Connection: + TIMEOUT: Final[int] = 10 + class FastConnector(Connection): + TIMEOUT = 1 # Error reported by type checker + + There is no runtime checking of these properties. + """ + + __slots__ = ('__type__',) + + def __init__(self, tp=None, **kwds): + self.__type__ = tp + + def __getitem__(self, item): + cls = type(self) + if self.__type__ is None: + return cls(typing._type_check(item, + f'{cls.__name__[1:]} accepts only single type.'), + _root=True) + raise TypeError(f'{cls.__name__[1:]} cannot be further subscripted') + + def _eval_type(self, globalns, localns): + new_tp = typing._eval_type(self.__type__, globalns, localns) + if new_tp == self.__type__: + return self + return type(self)(new_tp, _root=True) + + def __repr__(self): + r = super().__repr__() + if self.__type__ is not None: + r += f'[{typing._type_repr(self.__type__)}]' + return r + + def __hash__(self): + return hash((type(self).__name__, self.__type__)) + + def __eq__(self, other): + if not isinstance(other, _Final): + return NotImplemented + if self.__type__ is not None: + return self.__type__ == other.__type__ + return self is other + + Final = _Final(_root=True) -if sys.version_info >= (3, 11): + +# 3.8+ +if hasattr(typing, 'final'): final = typing.final +# 3.6-3.7 else: - # @final exists in 3.8+, but we backport it for all versions - # before 3.11 to keep support for the __final__ attribute. - # See https://bugs.python.org/issue46342 def final(f): """This decorator can be used to indicate to type checkers that the decorated method cannot be overridden, and decorated class @@ -239,17 +233,8 @@ class Leaf: class Other(Leaf): # Error reported by type checker ... - There is no runtime checking of these properties. The decorator - sets the ``__final__`` attribute to ``True`` on the decorated object - to allow runtime introspection. + There is no runtime checking of these properties. """ - try: - f.__final__ = True - except (AttributeError, TypeError): - # Skip the attribute silently if it is not writable. - # AttributeError happens if the object has __slots__ or a - # read-only property, TypeError if it's a builtin class. 
- pass return f @@ -261,7 +246,7 @@ def IntVar(name): if hasattr(typing, 'Literal'): Literal = typing.Literal # 3.7: -else: +elif sys.version_info[:2] >= (3, 7): class _LiteralForm(typing._SpecialForm, _root=True): def __repr__(self): @@ -283,75 +268,59 @@ def __getitem__(self, parameters): Literal[...] cannot be subclassed. There is no runtime checking verifying that the parameter is actually a value instead of a type.""") +# 3.6: +else: + class _Literal(typing._FinalTypingBase, _root=True): + """A type that can be used to indicate to type checkers that the + corresponding value has a value literally equivalent to the + provided parameter. For example: + var: Literal[4] = 4 -_overload_dummy = typing._overload_dummy # noqa - + The type checker understands that 'var' is literally equal to the + value 4 and no other value. -if hasattr(typing, "get_overloads"): # 3.11+ - overload = typing.overload - get_overloads = typing.get_overloads - clear_overloads = typing.clear_overloads -else: - # {module: {qualname: {firstlineno: func}}} - _overload_registry = collections.defaultdict( - functools.partial(collections.defaultdict, dict) - ) - - def overload(func): - """Decorator for overloaded functions/methods. - - In a stub file, place two or more stub definitions for the same - function in a row, each decorated with @overload. For example: - - @overload - def utf8(value: None) -> None: ... - @overload - def utf8(value: bytes) -> bytes: ... - @overload - def utf8(value: str) -> bytes: ... - - In a non-stub file (i.e. a regular .py file), do the same but - follow it with an implementation. The implementation should *not* - be decorated with @overload. For example: - - @overload - def utf8(value: None) -> None: ... - @overload - def utf8(value: bytes) -> bytes: ... - @overload - def utf8(value: str) -> bytes: ... - def utf8(value): - # implementation goes here - - The overloads for a function can be retrieved at runtime using the - get_overloads() function. + Literal[...] cannot be subclassed. There is no runtime checking + verifying that the parameter is actually a value instead of a type. """ - # classmethod and staticmethod - f = getattr(func, "__func__", func) - try: - _overload_registry[f.__module__][f.__qualname__][ - f.__code__.co_firstlineno - ] = func - except AttributeError: - # Not a normal function; ignore. 
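# === Editor's aside (illustrative Python, not part of the vendored diff;
# assumes a typing_extensions release that ships get_overloads) ===
# Usage of the overload registry being removed above:
from typing_extensions import overload, get_overloads

@overload
def utf8(value: None) -> None: ...
@overload
def utf8(value: str) -> bytes: ...
def utf8(value):
    return value.encode() if isinstance(value, str) else None

assert len(get_overloads(utf8)) == 2  # both stub definitions were registered
# === end aside ===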
- pass - return _overload_dummy - def get_overloads(func): - """Return all defined overloads for *func* as a sequence.""" - # classmethod and staticmethod - f = getattr(func, "__func__", func) - if f.__module__ not in _overload_registry: - return [] - mod_dict = _overload_registry[f.__module__] - if f.__qualname__ not in mod_dict: - return [] - return list(mod_dict[f.__qualname__].values()) + __slots__ = ('__values__',) + + def __init__(self, values=None, **kwds): + self.__values__ = values + + def __getitem__(self, values): + cls = type(self) + if self.__values__ is None: + if not isinstance(values, tuple): + values = (values,) + return cls(values, _root=True) + raise TypeError(f'{cls.__name__[1:]} cannot be further subscripted') + + def _eval_type(self, globalns, localns): + return self + + def __repr__(self): + r = super().__repr__() + if self.__values__ is not None: + r += f'[{", ".join(map(typing._type_repr, self.__values__))}]' + return r + + def __hash__(self): + return hash((type(self).__name__, self.__values__)) + + def __eq__(self, other): + if not isinstance(other, _Literal): + return NotImplemented + if self.__values__ is not None: + return self.__values__ == other.__values__ + return self is other + + Literal = _Literal(_root=True) - def clear_overloads(): - """Clear all overloads in the registry.""" - _overload_registry.clear() + +_overload_dummy = typing._overload_dummy # noqa +overload = typing.overload # This is not a real generic class. Don't use outside annotations. @@ -361,30 +330,154 @@ def clear_overloads(): # A few are simply re-exported for completeness. +class _ExtensionsGenericMeta(GenericMeta): + def __subclasscheck__(self, subclass): + """This mimics a more modern GenericMeta.__subclasscheck__() logic + (that does not have problems with recursion) to work around interactions + between collections, typing, and typing_extensions on older + versions of Python, see https://github.com/python/typing/issues/501. 
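# === Editor's aside (illustrative Python, not part of the vendored diff) ===
# The Protocol/runtime_checkable machinery patched above supports structural
# isinstance() checks like this one (modern typing behaves identically):
from typing import Protocol, runtime_checkable

@runtime_checkable
class SupportsClose(Protocol):
    def close(self) -> None: ...

class File:
    def close(self) -> None:
        pass

assert isinstance(File(), SupportsClose)  # passes without any inheritance
# === end aside ===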
+ """ + if self.__origin__ is not None: + if sys._getframe(1).f_globals['__name__'] not in ['abc', 'functools']: + raise TypeError("Parameterized generics cannot be used with class " + "or instance checks") + return False + if not self.__extra__: + return super().__subclasscheck__(subclass) + res = self.__extra__.__subclasshook__(subclass) + if res is not NotImplemented: + return res + if self.__extra__ in subclass.__mro__: + return True + for scls in self.__extra__.__subclasses__(): + if isinstance(scls, GenericMeta): + continue + if issubclass(subclass, scls): + return True + return False + + Awaitable = typing.Awaitable Coroutine = typing.Coroutine AsyncIterable = typing.AsyncIterable AsyncIterator = typing.AsyncIterator -Deque = typing.Deque + +# 3.6.1+ +if hasattr(typing, 'Deque'): + Deque = typing.Deque +# 3.6.0 +else: + class Deque(collections.deque, typing.MutableSequence[T], + metaclass=_ExtensionsGenericMeta, + extra=collections.deque): + __slots__ = () + + def __new__(cls, *args, **kwds): + if cls._gorg is Deque: + return collections.deque(*args, **kwds) + return typing._generic_new(collections.deque, cls, *args, **kwds) + ContextManager = typing.ContextManager -AsyncContextManager = typing.AsyncContextManager +# 3.6.2+ +if hasattr(typing, 'AsyncContextManager'): + AsyncContextManager = typing.AsyncContextManager +# 3.6.0-3.6.1 +else: + from _collections_abc import _check_methods as _check_methods_in_mro # noqa + + class AsyncContextManager(typing.Generic[T_co]): + __slots__ = () + + async def __aenter__(self): + return self + + @abc.abstractmethod + async def __aexit__(self, exc_type, exc_value, traceback): + return None + + @classmethod + def __subclasshook__(cls, C): + if cls is AsyncContextManager: + return _check_methods_in_mro(C, "__aenter__", "__aexit__") + return NotImplemented + DefaultDict = typing.DefaultDict # 3.7.2+ if hasattr(typing, 'OrderedDict'): OrderedDict = typing.OrderedDict # 3.7.0-3.7.2 -else: +elif (3, 7, 0) <= sys.version_info[:3] < (3, 7, 2): OrderedDict = typing._alias(collections.OrderedDict, (KT, VT)) +# 3.6 +else: + class OrderedDict(collections.OrderedDict, typing.MutableMapping[KT, VT], + metaclass=_ExtensionsGenericMeta, + extra=collections.OrderedDict): + + __slots__ = () + + def __new__(cls, *args, **kwds): + if cls._gorg is OrderedDict: + return collections.OrderedDict(*args, **kwds) + return typing._generic_new(collections.OrderedDict, cls, *args, **kwds) + +# 3.6.2+ +if hasattr(typing, 'Counter'): + Counter = typing.Counter +# 3.6.0-3.6.1 +else: + class Counter(collections.Counter, + typing.Dict[T, int], + metaclass=_ExtensionsGenericMeta, extra=collections.Counter): + + __slots__ = () + + def __new__(cls, *args, **kwds): + if cls._gorg is Counter: + return collections.Counter(*args, **kwds) + return typing._generic_new(collections.Counter, cls, *args, **kwds) + +# 3.6.1+ +if hasattr(typing, 'ChainMap'): + ChainMap = typing.ChainMap +elif hasattr(collections, 'ChainMap'): + class ChainMap(collections.ChainMap, typing.MutableMapping[KT, VT], + metaclass=_ExtensionsGenericMeta, + extra=collections.ChainMap): + + __slots__ = () + + def __new__(cls, *args, **kwds): + if cls._gorg is ChainMap: + return collections.ChainMap(*args, **kwds) + return typing._generic_new(collections.ChainMap, cls, *args, **kwds) + +# 3.6.1+ +if hasattr(typing, 'AsyncGenerator'): + AsyncGenerator = typing.AsyncGenerator +# 3.6.0 +else: + class AsyncGenerator(AsyncIterator[T_co], typing.Generic[T_co, T_contra], + metaclass=_ExtensionsGenericMeta, + 
extra=collections.abc.AsyncGenerator): + __slots__ = () -Counter = typing.Counter -ChainMap = typing.ChainMap -AsyncGenerator = typing.AsyncGenerator NewType = typing.NewType Text = typing.Text TYPE_CHECKING = typing.TYPE_CHECKING +def _gorg(cls): + """This function exists for compatibility with old typing versions.""" + assert isinstance(cls, GenericMeta) + if hasattr(cls, '_gorg'): + return cls._gorg + while cls.__origin__ is not None: + cls = cls.__origin__ + return cls + + _PROTO_WHITELIST = ['Callable', 'Awaitable', 'Iterable', 'Iterator', 'AsyncIterable', 'AsyncIterator', 'Hashable', 'Sized', 'Container', 'Collection', 'Reversible', @@ -414,57 +507,18 @@ def _is_callable_members_only(cls): return all(callable(getattr(cls, attr, None)) for attr in _get_protocol_attrs(cls)) -def _maybe_adjust_parameters(cls): - """Helper function used in Protocol.__init_subclass__ and _TypedDictMeta.__new__. - - The contents of this function are very similar - to logic found in typing.Generic.__init_subclass__ - on the CPython main branch. - """ - tvars = [] - if '__orig_bases__' in cls.__dict__: - tvars = typing._collect_type_vars(cls.__orig_bases__) - # Look for Generic[T1, ..., Tn] or Protocol[T1, ..., Tn]. - # If found, tvars must be a subset of it. - # If not found, tvars is it. - # Also check for and reject plain Generic, - # and reject multiple Generic[...] and/or Protocol[...]. - gvars = None - for base in cls.__orig_bases__: - if (isinstance(base, typing._GenericAlias) and - base.__origin__ in (typing.Generic, Protocol)): - # for error messages - the_base = base.__origin__.__name__ - if gvars is not None: - raise TypeError( - "Cannot inherit from Generic[...]" - " and/or Protocol[...] multiple types.") - gvars = base.__parameters__ - if gvars is None: - gvars = tvars - else: - tvarset = set(tvars) - gvarset = set(gvars) - if not tvarset <= gvarset: - s_vars = ', '.join(str(t) for t in tvars if t not in gvarset) - s_args = ', '.join(str(g) for g in gvars) - raise TypeError(f"Some type variables ({s_vars}) are" - f" not listed in {the_base}[{s_args}]") - tvars = gvars - cls.__parameters__ = tuple(tvars) - - # 3.8+ if hasattr(typing, 'Protocol'): Protocol = typing.Protocol # 3.7 -else: +elif PEP_560: + from typing import _collect_type_vars # noqa def _no_init(self, *args, **kwargs): if type(self)._is_protocol: raise TypeError('Protocols cannot be instantiated') - class _ProtocolMeta(abc.ABCMeta): # noqa: B024 + class _ProtocolMeta(abc.ABCMeta): # This metaclass is a bit unfortunate and exists only because of the lack # of __instancehook__. def __instancecheck__(cls, instance): @@ -546,17 +600,47 @@ def __class_getitem__(cls, params): "Parameters to Protocol[...] must all be unique") else: # Subscripting a regular Generic subclass. - _check_generic(cls, params, len(cls.__parameters__)) + _check_generic(cls, params) return typing._GenericAlias(cls, params) def __init_subclass__(cls, *args, **kwargs): + tvars = [] if '__orig_bases__' in cls.__dict__: error = typing.Generic in cls.__orig_bases__ else: error = typing.Generic in cls.__bases__ if error: raise TypeError("Cannot inherit from plain Generic") - _maybe_adjust_parameters(cls) + if '__orig_bases__' in cls.__dict__: + tvars = _collect_type_vars(cls.__orig_bases__) + # Look for Generic[T1, ..., Tn] or Protocol[T1, ..., Tn]. + # If found, tvars must be a subset of it. + # If not found, tvars is it. + # Also check for and reject plain Generic, + # and reject multiple Generic[...] and/or Protocol[...]. 
+ gvars = None + for base in cls.__orig_bases__: + if (isinstance(base, typing._GenericAlias) and + base.__origin__ in (typing.Generic, Protocol)): + # for error messages + the_base = base.__origin__.__name__ + if gvars is not None: + raise TypeError( + "Cannot inherit from Generic[...]" + " and/or Protocol[...] multiple types.") + gvars = base.__parameters__ + if gvars is None: + gvars = tvars + else: + tvarset = set(tvars) + gvarset = set(gvars) + if not tvarset <= gvarset: + s_vars = ', '.join(str(t) for t in tvars if t not in gvarset) + s_args = ', '.join(str(g) for g in gvars) + raise TypeError(f"Some type variables ({s_vars}) are" + f" not listed in {the_base}[{s_args}]") + tvars = gvars + cls.__parameters__ = tuple(tvars) # Determine if this is a protocol or a concrete subclass. if not cls.__dict__.get('_is_protocol', None): @@ -610,12 +694,250 @@ def _proto_hook(other): raise TypeError('Protocols can only inherit from other' f' protocols, got {repr(base)}') cls.__init__ = _no_init +# 3.6 +else: + from typing import _next_in_mro, _type_check # noqa + + def _no_init(self, *args, **kwargs): + if type(self)._is_protocol: + raise TypeError('Protocols cannot be instantiated') + + class _ProtocolMeta(GenericMeta): + """Internal metaclass for Protocol. + + This exists so Protocol classes can be generic without deriving + from Generic. + """ + def __new__(cls, name, bases, namespace, + tvars=None, args=None, origin=None, extra=None, orig_bases=None): + # This is just a version copied from GenericMeta.__new__ that + # includes "Protocol" special treatment. (Comments removed for brevity.) + assert extra is None # Protocols should not have extra + if tvars is not None: + assert origin is not None + assert all(isinstance(t, typing.TypeVar) for t in tvars), tvars + else: + tvars = _type_vars(bases) + gvars = None + for base in bases: + if base is typing.Generic: + raise TypeError("Cannot inherit from plain Generic") + if (isinstance(base, GenericMeta) and + base.__origin__ in (typing.Generic, Protocol)): + if gvars is not None: + raise TypeError( + "Cannot inherit from Generic[...] or" + " Protocol[...] multiple times.") + gvars = base.__parameters__ + if gvars is None: + gvars = tvars + else: + tvarset = set(tvars) + gvarset = set(gvars) + if not tvarset <= gvarset: + s_vars = ", ".join(str(t) for t in tvars if t not in gvarset) + s_args = ", ".join(str(g) for g in gvars) + cls_name = "Generic" if any(b.__origin__ is typing.Generic + for b in bases) else "Protocol" + raise TypeError(f"Some type variables ({s_vars}) are" + f" not listed in {cls_name}[{s_args}]") + tvars = gvars + + initial_bases = bases + if (extra is not None and type(extra) is abc.ABCMeta and + extra not in bases): + bases = (extra,) + bases + bases = tuple(_gorg(b) if isinstance(b, GenericMeta) else b + for b in bases) + if any(isinstance(b, GenericMeta) and b is not typing.Generic for b in bases): + bases = tuple(b for b in bases if b is not typing.Generic) + namespace.update({'__origin__': origin, '__extra__': extra}) + self = super(GenericMeta, cls).__new__(cls, name, bases, namespace, + _root=True) + super(GenericMeta, self).__setattr__('_gorg', + self if not origin else + _gorg(origin)) + self.__parameters__ = tvars + self.__args__ = tuple(... 
if a is typing._TypingEllipsis else + () if a is typing._TypingEmpty else + a for a in args) if args else None + self.__next_in_mro__ = _next_in_mro(self) + if orig_bases is None: + self.__orig_bases__ = initial_bases + elif origin is not None: + self._abc_registry = origin._abc_registry + self._abc_cache = origin._abc_cache + if hasattr(self, '_subs_tree'): + self.__tree_hash__ = (hash(self._subs_tree()) if origin else + super(GenericMeta, self).__hash__()) + return self + + def __init__(cls, *args, **kwargs): + super().__init__(*args, **kwargs) + if not cls.__dict__.get('_is_protocol', None): + cls._is_protocol = any(b is Protocol or + isinstance(b, _ProtocolMeta) and + b.__origin__ is Protocol + for b in cls.__bases__) + if cls._is_protocol: + for base in cls.__mro__[1:]: + if not (base in (object, typing.Generic) or + base.__module__ == 'collections.abc' and + base.__name__ in _PROTO_WHITELIST or + isinstance(base, typing.TypingMeta) and base._is_protocol or + isinstance(base, GenericMeta) and + base.__origin__ is typing.Generic): + raise TypeError(f'Protocols can only inherit from other' + f' protocols, got {repr(base)}') + + cls.__init__ = _no_init + + def _proto_hook(other): + if not cls.__dict__.get('_is_protocol', None): + return NotImplemented + if not isinstance(other, type): + # Same error as for issubclass(1, int) + raise TypeError('issubclass() arg 1 must be a class') + for attr in _get_protocol_attrs(cls): + for base in other.__mro__: + if attr in base.__dict__: + if base.__dict__[attr] is None: + return NotImplemented + break + annotations = getattr(base, '__annotations__', {}) + if (isinstance(annotations, typing.Mapping) and + attr in annotations and + isinstance(other, _ProtocolMeta) and + other._is_protocol): + break + else: + return NotImplemented + return True + if '__subclasshook__' not in cls.__dict__: + cls.__subclasshook__ = _proto_hook + + def __instancecheck__(self, instance): + # We need this method for situations where attributes are + # assigned in __init__. + if ((not getattr(self, '_is_protocol', False) or + _is_callable_members_only(self)) and + issubclass(instance.__class__, self)): + return True + if self._is_protocol: + if all(hasattr(instance, attr) and + (not callable(getattr(self, attr, None)) or + getattr(instance, attr) is not None) + for attr in _get_protocol_attrs(self)): + return True + return super(GenericMeta, self).__instancecheck__(instance) + + def __subclasscheck__(self, cls): + if self.__origin__ is not None: + if sys._getframe(1).f_globals['__name__'] not in ['abc', 'functools']: + raise TypeError("Parameterized generics cannot be used with class " + "or instance checks") + return False + if (self.__dict__.get('_is_protocol', None) and + not self.__dict__.get('_is_runtime_protocol', None)): + if sys._getframe(1).f_globals['__name__'] in ['abc', + 'functools', + 'typing']: + return False + raise TypeError("Instance and class checks can only be used with" + " @runtime protocols") + if (self.__dict__.get('_is_runtime_protocol', None) and + not _is_callable_members_only(self)): + if sys._getframe(1).f_globals['__name__'] in ['abc', + 'functools', + 'typing']: + return super(GenericMeta, self).__subclasscheck__(cls) + raise TypeError("Protocols with non-method members" + " don't support issubclass()") + return super(GenericMeta, self).__subclasscheck__(cls) + + @typing._tp_cache + def __getitem__(self, params): + # We also need to copy this from GenericMeta.__getitem__ to get + # special treatment of "Protocol". 
(Comments removed for brevity.) + if not isinstance(params, tuple): + params = (params,) + if not params and _gorg(self) is not typing.Tuple: + raise TypeError( + f"Parameter list to {self.__qualname__}[...] cannot be empty") + msg = "Parameters to generic types must be types." + params = tuple(_type_check(p, msg) for p in params) + if self in (typing.Generic, Protocol): + if not all(isinstance(p, typing.TypeVar) for p in params): + raise TypeError( + f"Parameters to {repr(self)}[...] must all be type variables") + if len(set(params)) != len(params): + raise TypeError( + f"Parameters to {repr(self)}[...] must all be unique") + tvars = params + args = params + elif self in (typing.Tuple, typing.Callable): + tvars = _type_vars(params) + args = params + elif self.__origin__ in (typing.Generic, Protocol): + raise TypeError(f"Cannot subscript already-subscripted {repr(self)}") + else: + _check_generic(self, params) + tvars = _type_vars(params) + args = params + + prepend = (self,) if self.__origin__ is None else () + return self.__class__(self.__name__, + prepend + self.__bases__, + _no_slots_copy(self.__dict__), + tvars=tvars, + args=args, + origin=self, + extra=self.__extra__, + orig_bases=self.__orig_bases__) + + class Protocol(metaclass=_ProtocolMeta): + """Base class for protocol classes. Protocol classes are defined as:: + + class Proto(Protocol): + def meth(self) -> int: + ... + + Such classes are primarily used with static type checkers that recognize + structural subtyping (static duck-typing), for example:: + + class C: + def meth(self) -> int: + return 0 + + def func(x: Proto) -> int: + return x.meth() + + func(C()) # Passes static type check + + See PEP 544 for details. Protocol classes decorated with + @typing_extensions.runtime act as simple-minded runtime protocol that checks + only the presence of given attributes, ignoring their type signatures. + + Protocol classes can be generic, they are defined as:: + + class GenProto(Protocol[T]): + def meth(self) -> T: + ... + """ + __slots__ = () + _is_protocol = True + + def __new__(cls, *args, **kwds): + if _gorg(cls) is Protocol: + raise TypeError("Type Protocol cannot be instantiated; " + "it can be used only as a base class") + return typing._generic_new(cls.__next_in_mro__, cls, *args, **kwds) # 3.8+ if hasattr(typing, 'runtime_checkable'): runtime_checkable = typing.runtime_checkable -# 3.7 +# 3.6-3.7 else: def runtime_checkable(cls): """Mark a protocol class as a runtime protocol, so that it @@ -639,7 +961,7 @@ def runtime_checkable(cls): # 3.8+ if hasattr(typing, 'SupportsIndex'): SupportsIndex = typing.SupportsIndex -# 3.7 +# 3.6-3.7 else: @runtime_checkable class SupportsIndex(Protocol): @@ -650,17 +972,12 @@ def __index__(self) -> int: pass -if hasattr(typing, "Required"): +if sys.version_info >= (3, 9, 2): # The standard library TypedDict in Python 3.8 does not store runtime information # about which (if any) keys are optional. See https://bugs.python.org/issue38834 # The standard library TypedDict in Python 3.9.0/1 does not honour the "total" # keyword with old-style TypedDict(). See https://bugs.python.org/issue42059 - # The standard library TypedDict below Python 3.11 does not store runtime - # information about optional and required keys when using Required or NotRequired. - # Generic TypedDicts are also impossible using typing.TypedDict on Python <3.11. 
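# What the backported Protocol plus runtime_checkable above buy at runtime:
# isinstance()/issubclass() become purely structural, checking only
# attribute presence, never signatures. A minimal sketch with illustrative
# names:
from typing_extensions import Protocol, runtime_checkable

@runtime_checkable
class Closable(Protocol):
    def close(self) -> None: ...

class File:
    def close(self) -> None:
        print("closed")

print(isinstance(File(), Closable))  # True: File has a callable .close
print(issubclass(dict, Closable))    # False: dict has no .close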
TypedDict = typing.TypedDict - _TypedDictMeta = typing._TypedDictMeta - is_typeddict = typing.is_typeddict else: def _check_fails(cls, other): try: @@ -740,18 +1057,11 @@ def __new__(cls, name, bases, ns, total=True): # Subclasses and instances of TypedDict return actual dictionaries # via _dict_new. ns['__new__'] = _typeddict_new if name == 'TypedDict' else _dict_new - # Don't insert typing.Generic into __bases__ here, - # or Generic.__init_subclass__ will raise TypeError - # in the super().__new__() call. - # Instead, monkey-patch __bases__ onto the class after it's been created. tp_dict = super().__new__(cls, name, (dict,), ns) - if any(issubclass(base, typing.Generic) for base in bases): - tp_dict.__bases__ = (typing.Generic, dict) - _maybe_adjust_parameters(tp_dict) - annotations = {} own_annotations = ns.get('__annotations__', {}) + own_annotation_keys = set(own_annotations.keys()) msg = "TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type" own_annotations = { n: typing._type_check(tp, msg) for n, tp in own_annotations.items() @@ -765,22 +1075,10 @@ def __new__(cls, name, bases, ns, total=True): optional_keys.update(base.__dict__.get('__optional_keys__', ())) annotations.update(own_annotations) - for annotation_key, annotation_type in own_annotations.items(): - annotation_origin = get_origin(annotation_type) - if annotation_origin is Annotated: - annotation_args = get_args(annotation_type) - if annotation_args: - annotation_type = annotation_args[0] - annotation_origin = get_origin(annotation_type) - - if annotation_origin is Required: - required_keys.add(annotation_key) - elif annotation_origin is NotRequired: - optional_keys.add(annotation_key) - elif total: - required_keys.add(annotation_key) - else: - optional_keys.add(annotation_key) + if total: + required_keys.update(own_annotation_keys) + else: + optional_keys.update(own_annotation_keys) tp_dict.__annotations__ = annotations tp_dict.__required_keys__ = frozenset(required_keys) @@ -823,74 +1121,121 @@ class Point2D(TypedDict): syntax forms work for Python 2.7 and 3.2+ """ - if hasattr(typing, "_TypedDictMeta"): - _TYPEDDICT_TYPES = (typing._TypedDictMeta, _TypedDictMeta) - else: - _TYPEDDICT_TYPES = (_TypedDictMeta,) - - def is_typeddict(tp): - """Check if an annotation is a TypedDict class - For example:: - class Film(TypedDict): - title: str - year: int +# Python 3.9+ has PEP 593 (Annotated and modified get_type_hints) +if hasattr(typing, 'Annotated'): + Annotated = typing.Annotated + get_type_hints = typing.get_type_hints + # Not exported and not a public API, but needed for get_origin() and get_args() + # to work. + _AnnotatedAlias = typing._AnnotatedAlias +# 3.7-3.8 +elif PEP_560: + class _AnnotatedAlias(typing._GenericAlias, _root=True): + """Runtime representation of an annotated type. - is_typeddict(Film) # => True - is_typeddict(Union[list, str]) # => False + At its core 'Annotated[t, dec1, dec2, ...]' is an alias for the type 't' + with extra annotations. The alias behaves like a normal typing alias, + instantiating is the same as instantiating the underlying type, binding + it to types is also the same. 
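# With the Required/NotRequired-aware key classification removed in this
# hunk, the backported TypedDict derives required vs. optional keys solely
# from the ``total`` flag. A small sketch of the runtime effect (class names
# are illustrative):
from typing_extensions import TypedDict

class Movie(TypedDict):  # total=True (the default): every key is required
    title: str
    year: int

class MovieDraft(TypedDict, total=False):  # every key is optional
    title: str
    year: int

print(Movie.__required_keys__)       # frozenset({'title', 'year'})
print(MovieDraft.__optional_keys__)  # frozenset({'title', 'year'})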
""" - return isinstance(tp, tuple(_TYPEDDICT_TYPES)) + def __init__(self, origin, metadata): + if isinstance(origin, _AnnotatedAlias): + metadata = origin.__metadata__ + metadata + origin = origin.__origin__ + super().__init__(origin, origin) + self.__metadata__ = metadata + def copy_with(self, params): + assert len(params) == 1 + new_type = params[0] + return _AnnotatedAlias(new_type, self.__metadata__) -if hasattr(typing, "assert_type"): - assert_type = typing.assert_type + def __repr__(self): + return (f"typing_extensions.Annotated[{typing._type_repr(self.__origin__)}, " + f"{', '.join(repr(a) for a in self.__metadata__)}]") -else: - def assert_type(__val, __typ): - """Assert (to the type checker) that the value is of the given type. + def __reduce__(self): + return operator.getitem, ( + Annotated, (self.__origin__,) + self.__metadata__ + ) - When the type checker encounters a call to assert_type(), it - emits an error if the value is not of the specified type:: + def __eq__(self, other): + if not isinstance(other, _AnnotatedAlias): + return NotImplemented + if self.__origin__ != other.__origin__: + return False + return self.__metadata__ == other.__metadata__ + + def __hash__(self): + return hash((self.__origin__, self.__metadata__)) + + class Annotated: + """Add context specific metadata to a type. + + Example: Annotated[int, runtime_check.Unsigned] indicates to the + hypothetical runtime_check module that this type is an unsigned int. + Every other consumer of this type can ignore this metadata and treat + this type as int. + + The first argument to Annotated must be a valid type (and will be in + the __origin__ field), the remaining arguments are kept as a tuple in + the __extra__ field. + + Details: + + - It's an error to call `Annotated` with less than two arguments. + - Nested Annotated are flattened:: + + Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3] - def greet(name: str) -> None: - assert_type(name, str) # ok - assert_type(name, int) # type checker error + - Instantiating an annotated type is equivalent to instantiating the + underlying type:: + + Annotated[C, Ann1](5) == C(5) + + - Annotated can be used as a generic type alias:: + + Optimized = Annotated[T, runtime.Optimize()] + Optimized[int] == Annotated[int, runtime.Optimize()] - At runtime this returns the first argument unchanged and otherwise - does nothing. + OptimizedList = Annotated[List[T], runtime.Optimize()] + OptimizedList[int] == Annotated[List[int], runtime.Optimize()] """ - return __val + __slots__ = () -if hasattr(typing, "Required"): - get_type_hints = typing.get_type_hints -else: - import functools - import types + def __new__(cls, *args, **kwargs): + raise TypeError("Type Annotated cannot be instantiated.") + + @typing._tp_cache + def __class_getitem__(cls, params): + if not isinstance(params, tuple) or len(params) < 2: + raise TypeError("Annotated[...] should be used " + "with at least two arguments (a type and an " + "annotation).") + msg = "Annotated[t, ...]: t must be a type." + origin = typing._type_check(params[0], msg) + metadata = tuple(params[1:]) + return _AnnotatedAlias(origin, metadata) + + def __init_subclass__(cls, *args, **kwargs): + raise TypeError( + f"Cannot subclass {cls.__module__}.Annotated" + ) - # replaces _strip_annotations() - def _strip_extras(t): - """Strips Annotated, Required and NotRequired from a given type.""" + def _strip_annotations(t): + """Strips the annotations from a given type. 
+ """ if isinstance(t, _AnnotatedAlias): - return _strip_extras(t.__origin__) - if hasattr(t, "__origin__") and t.__origin__ in (Required, NotRequired): - return _strip_extras(t.__args__[0]) + return _strip_annotations(t.__origin__) if isinstance(t, typing._GenericAlias): - stripped_args = tuple(_strip_extras(a) for a in t.__args__) - if stripped_args == t.__args__: - return t - return t.copy_with(stripped_args) - if hasattr(types, "GenericAlias") and isinstance(t, types.GenericAlias): - stripped_args = tuple(_strip_extras(a) for a in t.__args__) - if stripped_args == t.__args__: - return t - return types.GenericAlias(t.__origin__, stripped_args) - if hasattr(types, "UnionType") and isinstance(t, types.UnionType): - stripped_args = tuple(_strip_extras(a) for a in t.__args__) + stripped_args = tuple(_strip_annotations(a) for a in t.__args__) if stripped_args == t.__args__: return t - return functools.reduce(operator.or_, stripped_args) - + res = t.copy_with(stripped_args) + res._special = t._special + return res return t def get_type_hints(obj, globalns=None, localns=None, include_extras=False): @@ -899,8 +1244,7 @@ def get_type_hints(obj, globalns=None, localns=None, include_extras=False): This is often the same as obj.__annotations__, but it handles forward references encoded as string literals, adds Optional[t] if a default value equal to None is set and recursively replaces all - 'Annotated[T, ...]', 'Required[T]' or 'NotRequired[T]' with 'T' - (unless 'include_extras=True'). + 'Annotated[T, ...]' with 'T' (unless 'include_extras=True'). The argument may be a module, class, method, or function. The annotations are returned as a dictionary. For classes, annotations include also @@ -925,65 +1269,120 @@ def get_type_hints(obj, globalns=None, localns=None, include_extras=False): - If two dict arguments are passed, they specify globals and locals, respectively. """ - if hasattr(typing, "Annotated"): - hint = typing.get_type_hints( - obj, globalns=globalns, localns=localns, include_extras=True - ) - else: - hint = typing.get_type_hints(obj, globalns=globalns, localns=localns) + hint = typing.get_type_hints(obj, globalns=globalns, localns=localns) if include_extras: return hint - return {k: _strip_extras(t) for k, t in hint.items()} + return {k: _strip_annotations(t) for k, t in hint.items()} +# 3.6 +else: + def _is_dunder(name): + """Returns True if name is a __dunder_variable_name__.""" + return len(name) > 4 and name.startswith('__') and name.endswith('__') -# Python 3.9+ has PEP 593 (Annotated) -if hasattr(typing, 'Annotated'): - Annotated = typing.Annotated - # Not exported and not a public API, but needed for get_origin() and get_args() - # to work. - _AnnotatedAlias = typing._AnnotatedAlias -# 3.7-3.8 -else: - class _AnnotatedAlias(typing._GenericAlias, _root=True): - """Runtime representation of an annotated type. + # Prior to Python 3.7 types did not have `copy_with`. A lot of the equality + # checks, argument expansion etc. are done on the _subs_tre. As a result we + # can't provide a get_type_hints function that strips out annotations. - At its core 'Annotated[t, dec1, dec2, ...]' is an alias for the type 't' - with extra annotations. The alias behaves like a normal typing alias, - instantiating is the same as instantiating the underlying type, binding - it to types is also the same. 
- """ - def __init__(self, origin, metadata): - if isinstance(origin, _AnnotatedAlias): - metadata = origin.__metadata__ + metadata - origin = origin.__origin__ - super().__init__(origin, origin) - self.__metadata__ = metadata + class AnnotatedMeta(typing.GenericMeta): + """Metaclass for Annotated""" - def copy_with(self, params): - assert len(params) == 1 - new_type = params[0] - return _AnnotatedAlias(new_type, self.__metadata__) + def __new__(cls, name, bases, namespace, **kwargs): + if any(b is not object for b in bases): + raise TypeError("Cannot subclass " + str(Annotated)) + return super().__new__(cls, name, bases, namespace, **kwargs) - def __repr__(self): - return (f"typing_extensions.Annotated[{typing._type_repr(self.__origin__)}, " - f"{', '.join(repr(a) for a in self.__metadata__)}]") + @property + def __metadata__(self): + return self._subs_tree()[2] - def __reduce__(self): - return operator.getitem, ( - Annotated, (self.__origin__,) + self.__metadata__ + def _tree_repr(self, tree): + cls, origin, metadata = tree + if not isinstance(origin, tuple): + tp_repr = typing._type_repr(origin) + else: + tp_repr = origin[0]._tree_repr(origin) + metadata_reprs = ", ".join(repr(arg) for arg in metadata) + return f'{cls}[{tp_repr}, {metadata_reprs}]' + + def _subs_tree(self, tvars=None, args=None): # noqa + if self is Annotated: + return Annotated + res = super()._subs_tree(tvars=tvars, args=args) + # Flatten nested Annotated + if isinstance(res[1], tuple) and res[1][0] is Annotated: + sub_tp = res[1][1] + sub_annot = res[1][2] + return (Annotated, sub_tp, sub_annot + res[2]) + return res + + def _get_cons(self): + """Return the class used to create instance of this type.""" + if self.__origin__ is None: + raise TypeError("Cannot get the underlying type of a " + "non-specialized Annotated type.") + tree = self._subs_tree() + while isinstance(tree, tuple) and tree[0] is Annotated: + tree = tree[1] + if isinstance(tree, tuple): + return tree[0] + else: + return tree + + @typing._tp_cache + def __getitem__(self, params): + if not isinstance(params, tuple): + params = (params,) + if self.__origin__ is not None: # specializing an instantiated type + return super().__getitem__(params) + elif not isinstance(params, tuple) or len(params) < 2: + raise TypeError("Annotated[...] should be instantiated " + "with at least two arguments (a type and an " + "annotation).") + else: + msg = "Annotated[t, ...]: t must be a type." + tp = typing._type_check(params[0], msg) + metadata = tuple(params[1:]) + return self.__class__( + self.__name__, + self.__bases__, + _no_slots_copy(self.__dict__), + tvars=_type_vars((tp,)), + # Metadata is a tuple so it won't be touched by _replace_args et al. 
+ args=(tp, metadata), + origin=self, ) - def __eq__(self, other): - if not isinstance(other, _AnnotatedAlias): - return NotImplemented - if self.__origin__ != other.__origin__: - return False - return self.__metadata__ == other.__metadata__ + def __call__(self, *args, **kwargs): + cons = self._get_cons() + result = cons(*args, **kwargs) + try: + result.__orig_class__ = self + except AttributeError: + pass + return result + + def __getattr__(self, attr): + # For simplicity we just don't relay all dunder names + if self.__origin__ is not None and not _is_dunder(attr): + return getattr(self._get_cons(), attr) + raise AttributeError(attr) + + def __setattr__(self, attr, value): + if _is_dunder(attr) or attr.startswith('_abc_'): + super().__setattr__(attr, value) + elif self.__origin__ is None: + raise AttributeError(attr) + else: + setattr(self._get_cons(), attr, value) - def __hash__(self): - return hash((self.__origin__, self.__metadata__)) + def __instancecheck__(self, obj): + raise TypeError("Annotated cannot be used with isinstance().") - class Annotated: + def __subclasscheck__(self, cls): + raise TypeError("Annotated cannot be used with issubclass().") + + class Annotated(metaclass=AnnotatedMeta): """Add context specific metadata to a type. Example: Annotated[int, runtime_check.Unsigned] indicates to the @@ -991,9 +1390,8 @@ class Annotated: Every other consumer of this type can ignore this metadata and treat this type as int. - The first argument to Annotated must be a valid type (and will be in - the __origin__ field), the remaining arguments are kept as a tuple in - the __extra__ field. + The first argument to Annotated must be a valid type, the remaining + arguments are kept as a tuple in the __metadata__ field. Details: @@ -1016,31 +1414,6 @@ class Annotated: OptimizedList[int] == Annotated[List[int], runtime.Optimize()] """ - __slots__ = () - - def __new__(cls, *args, **kwargs): - raise TypeError("Type Annotated cannot be instantiated.") - - @typing._tp_cache - def __class_getitem__(cls, params): - if not isinstance(params, tuple) or len(params) < 2: - raise TypeError("Annotated[...] should be used " - "with at least two arguments (a type and an " - "annotation).") - allowed_special_forms = (ClassVar, Final) - if get_origin(params[0]) in allowed_special_forms: - origin = params[0] - else: - msg = "Annotated[t, ...]: t must be a type." - origin = typing._type_check(params[0], msg) - metadata = tuple(params[1:]) - return _AnnotatedAlias(origin, metadata) - - def __init_subclass__(cls, *args, **kwargs): - raise TypeError( - f"Cannot subclass {cls.__module__}.Annotated" - ) - # Python 3.8 has get_origin() and get_args() but those implementations aren't # Annotated-aware, so we can't use those. Python 3.9's versions don't support # ParamSpecArgs and ParamSpecKwargs, so only Python 3.10's versions will do. @@ -1048,7 +1421,7 @@ def __init_subclass__(cls, *args, **kwargs): get_origin = typing.get_origin get_args = typing.get_args # 3.7-3.9 -else: +elif PEP_560: try: # 3.9+ from typing import _BaseGenericAlias @@ -1056,9 +1429,9 @@ def __init_subclass__(cls, *args, **kwargs): _BaseGenericAlias = typing._GenericAlias try: # 3.9+ - from typing import GenericAlias as _typing_GenericAlias + from typing import GenericAlias except ImportError: - _typing_GenericAlias = typing._GenericAlias + GenericAlias = typing._GenericAlias def get_origin(tp): """Get the unsubscripted version of a type. 
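# Two runtime properties that both Annotated implementations above preserve:
# nested Annotated flattens, and instantiating the alias instantiates the
# underlying type (the exact repr varies by Python version). A quick sketch:
from typing_extensions import Annotated

Inner = Annotated[int, "a"]
print(Annotated[Inner, "b"])         # Annotated[int, 'a', 'b'] -- flattened
print(Annotated[list, "meta"]([1]))  # [1] -- calls list(); metadata is ignored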
@@ -1077,7 +1450,7 @@ def get_origin(tp): """ if isinstance(tp, _AnnotatedAlias): return Annotated - if isinstance(tp, (typing._GenericAlias, _typing_GenericAlias, _BaseGenericAlias, + if isinstance(tp, (typing._GenericAlias, GenericAlias, _BaseGenericAlias, ParamSpecArgs, ParamSpecKwargs)): return tp.__origin__ if tp is typing.Generic: @@ -1097,7 +1470,7 @@ def get_args(tp): """ if isinstance(tp, _AnnotatedAlias): return (tp.__origin__,) + tp.__metadata__ - if isinstance(tp, (typing._GenericAlias, _typing_GenericAlias)): + if isinstance(tp, (typing._GenericAlias, GenericAlias)): if getattr(tp, "_special", False): return () res = tp.__args__ @@ -1130,7 +1503,7 @@ def TypeAlias(self, parameters): """ raise TypeError(f"{self} is not subscriptable") # 3.7-3.8 -else: +elif sys.version_info[:2] >= (3, 7): class _TypeAliasForm(typing._SpecialForm, _root=True): def __repr__(self): return 'typing_extensions.' + self._name @@ -1146,51 +1519,44 @@ def __repr__(self): It's invalid when used anywhere except as in the example above.""") +# 3.6 +else: + class _TypeAliasMeta(typing.TypingMeta): + """Metaclass for TypeAlias""" + def __repr__(self): + return 'typing_extensions.TypeAlias' -class _DefaultMixin: - """Mixin for TypeVarLike defaults.""" + class _TypeAliasBase(typing._FinalTypingBase, metaclass=_TypeAliasMeta, _root=True): + """Special marker indicating that an assignment should + be recognized as a proper type alias definition by type + checkers. - __slots__ = () + For example:: - def __init__(self, default): - if isinstance(default, (tuple, list)): - self.__default__ = tuple((typing._type_check(d, "Default must be a type") - for d in default)) - elif default: - self.__default__ = typing._type_check(default, "Default must be a type") - else: - self.__default__ = None + Predicate: TypeAlias = Callable[..., bool] + It's invalid when used anywhere except as in the example above. + """ + __slots__ = () -# Add default and infer_variance parameters from PEP 696 and 695 -class TypeVar(typing.TypeVar, _DefaultMixin, _root=True): - """Type variable.""" + def __instancecheck__(self, obj): + raise TypeError("TypeAlias cannot be used with isinstance().") - __module__ = 'typing' + def __subclasscheck__(self, cls): + raise TypeError("TypeAlias cannot be used with issubclass().") - def __init__(self, name, *constraints, bound=None, - covariant=False, contravariant=False, - default=None, infer_variance=False): - super().__init__(name, *constraints, bound=bound, covariant=covariant, - contravariant=contravariant) - _DefaultMixin.__init__(self, default) - self.__infer_variance__ = infer_variance + def __repr__(self): + return 'typing_extensions.TypeAlias' - # for pickling: - try: - def_mod = sys._getframe(1).f_globals.get('__name__', '__main__') - except (AttributeError, ValueError): - def_mod = None - if def_mod != 'typing_extensions': - self.__module__ = def_mod + TypeAlias = _TypeAliasBase(_root=True) # Python 3.10+ has PEP 612 if hasattr(typing, 'ParamSpecArgs'): ParamSpecArgs = typing.ParamSpecArgs ParamSpecKwargs = typing.ParamSpecKwargs -# 3.7-3.9 +# 3.6-3.9 else: class _Immutable: """Mixin to indicate that object should not be copied.""" @@ -1220,11 +1586,6 @@ def __init__(self, origin): def __repr__(self): return f"{self.__origin__.__name__}.args" - def __eq__(self, other): - if not isinstance(other, ParamSpecArgs): - return NotImplemented - return self.__origin__ == other.__origin__ - class ParamSpecKwargs(_Immutable): """The kwargs for a ParamSpec object. 
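# The Annotated-aware get_origin()/get_args() helpers above in action
# (a minimal sketch; printed reprs vary slightly across versions):
from typing import List
from typing_extensions import Annotated, get_args, get_origin

print(get_origin(Annotated[int, "m"]))  # the Annotated form itself
print(get_args(Annotated[int, "m"]))    # (int, 'm'): origin, then metadata
print(get_origin(List[int]))            # list
print(get_args(List[int]))              # (int,)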
@@ -1243,39 +1604,14 @@ def __init__(self, origin): def __repr__(self): return f"{self.__origin__.__name__}.kwargs" - def __eq__(self, other): - if not isinstance(other, ParamSpecKwargs): - return NotImplemented - return self.__origin__ == other.__origin__ - # 3.10+ if hasattr(typing, 'ParamSpec'): - - # Add default Parameter - PEP 696 - class ParamSpec(typing.ParamSpec, _DefaultMixin, _root=True): - """Parameter specification variable.""" - - __module__ = 'typing' - - def __init__(self, name, *, bound=None, covariant=False, contravariant=False, - default=None): - super().__init__(name, bound=bound, covariant=covariant, - contravariant=contravariant) - _DefaultMixin.__init__(self, default) - - # for pickling: - try: - def_mod = sys._getframe(1).f_globals.get('__name__', '__main__') - except (AttributeError, ValueError): - def_mod = None - if def_mod != 'typing_extensions': - self.__module__ = def_mod - -# 3.7-3.9 + ParamSpec = typing.ParamSpec +# 3.6-3.9 else: # Inherits from list as a workaround for Callable checks in Python < 3.9.2. - class ParamSpec(list, _DefaultMixin): + class ParamSpec(list): """Parameter specification variable. Usage:: @@ -1333,8 +1669,7 @@ def args(self): def kwargs(self): return ParamSpecKwargs(self) - def __init__(self, name, *, bound=None, covariant=False, contravariant=False, - default=None): + def __init__(self, name, *, bound=None, covariant=False, contravariant=False): super().__init__([self]) self.__name__ = name self.__covariant__ = bool(covariant) @@ -1343,7 +1678,6 @@ def __init__(self, name, *, bound=None, covariant=False, contravariant=False, self.__bound__ = typing._type_check(bound, 'Bound must be a type.') else: self.__bound__ = None - _DefaultMixin.__init__(self, default) # for pickling: try: @@ -1375,17 +1709,28 @@ def __reduce__(self): def __call__(self, *args, **kwargs): pass + if not PEP_560: + # Only needed in 3.6. + def _get_type_vars(self, tvars): + if self not in tvars: + tvars.append(self) -# 3.7-3.9 + +# 3.6-3.9 if not hasattr(typing, 'Concatenate'): # Inherits from list as a workaround for Callable checks in Python < 3.9.2. class _ConcatenateGenericAlias(list): # Trick Generic into looking into this for __parameters__. - __class__ = typing._GenericAlias + if PEP_560: + __class__ = typing._GenericAlias + else: + __class__ = typing._TypingBase # Flag in 3.8. _special = False + # Attribute in 3.6 and earlier. + _gorg = typing.Generic def __init__(self, origin, args): super().__init__(args) @@ -1410,8 +1755,14 @@ def __parameters__(self): tp for tp in self.__args__ if isinstance(tp, (typing.TypeVar, ParamSpec)) ) + if not PEP_560: + # Only required in 3.6. + def _get_type_vars(self, tvars): + if self.__origin__ and self.__parameters__: + typing._get_type_vars(self.__parameters__, tvars) -# 3.7-3.9 + +# 3.6-3.9 @typing._tp_cache def _concatenate_getitem(self, parameters): if parameters == (): @@ -1446,7 +1797,7 @@ def Concatenate(self, parameters): """ return _concatenate_getitem(self, parameters) # 3.7-8 -else: +elif sys.version_info[:2] >= (3, 7): class _ConcatenateForm(typing._SpecialForm, _root=True): def __repr__(self): return 'typing_extensions.' + self._name @@ -1466,6 +1817,42 @@ def __getitem__(self, parameters): See PEP 612 for detailed information. 
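# What the ParamSpec/Concatenate backports above make expressible: a
# decorator whose wrapper consumes one leading argument and forwards the
# rest with types intact. A minimal runnable sketch (names illustrative):
from typing import Callable, TypeVar
from typing_extensions import Concatenate, ParamSpec

P = ParamSpec("P")
R = TypeVar("R")

def with_label(f: Callable[Concatenate[str, P], R]) -> Callable[P, R]:
    def inner(*args: P.args, **kwargs: P.kwargs) -> R:
        return f("label", *args, **kwargs)
    return inner

@with_label
def emit(label: str, value: int) -> str:
    return f"{label}={value}"

print(emit(42))  # label=42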
""") +# 3.6 +else: + class _ConcatenateAliasMeta(typing.TypingMeta): + """Metaclass for Concatenate.""" + + def __repr__(self): + return 'typing_extensions.Concatenate' + + class _ConcatenateAliasBase(typing._FinalTypingBase, + metaclass=_ConcatenateAliasMeta, + _root=True): + """Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a + higher order function which adds, removes or transforms parameters of a + callable. + + For example:: + + Callable[Concatenate[int, P], int] + + See PEP 612 for detailed information. + """ + __slots__ = () + + def __instancecheck__(self, obj): + raise TypeError("Concatenate cannot be used with isinstance().") + + def __subclasscheck__(self, cls): + raise TypeError("Concatenate cannot be used with issubclass().") + + def __repr__(self): + return 'typing_extensions.Concatenate' + + def __getitem__(self, parameters): + return _concatenate_getitem(self, parameters) + + Concatenate = _ConcatenateAliasBase(_root=True) # 3.10+ if hasattr(typing, 'TypeGuard'): @@ -1520,10 +1907,10 @@ def is_str(val: Union[str, float]): ``TypeGuard`` also works with type variables. For more information, see PEP 647 (User-Defined Type Guards). """ - item = typing._type_check(parameters, f'{self} accepts only a single type.') + item = typing._type_check(parameters, f'{self} accepts only single type.') return typing._GenericAlias(self, (item,)) # 3.7-3.8 -else: +elif sys.version_info[:2] >= (3, 7): class _TypeGuardForm(typing._SpecialForm, _root=True): def __repr__(self): @@ -1578,78 +1965,135 @@ def is_str(val: Union[str, float]): ``TypeGuard`` also works with type variables. For more information, see PEP 647 (User-Defined Type Guards). """) +# 3.6 +else: + class _TypeGuard(typing._FinalTypingBase, _root=True): + """Special typing form used to annotate the return type of a user-defined + type guard function. ``TypeGuard`` only accepts a single type argument. + At runtime, functions marked this way should return a boolean. + ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static + type checkers to determine a more precise type of an expression within a + program's code flow. Usually type narrowing is done by analyzing + conditional code flow and applying the narrowing to a block of code. The + conditional expression here is sometimes referred to as a "type guard". + + Sometimes it would be convenient to use a user-defined boolean function + as a type guard. Such a function should use ``TypeGuard[...]`` as its + return type to alert static type checkers to this intention. -# Vendored from cpython typing._SpecialFrom -class _SpecialForm(typing._Final, _root=True): - __slots__ = ('_name', '__doc__', '_getitem') + Using ``-> TypeGuard`` tells the static type checker that for a given + function: - def __init__(self, getitem): - self._getitem = getitem - self._name = getitem.__name__ - self.__doc__ = getitem.__doc__ + 1. The return value is a boolean. + 2. If the return value is ``True``, the type of its argument + is the type inside ``TypeGuard``. - def __getattr__(self, item): - if item in {'__name__', '__qualname__'}: - return self._name + For example:: + + def is_str(val: Union[str, float]): + # "isinstance" type guard + if isinstance(val, str): + # Type of ``val`` is narrowed to ``str`` + ... + else: + # Else, type of ``val`` is narrowed to ``float``. + ... + + Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower + form of ``TypeA`` (it can even be a wider form) and this may lead to + type-unsafe results. 
The main reason is to allow for things like + narrowing ``List[object]`` to ``List[str]`` even though the latter is not + a subtype of the former, since ``List`` is invariant. The responsibility of + writing type-safe type guards is left to the user. - raise AttributeError(item) + ``TypeGuard`` also works with type variables. For more information, see + PEP 647 (User-Defined Type Guards). + """ - def __mro_entries__(self, bases): - raise TypeError(f"Cannot subclass {self!r}") + __slots__ = ('__type__',) - def __repr__(self): - return f'typing_extensions.{self._name}' + def __init__(self, tp=None, **kwds): + self.__type__ = tp - def __reduce__(self): - return self._name + def __getitem__(self, item): + cls = type(self) + if self.__type__ is None: + return cls(typing._type_check(item, + f'{cls.__name__[1:]} accepts only a single type.'), + _root=True) + raise TypeError(f'{cls.__name__[1:]} cannot be further subscripted') - def __call__(self, *args, **kwds): - raise TypeError(f"Cannot instantiate {self!r}") + def _eval_type(self, globalns, localns): + new_tp = typing._eval_type(self.__type__, globalns, localns) + if new_tp == self.__type__: + return self + return type(self)(new_tp, _root=True) - def __or__(self, other): - return typing.Union[self, other] + def __repr__(self): + r = super().__repr__() + if self.__type__ is not None: + r += f'[{typing._type_repr(self.__type__)}]' + return r - def __ror__(self, other): - return typing.Union[other, self] + def __hash__(self): + return hash((type(self).__name__, self.__type__)) - def __instancecheck__(self, obj): - raise TypeError(f"{self} cannot be used with isinstance()") + def __eq__(self, other): + if not isinstance(other, _TypeGuard): + return NotImplemented + if self.__type__ is not None: + return self.__type__ == other.__type__ + return self is other - def __subclasscheck__(self, cls): - raise TypeError(f"{self} cannot be used with issubclass()") + TypeGuard = _TypeGuard(_root=True) - @typing._tp_cache - def __getitem__(self, parameters): - return self._getitem(self, parameters) +if hasattr(typing, "Self"): + Self = typing.Self +elif sys.version_info[:2] >= (3, 7): + # Vendored from cpython typing._SpecialFrom + class _SpecialForm(typing._Final, _root=True): + __slots__ = ('_name', '__doc__', '_getitem') + def __init__(self, getitem): + self._getitem = getitem + self._name = getitem.__name__ + self.__doc__ = getitem.__doc__ -if hasattr(typing, "LiteralString"): - LiteralString = typing.LiteralString -else: - @_SpecialForm - def LiteralString(self, params): - """Represents an arbitrary literal string. + def __getattr__(self, item): + if item in {'__name__', '__qualname__'}: + return self._name - Example:: + raise AttributeError(item) - from pip._vendor.typing_extensions import LiteralString + def __mro_entries__(self, bases): + raise TypeError(f"Cannot subclass {self!r}") - def query(sql: LiteralString) -> ...: - ... + def __repr__(self): + return f'typing_extensions.{self._name}' - query("SELECT * FROM table") # ok - query(f"SELECT * FROM {input()}") # not ok + def __reduce__(self): + return self._name - See PEP 675 for details. 
+ def __call__(self, *args, **kwds): + raise TypeError(f"Cannot instantiate {self!r}") - """ - raise TypeError(f"{self} is not subscriptable") + def __or__(self, other): + return typing.Union[self, other] + def __ror__(self, other): + return typing.Union[other, self] + + def __instancecheck__(self, obj): + raise TypeError(f"{self} cannot be used with isinstance()") + + def __subclasscheck__(self, cls): + raise TypeError(f"{self} cannot be used with issubclass()") + + @typing._tp_cache + def __getitem__(self, parameters): + return self._getitem(self, parameters) -if hasattr(typing, "Self"): - Self = typing.Self -else: @_SpecialForm def Self(self, params): """Used to spell the type of "self" in classes. @@ -1666,36 +2110,30 @@ def parse(self, data: bytes) -> Self: """ raise TypeError(f"{self} is not subscriptable") +else: + class _Self(typing._FinalTypingBase, _root=True): + """Used to spell the type of "self" in classes. + Example:: -if hasattr(typing, "Never"): - Never = typing.Never -else: - @_SpecialForm - def Never(self, params): - """The bottom type, a type that has no members. + from typing import Self - This can be used to define a function that should never be - called, or a function that never returns:: + class ReturnsSelf: + def parse(self, data: bytes) -> Self: + ... + return self - from pip._vendor.typing_extensions import Never + """ - def never_call_me(arg: Never) -> None: - pass + __slots__ = () - def int_or_str(arg: int | str) -> None: - never_call_me(arg) # type checker error - match arg: - case int(): - print("It's an int") - case str(): - print("It's a str") - case _: - never_call_me(arg) # ok, arg is of type Never + def __instancecheck__(self, obj): + raise TypeError(f"{self} cannot be used with isinstance().") - """ + def __subclasscheck__(self, cls): + raise TypeError(f"{self} cannot be used with issubclass().") - raise TypeError(f"{self} is not subscriptable") + Self = _Self(_root=True) if hasattr(typing, 'Required'): @@ -1723,7 +2161,7 @@ class Movie(TypedDict, total=False): There is no runtime checking that a required key is actually provided when instantiating a related TypedDict. """ - item = typing._type_check(parameters, f'{self._name} accepts only a single type.') + item = typing._type_check(parameters, f'{self._name} accepts only single type') return typing._GenericAlias(self, (item,)) @_ExtensionsSpecialForm @@ -1740,17 +2178,17 @@ class Movie(TypedDict): year=1999, ) """ - item = typing._type_check(parameters, f'{self._name} accepts only a single type.') + item = typing._type_check(parameters, f'{self._name} accepts only single type') return typing._GenericAlias(self, (item,)) -else: +elif sys.version_info[:2] >= (3, 7): class _RequiredForm(typing._SpecialForm, _root=True): def __repr__(self): return 'typing_extensions.' + self._name def __getitem__(self, parameters): item = typing._type_check(parameters, - f'{self._name} accepts only a single type.') + '{} accepts only single type'.format(self._name)) return typing._GenericAlias(self, (item,)) Required = _RequiredForm( @@ -1784,426 +2222,75 @@ class Movie(TypedDict): year=1999, ) """) - - -if hasattr(typing, "Unpack"): # 3.11+ - Unpack = typing.Unpack -elif sys.version_info[:2] >= (3, 9): - class _UnpackSpecialForm(typing._SpecialForm, _root=True): - def __repr__(self): - return 'typing_extensions.' 
+ self._name - - class _UnpackAlias(typing._GenericAlias, _root=True): - __class__ = typing.TypeVar - - @_UnpackSpecialForm - def Unpack(self, parameters): - """A special typing construct to unpack a variadic type. For example: - - Shape = TypeVarTuple('Shape') - Batch = NewType('Batch', int) - - def add_batch_axis( - x: Array[Unpack[Shape]] - ) -> Array[Batch, Unpack[Shape]]: ... - - """ - item = typing._type_check(parameters, f'{self._name} accepts only a single type.') - return _UnpackAlias(self, (item,)) - - def _is_unpack(obj): - return isinstance(obj, _UnpackAlias) - -else: - class _UnpackAlias(typing._GenericAlias, _root=True): - __class__ = typing.TypeVar - - class _UnpackForm(typing._SpecialForm, _root=True): - def __repr__(self): - return 'typing_extensions.' + self._name - - def __getitem__(self, parameters): - item = typing._type_check(parameters, - f'{self._name} accepts only a single type.') - return _UnpackAlias(self, (item,)) - - Unpack = _UnpackForm( - 'Unpack', - doc="""A special typing construct to unpack a variadic type. For example: - - Shape = TypeVarTuple('Shape') - Batch = NewType('Batch', int) - - def add_batch_axis( - x: Array[Unpack[Shape]] - ) -> Array[Batch, Unpack[Shape]]: ... - - """) - - def _is_unpack(obj): - return isinstance(obj, _UnpackAlias) - - -if hasattr(typing, "TypeVarTuple"): # 3.11+ - - # Add default Parameter - PEP 696 - class TypeVarTuple(typing.TypeVarTuple, _DefaultMixin, _root=True): - """Type variable tuple.""" - - def __init__(self, name, *, default=None): - super().__init__(name) - _DefaultMixin.__init__(self, default) - - # for pickling: - try: - def_mod = sys._getframe(1).f_globals.get('__name__', '__main__') - except (AttributeError, ValueError): - def_mod = None - if def_mod != 'typing_extensions': - self.__module__ = def_mod - else: - class TypeVarTuple(_DefaultMixin): - """Type variable tuple. - - Usage:: - - Ts = TypeVarTuple('Ts') - - In the same way that a normal type variable is a stand-in for a single - type such as ``int``, a type variable *tuple* is a stand-in for a *tuple* - type such as ``Tuple[int, str]``. - - Type variable tuples can be used in ``Generic`` declarations. - Consider the following example:: - - class Array(Generic[*Ts]): ... - - The ``Ts`` type variable tuple here behaves like ``tuple[T1, T2]``, - where ``T1`` and ``T2`` are type variables. To use these type variables - as type parameters of ``Array``, we must *unpack* the type variable tuple using - the star operator: ``*Ts``. The signature of ``Array`` then behaves - as if we had simply written ``class Array(Generic[T1, T2]): ...``. - In contrast to ``Generic[T1, T2]``, however, ``Generic[*Shape]`` allows - us to parameterise the class with an *arbitrary* number of type parameters. - - Type variable tuples can be used anywhere a normal ``TypeVar`` can. - This includes class definitions, as shown above, as well as function - signatures and variable annotations:: - - class Array(Generic[*Ts]): - - def __init__(self, shape: Tuple[*Ts]): - self._shape: Tuple[*Ts] = shape - - def get_shape(self) -> Tuple[*Ts]: - return self._shape - - shape = (Height(480), Width(640)) - x: Array[Height, Width] = Array(shape) - y = abs(x) # Inferred type is Array[Height, Width] - z = x + x # ... is Array[Height, Width] - x.get_shape() # ... is tuple[Height, Width] - - """ - - # Trick Generic __parameters__. 
- __class__ = typing.TypeVar - - def __iter__(self): - yield self.__unpacked__ - - def __init__(self, name, *, default=None): - self.__name__ = name - _DefaultMixin.__init__(self, default) - - # for pickling: - try: - def_mod = sys._getframe(1).f_globals.get('__name__', '__main__') - except (AttributeError, ValueError): - def_mod = None - if def_mod != 'typing_extensions': - self.__module__ = def_mod - - self.__unpacked__ = Unpack[self] + # NOTE: Modeled after _Final's implementation when _FinalTypingBase available + class _MaybeRequired(typing._FinalTypingBase, _root=True): + __slots__ = ('__type__',) + + def __init__(self, tp=None, **kwds): + self.__type__ = tp + + def __getitem__(self, item): + cls = type(self) + if self.__type__ is None: + return cls(typing._type_check(item, + '{} accepts only single type.'.format(cls.__name__[1:])), + _root=True) + raise TypeError('{} cannot be further subscripted' + .format(cls.__name__[1:])) + + def _eval_type(self, globalns, localns): + new_tp = typing._eval_type(self.__type__, globalns, localns) + if new_tp == self.__type__: + return self + return type(self)(new_tp, _root=True) def __repr__(self): - return self.__name__ + r = super().__repr__() + if self.__type__ is not None: + r += '[{}]'.format(typing._type_repr(self.__type__)) + return r def __hash__(self): - return object.__hash__(self) + return hash((type(self).__name__, self.__type__)) def __eq__(self, other): + if not isinstance(other, type(self)): + return NotImplemented + if self.__type__ is not None: + return self.__type__ == other.__type__ return self is other - def __reduce__(self): - return self.__name__ - - def __init_subclass__(self, *args, **kwds): - if '_root' not in kwds: - raise TypeError("Cannot subclass special typing classes") - - -if hasattr(typing, "reveal_type"): - reveal_type = typing.reveal_type -else: - def reveal_type(__obj: T) -> T: - """Reveal the inferred type of a variable. - - When a static type checker encounters a call to ``reveal_type()``, - it will emit the inferred type of the argument:: - - x: int = 1 - reveal_type(x) - - Running a static type checker (e.g., ``mypy``) on this example - will produce output similar to 'Revealed type is "builtins.int"'. - - At runtime, the function prints the runtime type of the - argument and returns it unchanged. - - """ - print(f"Runtime type is {type(__obj).__name__!r}", file=sys.stderr) - return __obj - - -if hasattr(typing, "assert_never"): - assert_never = typing.assert_never -else: - def assert_never(__arg: Never) -> Never: - """Assert to the type checker that a line of code is unreachable. - - Example:: - - def int_or_str(arg: int | str) -> None: - match arg: - case int(): - print("It's an int") - case str(): - print("It's a str") - case _: - assert_never(arg) - - If a type checker finds that a call to assert_never() is - reachable, it will emit an error. - - At runtime, this throws an exception when called. - - """ - raise AssertionError("Expected code to be unreachable") - - -if hasattr(typing, 'dataclass_transform'): - dataclass_transform = typing.dataclass_transform -else: - def dataclass_transform( - *, - eq_default: bool = True, - order_default: bool = False, - kw_only_default: bool = False, - field_specifiers: typing.Tuple[ - typing.Union[typing.Type[typing.Any], typing.Callable[..., typing.Any]], - ... - ] = (), - **kwargs: typing.Any, - ) -> typing.Callable[[T], T]: - """Decorator that marks a function, class, or metaclass as providing - dataclass-like behavior. 
- - Example: - - from pip._vendor.typing_extensions import dataclass_transform - - _T = TypeVar("_T") - - # Used on a decorator function - @dataclass_transform() - def create_model(cls: type[_T]) -> type[_T]: - ... - return cls - - @create_model - class CustomerModel: - id: int - name: str - - # Used on a base class - @dataclass_transform() - class ModelBase: ... - - class CustomerModel(ModelBase): - id: int - name: str - - # Used on a metaclass - @dataclass_transform() - class ModelMeta(type): ... - - class ModelBase(metaclass=ModelMeta): ... - - class CustomerModel(ModelBase): - id: int - name: str - - Each of the ``CustomerModel`` classes defined in this example will now - behave similarly to a dataclass created with the ``@dataclasses.dataclass`` - decorator. For example, the type checker will synthesize an ``__init__`` - method. - - The arguments to this decorator can be used to customize this behavior: - - ``eq_default`` indicates whether the ``eq`` parameter is assumed to be - True or False if it is omitted by the caller. - - ``order_default`` indicates whether the ``order`` parameter is - assumed to be True or False if it is omitted by the caller. - - ``kw_only_default`` indicates whether the ``kw_only`` parameter is - assumed to be True or False if it is omitted by the caller. - - ``field_specifiers`` specifies a static list of supported classes - or functions that describe fields, similar to ``dataclasses.field()``. + class _Required(_MaybeRequired, _root=True): + """A special typing construct to mark a key of a total=False TypedDict + as required. For example: - At runtime, this decorator records its arguments in the - ``__dataclass_transform__`` attribute on the decorated object. + class Movie(TypedDict, total=False): + title: Required[str] + year: int - See PEP 681 for details. + m = Movie( + title='The Matrix', # typechecker error if key is omitted + year=1999, + ) + There is no runtime checking that a required key is actually provided + when instantiating a related TypedDict. """ - def decorator(cls_or_fn): - cls_or_fn.__dataclass_transform__ = { - "eq_default": eq_default, - "order_default": order_default, - "kw_only_default": kw_only_default, - "field_specifiers": field_specifiers, - "kwargs": kwargs, - } - return cls_or_fn - return decorator - -if hasattr(typing, "override"): - override = typing.override -else: - _F = typing.TypeVar("_F", bound=typing.Callable[..., typing.Any]) - - def override(__arg: _F) -> _F: - """Indicate that a method is intended to override a method in a base class. - - Usage: - - class Base: - def method(self) -> None: ... - pass - - class Child(Base): - @override - def method(self) -> None: - super().method() - - When this decorator is applied to a method, the type checker will - validate that it overrides a method with the same name on a base class. - This helps prevent bugs that may occur when a base class is changed - without an equivalent change to a child class. - - See PEP 698 for details. + class _NotRequired(_MaybeRequired, _root=True): + """A special typing construct to mark a key of a TypedDict as + potentially missing. 
For example: - """ - return __arg - - -# We have to do some monkey patching to deal with the dual nature of -# Unpack/TypeVarTuple: -# - We want Unpack to be a kind of TypeVar so it gets accepted in -# Generic[Unpack[Ts]] -# - We want it to *not* be treated as a TypeVar for the purposes of -# counting generic parameters, so that when we subscript a generic, -# the runtime doesn't try to substitute the Unpack with the subscripted type. -if not hasattr(typing, "TypeVarTuple"): - typing._collect_type_vars = _collect_type_vars - typing._check_generic = _check_generic - - -# Backport typing.NamedTuple as it exists in Python 3.11. -# In 3.11, the ability to define generic `NamedTuple`s was supported. -# This was explicitly disallowed in 3.9-3.10, and only half-worked in <=3.8. -if sys.version_info >= (3, 11): - NamedTuple = typing.NamedTuple -else: - def _caller(): - try: - return sys._getframe(2).f_globals.get('__name__', '__main__') - except (AttributeError, ValueError): # For platforms without _getframe() - return None + class Movie(TypedDict): + title: str + year: NotRequired[int] - def _make_nmtuple(name, types, module, defaults=()): - fields = [n for n, t in types] - annotations = {n: typing._type_check(t, f"field {n} annotation must be a type") - for n, t in types} - nm_tpl = collections.namedtuple(name, fields, - defaults=defaults, module=module) - nm_tpl.__annotations__ = nm_tpl.__new__.__annotations__ = annotations - # The `_field_types` attribute was removed in 3.9; - # in earlier versions, it is the same as the `__annotations__` attribute - if sys.version_info < (3, 9): - nm_tpl._field_types = annotations - return nm_tpl - - _prohibited_namedtuple_fields = typing._prohibited - _special_namedtuple_fields = frozenset({'__module__', '__name__', '__annotations__'}) - - class _NamedTupleMeta(type): - def __new__(cls, typename, bases, ns): - assert _NamedTuple in bases - for base in bases: - if base is not _NamedTuple and base is not typing.Generic: - raise TypeError( - 'can only inherit from a NamedTuple type and Generic') - bases = tuple(tuple if base is _NamedTuple else base for base in bases) - types = ns.get('__annotations__', {}) - default_names = [] - for field_name in types: - if field_name in ns: - default_names.append(field_name) - elif default_names: - raise TypeError(f"Non-default namedtuple field {field_name} " - f"cannot follow default field" - f"{'s' if len(default_names) > 1 else ''} " - f"{', '.join(default_names)}") - nm_tpl = _make_nmtuple( - typename, types.items(), - defaults=[ns[n] for n in default_names], - module=ns['__module__'] + m = Movie( + title='The Matrix', # typechecker error if key is omitted + year=1999, ) - nm_tpl.__bases__ = bases - if typing.Generic in bases: - class_getitem = typing.Generic.__class_getitem__.__func__ - nm_tpl.__class_getitem__ = classmethod(class_getitem) - # update from user namespace without overriding special namedtuple attributes - for key in ns: - if key in _prohibited_namedtuple_fields: - raise AttributeError("Cannot overwrite NamedTuple attribute " + key) - elif key not in _special_namedtuple_fields and key not in nm_tpl._fields: - setattr(nm_tpl, key, ns[key]) - if typing.Generic in bases: - nm_tpl.__init_subclass__() - return nm_tpl - - def NamedTuple(__typename, __fields=None, **kwargs): - if __fields is None: - __fields = kwargs.items() - elif kwargs: - raise TypeError("Either list of fields or keywords" - " can be provided to NamedTuple, not both") - return _make_nmtuple(__typename, __fields, module=_caller()) - - 
    NamedTuple.__doc__ = typing.NamedTuple.__doc__
-    _NamedTuple = type.__new__(_NamedTupleMeta, 'NamedTuple', (), {})
-
-    # On 3.8+, alter the signature so that it matches typing.NamedTuple.
-    # The signature of typing.NamedTuple on >=3.8 is invalid syntax in Python 3.7,
-    # so just leave the signature as it is on 3.7.
-    if sys.version_info >= (3, 8):
-        NamedTuple.__text_signature__ = '(typename, fields=None, /, **kwargs)'
-
-    def _namedtuple_mro_entries(bases):
-        assert NamedTuple in bases
-        return (_NamedTuple,)
+    """
-    NamedTuple.__mro_entries__ = _namedtuple_mro_entries
+    Required = _Required(_root=True)
+    NotRequired = _NotRequired(_root=True)
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/urllib3/__init__.py b/venv/lib/python3.10/site-packages/pip/_vendor/urllib3/__init__.py
index c6fa382..fe86b59 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/urllib3/__init__.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/urllib3/__init__.py
@@ -19,23 +19,6 @@
 from .util.timeout import Timeout
 from .util.url import get_host
 
-# === NOTE TO REPACKAGERS AND VENDORS ===
-# Please delete this block, this logic is only
-# for urllib3 being distributed via PyPI.
-# See: https://github.com/urllib3/urllib3/issues/2680
-try:
-    import urllib3_secure_extra  # type: ignore # noqa: F401
-except ImportError:
-    pass
-else:
-    warnings.warn(
-        "'urllib3[secure]' extra is deprecated and will be removed "
-        "in a future release of urllib3 2.x. Read more in this issue: "
-        "https://github.com/urllib3/urllib3/issues/2680",
-        category=DeprecationWarning,
-        stacklevel=2,
-    )
-
 __author__ = "Andrey Petrov (andrey.petrov@shazow.net)"
 __license__ = "MIT"
 __version__ = __version__
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/urllib3/_version.py b/venv/lib/python3.10/site-packages/pip/_vendor/urllib3/_version.py
index 6fbc84b..fa8979d 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/urllib3/_version.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/urllib3/_version.py
@@ -1,2 +1,2 @@
 # This file is protected via CODEOWNERS
-__version__ = "1.26.12"
+__version__ = "1.26.8"
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/urllib3/connection.py b/venv/lib/python3.10/site-packages/pip/_vendor/urllib3/connection.py
index 10fb36c..4d92ac6 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/urllib3/connection.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/urllib3/connection.py
@@ -68,7 +68,7 @@ class BrokenPipeError(Exception):
 
 # When it comes time to update this value as a part of regular maintenance
 # (ie test_recent_date is failing) update it to ~6 months before the current date.
-RECENT_DATE = datetime.date(2022, 1, 1)
+RECENT_DATE = datetime.date(2020, 7, 1)
 
 _CONTAINS_CONTROL_CHAR_RE = re.compile(r"[^-!#$%&'*+.^_`|~0-9a-zA-Z]")
 
@@ -355,15 +355,17 @@ def set_cert(
 
     def connect(self):
         # Add certificate verification
-        self.sock = conn = self._new_conn()
+        conn = self._new_conn()
         hostname = self.host
         tls_in_tls = False
 
         if self._is_using_tunnel():
             if self.tls_in_tls_required:
-                self.sock = conn = self._connect_tls_proxy(hostname, conn)
+                conn = self._connect_tls_proxy(hostname, conn)
                 tls_in_tls = True
 
+            self.sock = conn
+
             # Calls self._set_hostport(), so self.host is
             # self._tunnel_host below.
            self._tunnel()
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/urllib3/connectionpool.py b/venv/lib/python3.10/site-packages/pip/_vendor/urllib3/connectionpool.py
index 96339e9..15bffcb 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/urllib3/connectionpool.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/urllib3/connectionpool.py
@@ -767,8 +767,6 @@ def _is_ssl_error_message_from_http_proxy(ssl_error):
                 isinstance(e, BaseSSLError)
                 and self.proxy
                 and _is_ssl_error_message_from_http_proxy(e)
-                and conn.proxy
-                and conn.proxy.scheme == "https"
             ):
                 e = ProxyError(
                     "Your proxy appears to only use HTTP and not HTTPS, "
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/urllib3/contrib/pyopenssl.py b/venv/lib/python3.10/site-packages/pip/_vendor/urllib3/contrib/pyopenssl.py
index 528764a..3130f51 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/urllib3/contrib/pyopenssl.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/urllib3/contrib/pyopenssl.py
@@ -73,20 +73,11 @@ class UnsupportedExtension(Exception):
 import logging
 import ssl
 import sys
-import warnings
 
 from .. import util
 from ..packages import six
 from ..util.ssl_ import PROTOCOL_TLS_CLIENT
 
-warnings.warn(
-    "'urllib3.contrib.pyopenssl' module is deprecated and will be removed "
-    "in a future release of urllib3 2.x. Read more in this issue: "
-    "https://github.com/urllib3/urllib3/issues/2680",
-    category=DeprecationWarning,
-    stacklevel=2,
-)
-
 __all__ = ["inject_into_urllib3", "extract_from_urllib3"]
 
 # SNI always works.
@@ -415,6 +406,7 @@ def makefile(self, mode, bufsize=-1):
             self._makefile_refs += 1
             return _fileobject(self, mode, bufsize, close=True)
 
+
 else:  # Platform-specific: Python 3
     makefile = backport_makefile
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/urllib3/contrib/securetransport.py b/venv/lib/python3.10/site-packages/pip/_vendor/urllib3/contrib/securetransport.py
index 4a06bc6..b4ca80b 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/urllib3/contrib/securetransport.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/urllib3/contrib/securetransport.py
@@ -770,6 +770,7 @@ def makefile(self, mode, bufsize=-1):
         self._makefile_refs += 1
         return _fileobject(self, mode, bufsize, close=True)
 
+
 else:  # Platform-specific: Python 3
 
     def makefile(self, mode="r", buffering=None, *args, **kwargs):
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/urllib3/packages/six.py b/venv/lib/python3.10/site-packages/pip/_vendor/urllib3/packages/six.py
index f099a3d..ba50acb 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/urllib3/packages/six.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/urllib3/packages/six.py
@@ -772,6 +772,7 @@ def reraise(tp, value, tb=None):
             value = None
             tb = None
 
+
 else:
 
     def exec_(_code_, _globs_=None, _locs_=None):
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/urllib3/poolmanager.py b/venv/lib/python3.10/site-packages/pip/_vendor/urllib3/poolmanager.py
index ca4ec34..3a31a28 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/urllib3/poolmanager.py
+++ b/venv/lib/python3.10/site-packages/pip/_vendor/urllib3/poolmanager.py
@@ -34,7 +34,6 @@
     "ca_cert_dir",
     "ssl_context",
     "key_password",
-    "server_hostname",
 )
 
 # All known keyword arguments that could be provided to the pool manager, its
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/urllib3/response.py b/venv/lib/python3.10/site-packages/pip/_vendor/urllib3/response.py
index 4969b70..38693f4 100644
--- a/venv/lib/python3.10/site-packages/pip/_vendor/urllib3/response.py
a/venv/lib/python3.10/site-packages/pip/_vendor/urllib3/response.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/urllib3/response.py @@ -2,15 +2,16 @@ import io import logging -import sys import zlib from contextlib import contextmanager from socket import error as SocketError from socket import timeout as SocketTimeout -brotli = None +try: + import brotli +except ImportError: + brotli = None -from . import util from ._collections import HTTPHeaderDict from .connection import BaseSSLError, HTTPException from .exceptions import ( @@ -477,54 +478,6 @@ def _error_catcher(self): if self._original_response and self._original_response.isclosed(): self.release_conn() - def _fp_read(self, amt): - """ - Read a response with the thought that reading the number of bytes - larger than can fit in a 32-bit int at a time via SSL in some - known cases leads to an overflow error that has to be prevented - if `amt` or `self.length_remaining` indicate that a problem may - happen. - - The known cases: - * 3.8 <= CPython < 3.9.7 because of a bug - https://github.com/urllib3/urllib3/issues/2513#issuecomment-1152559900. - * urllib3 injected with pyOpenSSL-backed SSL-support. - * CPython < 3.10 only when `amt` does not fit 32-bit int. - """ - assert self._fp - c_int_max = 2 ** 31 - 1 - if ( - ( - (amt and amt > c_int_max) - or (self.length_remaining and self.length_remaining > c_int_max) - ) - and not util.IS_SECURETRANSPORT - and (util.IS_PYOPENSSL or sys.version_info < (3, 10)) - ): - buffer = io.BytesIO() - # Besides `max_chunk_amt` being a maximum chunk size, it - # affects memory overhead of reading a response by this - # method in CPython. - # `c_int_max` equal to 2 GiB - 1 byte is the actual maximum - # chunk size that does not lead to an overflow error, but - # 256 MiB is a compromise. - max_chunk_amt = 2 ** 28 - while amt is None or amt != 0: - if amt is not None: - chunk_amt = min(amt, max_chunk_amt) - amt -= chunk_amt - else: - chunk_amt = max_chunk_amt - data = self._fp.read(chunk_amt) - if not data: - break - buffer.write(data) - del data # to reduce peak memory usage by `max_chunk_amt`. - return buffer.getvalue() - else: - # StringIO doesn't like amt=None - return self._fp.read(amt) if amt is not None else self._fp.read() - def read(self, amt=None, decode_content=None, cache_content=False): """ Similar to :meth:`http.client.HTTPResponse.read`, but with two additional @@ -557,11 +510,13 @@ def read(self, amt=None, decode_content=None, cache_content=False): fp_closed = getattr(self._fp, "closed", False) with self._error_catcher(): - data = self._fp_read(amt) if not fp_closed else b"" if amt is None: + # cStringIO doesn't like amt=None + data = self._fp.read() if not fp_closed else b"" flush_decoder = True else: cache_content = False + data = self._fp.read(amt) if not fp_closed else b"" if ( amt != 0 and not data ): # Platform-specific: Buggy versions of Python. 
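For context, the response.py hunk above and the util/request.py hunk below are two halves of the same optional-dependency pattern in the committed (older) urllib3 copy: probe for the brotli codec at import time, advertise "br" in Accept-Encoding only when the probe succeeds, and fail loudly only if a brotli-coded body actually arrives without a decoder. A minimal standalone sketch of that pattern follows; it is not part of the patch, and SUPPORTED_ENCODINGS and decompress are illustrative names, not urllib3's real API.

    # Standalone sketch -- not part of the patch. Mirrors the optional
    # brotli import restored in response.py and the Accept-Encoding gate
    # added in util/request.py. Illustrative names only.
    try:
        import brotli  # optional third-party codec; absent in many installs
    except ImportError:
        brotli = None

    SUPPORTED_ENCODINGS = "gzip,deflate"
    if brotli is not None:
        # Advertise "br" only when a decoder is actually importable, so a
        # server never sends an encoding this client cannot undo.
        SUPPORTED_ENCODINGS += ",br"


    def decompress(data: bytes, encoding: str) -> bytes:
        """Decode one response body according to its Content-Encoding."""
        if encoding == "br":
            if brotli is None:
                raise RuntimeError("body is brotli-coded but no codec is installed")
            return brotli.decompress(data)
        if encoding == "gzip":
            import gzip
            return gzip.decompress(data)
        return data  # identity / unknown codings pass through unchanged

The point of gating in two places is symmetry: the header side (util/request.py) and the decoder side (response.py) consult the same import-time probe, so the set of encodings a request advertises always matches what the response path can decode.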
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/urllib3/util/request.py b/venv/lib/python3.10/site-packages/pip/_vendor/urllib3/util/request.py index 330766e..2510338 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/urllib3/util/request.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/urllib3/util/request.py @@ -13,6 +13,12 @@ SKIPPABLE_HEADERS = frozenset(["accept-encoding", "host", "user-agent"]) ACCEPT_ENCODING = "gzip,deflate" +try: + import brotli as _unused_module_brotli # noqa: F401 +except ImportError: + pass +else: + ACCEPT_ENCODING += ",br" _FAILEDTELL = object() diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/urllib3/util/ssl_match_hostname.py b/venv/lib/python3.10/site-packages/pip/_vendor/urllib3/util/ssl_match_hostname.py index 1dd950c..a4b4a56 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/urllib3/util/ssl_match_hostname.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/urllib3/util/ssl_match_hostname.py @@ -112,9 +112,11 @@ def match_hostname(cert, hostname): try: # Divergence from upstream: ipaddress can't handle byte str host_ip = ipaddress.ip_address(_to_unicode(hostname)) - except (UnicodeError, ValueError): - # ValueError: Not an IP address (common case) - # UnicodeError: Divergence from upstream: Have to deal with ipaddress not taking + except ValueError: + # Not an IP address (common case) + host_ip = None + except UnicodeError: + # Divergence from upstream: Have to deal with ipaddress not taking # byte strings. addresses should be all ascii, so we consider it not # an ipaddress in this case host_ip = None @@ -122,7 +124,7 @@ def match_hostname(cert, hostname): # Divergence from upstream: Make ipaddress library optional if ipaddress is None: host_ip = None - else: # Defensive + else: raise dnsnames = [] san = cert.get("subjectAltName", ()) diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/urllib3/util/url.py b/venv/lib/python3.10/site-packages/pip/_vendor/urllib3/util/url.py index 86bd8b4..3651c43 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/urllib3/util/url.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/urllib3/util/url.py @@ -279,9 +279,6 @@ def _normalize_host(host, scheme): if scheme in NORMALIZABLE_SCHEMES: is_ipv6 = IPV6_ADDRZ_RE.match(host) if is_ipv6: - # IPv6 hosts of the form 'a::b%zone' are encoded in a URL as - # such per RFC 6874: 'a::b%25zone'. Unquote the ZoneID - # separator as necessary to return a valid RFC 4007 scoped IP. match = ZONE_ID_RE.search(host) if match: start, end = match.span(1) @@ -334,7 +331,7 @@ def parse_url(url): """ Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is performed to parse incomplete urls. Fields not provided will be None. - This parser is RFC 3986 and RFC 6874 compliant. + This parser is RFC 3986 compliant. The parser logic and helper functions are based heavily on work done in the ``rfc3986`` module. diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/urllib3/util/wait.py b/venv/lib/python3.10/site-packages/pip/_vendor/urllib3/util/wait.py index 21b4590..c280646 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/urllib3/util/wait.py +++ b/venv/lib/python3.10/site-packages/pip/_vendor/urllib3/util/wait.py @@ -42,6 +42,7 @@ class NoWayToWaitForSocketError(Exception): def _retry_on_intr(fn, timeout): return fn(timeout) + else: # Old and broken Pythons. 
def _retry_on_intr(fn, timeout): diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/vendor.txt b/venv/lib/python3.10/site-packages/pip/_vendor/vendor.txt index 9e9d4c1..23f4cf3 100644 --- a/venv/lib/python3.10/site-packages/pip/_vendor/vendor.txt +++ b/venv/lib/python3.10/site-packages/pip/_vendor/vendor.txt @@ -1,23 +1,25 @@ -CacheControl==0.12.11 # Make sure to update the license in pyproject.toml for this. -colorama==0.4.5 -distlib==0.3.6 -distro==1.7.0 -msgpack==1.0.4 +CacheControl==0.12.10 # Make sure to update the license in pyproject.toml for this. +colorama==0.4.4 +distlib==0.3.4 +distro==1.6.0 +html5lib==1.1 +msgpack==1.0.3 packaging==21.3 -pep517==0.13.0 -platformdirs==2.5.2 -pyparsing==3.0.9 -requests==2.28.1 - certifi==2022.09.24 - chardet==5.0.0 - idna==3.4 - urllib3==1.26.12 -rich==12.5.1 - pygments==2.13.0 - typing_extensions==4.4.0 +pep517==0.12.0 +platformdirs==2.4.1 +progress==1.6 +pyparsing==3.0.7 +requests==2.27.1 + certifi==2021.10.08 + chardet==4.0.0 + idna==3.3 + urllib3==1.26.8 +rich==11.0.0 + pygments==2.11.2 + typing_extensions==4.0.1 resolvelib==0.8.1 setuptools==44.0.0 six==1.16.0 -tenacity==8.1.0 -tomli==2.0.1 +tenacity==8.0.1 +tomli==1.0.3 webencodings==0.5.1 diff --git a/venv/lib/python3.10/site-packages/pkg_resources/__init__.py b/venv/lib/python3.10/site-packages/pkg_resources/__init__.py index d59226a..955fdc4 100644 --- a/venv/lib/python3.10/site-packages/pkg_resources/__init__.py +++ b/venv/lib/python3.10/site-packages/pkg_resources/__init__.py @@ -71,19 +71,12 @@ except ImportError: importlib_machinery = None -from pkg_resources.extern.jaraco.text import ( - yield_lines, - drop_comment, - join_continuation, -) - from pkg_resources.extern import appdirs from pkg_resources.extern import packaging __import__('pkg_resources.extern.packaging.version') __import__('pkg_resources.extern.packaging.specifiers') __import__('pkg_resources.extern.packaging.requirements') __import__('pkg_resources.extern.packaging.markers') -__import__('pkg_resources.extern.packaging.utils') if sys.version_info < (3, 5): raise RuntimeError("Python 3.5 or later is required") @@ -555,7 +548,6 @@ def __init__(self, entries=None): self.entries = [] self.entry_keys = {} self.by_key = {} - self.normalized_to_canonical_keys = {} self.callbacks = [] if entries is None: @@ -636,14 +628,6 @@ def find(self, req): is returned. 
""" dist = self.by_key.get(req.key) - - if dist is None: - canonical_key = self.normalized_to_canonical_keys.get(req.key) - - if canonical_key is not None: - req.key = canonical_key - dist = self.by_key.get(canonical_key) - if dist is not None and dist not in req: # XXX add more info raise VersionConflict(dist, req) @@ -712,8 +696,6 @@ def add(self, dist, entry=None, insert=True, replace=False): return self.by_key[dist.key] = dist - normalized_name = packaging.utils.canonicalize_name(dist.key) - self.normalized_to_canonical_keys[normalized_name] = dist.key if dist.key not in keys: keys.append(dist.key) if dist.key not in keys2: @@ -934,15 +916,14 @@ def _added_new(self, dist): def __getstate__(self): return ( self.entries[:], self.entry_keys.copy(), self.by_key.copy(), - self.normalized_to_canonical_keys.copy(), self.callbacks[:] + self.callbacks[:] ) - def __setstate__(self, e_k_b_n_c): - entries, keys, by_key, normalized_to_canonical_keys, callbacks = e_k_b_n_c + def __setstate__(self, e_k_b_c): + entries, keys, by_key, callbacks = e_k_b_c self.entries = entries[:] self.entry_keys = keys.copy() self.by_key = by_key.copy() - self.normalized_to_canonical_keys = normalized_to_canonical_keys.copy() self.callbacks = callbacks[:] @@ -1600,7 +1581,7 @@ class EggProvider(NullProvider): """Provider based on a virtual filesystem""" def __init__(self, module): - super().__init__(module) + NullProvider.__init__(self, module) self._setup_prefix() def _setup_prefix(self): @@ -1720,7 +1701,7 @@ class ZipProvider(EggProvider): _zip_manifests = MemoizedZipManifests() def __init__(self, module): - super().__init__(module) + EggProvider.__init__(self, module) self.zip_pre = self.loader.archive + os.sep def _zipinfo_name(self, fspath): @@ -2224,14 +2205,12 @@ def _handle_ns(packageName, path_item): # use find_spec (PEP 451) and fall-back to find_module (PEP 302) try: - spec = importer.find_spec(packageName) + loader = importer.find_spec(packageName).loader except AttributeError: # capture warnings due to #1111 with warnings.catch_warnings(): warnings.simplefilter("ignore") loader = importer.find_module(packageName) - else: - loader = spec.loader if spec else None if loader is None: return None @@ -2417,6 +2396,21 @@ def _set_parent_ns(packageName): setattr(sys.modules[parent], name, sys.modules[packageName]) +def _nonblank(str): + return str and not str.startswith('#') + + +@functools.singledispatch +def yield_lines(iterable): + """Yield valid lines of a string or iterable""" + return itertools.chain.from_iterable(map(yield_lines, iterable)) + + +@yield_lines.register(str) +def _(text): + return filter(_nonblank, map(str.strip, text.splitlines())) + + MODULE = re.compile(r"\w+(\.\w+)*$").match EGG_NAME = re.compile( r""" @@ -3053,12 +3047,12 @@ def reqs_for_extra(extra): if not req.marker or req.marker.evaluate({'extra': extra}): yield req - common = types.MappingProxyType(dict.fromkeys(reqs_for_extra(None))) + common = frozenset(reqs_for_extra(None)) dm[None].extend(common) for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []: s_extra = safe_extra(extra.strip()) - dm[s_extra] = [r for r in reqs_for_extra(extra) if r not in common] + dm[s_extra] = list(frozenset(reqs_for_extra(extra)) - common) return dm @@ -3084,12 +3078,25 @@ def issue_warning(*args, **kw): def parse_requirements(strs): - """ - Yield ``Requirement`` objects for each specification in `strs`. + """Yield ``Requirement`` objects for each specification in `strs` `strs` must be a string, or a (possibly-nested) iterable thereof. 
""" - return map(Requirement, join_continuation(map(drop_comment, yield_lines(strs)))) + # create a steppable iterator, so we can handle \-continuations + lines = iter(yield_lines(strs)) + + for line in lines: + # Drop comments -- a hash without a space may be in a URL. + if ' #' in line: + line = line[:line.find(' #')] + # If there is a line continuation, drop it, and append the next line. + if line.endswith('\\'): + line = line[:-2].strip() + try: + line += next(lines) + except StopIteration: + return + yield Requirement(line) class RequirementParseError(packaging.requirements.InvalidRequirement): diff --git a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/__init__.py b/venv/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/__init__.py deleted file mode 100644 index 34e3a99..0000000 --- a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/__init__.py +++ /dev/null @@ -1,36 +0,0 @@ -"""Read resources contained within a package.""" - -from ._common import ( - as_file, - files, - Package, -) - -from ._legacy import ( - contents, - open_binary, - read_binary, - open_text, - read_text, - is_resource, - path, - Resource, -) - -from .abc import ResourceReader - - -__all__ = [ - 'Package', - 'Resource', - 'ResourceReader', - 'as_file', - 'contents', - 'files', - 'is_resource', - 'open_binary', - 'open_text', - 'path', - 'read_binary', - 'read_text', -] diff --git a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/_adapters.py b/venv/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/_adapters.py deleted file mode 100644 index ea363d8..0000000 --- a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/_adapters.py +++ /dev/null @@ -1,170 +0,0 @@ -from contextlib import suppress -from io import TextIOWrapper - -from . import abc - - -class SpecLoaderAdapter: - """ - Adapt a package spec to adapt the underlying loader. - """ - - def __init__(self, spec, adapter=lambda spec: spec.loader): - self.spec = spec - self.loader = adapter(spec) - - def __getattr__(self, name): - return getattr(self.spec, name) - - -class TraversableResourcesLoader: - """ - Adapt a loader to provide TraversableResources. - """ - - def __init__(self, spec): - self.spec = spec - - def get_resource_reader(self, name): - return CompatibilityFiles(self.spec)._native() - - -def _io_wrapper(file, mode='r', *args, **kwargs): - if mode == 'r': - return TextIOWrapper(file, *args, **kwargs) - elif mode == 'rb': - return file - raise ValueError( - "Invalid mode value '{}', only 'r' and 'rb' are supported".format(mode) - ) - - -class CompatibilityFiles: - """ - Adapter for an existing or non-existent resource reader - to provide a compatibility .files(). - """ - - class SpecPath(abc.Traversable): - """ - Path tied to a module spec. - Can be read and exposes the resource reader children. 
- """ - - def __init__(self, spec, reader): - self._spec = spec - self._reader = reader - - def iterdir(self): - if not self._reader: - return iter(()) - return iter( - CompatibilityFiles.ChildPath(self._reader, path) - for path in self._reader.contents() - ) - - def is_file(self): - return False - - is_dir = is_file - - def joinpath(self, other): - if not self._reader: - return CompatibilityFiles.OrphanPath(other) - return CompatibilityFiles.ChildPath(self._reader, other) - - @property - def name(self): - return self._spec.name - - def open(self, mode='r', *args, **kwargs): - return _io_wrapper(self._reader.open_resource(None), mode, *args, **kwargs) - - class ChildPath(abc.Traversable): - """ - Path tied to a resource reader child. - Can be read but doesn't expose any meaningful children. - """ - - def __init__(self, reader, name): - self._reader = reader - self._name = name - - def iterdir(self): - return iter(()) - - def is_file(self): - return self._reader.is_resource(self.name) - - def is_dir(self): - return not self.is_file() - - def joinpath(self, other): - return CompatibilityFiles.OrphanPath(self.name, other) - - @property - def name(self): - return self._name - - def open(self, mode='r', *args, **kwargs): - return _io_wrapper( - self._reader.open_resource(self.name), mode, *args, **kwargs - ) - - class OrphanPath(abc.Traversable): - """ - Orphan path, not tied to a module spec or resource reader. - Can't be read and doesn't expose any meaningful children. - """ - - def __init__(self, *path_parts): - if len(path_parts) < 1: - raise ValueError('Need at least one path part to construct a path') - self._path = path_parts - - def iterdir(self): - return iter(()) - - def is_file(self): - return False - - is_dir = is_file - - def joinpath(self, other): - return CompatibilityFiles.OrphanPath(*self._path, other) - - @property - def name(self): - return self._path[-1] - - def open(self, mode='r', *args, **kwargs): - raise FileNotFoundError("Can't open orphan path") - - def __init__(self, spec): - self.spec = spec - - @property - def _reader(self): - with suppress(AttributeError): - return self.spec.loader.get_resource_reader(self.spec.name) - - def _native(self): - """ - Return the native reader if it supports files(). - """ - reader = self._reader - return reader if hasattr(reader, 'files') else self - - def __getattr__(self, attr): - return getattr(self._reader, attr) - - def files(self): - return CompatibilityFiles.SpecPath(self.spec, self._reader) - - -def wrap_spec(package): - """ - Construct a package spec with traversable compatibility - on the spec/loader/reader. 
- """ - return SpecLoaderAdapter(package.__spec__, TraversableResourcesLoader) diff --git a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/_common.py b/venv/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/_common.py deleted file mode 100644 index a12e2c7..0000000 --- a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/_common.py +++ /dev/null @@ -1,104 +0,0 @@ -import os -import pathlib -import tempfile -import functools -import contextlib -import types -import importlib - -from typing import Union, Optional -from .abc import ResourceReader, Traversable - -from ._compat import wrap_spec - -Package = Union[types.ModuleType, str] - - -def files(package): - # type: (Package) -> Traversable - """ - Get a Traversable resource from a package - """ - return from_package(get_package(package)) - - -def get_resource_reader(package): - # type: (types.ModuleType) -> Optional[ResourceReader] - """ - Return the package's loader if it's a ResourceReader. - """ - # We can't use - # a issubclass() check here because apparently abc.'s __subclasscheck__() - # hook wants to create a weak reference to the object, but - # zipimport.zipimporter does not support weak references, resulting in a - # TypeError. That seems terrible. - spec = package.__spec__ - reader = getattr(spec.loader, 'get_resource_reader', None) # type: ignore - if reader is None: - return None - return reader(spec.name) # type: ignore - - -def resolve(cand): - # type: (Package) -> types.ModuleType - return cand if isinstance(cand, types.ModuleType) else importlib.import_module(cand) - - -def get_package(package): - # type: (Package) -> types.ModuleType - """Take a package name or module object and return the module. - - Raise an exception if the resolved module is not a package. - """ - resolved = resolve(package) - if wrap_spec(resolved).submodule_search_locations is None: - raise TypeError(f'{package!r} is not a package') - return resolved - - -def from_package(package): - """ - Return a Traversable object for the given package. - - """ - spec = wrap_spec(package) - reader = spec.loader.get_resource_reader(spec.name) - return reader.files() - - -@contextlib.contextmanager -def _tempfile(reader, suffix=''): - # Not using tempfile.NamedTemporaryFile as it leads to deeper 'try' - # blocks due to the need to close the temporary file to work on Windows - # properly. - fd, raw_path = tempfile.mkstemp(suffix=suffix) - try: - try: - os.write(fd, reader()) - finally: - os.close(fd) - del reader - yield pathlib.Path(raw_path) - finally: - try: - os.remove(raw_path) - except FileNotFoundError: - pass - - -@functools.singledispatch -def as_file(path): - """ - Given a Traversable object, return that object as a - path on the local file system in a context manager. - """ - return _tempfile(path.read_bytes, suffix=path.name) - - -@as_file.register(pathlib.Path) -@contextlib.contextmanager -def _(path): - """ - Degenerate behavior for pathlib.Path objects. 
- """ - yield path diff --git a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/_compat.py b/venv/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/_compat.py deleted file mode 100644 index cb9fc82..0000000 --- a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/_compat.py +++ /dev/null @@ -1,98 +0,0 @@ -# flake8: noqa - -import abc -import sys -import pathlib -from contextlib import suppress - -if sys.version_info >= (3, 10): - from zipfile import Path as ZipPath # type: ignore -else: - from ..zipp import Path as ZipPath # type: ignore - - -try: - from typing import runtime_checkable # type: ignore -except ImportError: - - def runtime_checkable(cls): # type: ignore - return cls - - -try: - from typing import Protocol # type: ignore -except ImportError: - Protocol = abc.ABC # type: ignore - - -class TraversableResourcesLoader: - """ - Adapt loaders to provide TraversableResources and other - compatibility. - - Used primarily for Python 3.9 and earlier where the native - loaders do not yet implement TraversableResources. - """ - - def __init__(self, spec): - self.spec = spec - - @property - def path(self): - return self.spec.origin - - def get_resource_reader(self, name): - from . import readers, _adapters - - def _zip_reader(spec): - with suppress(AttributeError): - return readers.ZipReader(spec.loader, spec.name) - - def _namespace_reader(spec): - with suppress(AttributeError, ValueError): - return readers.NamespaceReader(spec.submodule_search_locations) - - def _available_reader(spec): - with suppress(AttributeError): - return spec.loader.get_resource_reader(spec.name) - - def _native_reader(spec): - reader = _available_reader(spec) - return reader if hasattr(reader, 'files') else None - - def _file_reader(spec): - try: - path = pathlib.Path(self.path) - except TypeError: - return None - if path.exists(): - return readers.FileReader(self) - - return ( - # native reader if it supplies 'files' - _native_reader(self.spec) - or - # local ZipReader if a zip module - _zip_reader(self.spec) - or - # local NamespaceReader if a namespace module - _namespace_reader(self.spec) - or - # local FileReader - _file_reader(self.spec) - # fallback - adapt the spec ResourceReader to TraversableReader - or _adapters.CompatibilityFiles(self.spec) - ) - - -def wrap_spec(package): - """ - Construct a package spec with traversable compatibility - on the spec/loader/reader. - - Supersedes _adapters.wrap_spec to use TraversableResourcesLoader - from above for older Python compatibility (<3.10). - """ - from . import _adapters - - return _adapters.SpecLoaderAdapter(package.__spec__, TraversableResourcesLoader) diff --git a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/_itertools.py b/venv/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/_itertools.py deleted file mode 100644 index cce0558..0000000 --- a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/_itertools.py +++ /dev/null @@ -1,35 +0,0 @@ -from itertools import filterfalse - -from typing import ( - Callable, - Iterable, - Iterator, - Optional, - Set, - TypeVar, - Union, -) - -# Type and type variable definitions -_T = TypeVar('_T') -_U = TypeVar('_U') - - -def unique_everseen( - iterable: Iterable[_T], key: Optional[Callable[[_T], _U]] = None -) -> Iterator[_T]: - "List unique elements, preserving order. Remember all elements ever seen." 
- # unique_everseen('AAAABBBCCDAABBB') --> A B C D - # unique_everseen('ABBCcAD', str.lower) --> A B C D - seen: Set[Union[_T, _U]] = set() - seen_add = seen.add - if key is None: - for element in filterfalse(seen.__contains__, iterable): - seen_add(element) - yield element - else: - for element in iterable: - k = key(element) - if k not in seen: - seen_add(k) - yield element diff --git a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/_legacy.py b/venv/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/_legacy.py deleted file mode 100644 index 1d5d3f1..0000000 --- a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/_legacy.py +++ /dev/null @@ -1,121 +0,0 @@ -import functools -import os -import pathlib -import types -import warnings - -from typing import Union, Iterable, ContextManager, BinaryIO, TextIO, Any - -from . import _common - -Package = Union[types.ModuleType, str] -Resource = str - - -def deprecated(func): - @functools.wraps(func) - def wrapper(*args, **kwargs): - warnings.warn( - f"{func.__name__} is deprecated. Use files() instead. " - "Refer to https://importlib-resources.readthedocs.io" - "/en/latest/using.html#migrating-from-legacy for migration advice.", - DeprecationWarning, - stacklevel=2, - ) - return func(*args, **kwargs) - - return wrapper - - -def normalize_path(path): - # type: (Any) -> str - """Normalize a path by ensuring it is a string. - - If the resulting string contains path separators, an exception is raised. - """ - str_path = str(path) - parent, file_name = os.path.split(str_path) - if parent: - raise ValueError(f'{path!r} must be only a file name') - return file_name - - -@deprecated -def open_binary(package: Package, resource: Resource) -> BinaryIO: - """Return a file-like object opened for binary reading of the resource.""" - return (_common.files(package) / normalize_path(resource)).open('rb') - - -@deprecated -def read_binary(package: Package, resource: Resource) -> bytes: - """Return the binary contents of the resource.""" - return (_common.files(package) / normalize_path(resource)).read_bytes() - - -@deprecated -def open_text( - package: Package, - resource: Resource, - encoding: str = 'utf-8', - errors: str = 'strict', -) -> TextIO: - """Return a file-like object opened for text reading of the resource.""" - return (_common.files(package) / normalize_path(resource)).open( - 'r', encoding=encoding, errors=errors - ) - - -@deprecated -def read_text( - package: Package, - resource: Resource, - encoding: str = 'utf-8', - errors: str = 'strict', -) -> str: - """Return the decoded string of the resource. - - The decoding-related arguments have the same semantics as those of - bytes.decode(). - """ - with open_text(package, resource, encoding, errors) as fp: - return fp.read() - - -@deprecated -def contents(package: Package) -> Iterable[str]: - """Return an iterable of entries in `package`. - - Note that not all entries are resources. Specifically, directories are - not considered resources. Use `is_resource()` on each entry returned here - to check if it is a resource or not. - """ - return [path.name for path in _common.files(package).iterdir()] - - -@deprecated -def is_resource(package: Package, name: str) -> bool: - """True if `name` is a resource inside `package`. - - Directories are *not* resources. 
- """ - resource = normalize_path(name) - return any( - traversable.name == resource and traversable.is_file() - for traversable in _common.files(package).iterdir() - ) - - -@deprecated -def path( - package: Package, - resource: Resource, -) -> ContextManager[pathlib.Path]: - """A context manager providing a file path object to the resource. - - If the resource does not already exist on its own on the file system, - a temporary file will be created. If the file was created, the file - will be deleted upon exiting the context manager (no exception is - raised if the file was deleted prior to the context manager - exiting). - """ - return _common.as_file(_common.files(package) / normalize_path(resource)) diff --git a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/abc.py b/venv/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/abc.py deleted file mode 100644 index d39dc1a..0000000 --- a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/abc.py +++ /dev/null @@ -1,137 +0,0 @@ -import abc -from typing import BinaryIO, Iterable, Text - -from ._compat import runtime_checkable, Protocol - - -class ResourceReader(metaclass=abc.ABCMeta): - """Abstract base class for loaders to provide resource reading support.""" - - @abc.abstractmethod - def open_resource(self, resource: Text) -> BinaryIO: - """Return an opened, file-like object for binary reading. - - The 'resource' argument is expected to represent only a file name. - If the resource cannot be found, FileNotFoundError is raised. - """ - # This deliberately raises FileNotFoundError instead of - # NotImplementedError so that if this method is accidentally called, - # it'll still do the right thing. - raise FileNotFoundError - - @abc.abstractmethod - def resource_path(self, resource: Text) -> Text: - """Return the file system path to the specified resource. - - The 'resource' argument is expected to represent only a file name. - If the resource does not exist on the file system, raise - FileNotFoundError. - """ - # This deliberately raises FileNotFoundError instead of - # NotImplementedError so that if this method is accidentally called, - # it'll still do the right thing. - raise FileNotFoundError - - @abc.abstractmethod - def is_resource(self, path: Text) -> bool: - """Return True if the named 'path' is a resource. - - Files are resources, directories are not. - """ - raise FileNotFoundError - - @abc.abstractmethod - def contents(self) -> Iterable[str]: - """Return an iterable of entries in `package`.""" - raise FileNotFoundError - - -@runtime_checkable -class Traversable(Protocol): - """ - An object with a subset of pathlib.Path methods suitable for - traversing directories and opening files. 
- """ - - @abc.abstractmethod - def iterdir(self): - """ - Yield Traversable objects in self - """ - - def read_bytes(self): - """ - Read contents of self as bytes - """ - with self.open('rb') as strm: - return strm.read() - - def read_text(self, encoding=None): - """ - Read contents of self as text - """ - with self.open(encoding=encoding) as strm: - return strm.read() - - @abc.abstractmethod - def is_dir(self) -> bool: - """ - Return True if self is a directory - """ - - @abc.abstractmethod - def is_file(self) -> bool: - """ - Return True if self is a file - """ - - @abc.abstractmethod - def joinpath(self, child): - """ - Return Traversable child in self - """ - - def __truediv__(self, child): - """ - Return Traversable child in self - """ - return self.joinpath(child) - - @abc.abstractmethod - def open(self, mode='r', *args, **kwargs): - """ - mode may be 'r' or 'rb' to open as text or binary. Return a handle - suitable for reading (same as pathlib.Path.open). - - When opening as text, accepts encoding parameters such as those - accepted by io.TextIOWrapper. - """ - - @abc.abstractproperty - def name(self) -> str: - """ - The base name of this object without any parent references. - """ - - -class TraversableResources(ResourceReader): - """ - The required interface for providing traversable - resources. - """ - - @abc.abstractmethod - def files(self): - """Return a Traversable object for the loaded package.""" - - def open_resource(self, resource): - return self.files().joinpath(resource).open('rb') - - def resource_path(self, resource): - raise FileNotFoundError(resource) - - def is_resource(self, path): - return self.files().joinpath(path).is_file() - - def contents(self): - return (item.name for item in self.files().iterdir()) diff --git a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/readers.py b/venv/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/readers.py deleted file mode 100644 index f1190ca..0000000 --- a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/readers.py +++ /dev/null @@ -1,122 +0,0 @@ -import collections -import pathlib -import operator - -from . import abc - -from ._itertools import unique_everseen -from ._compat import ZipPath - - -def remove_duplicates(items): - return iter(collections.OrderedDict.fromkeys(items)) - - -class FileReader(abc.TraversableResources): - def __init__(self, loader): - self.path = pathlib.Path(loader.path).parent - - def resource_path(self, resource): - """ - Return the file system path to prevent - `resources.path()` from creating a temporary - copy. - """ - return str(self.path.joinpath(resource)) - - def files(self): - return self.path - - -class ZipReader(abc.TraversableResources): - def __init__(self, loader, module): - _, _, name = module.rpartition('.') - self.prefix = loader.prefix.replace('\\', '/') + name + '/' - self.archive = loader.archive - - def open_resource(self, resource): - try: - return super().open_resource(resource) - except KeyError as exc: - raise FileNotFoundError(exc.args[0]) - - def is_resource(self, path): - # workaround for `zipfile.Path.is_file` returning true - # for non-existent paths. - target = self.files().joinpath(path) - return target.is_file() and target.exists() - - def files(self): - return ZipPath(self.archive, self.prefix) - - -class MultiplexedPath(abc.Traversable): - """ - Given a series of Traversable objects, implement a merged - version of the interface across all objects. 
Useful for - namespace packages which may be multihomed at a single - name. - """ - - def __init__(self, *paths): - self._paths = list(map(pathlib.Path, remove_duplicates(paths))) - if not self._paths: - message = 'MultiplexedPath must contain at least one path' - raise FileNotFoundError(message) - if not all(path.is_dir() for path in self._paths): - raise NotADirectoryError('MultiplexedPath only supports directories') - - def iterdir(self): - files = (file for path in self._paths for file in path.iterdir()) - return unique_everseen(files, key=operator.attrgetter('name')) - - def read_bytes(self): - raise FileNotFoundError(f'{self} is not a file') - - def read_text(self, *args, **kwargs): - raise FileNotFoundError(f'{self} is not a file') - - def is_dir(self): - return True - - def is_file(self): - return False - - def joinpath(self, child): - # first try to find child in current paths - for file in self.iterdir(): - if file.name == child: - return file - # if it does not exist, construct it with the first path - return self._paths[0] / child - - __truediv__ = joinpath - - def open(self, *args, **kwargs): - raise FileNotFoundError(f'{self} is not a file') - - @property - def name(self): - return self._paths[0].name - - def __repr__(self): - paths = ', '.join(f"'{path}'" for path in self._paths) - return f'MultiplexedPath({paths})' - - -class NamespaceReader(abc.TraversableResources): - def __init__(self, namespace_path): - if 'NamespacePath' not in str(namespace_path): - raise ValueError('Invalid path') - self.path = MultiplexedPath(*list(namespace_path)) - - def resource_path(self, resource): - """ - Return the file system path to prevent - `resources.path()` from creating a temporary - copy. - """ - return str(self.path.joinpath(resource)) - - def files(self): - return self.path diff --git a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/simple.py b/venv/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/simple.py deleted file mode 100644 index da073cb..0000000 --- a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/simple.py +++ /dev/null @@ -1,116 +0,0 @@ -""" -Interface adapters for low-level readers. -""" - -import abc -import io -import itertools -from typing import BinaryIO, List - -from .abc import Traversable, TraversableResources - - -class SimpleReader(abc.ABC): - """ - The minimum, low-level interface required from a resource - provider. - """ - - @abc.abstractproperty - def package(self): - # type: () -> str - """ - The name of the package for which this reader loads resources. - """ - - @abc.abstractmethod - def children(self): - # type: () -> List['SimpleReader'] - """ - Obtain an iterable of SimpleReader for available - child containers (e.g. directories). - """ - - @abc.abstractmethod - def resources(self): - # type: () -> List[str] - """ - Obtain available named resources for this virtual package. - """ - - @abc.abstractmethod - def open_binary(self, resource): - # type: (str) -> BinaryIO - """ - Obtain a File-like for a named resource. - """ - - @property - def name(self): - return self.package.split('.')[-1] - - -class ResourceHandle(Traversable): - """ - Handle to a named resource in a ResourceReader. 
- """ - - def __init__(self, parent, name): - # type: (ResourceContainer, str) -> None - self.parent = parent - self.name = name # type: ignore - - def is_file(self): - return True - - def is_dir(self): - return False - - def open(self, mode='r', *args, **kwargs): - stream = self.parent.reader.open_binary(self.name) - if 'b' not in mode: - stream = io.TextIOWrapper(*args, **kwargs) - return stream - - def joinpath(self, name): - raise RuntimeError("Cannot traverse into a resource") - - -class ResourceContainer(Traversable): - """ - Traversable container for a package's resources via its reader. - """ - - def __init__(self, reader): - # type: (SimpleReader) -> None - self.reader = reader - - def is_dir(self): - return True - - def is_file(self): - return False - - def iterdir(self): - files = (ResourceHandle(self, name) for name in self.reader.resources) - dirs = map(ResourceContainer, self.reader.children()) - return itertools.chain(files, dirs) - - def open(self, *args, **kwargs): - raise IsADirectoryError() - - def joinpath(self, name): - return next( - traversable for traversable in self.iterdir() if traversable.name == name - ) - - -class TraversableReader(TraversableResources, SimpleReader): - """ - A TraversableResources based on SimpleReader. Resource providers - may derive from this class to provide the TraversableResources - interface by supplying the SimpleReader interface. - """ - - def files(self): - return ResourceContainer(self) diff --git a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/jaraco/context.py b/venv/lib/python3.10/site-packages/pkg_resources/_vendor/jaraco/context.py deleted file mode 100644 index 87a4e3d..0000000 --- a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/jaraco/context.py +++ /dev/null @@ -1,213 +0,0 @@ -import os -import subprocess -import contextlib -import functools -import tempfile -import shutil -import operator - - -@contextlib.contextmanager -def pushd(dir): - orig = os.getcwd() - os.chdir(dir) - try: - yield dir - finally: - os.chdir(orig) - - -@contextlib.contextmanager -def tarball_context(url, target_dir=None, runner=None, pushd=pushd): - """ - Get a tarball, extract it, change to that directory, yield, then - clean up. - `runner` is the function to invoke commands. - `pushd` is a context manager for changing the directory. - """ - if target_dir is None: - target_dir = os.path.basename(url).replace('.tar.gz', '').replace('.tgz', '') - if runner is None: - runner = functools.partial(subprocess.check_call, shell=True) - # In the tar command, use --strip-components=1 to strip the first path and - # then - # use -C to cause the files to be extracted to {target_dir}. This ensures - # that we always know where the files were extracted. - runner('mkdir {target_dir}'.format(**vars())) - try: - getter = 'wget {url} -O -' - extract = 'tar x{compression} --strip-components=1 -C {target_dir}' - cmd = ' | '.join((getter, extract)) - runner(cmd.format(compression=infer_compression(url), **vars())) - with pushd(target_dir): - yield target_dir - finally: - runner('rm -Rf {target_dir}'.format(**vars())) - - -def infer_compression(url): - """ - Given a URL or filename, infer the compression code for tar. - """ - # cheat and just assume it's the last two characters - compression_indicator = url[-2:] - mapping = dict(gz='z', bz='j', xz='J') - # Assume 'z' (gzip) if no match - return mapping.get(compression_indicator, 'z') - - -@contextlib.contextmanager -def temp_dir(remover=shutil.rmtree): - """ - Create a temporary directory context. 
Pass a custom remover - to override the removal behavior. - """ - temp_dir = tempfile.mkdtemp() - try: - yield temp_dir - finally: - remover(temp_dir) - - -@contextlib.contextmanager -def repo_context(url, branch=None, quiet=True, dest_ctx=temp_dir): - """ - Check out the repo indicated by url. - - If dest_ctx is supplied, it should be a context manager - to yield the target directory for the check out. - """ - exe = 'git' if 'git' in url else 'hg' - with dest_ctx() as repo_dir: - cmd = [exe, 'clone', url, repo_dir] - if branch: - cmd.extend(['--branch', branch]) - devnull = open(os.path.devnull, 'w') - stdout = devnull if quiet else None - subprocess.check_call(cmd, stdout=stdout) - yield repo_dir - - -@contextlib.contextmanager -def null(): - yield - - -class ExceptionTrap: - """ - A context manager that will catch certain exceptions and provide an - indication they occurred. - - >>> with ExceptionTrap() as trap: - ... raise Exception() - >>> bool(trap) - True - - >>> with ExceptionTrap() as trap: - ... pass - >>> bool(trap) - False - - >>> with ExceptionTrap(ValueError) as trap: - ... raise ValueError("1 + 1 is not 3") - >>> bool(trap) - True - - >>> with ExceptionTrap(ValueError) as trap: - ... raise Exception() - Traceback (most recent call last): - ... - Exception - - >>> bool(trap) - False - """ - - exc_info = None, None, None - - def __init__(self, exceptions=(Exception,)): - self.exceptions = exceptions - - def __enter__(self): - return self - - @property - def type(self): - return self.exc_info[0] - - @property - def value(self): - return self.exc_info[1] - - @property - def tb(self): - return self.exc_info[2] - - def __exit__(self, *exc_info): - type = exc_info[0] - matches = type and issubclass(type, self.exceptions) - if matches: - self.exc_info = exc_info - return matches - - def __bool__(self): - return bool(self.type) - - def raises(self, func, *, _test=bool): - """ - Wrap func and replace the result with the truth - value of the trap (True if an exception occurred). - - First, give the decorator an alias to support Python 3.8 - Syntax. - - >>> raises = ExceptionTrap(ValueError).raises - - Now decorate a function that always fails. - - >>> @raises - ... def fail(): - ... raise ValueError('failed') - >>> fail() - True - """ - - @functools.wraps(func) - def wrapper(*args, **kwargs): - with ExceptionTrap(self.exceptions) as trap: - func(*args, **kwargs) - return _test(trap) - - return wrapper - - def passes(self, func): - """ - Wrap func and replace the result with the truth - value of the trap (True if no exception). - - First, give the decorator an alias to support Python 3.8 - Syntax. - - >>> passes = ExceptionTrap(ValueError).passes - - Now decorate a function that always fails. - - >>> @passes - ... def fail(): - ... raise ValueError('failed') - - >>> fail() - False - """ - return self.raises(func, _test=operator.not_) - - -class suppress(contextlib.suppress, contextlib.ContextDecorator): - """ - A version of contextlib.suppress with decorator support. - - >>> @suppress(KeyError) - ... def key_error(): - ... 
{}[''] - >>> key_error() - """ diff --git a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/jaraco/functools.py b/venv/lib/python3.10/site-packages/pkg_resources/_vendor/jaraco/functools.py deleted file mode 100644 index a3fea3a..0000000 --- a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/jaraco/functools.py +++ /dev/null @@ -1,525 +0,0 @@ -import functools -import time -import inspect -import collections -import types -import itertools - -import pkg_resources.extern.more_itertools - -from typing import Callable, TypeVar - - -CallableT = TypeVar("CallableT", bound=Callable[..., object]) - - -def compose(*funcs): - """ - Compose any number of unary functions into a single unary function. - - >>> import textwrap - >>> expected = str.strip(textwrap.dedent(compose.__doc__)) - >>> strip_and_dedent = compose(str.strip, textwrap.dedent) - >>> strip_and_dedent(compose.__doc__) == expected - True - - Compose also allows the innermost function to take arbitrary arguments. - - >>> round_three = lambda x: round(x, ndigits=3) - >>> f = compose(round_three, int.__truediv__) - >>> [f(3*x, x+1) for x in range(1,10)] - [1.5, 2.0, 2.25, 2.4, 2.5, 2.571, 2.625, 2.667, 2.7] - """ - - def compose_two(f1, f2): - return lambda *args, **kwargs: f1(f2(*args, **kwargs)) - - return functools.reduce(compose_two, funcs) - - -def method_caller(method_name, *args, **kwargs): - """ - Return a function that will call a named method on the - target object with optional positional and keyword - arguments. - - >>> lower = method_caller('lower') - >>> lower('MyString') - 'mystring' - """ - - def call_method(target): - func = getattr(target, method_name) - return func(*args, **kwargs) - - return call_method - - -def once(func): - """ - Decorate func so it's only ever called the first time. - - This decorator can ensure that an expensive or non-idempotent function - will not be expensive on subsequent calls and is idempotent. - - >>> add_three = once(lambda a: a+3) - >>> add_three(3) - 6 - >>> add_three(9) - 6 - >>> add_three('12') - 6 - - To reset the stored value, simply clear the property ``saved_result``. - - >>> del add_three.saved_result - >>> add_three(9) - 12 - >>> add_three(8) - 12 - - Or invoke 'reset()' on it. - - >>> add_three.reset() - >>> add_three(-3) - 0 - >>> add_three(0) - 0 - """ - - @functools.wraps(func) - def wrapper(*args, **kwargs): - if not hasattr(wrapper, 'saved_result'): - wrapper.saved_result = func(*args, **kwargs) - return wrapper.saved_result - - wrapper.reset = lambda: vars(wrapper).__delitem__('saved_result') - return wrapper - - -def method_cache( - method: CallableT, - cache_wrapper: Callable[ - [CallableT], CallableT - ] = functools.lru_cache(), # type: ignore[assignment] -) -> CallableT: - """ - Wrap lru_cache to support storing the cache data in the object instances. - - Abstracts the common paradigm where the method explicitly saves an - underscore-prefixed protected property on first call and returns that - subsequently. - - >>> class MyClass: - ... calls = 0 - ... - ... @method_cache - ... def method(self, value): - ... self.calls += 1 - ... return value - - >>> a = MyClass() - >>> a.method(3) - 3 - >>> for x in range(75): - ... res = a.method(x) - >>> a.calls - 75 - - Note that the apparent behavior will be exactly like that of lru_cache - except that the cache is stored on each instance, so values in one - instance will not flush values from another, and when an instance is - deleted, so are the cached values for that instance. 
- - >>> b = MyClass() - >>> for x in range(35): - ... res = b.method(x) - >>> b.calls - 35 - >>> a.method(0) - 0 - >>> a.calls - 75 - - Note that if method had been decorated with ``functools.lru_cache()``, - a.calls would have been 76 (due to the cached value of 0 having been - flushed by the 'b' instance). - - Clear the cache with ``.cache_clear()`` - - >>> a.method.cache_clear() - - Same for a method that hasn't yet been called. - - >>> c = MyClass() - >>> c.method.cache_clear() - - Another cache wrapper may be supplied: - - >>> cache = functools.lru_cache(maxsize=2) - >>> MyClass.method2 = method_cache(lambda self: 3, cache_wrapper=cache) - >>> a = MyClass() - >>> a.method2() - 3 - - Caution - do not subsequently wrap the method with another decorator, such - as ``@property``, which changes the semantics of the function. - - See also - http://code.activestate.com/recipes/577452-a-memoize-decorator-for-instance-methods/ - for another implementation and additional justification. - """ - - def wrapper(self: object, *args: object, **kwargs: object) -> object: - # it's the first call, replace the method with a cached, bound method - bound_method: CallableT = types.MethodType( # type: ignore[assignment] - method, self - ) - cached_method = cache_wrapper(bound_method) - setattr(self, method.__name__, cached_method) - return cached_method(*args, **kwargs) - - # Support cache clear even before cache has been created. - wrapper.cache_clear = lambda: None # type: ignore[attr-defined] - - return ( # type: ignore[return-value] - _special_method_cache(method, cache_wrapper) or wrapper - ) - - -def _special_method_cache(method, cache_wrapper): - """ - Because Python treats special methods differently, it's not - possible to use instance attributes to implement the cached - methods. - - Instead, install the wrapper method under a different name - and return a simple proxy to that wrapper. - - https://github.com/jaraco/jaraco.functools/issues/5 - """ - name = method.__name__ - special_names = '__getattr__', '__getitem__' - if name not in special_names: - return - - wrapper_name = '__cached' + name - - def proxy(self, *args, **kwargs): - if wrapper_name not in vars(self): - bound = types.MethodType(method, self) - cache = cache_wrapper(bound) - setattr(self, wrapper_name, cache) - else: - cache = getattr(self, wrapper_name) - return cache(*args, **kwargs) - - return proxy - - -def apply(transform): - """ - Decorate a function with a transform function that is - invoked on results returned from the decorated function. - - >>> @apply(reversed) - ... def get_numbers(start): - ... "doc for get_numbers" - ... return range(start, start+3) - >>> list(get_numbers(4)) - [6, 5, 4] - >>> get_numbers.__doc__ - 'doc for get_numbers' - """ - - def wrap(func): - return functools.wraps(func)(compose(transform, func)) - - return wrap - - -def result_invoke(action): - r""" - Decorate a function with an action function that is - invoked on the results returned from the decorated - function (for its side-effect), then return the original - result. - - >>> @result_invoke(print) - ... def add_two(a, b): - ... return a + b - >>> x = add_two(2, 3) - 5 - >>> x - 5 - """ - - def wrap(func): - @functools.wraps(func) - def wrapper(*args, **kwargs): - result = func(*args, **kwargs) - action(result) - return result - - return wrapper - - return wrap - - -def call_aside(f, *args, **kwargs): - """ - Call a function for its side effect after initialization. - - >>> @call_aside - ... 
def func(): print("called") - called - >>> func() - called - - Use functools.partial to pass parameters to the initial call - - >>> @functools.partial(call_aside, name='bingo') - ... def func(name): print("called with", name) - called with bingo - """ - f(*args, **kwargs) - return f - - -class Throttler: - """ - Rate-limit a function (or other callable) - """ - - def __init__(self, func, max_rate=float('Inf')): - if isinstance(func, Throttler): - func = func.func - self.func = func - self.max_rate = max_rate - self.reset() - - def reset(self): - self.last_called = 0 - - def __call__(self, *args, **kwargs): - self._wait() - return self.func(*args, **kwargs) - - def _wait(self): - "ensure at least 1/max_rate seconds from last call" - elapsed = time.time() - self.last_called - must_wait = 1 / self.max_rate - elapsed - time.sleep(max(0, must_wait)) - self.last_called = time.time() - - def __get__(self, obj, type=None): - return first_invoke(self._wait, functools.partial(self.func, obj)) - - -def first_invoke(func1, func2): - """ - Return a function that when invoked will invoke func1 without - any parameters (for its side-effect) and then invoke func2 - with whatever parameters were passed, returning its result. - """ - - def wrapper(*args, **kwargs): - func1() - return func2(*args, **kwargs) - - return wrapper - - -def retry_call(func, cleanup=lambda: None, retries=0, trap=()): - """ - Given a callable func, trap the indicated exceptions - for up to 'retries' times, invoking cleanup on the - exception. On the final attempt, allow any exceptions - to propagate. - """ - attempts = itertools.count() if retries == float('inf') else range(retries) - for attempt in attempts: - try: - return func() - except trap: - cleanup() - - return func() - - -def retry(*r_args, **r_kwargs): - """ - Decorator wrapper for retry_call. Accepts arguments to retry_call - except func and then returns a decorator for the decorated function. - - Ex: - - >>> @retry(retries=3) - ... def my_func(a, b): - ... "this is my funk" - ... print(a, b) - >>> my_func.__doc__ - 'this is my funk' - """ - - def decorate(func): - @functools.wraps(func) - def wrapper(*f_args, **f_kwargs): - bound = functools.partial(func, *f_args, **f_kwargs) - return retry_call(bound, *r_args, **r_kwargs) - - return wrapper - - return decorate - - -def print_yielded(func): - """ - Convert a generator into a function that prints all yielded elements - - >>> @print_yielded - ... def x(): - ... yield 3; yield None - >>> x() - 3 - None - """ - print_all = functools.partial(map, print) - print_results = compose(more_itertools.consume, print_all, func) - return functools.wraps(func)(print_results) - - -def pass_none(func): - """ - Wrap func so it's not called if its first param is None - - >>> print_text = pass_none(print) - >>> print_text('text') - text - >>> print_text(None) - """ - - @functools.wraps(func) - def wrapper(param, *args, **kwargs): - if param is not None: - return func(param, *args, **kwargs) - - return wrapper - - -def assign_params(func, namespace): - """ - Assign parameters from namespace where func solicits. - - >>> def func(x, y=3): - ... print(x, y) - >>> assigned = assign_params(func, dict(x=2, z=4)) - >>> assigned() - 2 3 - - The usual errors are raised if a function doesn't receive - its required parameters: - - >>> assigned = assign_params(func, dict(y=3, z=4)) - >>> assigned() - Traceback (most recent call last): - TypeError: func() ...argument... - - It even works on methods: - - >>> class Handler: - ... def meth(self, arg): - ... 
print(arg) - >>> assign_params(Handler().meth, dict(arg='crystal', foo='clear'))() - crystal - """ - sig = inspect.signature(func) - params = sig.parameters.keys() - call_ns = {k: namespace[k] for k in params if k in namespace} - return functools.partial(func, **call_ns) - - -def save_method_args(method): - """ - Wrap a method such that when it is called, the args and kwargs are - saved on the method. - - >>> class MyClass: - ... @save_method_args - ... def method(self, a, b): - ... print(a, b) - >>> my_ob = MyClass() - >>> my_ob.method(1, 2) - 1 2 - >>> my_ob._saved_method.args - (1, 2) - >>> my_ob._saved_method.kwargs - {} - >>> my_ob.method(a=3, b='foo') - 3 foo - >>> my_ob._saved_method.args - () - >>> my_ob._saved_method.kwargs == dict(a=3, b='foo') - True - - The arguments are stored on the instance, allowing for - different instance to save different args. - - >>> your_ob = MyClass() - >>> your_ob.method({str('x'): 3}, b=[4]) - {'x': 3} [4] - >>> your_ob._saved_method.args - ({'x': 3},) - >>> my_ob._saved_method.args - () - """ - args_and_kwargs = collections.namedtuple('args_and_kwargs', 'args kwargs') - - @functools.wraps(method) - def wrapper(self, *args, **kwargs): - attr_name = '_saved_' + method.__name__ - attr = args_and_kwargs(args, kwargs) - setattr(self, attr_name, attr) - return method(self, *args, **kwargs) - - return wrapper - - -def except_(*exceptions, replace=None, use=None): - """ - Replace the indicated exceptions, if raised, with the indicated - literal replacement or evaluated expression (if present). - - >>> safe_int = except_(ValueError)(int) - >>> safe_int('five') - >>> safe_int('5') - 5 - - Specify a literal replacement with ``replace``. - - >>> safe_int_r = except_(ValueError, replace=0)(int) - >>> safe_int_r('five') - 0 - - Provide an expression to ``use`` to pass through particular parameters. - - >>> safe_int_pt = except_(ValueError, use='args[0]')(int) - >>> safe_int_pt('five') - 'five' - - """ - - def decorate(func): - @functools.wraps(func) - def wrapper(*args, **kwargs): - try: - return func(*args, **kwargs) - except exceptions: - try: - return eval(use) - except TypeError: - return replace - - return wrapper - - return decorate diff --git a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/jaraco/text/__init__.py b/venv/lib/python3.10/site-packages/pkg_resources/_vendor/jaraco/text/__init__.py deleted file mode 100644 index c466378..0000000 --- a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/jaraco/text/__init__.py +++ /dev/null @@ -1,599 +0,0 @@ -import re -import itertools -import textwrap -import functools - -try: - from importlib.resources import files # type: ignore -except ImportError: # pragma: nocover - from pkg_resources.extern.importlib_resources import files # type: ignore - -from pkg_resources.extern.jaraco.functools import compose, method_cache -from pkg_resources.extern.jaraco.context import ExceptionTrap - - -def substitution(old, new): - """ - Return a function that will perform a substitution on a string - """ - return lambda s: s.replace(old, new) - - -def multi_substitution(*substitutions): - """ - Take a sequence of pairs specifying substitutions, and create - a function that performs those substitutions. - - >>> multi_substitution(('foo', 'bar'), ('bar', 'baz'))('foo') - 'baz' - """ - substitutions = itertools.starmap(substitution, substitutions) - # compose function applies last function first, so reverse the - # substitutions to get the expected order. 
- substitutions = reversed(tuple(substitutions)) - return compose(*substitutions) - - -class FoldedCase(str): - """ - A case insensitive string class; behaves just like str - except compares equal when the only variation is case. - - >>> s = FoldedCase('hello world') - - >>> s == 'Hello World' - True - - >>> 'Hello World' == s - True - - >>> s != 'Hello World' - False - - >>> s.index('O') - 4 - - >>> s.split('O') - ['hell', ' w', 'rld'] - - >>> sorted(map(FoldedCase, ['GAMMA', 'alpha', 'Beta'])) - ['alpha', 'Beta', 'GAMMA'] - - Sequence membership is straightforward. - - >>> "Hello World" in [s] - True - >>> s in ["Hello World"] - True - - You may test for set inclusion, but candidate and elements - must both be folded. - - >>> FoldedCase("Hello World") in {s} - True - >>> s in {FoldedCase("Hello World")} - True - - String inclusion works as long as the FoldedCase object - is on the right. - - >>> "hello" in FoldedCase("Hello World") - True - - But not if the FoldedCase object is on the left: - - >>> FoldedCase('hello') in 'Hello World' - False - - In that case, use ``in_``: - - >>> FoldedCase('hello').in_('Hello World') - True - - >>> FoldedCase('hello') > FoldedCase('Hello') - False - """ - - def __lt__(self, other): - return self.lower() < other.lower() - - def __gt__(self, other): - return self.lower() > other.lower() - - def __eq__(self, other): - return self.lower() == other.lower() - - def __ne__(self, other): - return self.lower() != other.lower() - - def __hash__(self): - return hash(self.lower()) - - def __contains__(self, other): - return super().lower().__contains__(other.lower()) - - def in_(self, other): - "Does self appear in other?" - return self in FoldedCase(other) - - # cache lower since it's likely to be called frequently. - @method_cache - def lower(self): - return super().lower() - - def index(self, sub): - return self.lower().index(sub.lower()) - - def split(self, splitter=' ', maxsplit=0): - pattern = re.compile(re.escape(splitter), re.I) - return pattern.split(self, maxsplit) - - -# Python 3.8 compatibility -_unicode_trap = ExceptionTrap(UnicodeDecodeError) - - -@_unicode_trap.passes -def is_decodable(value): - r""" - Return True if the supplied value is decodable (using the default - encoding). - - >>> is_decodable(b'\xff') - False - >>> is_decodable(b'\x32') - True - """ - value.decode() - - -def is_binary(value): - r""" - Return True if the value appears to be binary (that is, it's a byte - string and isn't decodable). - - >>> is_binary(b'\xff') - True - >>> is_binary('\xff') - False - """ - return isinstance(value, bytes) and not is_decodable(value) - - -def trim(s): - r""" - Trim something like a docstring to remove the whitespace that - is common due to indentation and formatting. - - >>> trim("\n\tfoo = bar\n\t\tbar = baz\n") - 'foo = bar\n\tbar = baz' - """ - return textwrap.dedent(s).strip() - - -def wrap(s): - """ - Wrap lines of text, retaining existing newlines as - paragraph markers. - - >>> print(wrap(lorem_ipsum)) - Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do - eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad - minim veniam, quis nostrud exercitation ullamco laboris nisi ut - aliquip ex ea commodo consequat. Duis aute irure dolor in - reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla - pariatur. Excepteur sint occaecat cupidatat non proident, sunt in - culpa qui officia deserunt mollit anim id est laborum. - - Curabitur pretium tincidunt lacus. Nulla gravida orci a odio. 
Nullam - varius, turpis et commodo pharetra, est eros bibendum elit, nec luctus - magna felis sollicitudin mauris. Integer in mauris eu nibh euismod - gravida. Duis ac tellus et risus vulputate vehicula. Donec lobortis - risus a elit. Etiam tempor. Ut ullamcorper, ligula eu tempor congue, - eros est euismod turpis, id tincidunt sapien risus a quam. Maecenas - fermentum consequat mi. Donec fermentum. Pellentesque malesuada nulla - a mi. Duis sapien sem, aliquet nec, commodo eget, consequat quis, - neque. Aliquam faucibus, elit ut dictum aliquet, felis nisl adipiscing - sapien, sed malesuada diam lacus eget erat. Cras mollis scelerisque - nunc. Nullam arcu. Aliquam consequat. Curabitur augue lorem, dapibus - quis, laoreet et, pretium ac, nisi. Aenean magna nisl, mollis quis, - molestie eu, feugiat in, orci. In hac habitasse platea dictumst. - """ - paragraphs = s.splitlines() - wrapped = ('\n'.join(textwrap.wrap(para)) for para in paragraphs) - return '\n\n'.join(wrapped) - - -def unwrap(s): - r""" - Given a multi-line string, return an unwrapped version. - - >>> wrapped = wrap(lorem_ipsum) - >>> wrapped.count('\n') - 20 - >>> unwrapped = unwrap(wrapped) - >>> unwrapped.count('\n') - 1 - >>> print(unwrapped) - Lorem ipsum dolor sit amet, consectetur adipiscing ... - Curabitur pretium tincidunt lacus. Nulla gravida orci ... - - """ - paragraphs = re.split(r'\n\n+', s) - cleaned = (para.replace('\n', ' ') for para in paragraphs) - return '\n'.join(cleaned) - - - - -class Splitter(object): - """object that will split a string with the given arguments for each call - - >>> s = Splitter(',') - >>> s('hello, world, this is your, master calling') - ['hello', ' world', ' this is your', ' master calling'] - """ - - def __init__(self, *args): - self.args = args - - def __call__(self, s): - return s.split(*self.args) - - -def indent(string, prefix=' ' * 4): - """ - >>> indent('foo') - ' foo' - """ - return prefix + string - - -class WordSet(tuple): - """ - Given an identifier, return the words that identifier represents, - whether in camel case, underscore-separated, etc. - - >>> WordSet.parse("camelCase") - ('camel', 'Case') - - >>> WordSet.parse("under_sep") - ('under', 'sep') - - Acronyms should be retained - - >>> WordSet.parse("firstSNL") - ('first', 'SNL') - - >>> WordSet.parse("you_and_I") - ('you', 'and', 'I') - - >>> WordSet.parse("A simple test") - ('A', 'simple', 'test') - - Multiple caps should not interfere with the first cap of another word. - - >>> WordSet.parse("myABCClass") - ('my', 'ABC', 'Class') - - The result is a WordSet, so you can get the form you need. - - >>> WordSet.parse("myABCClass").underscore_separated() - 'my_ABC_Class' - - >>> WordSet.parse('a-command').camel_case() - 'ACommand' - - >>> WordSet.parse('someIdentifier').lowered().space_separated() - 'some identifier' - - Slices of the result should return another WordSet. 
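Worth noting about ``FoldedCase`` above: because ``__eq__`` and ``__hash__`` both fold case, instances also work as case-insensitive dict keys. A small sketch (``headers`` is an illustrative name):

headers = {FoldedCase('Content-Type'): 'text/plain'}
assert headers[FoldedCase('content-type')] == 'text/plain'   # same hash, equal after folding
assert headers[FoldedCase('CONTENT-TYPE')] == 'text/plain'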
- - >>> WordSet.parse('taken-out-of-context')[1:].underscore_separated() - 'out_of_context' - - >>> WordSet.from_class_name(WordSet()).lowered().space_separated() - 'word set' - - >>> example = WordSet.parse('figured it out') - >>> example.headless_camel_case() - 'figuredItOut' - >>> example.dash_separated() - 'figured-it-out' - - """ - - _pattern = re.compile('([A-Z]?[a-z]+)|([A-Z]+(?![a-z]))') - - def capitalized(self): - return WordSet(word.capitalize() for word in self) - - def lowered(self): - return WordSet(word.lower() for word in self) - - def camel_case(self): - return ''.join(self.capitalized()) - - def headless_camel_case(self): - words = iter(self) - first = next(words).lower() - new_words = itertools.chain((first,), WordSet(words).camel_case()) - return ''.join(new_words) - - def underscore_separated(self): - return '_'.join(self) - - def dash_separated(self): - return '-'.join(self) - - def space_separated(self): - return ' '.join(self) - - def trim_right(self, item): - """ - Remove the item from the end of the set. - - >>> WordSet.parse('foo bar').trim_right('foo') - ('foo', 'bar') - >>> WordSet.parse('foo bar').trim_right('bar') - ('foo',) - >>> WordSet.parse('').trim_right('bar') - () - """ - return self[:-1] if self and self[-1] == item else self - - def trim_left(self, item): - """ - Remove the item from the beginning of the set. - - >>> WordSet.parse('foo bar').trim_left('foo') - ('bar',) - >>> WordSet.parse('foo bar').trim_left('bar') - ('foo', 'bar') - >>> WordSet.parse('').trim_left('bar') - () - """ - return self[1:] if self and self[0] == item else self - - def trim(self, item): - """ - >>> WordSet.parse('foo bar').trim('foo') - ('bar',) - """ - return self.trim_left(item).trim_right(item) - - def __getitem__(self, item): - result = super(WordSet, self).__getitem__(item) - if isinstance(item, slice): - result = WordSet(result) - return result - - @classmethod - def parse(cls, identifier): - matches = cls._pattern.finditer(identifier) - return WordSet(match.group(0) for match in matches) - - @classmethod - def from_class_name(cls, subject): - return cls.parse(subject.__class__.__name__) - - -# for backward compatibility -words = WordSet.parse - - -def simple_html_strip(s): - r""" - Remove HTML from the string `s`. - - >>> str(simple_html_strip('')) - '' - - >>> print(simple_html_strip('A stormy day in paradise')) - A stormy day in paradise - - >>> print(simple_html_strip('Somebody tell the truth.')) - Somebody tell the truth. - - >>> print(simple_html_strip('What about
\nmultiple lines?')) - What about - multiple lines? - """ - html_stripper = re.compile('()|(<[^>]*>)|([^<]+)', re.DOTALL) - texts = (match.group(3) or '' for match in html_stripper.finditer(s)) - return ''.join(texts) - - -class SeparatedValues(str): - """ - A string separated by a separator. Overrides __iter__ for getting - the values. - - >>> list(SeparatedValues('a,b,c')) - ['a', 'b', 'c'] - - Whitespace is stripped and empty values are discarded. - - >>> list(SeparatedValues(' a, b , c, ')) - ['a', 'b', 'c'] - """ - - separator = ',' - - def __iter__(self): - parts = self.split(self.separator) - return filter(None, (part.strip() for part in parts)) - - -class Stripper: - r""" - Given a series of lines, find the common prefix and strip it from them. - - >>> lines = [ - ... 'abcdefg\n', - ... 'abc\n', - ... 'abcde\n', - ... ] - >>> res = Stripper.strip_prefix(lines) - >>> res.prefix - 'abc' - >>> list(res.lines) - ['defg\n', '\n', 'de\n'] - - If no prefix is common, nothing should be stripped. - - >>> lines = [ - ... 'abcd\n', - ... '1234\n', - ... ] - >>> res = Stripper.strip_prefix(lines) - >>> res.prefix = '' - >>> list(res.lines) - ['abcd\n', '1234\n'] - """ - - def __init__(self, prefix, lines): - self.prefix = prefix - self.lines = map(self, lines) - - @classmethod - def strip_prefix(cls, lines): - prefix_lines, lines = itertools.tee(lines) - prefix = functools.reduce(cls.common_prefix, prefix_lines) - return cls(prefix, lines) - - def __call__(self, line): - if not self.prefix: - return line - null, prefix, rest = line.partition(self.prefix) - return rest - - @staticmethod - def common_prefix(s1, s2): - """ - Return the common prefix of two lines. - """ - index = min(len(s1), len(s2)) - while s1[:index] != s2[:index]: - index -= 1 - return s1[:index] - - -def remove_prefix(text, prefix): - """ - Remove the prefix from the text if it exists. - - >>> remove_prefix('underwhelming performance', 'underwhelming ') - 'performance' - - >>> remove_prefix('something special', 'sample') - 'something special' - """ - null, prefix, rest = text.rpartition(prefix) - return rest - - -def remove_suffix(text, suffix): - """ - Remove the suffix from the text if it exists. - - >>> remove_suffix('name.git', '.git') - 'name' - - >>> remove_suffix('something special', 'sample') - 'something special' - """ - rest, suffix, null = text.partition(suffix) - return rest - - -def normalize_newlines(text): - r""" - Replace alternate newlines with the canonical newline. - - >>> normalize_newlines('Lorem Ipsum\u2029') - 'Lorem Ipsum\n' - >>> normalize_newlines('Lorem Ipsum\r\n') - 'Lorem Ipsum\n' - >>> normalize_newlines('Lorem Ipsum\x85') - 'Lorem Ipsum\n' - """ - newlines = ['\r\n', '\r', '\n', '\u0085', '\u2028', '\u2029'] - pattern = '|'.join(newlines) - return re.sub(pattern, '\n', text) - - -def _nonblank(str): - return str and not str.startswith('#') - - -@functools.singledispatch -def yield_lines(iterable): - r""" - Yield valid lines of a string or iterable. - - >>> list(yield_lines('')) - [] - >>> list(yield_lines(['foo', 'bar'])) - ['foo', 'bar'] - >>> list(yield_lines('foo\nbar')) - ['foo', 'bar'] - >>> list(yield_lines('\nfoo\n#bar\nbaz #comment')) - ['foo', 'baz #comment'] - >>> list(yield_lines(['foo\nbar', 'baz', 'bing\n\n\n'])) - ['foo', 'bar', 'baz', 'bing'] - """ - return itertools.chain.from_iterable(map(yield_lines, iterable)) - - -@yield_lines.register(str) -def _(text): - return filter(_nonblank, map(str.strip, text.splitlines())) - - -def drop_comment(line): - """ - Drop comments. 
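Because ``SeparatedValues`` above reads ``separator`` from a class attribute, changing the delimiter is a one-line subclass. A sketch (``ColonSeparated`` is illustrative, not part of the vendored module):

class ColonSeparated(SeparatedValues):
    separator = ':'

assert list(ColonSeparated(' a: b :c: ')) == ['a', 'b', 'c']  # values stripped, empties dropped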
- - >>> drop_comment('foo # bar') - 'foo' - - A hash without a space may be in a URL. - - >>> drop_comment('http://example.com/foo#bar') - 'http://example.com/foo#bar' - """ - return line.partition(' #')[0] - - -def join_continuation(lines): - r""" - Join lines continued by a trailing backslash. - - >>> list(join_continuation(['foo \\', 'bar', 'baz'])) - ['foobar', 'baz'] - >>> list(join_continuation(['foo \\', 'bar', 'baz'])) - ['foobar', 'baz'] - >>> list(join_continuation(['foo \\', 'bar \\', 'baz'])) - ['foobarbaz'] - - Not sure why, but... - The character preceeding the backslash is also elided. - - >>> list(join_continuation(['goo\\', 'dly'])) - ['godly'] - - A terrible idea, but... - If no line is available to continue, suppress the lines. - - >>> list(join_continuation(['foo', 'bar\\', 'baz\\'])) - ['foo'] - """ - lines = iter(lines) - for item in lines: - while item.endswith('\\'): - try: - item = item[:-2].strip() + next(lines) - except StopIteration: - return - yield item diff --git a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/more_itertools/__init__.py b/venv/lib/python3.10/site-packages/pkg_resources/_vendor/more_itertools/__init__.py deleted file mode 100644 index ea38bef..0000000 --- a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/more_itertools/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .more import * # noqa -from .recipes import * # noqa - -__version__ = '8.12.0' diff --git a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/more_itertools/more.py b/venv/lib/python3.10/site-packages/pkg_resources/_vendor/more_itertools/more.py deleted file mode 100644 index 6b6a5ca..0000000 --- a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/more_itertools/more.py +++ /dev/null @@ -1,4316 +0,0 @@ -import warnings - -from collections import Counter, defaultdict, deque, abc -from collections.abc import Sequence -from functools import partial, reduce, wraps -from heapq import merge, heapify, heapreplace, heappop -from itertools import ( - chain, - compress, - count, - cycle, - dropwhile, - groupby, - islice, - repeat, - starmap, - takewhile, - tee, - zip_longest, -) -from math import exp, factorial, floor, log -from queue import Empty, Queue -from random import random, randrange, uniform -from operator import itemgetter, mul, sub, gt, lt, ge, le -from sys import hexversion, maxsize -from time import monotonic - -from .recipes import ( - consume, - flatten, - pairwise, - powerset, - take, - unique_everseen, -) - -__all__ = [ - 'AbortThread', - 'SequenceView', - 'UnequalIterablesError', - 'adjacent', - 'all_unique', - 'always_iterable', - 'always_reversible', - 'bucket', - 'callback_iter', - 'chunked', - 'chunked_even', - 'circular_shifts', - 'collapse', - 'collate', - 'combination_index', - 'consecutive_groups', - 'consumer', - 'count_cycle', - 'countable', - 'difference', - 'distinct_combinations', - 'distinct_permutations', - 'distribute', - 'divide', - 'duplicates_everseen', - 'duplicates_justseen', - 'exactly_n', - 'filter_except', - 'first', - 'groupby_transform', - 'ichunked', - 'ilen', - 'interleave', - 'interleave_evenly', - 'interleave_longest', - 'intersperse', - 'is_sorted', - 'islice_extended', - 'iterate', - 'last', - 'locate', - 'lstrip', - 'make_decorator', - 'map_except', - 'map_if', - 'map_reduce', - 'mark_ends', - 'minmax', - 'nth_or_last', - 'nth_permutation', - 'nth_product', - 'numeric_range', - 'one', - 'only', - 'padded', - 'partitions', - 'peekable', - 'permutation_index', - 'product_index', - 'raise_', - 'repeat_each', - 
'repeat_last', - 'replace', - 'rlocate', - 'rstrip', - 'run_length', - 'sample', - 'seekable', - 'set_partitions', - 'side_effect', - 'sliced', - 'sort_together', - 'split_after', - 'split_at', - 'split_before', - 'split_into', - 'split_when', - 'spy', - 'stagger', - 'strip', - 'strictly_n', - 'substrings', - 'substrings_indexes', - 'time_limited', - 'unique_in_window', - 'unique_to_each', - 'unzip', - 'value_chain', - 'windowed', - 'windowed_complete', - 'with_iter', - 'zip_broadcast', - 'zip_equal', - 'zip_offset', -] - - -_marker = object() - - -def chunked(iterable, n, strict=False): - """Break *iterable* into lists of length *n*: - - >>> list(chunked([1, 2, 3, 4, 5, 6], 3)) - [[1, 2, 3], [4, 5, 6]] - - By the default, the last yielded list will have fewer than *n* elements - if the length of *iterable* is not divisible by *n*: - - >>> list(chunked([1, 2, 3, 4, 5, 6, 7, 8], 3)) - [[1, 2, 3], [4, 5, 6], [7, 8]] - - To use a fill-in value instead, see the :func:`grouper` recipe. - - If the length of *iterable* is not divisible by *n* and *strict* is - ``True``, then ``ValueError`` will be raised before the last - list is yielded. - - """ - iterator = iter(partial(take, n, iter(iterable)), []) - if strict: - if n is None: - raise ValueError('n must not be None when using strict mode.') - - def ret(): - for chunk in iterator: - if len(chunk) != n: - raise ValueError('iterable is not divisible by n.') - yield chunk - - return iter(ret()) - else: - return iterator - - -def first(iterable, default=_marker): - """Return the first item of *iterable*, or *default* if *iterable* is - empty. - - >>> first([0, 1, 2, 3]) - 0 - >>> first([], 'some default') - 'some default' - - If *default* is not provided and there are no items in the iterable, - raise ``ValueError``. - - :func:`first` is useful when you have a generator of expensive-to-retrieve - values and want any arbitrary one. It is marginally shorter than - ``next(iter(iterable), default)``. - - """ - try: - return next(iter(iterable)) - except StopIteration as e: - if default is _marker: - raise ValueError( - 'first() was called on an empty iterable, and no ' - 'default value was provided.' - ) from e - return default - - -def last(iterable, default=_marker): - """Return the last item of *iterable*, or *default* if *iterable* is - empty. - - >>> last([0, 1, 2, 3]) - 3 - >>> last([], 'some default') - 'some default' - - If *default* is not provided and there are no items in the iterable, - raise ``ValueError``. - """ - try: - if isinstance(iterable, Sequence): - return iterable[-1] - # Work around https://bugs.python.org/issue38525 - elif hasattr(iterable, '__reversed__') and (hexversion != 0x030800F0): - return next(reversed(iterable)) - else: - return deque(iterable, maxlen=1)[-1] - except (IndexError, TypeError, StopIteration): - if default is _marker: - raise ValueError( - 'last() was called on an empty iterable, and no default was ' - 'provided.' - ) - return default - - -def nth_or_last(iterable, n, default=_marker): - """Return the nth or the last item of *iterable*, - or *default* if *iterable* is empty. - - >>> nth_or_last([0, 1, 2, 3], 2) - 2 - >>> nth_or_last([0, 1], 2) - 1 - >>> nth_or_last([], 0, 'some default') - 'some default' - - If *default* is not provided and there are no items in the iterable, - raise ``ValueError``. - """ - return last(islice(iterable, n + 1), default=default) - - -class peekable: - """Wrap an iterator to allow lookahead and prepending elements. 
- - Call :meth:`peek` on the result to get the value that will be returned - by :func:`next`. This won't advance the iterator: - - >>> p = peekable(['a', 'b']) - >>> p.peek() - 'a' - >>> next(p) - 'a' - - Pass :meth:`peek` a default value to return that instead of raising - ``StopIteration`` when the iterator is exhausted. - - >>> p = peekable([]) - >>> p.peek('hi') - 'hi' - - peekables also offer a :meth:`prepend` method, which "inserts" items - at the head of the iterable: - - >>> p = peekable([1, 2, 3]) - >>> p.prepend(10, 11, 12) - >>> next(p) - 10 - >>> p.peek() - 11 - >>> list(p) - [11, 12, 1, 2, 3] - - peekables can be indexed. Index 0 is the item that will be returned by - :func:`next`, index 1 is the item after that, and so on: - The values up to the given index will be cached. - - >>> p = peekable(['a', 'b', 'c', 'd']) - >>> p[0] - 'a' - >>> p[1] - 'b' - >>> next(p) - 'a' - - Negative indexes are supported, but be aware that they will cache the - remaining items in the source iterator, which may require significant - storage. - - To check whether a peekable is exhausted, check its truth value: - - >>> p = peekable(['a', 'b']) - >>> if p: # peekable has items - ... list(p) - ['a', 'b'] - >>> if not p: # peekable is exhausted - ... list(p) - [] - - """ - - def __init__(self, iterable): - self._it = iter(iterable) - self._cache = deque() - - def __iter__(self): - return self - - def __bool__(self): - try: - self.peek() - except StopIteration: - return False - return True - - def peek(self, default=_marker): - """Return the item that will be next returned from ``next()``. - - Return ``default`` if there are no items left. If ``default`` is not - provided, raise ``StopIteration``. - - """ - if not self._cache: - try: - self._cache.append(next(self._it)) - except StopIteration: - if default is _marker: - raise - return default - return self._cache[0] - - def prepend(self, *items): - """Stack up items to be the next ones returned from ``next()`` or - ``self.peek()``. The items will be returned in - first in, first out order:: - - >>> p = peekable([1, 2, 3]) - >>> p.prepend(10, 11, 12) - >>> next(p) - 10 - >>> list(p) - [11, 12, 1, 2, 3] - - It is possible, by prepending items, to "resurrect" a peekable that - previously raised ``StopIteration``. - - >>> p = peekable([]) - >>> next(p) - Traceback (most recent call last): - ... - StopIteration - >>> p.prepend(1) - >>> next(p) - 1 - >>> next(p) - Traceback (most recent call last): - ... - StopIteration - - """ - self._cache.extendleft(reversed(items)) - - def __next__(self): - if self._cache: - return self._cache.popleft() - - return next(self._it) - - def _get_slice(self, index): - # Normalize the slice's arguments - step = 1 if (index.step is None) else index.step - if step > 0: - start = 0 if (index.start is None) else index.start - stop = maxsize if (index.stop is None) else index.stop - elif step < 0: - start = -1 if (index.start is None) else index.start - stop = (-maxsize - 1) if (index.stop is None) else index.stop - else: - raise ValueError('slice step cannot be zero') - - # If either the start or stop index is negative, we'll need to cache - # the rest of the iterable in order to slice from the right side. - if (start < 0) or (stop < 0): - self._cache.extend(self._it) - # Otherwise we'll need to find the rightmost index and cache to that - # point. 
- else: - n = min(max(start, stop) + 1, maxsize) - cache_len = len(self._cache) - if n >= cache_len: - self._cache.extend(islice(self._it, n - cache_len)) - - return list(self._cache)[index] - - def __getitem__(self, index): - if isinstance(index, slice): - return self._get_slice(index) - - cache_len = len(self._cache) - if index < 0: - self._cache.extend(self._it) - elif index >= cache_len: - self._cache.extend(islice(self._it, index + 1 - cache_len)) - - return self._cache[index] - - -def collate(*iterables, **kwargs): - """Return a sorted merge of the items from each of several already-sorted - *iterables*. - - >>> list(collate('ACDZ', 'AZ', 'JKL')) - ['A', 'A', 'C', 'D', 'J', 'K', 'L', 'Z', 'Z'] - - Works lazily, keeping only the next value from each iterable in memory. Use - :func:`collate` to, for example, perform a n-way mergesort of items that - don't fit in memory. - - If a *key* function is specified, the iterables will be sorted according - to its result: - - >>> key = lambda s: int(s) # Sort by numeric value, not by string - >>> list(collate(['1', '10'], ['2', '11'], key=key)) - ['1', '2', '10', '11'] - - - If the *iterables* are sorted in descending order, set *reverse* to - ``True``: - - >>> list(collate([5, 3, 1], [4, 2, 0], reverse=True)) - [5, 4, 3, 2, 1, 0] - - If the elements of the passed-in iterables are out of order, you might get - unexpected results. - - On Python 3.5+, this function is an alias for :func:`heapq.merge`. - - """ - warnings.warn( - "collate is no longer part of more_itertools, use heapq.merge", - DeprecationWarning, - ) - return merge(*iterables, **kwargs) - - -def consumer(func): - """Decorator that automatically advances a PEP-342-style "reverse iterator" - to its first yield point so you don't have to call ``next()`` on it - manually. - - >>> @consumer - ... def tally(): - ... i = 0 - ... while True: - ... print('Thing number %s is %s.' % (i, (yield))) - ... i += 1 - ... - >>> t = tally() - >>> t.send('red') - Thing number 0 is red. - >>> t.send('fish') - Thing number 1 is fish. - - Without the decorator, you would have to call ``next(t)`` before - ``t.send()`` could be used. - - """ - - @wraps(func) - def wrapper(*args, **kwargs): - gen = func(*args, **kwargs) - next(gen) - return gen - - return wrapper - - -def ilen(iterable): - """Return the number of items in *iterable*. - - >>> ilen(x for x in range(1000000) if x % 3 == 0) - 333334 - - This consumes the iterable, so handle with care. - - """ - # This approach was selected because benchmarks showed it's likely the - # fastest of the known implementations at the time of writing. - # See GitHub tracker: #236, #230. - counter = count() - deque(zip(iterable, counter), maxlen=0) - return next(counter) - - -def iterate(func, start): - """Return ``start``, ``func(start)``, ``func(func(start))``, ... - - >>> from itertools import islice - >>> list(islice(iterate(lambda x: 2*x, 1), 10)) - [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] - - """ - while True: - yield start - start = func(start) - - -def with_iter(context_manager): - """Wrap an iterable in a ``with`` statement, so it closes once exhausted. - - For example, this will close the file when the iterator is exhausted:: - - upper_lines = (line.upper() for line in with_iter(open('foo'))) - - Any context manager which returns an iterable is a candidate for - ``with_iter``. 
- - """ - with context_manager as iterable: - yield from iterable - - -def one(iterable, too_short=None, too_long=None): - """Return the first item from *iterable*, which is expected to contain only - that item. Raise an exception if *iterable* is empty or has more than one - item. - - :func:`one` is useful for ensuring that an iterable contains only one item. - For example, it can be used to retrieve the result of a database query - that is expected to return a single row. - - If *iterable* is empty, ``ValueError`` will be raised. You may specify a - different exception with the *too_short* keyword: - - >>> it = [] - >>> one(it) # doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ... - ValueError: too many items in iterable (expected 1)' - >>> too_short = IndexError('too few items') - >>> one(it, too_short=too_short) # doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ... - IndexError: too few items - - Similarly, if *iterable* contains more than one item, ``ValueError`` will - be raised. You may specify a different exception with the *too_long* - keyword: - - >>> it = ['too', 'many'] - >>> one(it) # doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ... - ValueError: Expected exactly one item in iterable, but got 'too', - 'many', and perhaps more. - >>> too_long = RuntimeError - >>> one(it, too_long=too_long) # doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ... - RuntimeError - - Note that :func:`one` attempts to advance *iterable* twice to ensure there - is only one item. See :func:`spy` or :func:`peekable` to check iterable - contents less destructively. - - """ - it = iter(iterable) - - try: - first_value = next(it) - except StopIteration as e: - raise ( - too_short or ValueError('too few items in iterable (expected 1)') - ) from e - - try: - second_value = next(it) - except StopIteration: - pass - else: - msg = ( - 'Expected exactly one item in iterable, but got {!r}, {!r}, ' - 'and perhaps more.'.format(first_value, second_value) - ) - raise too_long or ValueError(msg) - - return first_value - - -def raise_(exception, *args): - raise exception(*args) - - -def strictly_n(iterable, n, too_short=None, too_long=None): - """Validate that *iterable* has exactly *n* items and return them if - it does. If it has fewer than *n* items, call function *too_short* - with those items. If it has more than *n* items, call function - *too_long* with the first ``n + 1`` items. - - >>> iterable = ['a', 'b', 'c', 'd'] - >>> n = 4 - >>> list(strictly_n(iterable, n)) - ['a', 'b', 'c', 'd'] - - By default, *too_short* and *too_long* are functions that raise - ``ValueError``. - - >>> list(strictly_n('ab', 3)) # doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ... - ValueError: too few items in iterable (got 2) - - >>> list(strictly_n('abc', 2)) # doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ... - ValueError: too many items in iterable (got at least 3) - - You can instead supply functions that do something else. - *too_short* will be called with the number of items in *iterable*. - *too_long* will be called with `n + 1`. - - >>> def too_short(item_count): - ... raise RuntimeError - >>> it = strictly_n('abcd', 6, too_short=too_short) - >>> list(it) # doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ... - RuntimeError - - >>> def too_long(item_count): - ... 
print('The boss is going to hear about this') - >>> it = strictly_n('abcdef', 4, too_long=too_long) - >>> list(it) - The boss is going to hear about this - ['a', 'b', 'c', 'd'] - - """ - if too_short is None: - too_short = lambda item_count: raise_( - ValueError, - 'Too few items in iterable (got {})'.format(item_count), - ) - - if too_long is None: - too_long = lambda item_count: raise_( - ValueError, - 'Too many items in iterable (got at least {})'.format(item_count), - ) - - it = iter(iterable) - for i in range(n): - try: - item = next(it) - except StopIteration: - too_short(i) - return - else: - yield item - - try: - next(it) - except StopIteration: - pass - else: - too_long(n + 1) - - -def distinct_permutations(iterable, r=None): - """Yield successive distinct permutations of the elements in *iterable*. - - >>> sorted(distinct_permutations([1, 0, 1])) - [(0, 1, 1), (1, 0, 1), (1, 1, 0)] - - Equivalent to ``set(permutations(iterable))``, except duplicates are not - generated and thrown away. For larger input sequences this is much more - efficient. - - Duplicate permutations arise when there are duplicated elements in the - input iterable. The number of items returned is - `n! / (x_1! * x_2! * ... * x_n!)`, where `n` is the total number of - items input, and each `x_i` is the count of a distinct item in the input - sequence. - - If *r* is given, only the *r*-length permutations are yielded. - - >>> sorted(distinct_permutations([1, 0, 1], r=2)) - [(0, 1), (1, 0), (1, 1)] - >>> sorted(distinct_permutations(range(3), r=2)) - [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)] - - """ - # Algorithm: https://w.wiki/Qai - def _full(A): - while True: - # Yield the permutation we have - yield tuple(A) - - # Find the largest index i such that A[i] < A[i + 1] - for i in range(size - 2, -1, -1): - if A[i] < A[i + 1]: - break - # If no such index exists, this permutation is the last one - else: - return - - # Find the largest index j greater than i such that A[i] < A[j] - for j in range(size - 1, i, -1): - if A[i] < A[j]: - break - - # Swap the value of A[i] with that of A[j], then reverse the - # sequence from A[i + 1] to form the new permutation - A[i], A[j] = A[j], A[i] - A[i + 1 :] = A[: i - size : -1] # A[i + 1:][::-1] - - # Algorithm: modified from the above - def _partial(A, r): - # Split A into the first r items and the last r items - head, tail = A[:r], A[r:] - right_head_indexes = range(r - 1, -1, -1) - left_tail_indexes = range(len(tail)) - - while True: - # Yield the permutation we have - yield tuple(head) - - # Starting from the right, find the first index of the head with - # value smaller than the maximum value of the tail - call it i. - pivot = tail[-1] - for i in right_head_indexes: - if head[i] < pivot: - break - pivot = head[i] - else: - return - - # Starting from the left, find the first value of the tail - # with a value greater than head[i] and swap. - for j in left_tail_indexes: - if tail[j] > head[i]: - head[i], tail[j] = tail[j], head[i] - break - # If we didn't find one, start from the right and find the first - # index of the head with a value greater than head[i] and swap.
- else: - for j in right_head_indexes: - if head[j] > head[i]: - head[i], head[j] = head[j], head[i] - break - - # Reverse head[i + 1:] and swap it with tail[:r - (i + 1)] - tail += head[: i - r : -1] # head[i + 1:][::-1] - i += 1 - head[i:], tail[:] = tail[: r - i], tail[r - i :] - - items = sorted(iterable) - - size = len(items) - if r is None: - r = size - - if 0 < r <= size: - return _full(items) if (r == size) else _partial(items, r) - - return iter(() if r else ((),)) - - -def intersperse(e, iterable, n=1): - """Intersperse filler element *e* among the items in *iterable*, leaving - *n* items between each filler element. - - >>> list(intersperse('!', [1, 2, 3, 4, 5])) - [1, '!', 2, '!', 3, '!', 4, '!', 5] - - >>> list(intersperse(None, [1, 2, 3, 4, 5], n=2)) - [1, 2, None, 3, 4, None, 5] - - """ - if n == 0: - raise ValueError('n must be > 0') - elif n == 1: - # interleave(repeat(e), iterable) -> e, x_0, e, x_1, e, x_2... - # islice(..., 1, None) -> x_0, e, x_1, e, x_2... - return islice(interleave(repeat(e), iterable), 1, None) - else: - # interleave(filler, chunks) -> [e], [x_0, x_1], [e], [x_2, x_3]... - # islice(..., 1, None) -> [x_0, x_1], [e], [x_2, x_3]... - # flatten(...) -> x_0, x_1, e, x_2, x_3... - filler = repeat([e]) - chunks = chunked(iterable, n) - return flatten(islice(interleave(filler, chunks), 1, None)) - - -def unique_to_each(*iterables): - """Return the elements from each of the input iterables that aren't in the - other input iterables. - - For example, suppose you have a set of packages, each with a set of - dependencies:: - - {'pkg_1': {'A', 'B'}, 'pkg_2': {'B', 'C'}, 'pkg_3': {'B', 'D'}} - - If you remove one package, which dependencies can also be removed? - - If ``pkg_1`` is removed, then ``A`` is no longer necessary - it is not - associated with ``pkg_2`` or ``pkg_3``. Similarly, ``C`` is only needed for - ``pkg_2``, and ``D`` is only needed for ``pkg_3``:: - - >>> unique_to_each({'A', 'B'}, {'B', 'C'}, {'B', 'D'}) - [['A'], ['C'], ['D']] - - If there are duplicates in one input iterable that aren't in the others - they will be duplicated in the output. Input order is preserved:: - - >>> unique_to_each("mississippi", "missouri") - [['p', 'p'], ['o', 'u', 'r']] - - It is assumed that the elements of each iterable are hashable. - - """ - pool = [list(it) for it in iterables] - counts = Counter(chain.from_iterable(map(set, pool))) - uniques = {element for element in counts if counts[element] == 1} - return [list(filter(uniques.__contains__, it)) for it in pool] - - -def windowed(seq, n, fillvalue=None, step=1): - """Return a sliding window of width *n* over the given iterable. 
- - >>> all_windows = windowed([1, 2, 3, 4, 5], 3) - >>> list(all_windows) - [(1, 2, 3), (2, 3, 4), (3, 4, 5)] - - When the window is larger than the iterable, *fillvalue* is used in place - of missing values: - - >>> list(windowed([1, 2, 3], 4)) - [(1, 2, 3, None)] - - Each window will advance in increments of *step*: - - >>> list(windowed([1, 2, 3, 4, 5, 6], 3, fillvalue='!', step=2)) - [(1, 2, 3), (3, 4, 5), (5, 6, '!')] - - To slide into the iterable's items, use :func:`chain` to add filler items - to the left: - - >>> iterable = [1, 2, 3, 4] - >>> n = 3 - >>> padding = [None] * (n - 1) - >>> list(windowed(chain(padding, iterable), 3)) - [(None, None, 1), (None, 1, 2), (1, 2, 3), (2, 3, 4)] - """ - if n < 0: - raise ValueError('n must be >= 0') - if n == 0: - yield tuple() - return - if step < 1: - raise ValueError('step must be >= 1') - - window = deque(maxlen=n) - i = n - for _ in map(window.append, seq): - i -= 1 - if not i: - i = step - yield tuple(window) - - size = len(window) - if size < n: - yield tuple(chain(window, repeat(fillvalue, n - size))) - elif 0 < i < min(step, n): - window += (fillvalue,) * i - yield tuple(window) - - -def substrings(iterable): - """Yield all of the substrings of *iterable*. - - >>> [''.join(s) for s in substrings('more')] - ['m', 'o', 'r', 'e', 'mo', 'or', 're', 'mor', 'ore', 'more'] - - Note that non-string iterables can also be subdivided. - - >>> list(substrings([0, 1, 2])) - [(0,), (1,), (2,), (0, 1), (1, 2), (0, 1, 2)] - - """ - # The length-1 substrings - seq = [] - for item in iter(iterable): - seq.append(item) - yield (item,) - seq = tuple(seq) - item_count = len(seq) - - # And the rest - for n in range(2, item_count + 1): - for i in range(item_count - n + 1): - yield seq[i : i + n] - - -def substrings_indexes(seq, reverse=False): - """Yield all substrings and their positions in *seq* - - The items yielded will be a tuple of the form ``(substr, i, j)``, where - ``substr == seq[i:j]``. - - This function only works for iterables that support slicing, such as - ``str`` objects. - - >>> for item in substrings_indexes('more'): - ... print(item) - ('m', 0, 1) - ('o', 1, 2) - ('r', 2, 3) - ('e', 3, 4) - ('mo', 0, 2) - ('or', 1, 3) - ('re', 2, 4) - ('mor', 0, 3) - ('ore', 1, 4) - ('more', 0, 4) - - Set *reverse* to ``True`` to yield the same items in the opposite order. - - - """ - r = range(1, len(seq) + 1) - if reverse: - r = reversed(r) - return ( - (seq[i : i + L], i, i + L) for L in r for i in range(len(seq) - L + 1) - ) - - -class bucket: - """Wrap *iterable* and return an object that buckets it into - child iterables based on a *key* function. - - >>> iterable = ['a1', 'b1', 'c1', 'a2', 'b2', 'c2', 'b3'] - >>> s = bucket(iterable, key=lambda x: x[0]) # Bucket by 1st character - >>> sorted(list(s)) # Get the keys - ['a', 'b', 'c'] - >>> a_iterable = s['a'] - >>> next(a_iterable) - 'a1' - >>> next(a_iterable) - 'a2' - >>> list(s['b']) - ['b1', 'b2', 'b3'] - - The original iterable will be advanced and its items will be cached until - they are used by the child iterables. This may require significant storage. - - By default, attempting to select a bucket to which no items belong will - exhaust the iterable and cache all values. - If you specify a *validator* function, selected buckets will instead be - checked against it.
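One everyday use of ``windowed`` above, sketched rather than taken from the source: a width-3 moving average.

data = [3, 5, 2, 8, 6]
averages = [sum(w) / 3 for w in windowed(data, 3)]  # windows (3,5,2), (5,2,8), (2,8,6)
assert averages == [10 / 3, 5.0, 16 / 3]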
- - >>> from itertools import count - >>> it = count(1, 2) # Infinite sequence of odd numbers - >>> key = lambda x: x % 10 # Bucket by last digit - >>> validator = lambda x: x in {1, 3, 5, 7, 9} # Odd digits only - >>> s = bucket(it, key=key, validator=validator) - >>> 2 in s - False - >>> list(s[2]) - [] - - """ - - def __init__(self, iterable, key, validator=None): - self._it = iter(iterable) - self._key = key - self._cache = defaultdict(deque) - self._validator = validator or (lambda x: True) - - def __contains__(self, value): - if not self._validator(value): - return False - - try: - item = next(self[value]) - except StopIteration: - return False - else: - self._cache[value].appendleft(item) - - return True - - def _get_values(self, value): - """ - Helper to yield items from the parent iterator that match *value*. - Items that don't match are stored in the local cache as they - are encountered. - """ - while True: - # If we've cached some items that match the target value, emit - # the first one and evict it from the cache. - if self._cache[value]: - yield self._cache[value].popleft() - # Otherwise we need to advance the parent iterator to search for - # a matching item, caching the rest. - else: - while True: - try: - item = next(self._it) - except StopIteration: - return - item_value = self._key(item) - if item_value == value: - yield item - break - elif self._validator(item_value): - self._cache[item_value].append(item) - - def __iter__(self): - for item in self._it: - item_value = self._key(item) - if self._validator(item_value): - self._cache[item_value].append(item) - - yield from self._cache.keys() - - def __getitem__(self, value): - if not self._validator(value): - return iter(()) - - return self._get_values(value) - - -def spy(iterable, n=1): - """Return a 2-tuple with a list containing the first *n* elements of - *iterable*, and an iterator with the same items as *iterable*. - This allows you to "look ahead" at the items in the iterable without - advancing it. - - There is one item in the list by default: - - >>> iterable = 'abcdefg' - >>> head, iterable = spy(iterable) - >>> head - ['a'] - >>> list(iterable) - ['a', 'b', 'c', 'd', 'e', 'f', 'g'] - - You may use unpacking to retrieve items instead of lists: - - >>> (head,), iterable = spy('abcdefg') - >>> head - 'a' - >>> (first, second), iterable = spy('abcdefg', 2) - >>> first - 'a' - >>> second - 'b' - - The number of items requested can be larger than the number of items in - the iterable: - - >>> iterable = [1, 2, 3, 4, 5] - >>> head, iterable = spy(iterable, 10) - >>> head - [1, 2, 3, 4, 5] - >>> list(iterable) - [1, 2, 3, 4, 5] - - """ - it = iter(iterable) - head = take(n, it) - - return head.copy(), chain(head, it) - - -def interleave(*iterables): - """Return a new iterable yielding from each iterable in turn, - until the shortest is exhausted. - - >>> list(interleave([1, 2, 3], [4, 5], [6, 7, 8])) - [1, 4, 6, 2, 5, 7] - - For a version that doesn't terminate after the shortest iterable is - exhausted, see :func:`interleave_longest`. - - """ - return chain.from_iterable(zip(*iterables)) - - -def interleave_longest(*iterables): - """Return a new iterable yielding from each iterable in turn, - skipping any that are exhausted. - - >>> list(interleave_longest([1, 2, 3], [4, 5], [6, 7, 8])) - [1, 4, 6, 2, 5, 7, 3, 8] - - This function produces the same output as :func:`roundrobin`, but may - perform better for some inputs (in particular when the number of iterables - is large). 
- - """ - i = chain.from_iterable(zip_longest(*iterables, fillvalue=_marker)) - return (x for x in i if x is not _marker) - - -def interleave_evenly(iterables, lengths=None): - """ - Interleave multiple iterables so that their elements are evenly distributed - throughout the output sequence. - - >>> iterables = [1, 2, 3, 4, 5], ['a', 'b'] - >>> list(interleave_evenly(iterables)) - [1, 2, 'a', 3, 4, 'b', 5] - - >>> iterables = [[1, 2, 3], [4, 5], [6, 7, 8]] - >>> list(interleave_evenly(iterables)) - [1, 6, 4, 2, 7, 3, 8, 5] - - This function requires iterables of known length. Iterables without - ``__len__()`` can be used by manually specifying lengths with *lengths*: - - >>> from itertools import combinations, repeat - >>> iterables = [combinations(range(4), 2), ['a', 'b', 'c']] - >>> lengths = [4 * (4 - 1) // 2, 3] - >>> list(interleave_evenly(iterables, lengths=lengths)) - [(0, 1), (0, 2), 'a', (0, 3), (1, 2), 'b', (1, 3), (2, 3), 'c'] - - Based on Bresenham's algorithm. - """ - if lengths is None: - try: - lengths = [len(it) for it in iterables] - except TypeError: - raise ValueError( - 'Iterable lengths could not be determined automatically. ' - 'Specify them with the lengths keyword.' - ) - elif len(iterables) != len(lengths): - raise ValueError('Mismatching number of iterables and lengths.') - - dims = len(lengths) - - # sort iterables by length, descending - lengths_permute = sorted( - range(dims), key=lambda i: lengths[i], reverse=True - ) - lengths_desc = [lengths[i] for i in lengths_permute] - iters_desc = [iter(iterables[i]) for i in lengths_permute] - - # the longest iterable is the primary one (Bresenham: the longest - # distance along an axis) - delta_primary, deltas_secondary = lengths_desc[0], lengths_desc[1:] - iter_primary, iters_secondary = iters_desc[0], iters_desc[1:] - errors = [delta_primary // dims] * len(deltas_secondary) - - to_yield = sum(lengths) - while to_yield: - yield next(iter_primary) - to_yield -= 1 - # update errors for each secondary iterable - errors = [e - delta for e, delta in zip(errors, deltas_secondary)] - - # those iterables for which the error is negative are yielded - # ("diagonal step" in Bresenham) - for i, e in enumerate(errors): - if e < 0: - yield next(iters_secondary[i]) - to_yield -= 1 - errors[i] += delta_primary - - -def collapse(iterable, base_type=None, levels=None): - """Flatten an iterable with multiple levels of nesting (e.g., a list of - lists of tuples) into non-iterable types. - - >>> iterable = [(1, 2), ([3, 4], [[5], [6]])] - >>> list(collapse(iterable)) - [1, 2, 3, 4, 5, 6] - - Binary and text strings are not considered iterable and - will not be collapsed. 
- - To avoid collapsing other types, specify *base_type*: - - >>> iterable = ['ab', ('cd', 'ef'), ['gh', 'ij']] - >>> list(collapse(iterable, base_type=tuple)) - ['ab', ('cd', 'ef'), 'gh', 'ij'] - - Specify *levels* to stop flattening after a certain level: - - >>> iterable = [('a', ['b']), ('c', ['d'])] - >>> list(collapse(iterable)) # Fully flattened - ['a', 'b', 'c', 'd'] - >>> list(collapse(iterable, levels=1)) # Only one level flattened - ['a', ['b'], 'c', ['d']] - - """ - - def walk(node, level): - if ( - ((levels is not None) and (level > levels)) - or isinstance(node, (str, bytes)) - or ((base_type is not None) and isinstance(node, base_type)) - ): - yield node - return - - try: - tree = iter(node) - except TypeError: - yield node - return - else: - for child in tree: - yield from walk(child, level + 1) - - yield from walk(iterable, 0) - - -def side_effect(func, iterable, chunk_size=None, before=None, after=None): - """Invoke *func* on each item in *iterable* (or on each *chunk_size* group - of items) before yielding the item. - - `func` must be a function that takes a single argument. Its return value - will be discarded. - - *before* and *after* are optional functions that take no arguments. They - will be executed before iteration starts and after it ends, respectively. - - `side_effect` can be used for logging, updating progress bars, or anything - that is not functionally "pure." - - Emitting a status message: - - >>> from more_itertools import consume - >>> func = lambda item: print('Received {}'.format(item)) - >>> consume(side_effect(func, range(2))) - Received 0 - Received 1 - - Operating on chunks of items: - - >>> pair_sums = [] - >>> func = lambda chunk: pair_sums.append(sum(chunk)) - >>> list(side_effect(func, [0, 1, 2, 3, 4, 5], 2)) - [0, 1, 2, 3, 4, 5] - >>> list(pair_sums) - [1, 5, 9] - - Writing to a file-like object: - - >>> from io import StringIO - >>> from more_itertools import consume - >>> f = StringIO() - >>> func = lambda x: print(x, file=f) - >>> before = lambda: print(u'HEADER', file=f) - >>> after = f.close - >>> it = [u'a', u'b', u'c'] - >>> consume(side_effect(func, it, before=before, after=after)) - >>> f.closed - True - - """ - try: - if before is not None: - before() - - if chunk_size is None: - for item in iterable: - func(item) - yield item - else: - for chunk in chunked(iterable, chunk_size): - func(chunk) - yield from chunk - finally: - if after is not None: - after() - - -def sliced(seq, n, strict=False): - """Yield slices of length *n* from the sequence *seq*. - - >>> list(sliced((1, 2, 3, 4, 5, 6), 3)) - [(1, 2, 3), (4, 5, 6)] - - By the default, the last yielded slice will have fewer than *n* elements - if the length of *seq* is not divisible by *n*: - - >>> list(sliced((1, 2, 3, 4, 5, 6, 7, 8), 3)) - [(1, 2, 3), (4, 5, 6), (7, 8)] - - If the length of *seq* is not divisible by *n* and *strict* is - ``True``, then ``ValueError`` will be raised before the last - slice is yielded. - - This function will only work for iterables that support slicing. - For non-sliceable iterables, see :func:`chunked`. 
- - """ - iterator = takewhile(len, (seq[i : i + n] for i in count(0, n))) - if strict: - - def ret(): - for _slice in iterator: - if len(_slice) != n: - raise ValueError("seq is not divisible by n.") - yield _slice - - return iter(ret()) - else: - return iterator - - -def split_at(iterable, pred, maxsplit=-1, keep_separator=False): - """Yield lists of items from *iterable*, where each list is delimited by - an item where callable *pred* returns ``True``. - - >>> list(split_at('abcdcba', lambda x: x == 'b')) - [['a'], ['c', 'd', 'c'], ['a']] - - >>> list(split_at(range(10), lambda n: n % 2 == 1)) - [[0], [2], [4], [6], [8], []] - - At most *maxsplit* splits are done. If *maxsplit* is not specified or -1, - then there is no limit on the number of splits: - - >>> list(split_at(range(10), lambda n: n % 2 == 1, maxsplit=2)) - [[0], [2], [4, 5, 6, 7, 8, 9]] - - By default, the delimiting items are not included in the output. - The include them, set *keep_separator* to ``True``. - - >>> list(split_at('abcdcba', lambda x: x == 'b', keep_separator=True)) - [['a'], ['b'], ['c', 'd', 'c'], ['b'], ['a']] - - """ - if maxsplit == 0: - yield list(iterable) - return - - buf = [] - it = iter(iterable) - for item in it: - if pred(item): - yield buf - if keep_separator: - yield [item] - if maxsplit == 1: - yield list(it) - return - buf = [] - maxsplit -= 1 - else: - buf.append(item) - yield buf - - -def split_before(iterable, pred, maxsplit=-1): - """Yield lists of items from *iterable*, where each list ends just before - an item for which callable *pred* returns ``True``: - - >>> list(split_before('OneTwo', lambda s: s.isupper())) - [['O', 'n', 'e'], ['T', 'w', 'o']] - - >>> list(split_before(range(10), lambda n: n % 3 == 0)) - [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]] - - At most *maxsplit* splits are done. If *maxsplit* is not specified or -1, - then there is no limit on the number of splits: - - >>> list(split_before(range(10), lambda n: n % 3 == 0, maxsplit=2)) - [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]] - """ - if maxsplit == 0: - yield list(iterable) - return - - buf = [] - it = iter(iterable) - for item in it: - if pred(item) and buf: - yield buf - if maxsplit == 1: - yield [item] + list(it) - return - buf = [] - maxsplit -= 1 - buf.append(item) - if buf: - yield buf - - -def split_after(iterable, pred, maxsplit=-1): - """Yield lists of items from *iterable*, where each list ends with an - item where callable *pred* returns ``True``: - - >>> list(split_after('one1two2', lambda s: s.isdigit())) - [['o', 'n', 'e', '1'], ['t', 'w', 'o', '2']] - - >>> list(split_after(range(10), lambda n: n % 3 == 0)) - [[0], [1, 2, 3], [4, 5, 6], [7, 8, 9]] - - At most *maxsplit* splits are done. If *maxsplit* is not specified or -1, - then there is no limit on the number of splits: - - >>> list(split_after(range(10), lambda n: n % 3 == 0, maxsplit=2)) - [[0], [1, 2, 3], [4, 5, 6, 7, 8, 9]] - - """ - if maxsplit == 0: - yield list(iterable) - return - - buf = [] - it = iter(iterable) - for item in it: - buf.append(item) - if pred(item) and buf: - yield buf - if maxsplit == 1: - yield list(it) - return - buf = [] - maxsplit -= 1 - if buf: - yield buf - - -def split_when(iterable, pred, maxsplit=-1): - """Split *iterable* into pieces based on the output of *pred*. - *pred* should be a function that takes successive pairs of items and - returns ``True`` if the iterable should be split in between them. 
- - For example, to find runs of increasing numbers, split the iterable when - element ``i`` is larger than element ``i + 1``: - - >>> list(split_when([1, 2, 3, 3, 2, 5, 2, 4, 2], lambda x, y: x > y)) - [[1, 2, 3, 3], [2, 5], [2, 4], [2]] - - At most *maxsplit* splits are done. If *maxsplit* is not specified or -1, - then there is no limit on the number of splits: - - >>> list(split_when([1, 2, 3, 3, 2, 5, 2, 4, 2], - ... lambda x, y: x > y, maxsplit=2)) - [[1, 2, 3, 3], [2, 5], [2, 4, 2]] - - """ - if maxsplit == 0: - yield list(iterable) - return - - it = iter(iterable) - try: - cur_item = next(it) - except StopIteration: - return - - buf = [cur_item] - for next_item in it: - if pred(cur_item, next_item): - yield buf - if maxsplit == 1: - yield [next_item] + list(it) - return - buf = [] - maxsplit -= 1 - - buf.append(next_item) - cur_item = next_item - - yield buf - - -def split_into(iterable, sizes): - """Yield a list of sequential items from *iterable* of length 'n' for each - integer 'n' in *sizes*. - - >>> list(split_into([1,2,3,4,5,6], [1,2,3])) - [[1], [2, 3], [4, 5, 6]] - - If the sum of *sizes* is smaller than the length of *iterable*, then the - remaining items of *iterable* will not be returned. - - >>> list(split_into([1,2,3,4,5,6], [2,3])) - [[1, 2], [3, 4, 5]] - - If the sum of *sizes* is larger than the length of *iterable*, fewer items - will be returned in the iteration that overruns *iterable* and further - lists will be empty: - - >>> list(split_into([1,2,3,4], [1,2,3,4])) - [[1], [2, 3], [4], []] - - When a ``None`` object is encountered in *sizes*, the returned list will - contain items up to the end of *iterable* the same way that itertools.islice - does: - - >>> list(split_into([1,2,3,4,5,6,7,8,9,0], [2,3,None])) - [[1, 2], [3, 4, 5], [6, 7, 8, 9, 0]] - - :func:`split_into` can be useful for grouping a series of items where the - sizes of the groups are not uniform. An example would be where in a row - from a table, multiple columns represent elements of the same feature - (e.g. a point represented by x,y,z) but the format is not the same for - all columns. - """ - # convert the iterable argument into an iterator so its contents can - # be consumed by islice in case it is a generator - it = iter(iterable) - - for size in sizes: - if size is None: - yield list(it) - return - else: - yield list(islice(it, size)) - - -def padded(iterable, fillvalue=None, n=None, next_multiple=False): - """Yield the elements from *iterable*, followed by *fillvalue*, such that - at least *n* items are emitted. - - >>> list(padded([1, 2, 3], '?', 5)) - [1, 2, 3, '?', '?'] - - If *next_multiple* is ``True``, *fillvalue* will be emitted until the - number of items emitted is a multiple of *n*:: - - >>> list(padded([1, 2, 3, 4], n=3, next_multiple=True)) - [1, 2, 3, 4, None, None] - - If *n* is ``None``, *fillvalue* will be emitted indefinitely. - - """ - it = iter(iterable) - if n is None: - yield from chain(it, repeat(fillvalue)) - elif n < 1: - raise ValueError('n must be at least 1') - else: - item_count = 0 - for item in it: - yield item - item_count += 1 - - remaining = (n - item_count) % n if next_multiple else n - item_count - for _ in range(remaining): - yield fillvalue - - -def repeat_each(iterable, n=2): - """Repeat each element in *iterable* *n* times.
- - >>> list(repeat_each('ABC', 3)) - ['A', 'A', 'A', 'B', 'B', 'B', 'C', 'C', 'C'] - """ - return chain.from_iterable(map(repeat, iterable, repeat(n))) - - -def repeat_last(iterable, default=None): - """After the *iterable* is exhausted, keep yielding its last element. - - >>> list(islice(repeat_last(range(3)), 5)) - [0, 1, 2, 2, 2] - - If the iterable is empty, yield *default* forever:: - - >>> list(islice(repeat_last(range(0), 42), 5)) - [42, 42, 42, 42, 42] - - """ - item = _marker - for item in iterable: - yield item - final = default if item is _marker else item - yield from repeat(final) - - -def distribute(n, iterable): - """Distribute the items from *iterable* among *n* smaller iterables. - - >>> group_1, group_2 = distribute(2, [1, 2, 3, 4, 5, 6]) - >>> list(group_1) - [1, 3, 5] - >>> list(group_2) - [2, 4, 6] - - If the length of *iterable* is not evenly divisible by *n*, then the - length of the returned iterables will not be identical: - - >>> children = distribute(3, [1, 2, 3, 4, 5, 6, 7]) - >>> [list(c) for c in children] - [[1, 4, 7], [2, 5], [3, 6]] - - If the length of *iterable* is smaller than *n*, then the last returned - iterables will be empty: - - >>> children = distribute(5, [1, 2, 3]) - >>> [list(c) for c in children] - [[1], [2], [3], [], []] - - This function uses :func:`itertools.tee` and may require significant - storage. If you need the order items in the smaller iterables to match the - original iterable, see :func:`divide`. - - """ - if n < 1: - raise ValueError('n must be at least 1') - - children = tee(iterable, n) - return [islice(it, index, None, n) for index, it in enumerate(children)] - - -def stagger(iterable, offsets=(-1, 0, 1), longest=False, fillvalue=None): - """Yield tuples whose elements are offset from *iterable*. - The amount by which the `i`-th item in each tuple is offset is given by - the `i`-th item in *offsets*. - - >>> list(stagger([0, 1, 2, 3])) - [(None, 0, 1), (0, 1, 2), (1, 2, 3)] - >>> list(stagger(range(8), offsets=(0, 2, 4))) - [(0, 2, 4), (1, 3, 5), (2, 4, 6), (3, 5, 7)] - - By default, the sequence will end when the final element of a tuple is the - last item in the iterable. To continue until the first element of a tuple - is the last item in the iterable, set *longest* to ``True``:: - - >>> list(stagger([0, 1, 2, 3], longest=True)) - [(None, 0, 1), (0, 1, 2), (1, 2, 3), (2, 3, None), (3, None, None)] - - By default, ``None`` will be used to replace offsets beyond the end of the - sequence. Specify *fillvalue* to use some other value. - - """ - children = tee(iterable, len(offsets)) - - return zip_offset( - *children, offsets=offsets, longest=longest, fillvalue=fillvalue - ) - - -class UnequalIterablesError(ValueError): - def __init__(self, details=None): - msg = 'Iterables have different lengths' - if details is not None: - msg += (': index 0 has length {}; index {} has length {}').format( - *details - ) - - super().__init__(msg) - - -def _zip_equal_generator(iterables): - for combo in zip_longest(*iterables, fillvalue=_marker): - for val in combo: - if val is _marker: - raise UnequalIterablesError() - yield combo - - -def _zip_equal(*iterables): - # Check whether the iterables are all the same size. - try: - first_size = len(iterables[0]) - for i, it in enumerate(iterables[1:], 1): - size = len(it) - if size != first_size: - break - else: - # If we didn't break out, we can use the built-in zip. - return zip(*iterables) - - # If we did break out, there was a mismatch. 
- raise UnequalIterablesError(details=(first_size, i, size)) - # If any one of the iterables didn't have a length, start reading - # them until one runs out. - except TypeError: - return _zip_equal_generator(iterables) - - -def zip_equal(*iterables): - """``zip`` the input *iterables* together, but raise - ``UnequalIterablesError`` if they aren't all the same length. - - >>> it_1 = range(3) - >>> it_2 = iter('abc') - >>> list(zip_equal(it_1, it_2)) - [(0, 'a'), (1, 'b'), (2, 'c')] - - >>> it_1 = range(3) - >>> it_2 = iter('abcd') - >>> list(zip_equal(it_1, it_2)) # doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ... - more_itertools.more.UnequalIterablesError: Iterables have different - lengths - - """ - if hexversion >= 0x30A00A6: - warnings.warn( - ( - 'zip_equal will be removed in a future version of ' - 'more-itertools. Use the builtin zip function with ' - 'strict=True instead.' - ), - DeprecationWarning, - ) - - return _zip_equal(*iterables) - - -def zip_offset(*iterables, offsets, longest=False, fillvalue=None): - """``zip`` the input *iterables* together, but offset the `i`-th iterable - by the `i`-th item in *offsets*. - - >>> list(zip_offset('0123', 'abcdef', offsets=(0, 1))) - [('0', 'b'), ('1', 'c'), ('2', 'd'), ('3', 'e')] - - This can be used as a lightweight alternative to SciPy or pandas to analyze - data sets in which some series have a lead or lag relationship. - - By default, the sequence will end when the shortest iterable is exhausted. - To continue until the longest iterable is exhausted, set *longest* to - ``True``. - - >>> list(zip_offset('0123', 'abcdef', offsets=(0, 1), longest=True)) - [('0', 'b'), ('1', 'c'), ('2', 'd'), ('3', 'e'), (None, 'f')] - - By default, ``None`` will be used to replace offsets beyond the end of the - sequence. Specify *fillvalue* to use some other value. - - """ - if len(iterables) != len(offsets): - raise ValueError("Number of iterables and offsets didn't match") - - staggered = [] - for it, n in zip(iterables, offsets): - if n < 0: - staggered.append(chain(repeat(fillvalue, -n), it)) - elif n > 0: - staggered.append(islice(it, n, None)) - else: - staggered.append(it) - - if longest: - return zip_longest(*staggered, fillvalue=fillvalue) - - return zip(*staggered) - - -def sort_together(iterables, key_list=(0,), key=None, reverse=False): - """Return the input iterables sorted together, with *key_list* as the - priority for sorting. All iterables are trimmed to the length of the - shortest one. - - This can be used like the sorting function in a spreadsheet. If each - iterable represents a column of data, the key list determines which - columns are used for sorting. - - By default, all iterables are sorted using the ``0``-th iterable:: - - >>> iterables = [(4, 3, 2, 1), ('a', 'b', 'c', 'd')] - >>> sort_together(iterables) - [(1, 2, 3, 4), ('d', 'c', 'b', 'a')] - - Set a different key list to sort according to another iterable. - Specifying multiple keys dictates how ties are broken:: - - >>> iterables = [(3, 1, 2), (0, 1, 0), ('c', 'b', 'a')] - >>> sort_together(iterables, key_list=(1, 2)) - [(2, 3, 1), (0, 0, 1), ('a', 'c', 'b')] - - To sort by a function of the elements of the iterable, pass a *key* - function. Its arguments are the elements of the iterables corresponding to - the key list:: - - >>> names = ('a', 'b', 'c') - >>> lengths = (1, 2, 3) - >>> widths = (5, 2, 1) - >>> def area(length, width): - ... 
return length * width - >>> sort_together([names, lengths, widths], key_list=(1, 2), key=area) - [('c', 'b', 'a'), (3, 2, 1), (1, 2, 5)] - - Set *reverse* to ``True`` to sort in descending order. - - >>> sort_together([(1, 2, 3), ('c', 'b', 'a')], reverse=True) - [(3, 2, 1), ('a', 'b', 'c')] - - """ - if key is None: - # if there is no key function, the key argument to sorted is an - # itemgetter - key_argument = itemgetter(*key_list) - else: - # if there is a key function, call it with the items at the offsets - # specified by the key list as arguments - key_list = list(key_list) - if len(key_list) == 1: - # if key_list contains a single item, pass the item at that offset - # as the only argument to the key function - key_offset = key_list[0] - key_argument = lambda zipped_items: key(zipped_items[key_offset]) - else: - # if key_list contains multiple items, use itemgetter to return a - # tuple of items, which we pass as *args to the key function - get_key_items = itemgetter(*key_list) - key_argument = lambda zipped_items: key( - *get_key_items(zipped_items) - ) - - return list( - zip(*sorted(zip(*iterables), key=key_argument, reverse=reverse)) - ) - - -def unzip(iterable): - """The inverse of :func:`zip`, this function disaggregates the elements - of the zipped *iterable*. - - The ``i``-th iterable contains the ``i``-th element from each element - of the zipped iterable. The first element is used to determine the - length of the remaining elements. - - >>> iterable = [('a', 1), ('b', 2), ('c', 3), ('d', 4)] - >>> letters, numbers = unzip(iterable) - >>> list(letters) - ['a', 'b', 'c', 'd'] - >>> list(numbers) - [1, 2, 3, 4] - - This is similar to using ``zip(*iterable)``, but it avoids reading - *iterable* into memory. Note, however, that this function uses - :func:`itertools.tee` and thus may require significant storage. - - """ - head, iterable = spy(iter(iterable)) - if not head: - # empty iterable, e.g. zip([], [], []) - return () - # spy returns a one-length iterable as head - head = head[0] - iterables = tee(iterable, len(head)) - - def itemgetter(i): - def getter(obj): - try: - return obj[i] - except IndexError: - # basically if we have an iterable like - # iter([(1, 2, 3), (4, 5), (6,)]) - # the second unzipped iterable would fail at the third tuple - # since it would try to access tup[1] - # same with the third unzipped iterable and the second tuple - # to support these "improperly zipped" iterables, - # we create a custom itemgetter - # which just stops the unzipped iterables - # at first length mismatch - raise StopIteration - - return getter - - return tuple(map(itemgetter(i), it) for i, it in enumerate(iterables)) - - -def divide(n, iterable): - """Divide the elements from *iterable* into *n* parts, maintaining - order. - - >>> group_1, group_2 = divide(2, [1, 2, 3, 4, 5, 6]) - >>> list(group_1) - [1, 2, 3] - >>> list(group_2) - [4, 5, 6] - - If the length of *iterable* is not evenly divisible by *n*, then the - length of the returned iterables will not be identical: - - >>> children = divide(3, [1, 2, 3, 4, 5, 6, 7]) - >>> [list(c) for c in children] - [[1, 2, 3], [4, 5], [6, 7]] - - If the length of the iterable is smaller than n, then the last returned - iterables will be empty: - - >>> children = divide(5, [1, 2, 3]) - >>> [list(c) for c in children] - [[1], [2], [3], [], []] - - This function will exhaust the iterable before returning and may require - significant storage.
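- - If a sequence is passed, it is sliced directly rather than copied into a - tuple first: - - >>> [list(c) for c in divide(2, (1, 2, 3, 4))] - [[1, 2], [3, 4]]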
If order is not important, see :func:`distribute`, - which does not first pull the iterable into memory. - - """ - if n < 1: - raise ValueError('n must be at least 1') - - try: - iterable[:0] - except TypeError: - seq = tuple(iterable) - else: - seq = iterable - - q, r = divmod(len(seq), n) - - ret = [] - stop = 0 - for i in range(1, n + 1): - start = stop - stop += q + 1 if i <= r else q - ret.append(iter(seq[start:stop])) - - return ret - - -def always_iterable(obj, base_type=(str, bytes)): - """If *obj* is iterable, return an iterator over its items:: - - >>> obj = (1, 2, 3) - >>> list(always_iterable(obj)) - [1, 2, 3] - - If *obj* is not iterable, return a one-item iterable containing *obj*:: - - >>> obj = 1 - >>> list(always_iterable(obj)) - [1] - - If *obj* is ``None``, return an empty iterable: - - >>> obj = None - >>> list(always_iterable(None)) - [] - - By default, binary and text strings are not considered iterable:: - - >>> obj = 'foo' - >>> list(always_iterable(obj)) - ['foo'] - - If *base_type* is set, objects for which ``isinstance(obj, base_type)`` - returns ``True`` won't be considered iterable. - - >>> obj = {'a': 1} - >>> list(always_iterable(obj)) # Iterate over the dict's keys - ['a'] - >>> list(always_iterable(obj, base_type=dict)) # Treat dicts as a unit - [{'a': 1}] - - Set *base_type* to ``None`` to avoid any special handling and treat objects - Python considers iterable as iterable: - - >>> obj = 'foo' - >>> list(always_iterable(obj, base_type=None)) - ['f', 'o', 'o'] - """ - if obj is None: - return iter(()) - - if (base_type is not None) and isinstance(obj, base_type): - return iter((obj,)) - - try: - return iter(obj) - except TypeError: - return iter((obj,)) - - -def adjacent(predicate, iterable, distance=1): - """Return an iterable over `(bool, item)` tuples where the `item` is - drawn from *iterable* and the `bool` indicates whether - that item satisfies the *predicate* or is adjacent to an item that does. - - For example, to find whether items are adjacent to a ``3``:: - - >>> list(adjacent(lambda x: x == 3, range(6))) - [(False, 0), (False, 1), (True, 2), (True, 3), (True, 4), (False, 5)] - - Set *distance* to change what counts as adjacent. For example, to find - whether items are two places away from a ``3``: - - >>> list(adjacent(lambda x: x == 3, range(6), distance=2)) - [(False, 0), (True, 1), (True, 2), (True, 3), (True, 4), (True, 5)] - - This is useful for contextualizing the results of a search function. - For example, a code comparison tool might want to identify lines that - have changed, but also surrounding lines to give the viewer of the diff - context. - - The predicate function will only be called once for each item in the - iterable. - - See also :func:`groupby_transform`, which can be used with this function - to group ranges of items with the same `bool` value. - - """ - # Allow distance=0 mainly for testing that it reproduces results with map() - if distance < 0: - raise ValueError('distance must be at least 0') - - i1, i2 = tee(iterable) - padding = [False] * distance - selected = chain(padding, map(predicate, i1), padding) - adjacent_to_selected = map(any, windowed(selected, 2 * distance + 1)) - return zip(adjacent_to_selected, i2) - - -def groupby_transform(iterable, keyfunc=None, valuefunc=None, reducefunc=None): - """An extension of :func:`itertools.groupby` that can apply transformations - to the grouped data. 
- - * *keyfunc* is a function computing a key value for each item in *iterable* - * *valuefunc* is a function that transforms the individual items from - *iterable* after grouping - * *reducefunc* is a function that transforms each group of items - - >>> iterable = 'aAAbBBcCC' - >>> keyfunc = lambda k: k.upper() - >>> valuefunc = lambda v: v.lower() - >>> reducefunc = lambda g: ''.join(g) - >>> list(groupby_transform(iterable, keyfunc, valuefunc, reducefunc)) - [('A', 'aaa'), ('B', 'bbb'), ('C', 'ccc')] - - Each optional argument defaults to an identity function if not specified. - - :func:`groupby_transform` is useful when grouping elements of an iterable - using a separate iterable as the key. To do this, :func:`zip` the iterables - and pass a *keyfunc* that extracts the first element and a *valuefunc* - that extracts the second element:: - - >>> from operator import itemgetter - >>> keys = [0, 0, 1, 1, 1, 2, 2, 2, 3] - >>> values = 'abcdefghi' - >>> iterable = zip(keys, values) - >>> grouper = groupby_transform(iterable, itemgetter(0), itemgetter(1)) - >>> [(k, ''.join(g)) for k, g in grouper] - [(0, 'ab'), (1, 'cde'), (2, 'fgh'), (3, 'i')] - - Note that the order of items in the iterable is significant. - Only adjacent items are grouped together, so if you don't want any - duplicate groups, you should sort the iterable by the key function. - - """ - ret = groupby(iterable, keyfunc) - if valuefunc: - ret = ((k, map(valuefunc, g)) for k, g in ret) - if reducefunc: - ret = ((k, reducefunc(g)) for k, g in ret) - - return ret - - -class numeric_range(abc.Sequence, abc.Hashable): - """An extension of the built-in ``range()`` function whose arguments can - be any orderable numeric type. - - With only *stop* specified, *start* defaults to ``0`` and *step* - defaults to ``1``. The output items will match the type of *stop*: - - >>> list(numeric_range(3.5)) - [0.0, 1.0, 2.0, 3.0] - - With only *start* and *stop* specified, *step* defaults to ``1``. The - output items will match the type of *start*: - - >>> from decimal import Decimal - >>> start = Decimal('2.1') - >>> stop = Decimal('5.1') - >>> list(numeric_range(start, stop)) - [Decimal('2.1'), Decimal('3.1'), Decimal('4.1')] - - With *start*, *stop*, and *step* specified the output items will match - the type of ``start + step``: - - >>> from fractions import Fraction - >>> start = Fraction(1, 2) # Start at 1/2 - >>> stop = Fraction(5, 2) # End at 5/2 - >>> step = Fraction(1, 2) # Count by 1/2 - >>> list(numeric_range(start, stop, step)) - [Fraction(1, 2), Fraction(1, 1), Fraction(3, 2), Fraction(2, 1)] - - If *step* is zero, ``ValueError`` is raised. Negative steps are supported: - - >>> list(numeric_range(3, -1, -1.0)) - [3.0, 2.0, 1.0, 0.0] - - Be aware of the limitations of floating point numbers; the representation - of the yielded numbers may be surprising. 
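- - For example, ``float`` arguments can produce surprising output: - - >>> list(numeric_range(0, 0.4, 0.1)) - [0.0, 0.1, 0.2, 0.30000000000000004]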
- - ``datetime.datetime`` objects can be used for *start* and *stop*, if *step* - is a ``datetime.timedelta`` object: - - >>> import datetime - >>> start = datetime.datetime(2019, 1, 1) - >>> stop = datetime.datetime(2019, 1, 3) - >>> step = datetime.timedelta(days=1) - >>> items = iter(numeric_range(start, stop, step)) - >>> next(items) - datetime.datetime(2019, 1, 1, 0, 0) - >>> next(items) - datetime.datetime(2019, 1, 2, 0, 0) - - """ - - _EMPTY_HASH = hash(range(0, 0)) - - def __init__(self, *args): - argc = len(args) - if argc == 1: - (self._stop,) = args - self._start = type(self._stop)(0) - self._step = type(self._stop - self._start)(1) - elif argc == 2: - self._start, self._stop = args - self._step = type(self._stop - self._start)(1) - elif argc == 3: - self._start, self._stop, self._step = args - elif argc == 0: - raise TypeError( - 'numeric_range expected at least ' - '1 argument, got {}'.format(argc) - ) - else: - raise TypeError( - 'numeric_range expected at most ' - '3 arguments, got {}'.format(argc) - ) - - self._zero = type(self._step)(0) - if self._step == self._zero: - raise ValueError('numeric_range() arg 3 must not be zero') - self._growing = self._step > self._zero - self._init_len() - - def __bool__(self): - if self._growing: - return self._start < self._stop - else: - return self._start > self._stop - - def __contains__(self, elem): - if self._growing: - if self._start <= elem < self._stop: - return (elem - self._start) % self._step == self._zero - else: - if self._start >= elem > self._stop: - return (self._start - elem) % (-self._step) == self._zero - - return False - - def __eq__(self, other): - if isinstance(other, numeric_range): - empty_self = not bool(self) - empty_other = not bool(other) - if empty_self or empty_other: - return empty_self and empty_other # True if both empty - else: - return ( - self._start == other._start - and self._step == other._step - and self._get_by_index(-1) == other._get_by_index(-1) - ) - else: - return False - - def __getitem__(self, key): - if isinstance(key, int): - return self._get_by_index(key) - elif isinstance(key, slice): - step = self._step if key.step is None else key.step * self._step - - if key.start is None or key.start <= -self._len: - start = self._start - elif key.start >= self._len: - start = self._stop - else: # -self._len < key.start < self._len - start = self._get_by_index(key.start) - - if key.stop is None or key.stop >= self._len: - stop = self._stop - elif key.stop <= -self._len: - stop = self._start - else: # -self._len < key.stop < self._len - stop = self._get_by_index(key.stop) - - return numeric_range(start, stop, step) - else: - raise TypeError( - 'numeric range indices must be ' - 'integers or slices, not {}'.format(type(key).__name__) - ) - - def __hash__(self): - if self: - return hash((self._start, self._get_by_index(-1), self._step)) - else: - return self._EMPTY_HASH - - def __iter__(self): - values = (self._start + (n * self._step) for n in count()) - if self._growing: - return takewhile(partial(gt, self._stop), values) - else: - return takewhile(partial(lt, self._stop), values) - - def __len__(self): - return self._len - - def _init_len(self): - if self._growing: - start = self._start - stop = self._stop - step = self._step - else: - start = self._stop - stop = self._start - step = -self._step - distance = stop - start - if distance <= self._zero: - self._len = 0 - else: # distance > 0 and step > 0: regular euclidean division - q, r = divmod(distance, step) - self._len = int(q) + int(r != 
self._zero) - - def __reduce__(self): - return numeric_range, (self._start, self._stop, self._step) - - def __repr__(self): - if self._step == 1: - return "numeric_range({}, {})".format( - repr(self._start), repr(self._stop) - ) - else: - return "numeric_range({}, {}, {})".format( - repr(self._start), repr(self._stop), repr(self._step) - ) - - def __reversed__(self): - return iter( - numeric_range( - self._get_by_index(-1), self._start - self._step, -self._step - ) - ) - - def count(self, value): - return int(value in self) - - def index(self, value): - if self._growing: - if self._start <= value < self._stop: - q, r = divmod(value - self._start, self._step) - if r == self._zero: - return int(q) - else: - if self._start >= value > self._stop: - q, r = divmod(self._start - value, -self._step) - if r == self._zero: - return int(q) - - raise ValueError("{} is not in numeric range".format(value)) - - def _get_by_index(self, i): - if i < 0: - i += self._len - if i < 0 or i >= self._len: - raise IndexError("numeric range object index out of range") - return self._start + i * self._step - - -def count_cycle(iterable, n=None): - """Cycle through the items from *iterable* up to *n* times, yielding - the number of completed cycles along with each item. If *n* is omitted the - process repeats indefinitely. - - >>> list(count_cycle('AB', 3)) - [(0, 'A'), (0, 'B'), (1, 'A'), (1, 'B'), (2, 'A'), (2, 'B')] - - """ - iterable = tuple(iterable) - if not iterable: - return iter(()) - counter = count() if n is None else range(n) - return ((i, item) for i in counter for item in iterable) - - -def mark_ends(iterable): - """Yield 3-tuples of the form ``(is_first, is_last, item)``. - - >>> list(mark_ends('ABC')) - [(True, False, 'A'), (False, False, 'B'), (False, True, 'C')] - - Use this when looping over an iterable to take special action on its first - and/or last items: - - >>> iterable = ['Header', 100, 200, 'Footer'] - >>> total = 0 - >>> for is_first, is_last, item in mark_ends(iterable): - ... if is_first: - ... continue # Skip the header - ... if is_last: - ... continue # Skip the footer - ... total += item - >>> print(total) - 300 - """ - it = iter(iterable) - - try: - b = next(it) - except StopIteration: - return - - try: - for i in count(): - a = b - b = next(it) - yield i == 0, False, a - - except StopIteration: - yield i == 0, True, a - - -def locate(iterable, pred=bool, window_size=None): - """Yield the index of each item in *iterable* for which *pred* returns - ``True``. - - *pred* defaults to :func:`bool`, which will select truthy items: - - >>> list(locate([0, 1, 1, 0, 1, 0, 0])) - [1, 2, 4] - - Set *pred* to a custom function to, e.g., find the indexes for a particular - item. - - >>> list(locate(['a', 'b', 'c', 'b'], lambda x: x == 'b')) - [1, 3] - - If *window_size* is given, then the *pred* function will be called with - that many items. 
This enables searching for sub-sequences: - - >>> iterable = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3] - >>> pred = lambda *args: args == (1, 2, 3) - >>> list(locate(iterable, pred=pred, window_size=3)) - [1, 5, 9] - - Use with :func:`seekable` to find indexes and then retrieve the associated - items: - - >>> from itertools import count - >>> from more_itertools import seekable - >>> source = (3 * n + 1 if (n % 2) else n // 2 for n in count()) - >>> it = seekable(source) - >>> pred = lambda x: x > 100 - >>> indexes = locate(it, pred=pred) - >>> i = next(indexes) - >>> it.seek(i) - >>> next(it) - 106 - - """ - if window_size is None: - return compress(count(), map(pred, iterable)) - - if window_size < 1: - raise ValueError('window size must be at least 1') - - it = windowed(iterable, window_size, fillvalue=_marker) - return compress(count(), starmap(pred, it)) - - -def lstrip(iterable, pred): - """Yield the items from *iterable*, but strip any from the beginning - for which *pred* returns ``True``. - - For example, to remove a set of items from the start of an iterable: - - >>> iterable = (None, False, None, 1, 2, None, 3, False, None) - >>> pred = lambda x: x in {None, False, ''} - >>> list(lstrip(iterable, pred)) - [1, 2, None, 3, False, None] - - This function is analogous to :func:`str.lstrip`, and is essentially - a wrapper for :func:`itertools.dropwhile`. - - """ - return dropwhile(pred, iterable) - - -def rstrip(iterable, pred): - """Yield the items from *iterable*, but strip any from the end - for which *pred* returns ``True``. - - For example, to remove a set of items from the end of an iterable: - - >>> iterable = (None, False, None, 1, 2, None, 3, False, None) - >>> pred = lambda x: x in {None, False, ''} - >>> list(rstrip(iterable, pred)) - [None, False, None, 1, 2, None, 3] - - This function is analogous to :func:`str.rstrip`. - - """ - cache = [] - cache_append = cache.append - cache_clear = cache.clear - for x in iterable: - if pred(x): - cache_append(x) - else: - yield from cache - cache_clear() - yield x - - -def strip(iterable, pred): - """Yield the items from *iterable*, but strip any from the - beginning and end for which *pred* returns ``True``. - - For example, to remove a set of items from both ends of an iterable: - - >>> iterable = (None, False, None, 1, 2, None, 3, False, None) - >>> pred = lambda x: x in {None, False, ''} - >>> list(strip(iterable, pred)) - [1, 2, None, 3] - - This function is analogous to :func:`str.strip`. - - """ - return rstrip(lstrip(iterable, pred), pred) - - -class islice_extended: - """An extension of :func:`itertools.islice` that supports negative values - for *stop*, *start*, and *step*. - - >>> iterable = iter('abcdefgh') - >>> list(islice_extended(iterable, -4, -1)) - ['e', 'f', 'g'] - - Slices with negative values require some caching of *iterable*, but this - function takes care to minimize the amount of memory required.
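- - Plain forward slices are delegated to :func:`itertools.islice` directly, - so no caching is needed: - - >>> from itertools import count - >>> list(islice_extended(count(), 3)) - [0, 1, 2]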
- - For example, you can use a negative step with an infinite iterator: - - >>> from itertools import count - >>> list(islice_extended(count(), 110, 99, -2)) - [110, 108, 106, 104, 102, 100] - - You can also use slice notation directly: - - >>> iterable = map(str, count()) - >>> it = islice_extended(iterable)[10:20:2] - >>> list(it) - ['10', '12', '14', '16', '18'] - - """ - - def __init__(self, iterable, *args): - it = iter(iterable) - if args: - self._iterable = _islice_helper(it, slice(*args)) - else: - self._iterable = it - - def __iter__(self): - return self - - def __next__(self): - return next(self._iterable) - - def __getitem__(self, key): - if isinstance(key, slice): - return islice_extended(_islice_helper(self._iterable, key)) - - raise TypeError('islice_extended.__getitem__ argument must be a slice') - - -def _islice_helper(it, s): - start = s.start - stop = s.stop - if s.step == 0: - raise ValueError('step argument must be a non-zero integer or None.') - step = s.step or 1 - - if step > 0: - start = 0 if (start is None) else start - - if start < 0: - # Consume all but the last -start items - cache = deque(enumerate(it, 1), maxlen=-start) - len_iter = cache[-1][0] if cache else 0 - - # Adjust start to be positive - i = max(len_iter + start, 0) - - # Adjust stop to be positive - if stop is None: - j = len_iter - elif stop >= 0: - j = min(stop, len_iter) - else: - j = max(len_iter + stop, 0) - - # Slice the cache - n = j - i - if n <= 0: - return - - for index, item in islice(cache, 0, n, step): - yield item - elif (stop is not None) and (stop < 0): - # Advance to the start position - next(islice(it, start, start), None) - - # When stop is negative, we have to carry -stop items while - # iterating - cache = deque(islice(it, -stop), maxlen=-stop) - - for index, item in enumerate(it): - cached_item = cache.popleft() - if index % step == 0: - yield cached_item - cache.append(item) - else: - # When both start and stop are positive we have the normal case - yield from islice(it, start, stop, step) - else: - start = -1 if (start is None) else start - - if (stop is not None) and (stop < 0): - # Consume all but the last items - n = -stop - 1 - cache = deque(enumerate(it, 1), maxlen=n) - len_iter = cache[-1][0] if cache else 0 - - # If start and stop are both negative they are comparable and - # we can just slice. Otherwise we can adjust start to be negative - # and then slice. - if start < 0: - i, j = start, stop - else: - i, j = min(start - len_iter, -1), None - - for index, item in list(cache)[i:j:step]: - yield item - else: - # Advance to the stop position - if stop is not None: - m = stop + 1 - next(islice(it, m, m), None) - - # stop is positive, so if start is negative they are not comparable - # and we need the rest of the items. - if start < 0: - i = start - n = None - # stop is None and start is positive, so we just need items up to - # the start index. - elif stop is None: - i = None - n = start + 1 - # Both stop and start are positive, so they are comparable. - else: - i = None - n = start - stop - if n <= 0: - return - - cache = list(islice(it, n)) - - yield from cache[i::step] - - -def always_reversible(iterable): - """An extension of :func:`reversed` that supports all iterables, not - just those which implement the ``Reversible`` or ``Sequence`` protocols. - - >>> print(*always_reversible(x for x in range(3))) - 2 1 0 - - If the iterable is already reversible, this function returns the - result of :func:`reversed()`. 
If the iterable is not reversible, - this function will cache the remaining items in the iterable and - yield them in reverse order, which may require significant storage. - """ - try: - return reversed(iterable) - except TypeError: - return reversed(list(iterable)) - - -def consecutive_groups(iterable, ordering=lambda x: x): - """Yield groups of consecutive items using :func:`itertools.groupby`. - The *ordering* function determines whether two items are adjacent by - returning their position. - - By default, the ordering function is the identity function. This is - suitable for finding runs of numbers: - - >>> iterable = [1, 10, 11, 12, 20, 30, 31, 32, 33, 40] - >>> for group in consecutive_groups(iterable): - ... print(list(group)) - [1] - [10, 11, 12] - [20] - [30, 31, 32, 33] - [40] - - For finding runs of adjacent letters, try using the :meth:`index` method - of a string of letters: - - >>> from string import ascii_lowercase - >>> iterable = 'abcdfgilmnop' - >>> ordering = ascii_lowercase.index - >>> for group in consecutive_groups(iterable, ordering): - ... print(list(group)) - ['a', 'b', 'c', 'd'] - ['f', 'g'] - ['i'] - ['l', 'm', 'n', 'o', 'p'] - - Each group of consecutive items is an iterator that shares its source with - *iterable*. When an output group is advanced, the previous group is - no longer available unless its elements are copied (e.g., into a ``list``). - - >>> iterable = [1, 2, 11, 12, 21, 22] - >>> saved_groups = [] - >>> for group in consecutive_groups(iterable): - ... saved_groups.append(list(group)) # Copy group elements - >>> saved_groups - [[1, 2], [11, 12], [21, 22]] - - """ - for k, g in groupby( - enumerate(iterable), key=lambda x: x[0] - ordering(x[1]) - ): - yield map(itemgetter(1), g) - - -def difference(iterable, func=sub, *, initial=None): - """This function is the inverse of :func:`itertools.accumulate`. By default - it will compute the first difference of *iterable* using - :func:`operator.sub`: - - >>> from itertools import accumulate - >>> iterable = accumulate([0, 1, 2, 3, 4]) # produces 0, 1, 3, 6, 10 - >>> list(difference(iterable)) - [0, 1, 2, 3, 4] - - *func* defaults to :func:`operator.sub`, but other functions can be - specified. They will be applied as follows:: - - A, B, C, D, ... --> A, func(B, A), func(C, B), func(D, C), ... - - For example, to do progressive division: - - >>> iterable = [1, 2, 6, 24, 120] - >>> func = lambda x, y: x // y - >>> list(difference(iterable, func)) - [1, 2, 3, 4, 5] - - If the *initial* keyword is set, the first element will be skipped when - computing successive differences. - - >>> it = [10, 11, 13, 16] # from accumulate([1, 2, 3], initial=10) - >>> list(difference(it, initial=10)) - [1, 2, 3] - - """ - a, b = tee(iterable) - try: - first = [next(b)] - except StopIteration: - return iter([]) - - if initial is not None: - first = [] - - return chain(first, starmap(func, zip(b, a))) - - -class SequenceView(Sequence): - """Return a read-only view of the sequence object *target*. - - :class:`SequenceView` objects are analogous to Python's built-in - "dictionary view" types. They provide a dynamic view of a sequence's items, - meaning that when the sequence updates, so does the view. - - >>> seq = ['0', '1', '2'] - >>> view = SequenceView(seq) - >>> view - SequenceView(['0', '1', '2']) - >>> seq.append('3') - >>> view - SequenceView(['0', '1', '2', '3']) - - Sequence views support indexing, slicing, and length queries.
They act - like the underlying sequence, except they don't allow assignment: - - >>> view[1] - '1' - >>> view[1:-1] - ['1', '2'] - >>> len(view) - 4 - - Sequence views are useful as an alternative to copying, as they don't - require (much) extra storage. - - """ - - def __init__(self, target): - if not isinstance(target, Sequence): - raise TypeError - self._target = target - - def __getitem__(self, index): - return self._target[index] - - def __len__(self): - return len(self._target) - - def __repr__(self): - return '{}({})'.format(self.__class__.__name__, repr(self._target)) - - -class seekable: - """Wrap an iterator to allow for seeking backward and forward. This - progressively caches the items in the source iterable so they can be - re-visited. - - Call :meth:`seek` with an index to seek to that position in the source - iterable. - - To "reset" an iterator, seek to ``0``: - - >>> from itertools import count - >>> it = seekable((str(n) for n in count())) - >>> next(it), next(it), next(it) - ('0', '1', '2') - >>> it.seek(0) - >>> next(it), next(it), next(it) - ('0', '1', '2') - >>> next(it) - '3' - - You can also seek forward: - - >>> it = seekable((str(n) for n in range(20))) - >>> it.seek(10) - >>> next(it) - '10' - >>> it.seek(20) # Seeking past the end of the source isn't a problem - >>> list(it) - [] - >>> it.seek(0) # Resetting works even after hitting the end - >>> next(it), next(it), next(it) - ('0', '1', '2') - - Call :meth:`peek` to look ahead one item without advancing the iterator: - - >>> it = seekable('1234') - >>> it.peek() - '1' - >>> list(it) - ['1', '2', '3', '4'] - >>> it.peek(default='empty') - 'empty' - - Before the iterator is at its end, calling :func:`bool` on it will return - ``True``. After it will return ``False``: - - >>> it = seekable('5678') - >>> bool(it) - True - >>> list(it) - ['5', '6', '7', '8'] - >>> bool(it) - False - - You may view the contents of the cache with the :meth:`elements` method. - That returns a :class:`SequenceView`, a view that updates automatically: - - >>> it = seekable((str(n) for n in range(10))) - >>> next(it), next(it), next(it) - ('0', '1', '2') - >>> elements = it.elements() - >>> elements - SequenceView(['0', '1', '2']) - >>> next(it) - '3' - >>> elements - SequenceView(['0', '1', '2', '3']) - - By default, the cache grows as the source iterable progresses, so beware of - wrapping very large or infinite iterables. Supply *maxlen* to limit the - size of the cache (this of course limits how far back you can seek). 
- - >>> from itertools import count - >>> it = seekable((str(n) for n in count()), maxlen=2) - >>> next(it), next(it), next(it), next(it) - ('0', '1', '2', '3') - >>> list(it.elements()) - ['2', '3'] - >>> it.seek(0) - >>> next(it), next(it), next(it), next(it) - ('2', '3', '4', '5') - >>> next(it) - '6' - - """ - - def __init__(self, iterable, maxlen=None): - self._source = iter(iterable) - if maxlen is None: - self._cache = [] - else: - self._cache = deque([], maxlen) - self._index = None - - def __iter__(self): - return self - - def __next__(self): - if self._index is not None: - try: - item = self._cache[self._index] - except IndexError: - self._index = None - else: - self._index += 1 - return item - - item = next(self._source) - self._cache.append(item) - return item - - def __bool__(self): - try: - self.peek() - except StopIteration: - return False - return True - - def peek(self, default=_marker): - try: - peeked = next(self) - except StopIteration: - if default is _marker: - raise - return default - if self._index is None: - self._index = len(self._cache) - self._index -= 1 - return peeked - - def elements(self): - return SequenceView(self._cache) - - def seek(self, index): - self._index = index - remainder = index - len(self._cache) - if remainder > 0: - consume(self, remainder) - - -class run_length: - """ - :func:`run_length.encode` compresses an iterable with run-length encoding. - It yields groups of repeated items with the count of how many times they - were repeated: - - >>> uncompressed = 'abbcccdddd' - >>> list(run_length.encode(uncompressed)) - [('a', 1), ('b', 2), ('c', 3), ('d', 4)] - - :func:`run_length.decode` decompresses an iterable that was previously - compressed with run-length encoding. It yields the items of the - decompressed iterable: - - >>> compressed = [('a', 1), ('b', 2), ('c', 3), ('d', 4)] - >>> list(run_length.decode(compressed)) - ['a', 'b', 'b', 'c', 'c', 'c', 'd', 'd', 'd', 'd'] - - """ - - @staticmethod - def encode(iterable): - return ((k, ilen(g)) for k, g in groupby(iterable)) - - @staticmethod - def decode(iterable): - return chain.from_iterable(repeat(k, n) for k, n in iterable) - - -def exactly_n(iterable, n, predicate=bool): - """Return ``True`` if exactly ``n`` items in the iterable are ``True`` - according to the *predicate* function. - - >>> exactly_n([True, True, False], 2) - True - >>> exactly_n([True, True, False], 1) - False - >>> exactly_n([0, 1, 2, 3, 4, 5], 3, lambda x: x < 3) - True - - The iterable will be advanced until ``n + 1`` truthy items are encountered, - so avoid calling it on infinite iterables. - - """ - return len(take(n + 1, filter(predicate, iterable))) == n - - -def circular_shifts(iterable): - """Return a list of circular shifts of *iterable*. - - >>> circular_shifts(range(4)) - [(0, 1, 2, 3), (1, 2, 3, 0), (2, 3, 0, 1), (3, 0, 1, 2)] - """ - lst = list(iterable) - return take(len(lst), windowed(cycle(lst), len(lst))) - - -def make_decorator(wrapping_func, result_index=0): - """Return a decorator version of *wrapping_func*, which is a function that - modifies an iterable. *result_index* is the position in that function's - signature where the iterable goes. - - This lets you use itertools on the "production end," i.e. at function - definition. This can augment what the function returns without changing the - function's code. - - For example, to produce a decorator version of :func:`chunked`: - - >>> from more_itertools import chunked - >>> chunker = make_decorator(chunked, result_index=0) - >>> @chunker(3) - ... 
def iter_range(n): - ... return iter(range(n)) - ... - >>> list(iter_range(9)) - [[0, 1, 2], [3, 4, 5], [6, 7, 8]] - - To only allow truthy items to be returned: - - >>> truth_serum = make_decorator(filter, result_index=1) - >>> @truth_serum(bool) - ... def boolean_test(): - ... return [0, 1, '', ' ', False, True] - ... - >>> list(boolean_test()) - [1, ' ', True] - - The :func:`peekable` and :func:`seekable` wrappers make for practical - decorators: - - >>> from more_itertools import peekable - >>> peekable_function = make_decorator(peekable) - >>> @peekable_function() - ... def str_range(*args): - ... return (str(x) for x in range(*args)) - ... - >>> it = str_range(1, 20, 2) - >>> next(it), next(it), next(it) - ('1', '3', '5') - >>> it.peek() - '7' - >>> next(it) - '7' - - """ - # See https://sites.google.com/site/bbayles/index/decorator_factory for - # notes on how this works. - def decorator(*wrapping_args, **wrapping_kwargs): - def outer_wrapper(f): - def inner_wrapper(*args, **kwargs): - result = f(*args, **kwargs) - wrapping_args_ = list(wrapping_args) - wrapping_args_.insert(result_index, result) - return wrapping_func(*wrapping_args_, **wrapping_kwargs) - - return inner_wrapper - - return outer_wrapper - - return decorator - - -def map_reduce(iterable, keyfunc, valuefunc=None, reducefunc=None): - """Return a dictionary that maps the items in *iterable* to categories - defined by *keyfunc*, transforms them with *valuefunc*, and - then summarizes them by category with *reducefunc*. - - *valuefunc* defaults to the identity function if it is unspecified. - If *reducefunc* is unspecified, no summarization takes place: - - >>> keyfunc = lambda x: x.upper() - >>> result = map_reduce('abbccc', keyfunc) - >>> sorted(result.items()) - [('A', ['a']), ('B', ['b', 'b']), ('C', ['c', 'c', 'c'])] - - Specifying *valuefunc* transforms the categorized items: - - >>> keyfunc = lambda x: x.upper() - >>> valuefunc = lambda x: 1 - >>> result = map_reduce('abbccc', keyfunc, valuefunc) - >>> sorted(result.items()) - [('A', [1]), ('B', [1, 1]), ('C', [1, 1, 1])] - - Specifying *reducefunc* summarizes the categorized items: - - >>> keyfunc = lambda x: x.upper() - >>> valuefunc = lambda x: 1 - >>> reducefunc = sum - >>> result = map_reduce('abbccc', keyfunc, valuefunc, reducefunc) - >>> sorted(result.items()) - [('A', 1), ('B', 2), ('C', 3)] - - You may want to filter the input iterable before applying the map/reduce - procedure: - - >>> all_items = range(30) - >>> items = [x for x in all_items if 10 <= x <= 20] # Filter - >>> keyfunc = lambda x: x % 2 # Evens map to 0; odds to 1 - >>> categories = map_reduce(items, keyfunc=keyfunc) - >>> sorted(categories.items()) - [(0, [10, 12, 14, 16, 18, 20]), (1, [11, 13, 15, 17, 19])] - >>> summaries = map_reduce(items, keyfunc=keyfunc, reducefunc=sum) - >>> sorted(summaries.items()) - [(0, 90), (1, 75)] - - Note that all items in the iterable are gathered into a list before the - summarization step, which may require significant storage. - - The returned object is a :obj:`collections.defaultdict` with the - ``default_factory`` set to ``None``, such that it behaves like a normal - dictionary. 
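- - That is, looking up a missing key raises ``KeyError`` rather than - inserting a default value: - - >>> result = map_reduce('abbccc', keyfunc=lambda x: x.upper()) - >>> result['Z'] # doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ... - KeyError: 'Z'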
- - """ - valuefunc = (lambda x: x) if (valuefunc is None) else valuefunc - - ret = defaultdict(list) - for item in iterable: - key = keyfunc(item) - value = valuefunc(item) - ret[key].append(value) - - if reducefunc is not None: - for key, value_list in ret.items(): - ret[key] = reducefunc(value_list) - - ret.default_factory = None - return ret - - -def rlocate(iterable, pred=bool, window_size=None): - """Yield the index of each item in *iterable* for which *pred* returns - ``True``, starting from the right and moving left. - - *pred* defaults to :func:`bool`, which will select truthy items: - - >>> list(rlocate([0, 1, 1, 0, 1, 0, 0])) # Truthy at 1, 2, and 4 - [4, 2, 1] - - Set *pred* to a custom function to, e.g., find the indexes for a particular - item: - - >>> iterable = iter('abcb') - >>> pred = lambda x: x == 'b' - >>> list(rlocate(iterable, pred)) - [3, 1] - - If *window_size* is given, then the *pred* function will be called with - that many items. This enables searching for sub-sequences: - - >>> iterable = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3] - >>> pred = lambda *args: args == (1, 2, 3) - >>> list(rlocate(iterable, pred=pred, window_size=3)) - [9, 5, 1] - - Beware, this function won't return anything for infinite iterables. - If *iterable* is reversible, ``rlocate`` will reverse it and search from - the right. Otherwise, it will search from the left and return the results - in reverse order. - - See :func:`locate` for other example applications. - - """ - if window_size is None: - try: - len_iter = len(iterable) - return (len_iter - i - 1 for i in locate(reversed(iterable), pred)) - except TypeError: - pass - - return reversed(list(locate(iterable, pred, window_size))) - - -def replace(iterable, pred, substitutes, count=None, window_size=1): - """Yield the items from *iterable*, replacing the items for which *pred* - returns ``True`` with the items from the iterable *substitutes*. - - >>> iterable = [1, 1, 0, 1, 1, 0, 1, 1] - >>> pred = lambda x: x == 0 - >>> substitutes = (2, 3) - >>> list(replace(iterable, pred, substitutes)) - [1, 1, 2, 3, 1, 1, 2, 3, 1, 1] - - If *count* is given, the number of replacements will be limited: - - >>> iterable = [1, 1, 0, 1, 1, 0, 1, 1, 0] - >>> pred = lambda x: x == 0 - >>> substitutes = [None] - >>> list(replace(iterable, pred, substitutes, count=2)) - [1, 1, None, 1, 1, None, 1, 1, 0] - - Use *window_size* to control the number of items passed as arguments to - *pred*. This allows for locating and replacing subsequences. - - >>> iterable = [0, 1, 2, 5, 0, 1, 2, 5] - >>> window_size = 3 - >>> pred = lambda *args: args == (0, 1, 2) # 3 items passed to pred - >>> substitutes = [3, 4] # Splice in these items - >>> list(replace(iterable, pred, substitutes, window_size=window_size)) - [3, 4, 5, 3, 4, 5] - - """ - if window_size < 1: - raise ValueError('window_size must be at least 1') - - # Save the substitutes iterable, since it's used more than once - substitutes = tuple(substitutes) - - # Add padding such that the number of windows matches the length of the - # iterable - it = chain(iterable, [_marker] * (window_size - 1)) - windows = windowed(it, window_size) - - n = 0 - for w in windows: - # If the current window matches our predicate (and we haven't hit - # our maximum number of replacements), splice in the substitutes - # and then consume the following windows that overlap with this one. - # For example, if the iterable is (0, 1, 2, 3, 4...) - # and the window size is 2, we have (0, 1), (1, 2), (2, 3)...
- # If the predicate matches on (0, 1), we need to zap (0, 1) and (1, 2) - if pred(*w): - if (count is None) or (n < count): - n += 1 - yield from substitutes - consume(windows, window_size - 1) - continue - - # If there was no match (or we've reached the replacement limit), - # yield the first item from the window. - if w and (w[0] is not _marker): - yield w[0] - - -def partitions(iterable): - """Yield all possible order-preserving partitions of *iterable*. - - >>> iterable = 'abc' - >>> for part in partitions(iterable): - ... print([''.join(p) for p in part]) - ['abc'] - ['a', 'bc'] - ['ab', 'c'] - ['a', 'b', 'c'] - - This is unrelated to :func:`partition`. - - """ - sequence = list(iterable) - n = len(sequence) - for i in powerset(range(1, n)): - yield [sequence[i:j] for i, j in zip((0,) + i, i + (n,))] - - -def set_partitions(iterable, k=None): - """ - Yield the set partitions of *iterable* into *k* parts. Set partitions are - not order-preserving. - - >>> iterable = 'abc' - >>> for part in set_partitions(iterable, 2): - ... print([''.join(p) for p in part]) - ['a', 'bc'] - ['ab', 'c'] - ['b', 'ac'] - - - If *k* is not given, every set partition is generated. - - >>> iterable = 'abc' - >>> for part in set_partitions(iterable): - ... print([''.join(p) for p in part]) - ['abc'] - ['a', 'bc'] - ['ab', 'c'] - ['b', 'ac'] - ['a', 'b', 'c'] - - """ - L = list(iterable) - n = len(L) - if k is not None: - if k < 1: - raise ValueError( - "Can't partition in a negative or zero number of groups" - ) - elif k > n: - return - - def set_partitions_helper(L, k): - n = len(L) - if k == 1: - yield [L] - elif n == k: - yield [[s] for s in L] - else: - e, *M = L - for p in set_partitions_helper(M, k - 1): - yield [[e], *p] - for p in set_partitions_helper(M, k): - for i in range(len(p)): - yield p[:i] + [[e] + p[i]] + p[i + 1 :] - - if k is None: - for k in range(1, n + 1): - yield from set_partitions_helper(L, k) - else: - yield from set_partitions_helper(L, k) - - -class time_limited: - """ - Yield items from *iterable* until *limit_seconds* have passed. - If the time limit expires before all items have been yielded, the - ``timed_out`` parameter will be set to ``True``. - - >>> from time import sleep - >>> def generator(): - ... yield 1 - ... yield 2 - ... sleep(0.2) - ... yield 3 - >>> iterable = time_limited(0.1, generator()) - >>> list(iterable) - [1, 2] - >>> iterable.timed_out - True - - Note that the time is checked before each item is yielded, and iteration - stops if the time elapsed is greater than *limit_seconds*. If your time - limit is 1 second, but it takes 2 seconds to generate the first item from - the iterable, the function will run for 2 seconds and not yield anything. - - """ - - def __init__(self, limit_seconds, iterable): - if limit_seconds < 0: - raise ValueError('limit_seconds must be positive') - self.limit_seconds = limit_seconds - self._iterable = iter(iterable) - self._start_time = monotonic() - self.timed_out = False - - def __iter__(self): - return self - - def __next__(self): - item = next(self._iterable) - if monotonic() - self._start_time > self.limit_seconds: - self.timed_out = True - raise StopIteration - - return item - - -def only(iterable, default=None, too_long=None): - """If *iterable* has only one item, return it. - If it has zero items, return *default*. - If it has more than one item, raise the exception given by *too_long*, - which is ``ValueError`` by default. 
- - >>> only([], default='missing') - 'missing' - >>> only([1]) - 1 - >>> only([1, 2]) # doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ... - ValueError: Expected exactly one item in iterable, but got 1, 2, - and perhaps more. - >>> only([1, 2], too_long=TypeError) # doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ... - TypeError - - Note that :func:`only` attempts to advance *iterable* twice to ensure there - is only one item. See :func:`spy` or :func:`peekable` to check - iterable contents less destructively. - """ - it = iter(iterable) - first_value = next(it, default) - - try: - second_value = next(it) - except StopIteration: - pass - else: - msg = ( - 'Expected exactly one item in iterable, but got {!r}, {!r}, ' - 'and perhaps more.'.format(first_value, second_value) - ) - raise too_long or ValueError(msg) - - return first_value - - -def ichunked(iterable, n): - """Break *iterable* into sub-iterables with *n* elements each. - :func:`ichunked` is like :func:`chunked`, but it yields iterables - instead of lists. - - If the sub-iterables are read in order, the elements of *iterable* - won't be stored in memory. - If they are read out of order, :func:`itertools.tee` is used to cache - elements as necessary. - - >>> from itertools import count - >>> all_chunks = ichunked(count(), 4) - >>> c_1, c_2, c_3 = next(all_chunks), next(all_chunks), next(all_chunks) - >>> list(c_2) # c_1's elements have been cached; c_3's haven't been - [4, 5, 6, 7] - >>> list(c_1) - [0, 1, 2, 3] - >>> list(c_3) - [8, 9, 10, 11] - - """ - source = iter(iterable) - - while True: - # Check to see whether we're at the end of the source iterable - item = next(source, _marker) - if item is _marker: - return - - # Clone the source and yield an n-length slice - source, it = tee(chain([item], source)) - yield islice(it, n) - - # Advance the source iterable - consume(source, n) - - -def distinct_combinations(iterable, r): - """Yield the distinct combinations of *r* items taken from *iterable*. - - >>> list(distinct_combinations([0, 0, 1], 2)) - [(0, 0), (0, 1)] - - Equivalent to ``set(combinations(iterable))``, except duplicates are not - generated and thrown away. For larger input sequences this is much more - efficient. - - """ - if r < 0: - raise ValueError('r must be non-negative') - elif r == 0: - yield () - return - pool = tuple(iterable) - generators = [unique_everseen(enumerate(pool), key=itemgetter(1))] - current_combo = [None] * r - level = 0 - while generators: - try: - cur_idx, p = next(generators[-1]) - except StopIteration: - generators.pop() - level -= 1 - continue - current_combo[level] = p - if level + 1 == r: - yield tuple(current_combo) - else: - generators.append( - unique_everseen( - enumerate(pool[cur_idx + 1 :], cur_idx + 1), - key=itemgetter(1), - ) - ) - level += 1 - - -def filter_except(validator, iterable, *exceptions): - """Yield the items from *iterable* for which the *validator* function does - not raise one of the specified *exceptions*. - - *validator* is called for each item in *iterable*. - It should be a function that accepts one argument and raises an exception - if that item is not valid. - - >>> iterable = ['1', '2', 'three', '4', None] - >>> list(filter_except(int, iterable, ValueError, TypeError)) - ['1', '2', '4'] - - If an exception other than one given by *exceptions* is raised by - *validator*, it is raised like normal.
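- - For example, the ``TypeError`` raised by ``int(None)`` is not listed here - and so it propagates: - - >>> list(filter_except(int, ['1', None], ValueError)) # doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ... - TypeError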
- """ - for item in iterable: - try: - validator(item) - except exceptions: - pass - else: - yield item - - -def map_except(function, iterable, *exceptions): - """Transform each item from *iterable* with *function* and yield the - result, unless *function* raises one of the specified *exceptions*. - - *function* is called to transform each item in *iterable*. - It should accept one argument. - - >>> iterable = ['1', '2', 'three', '4', None] - >>> list(map_except(int, iterable, ValueError, TypeError)) - [1, 2, 4] - - If an exception other than one given by *exceptions* is raised by - *function*, it is raised like normal. - """ - for item in iterable: - try: - yield function(item) - except exceptions: - pass - - -def map_if(iterable, pred, func, func_else=lambda x: x): - """Evaluate each item from *iterable* using *pred*. If the result is - equivalent to ``True``, transform the item with *func* and yield it. - Otherwise, transform the item with *func_else* and yield it. - - *pred*, *func*, and *func_else* should each be functions that accept - one argument. By default, *func_else* is the identity function. - - >>> from math import sqrt - >>> iterable = list(range(-5, 5)) - >>> iterable - [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4] - >>> list(map_if(iterable, lambda x: x > 3, lambda x: 'toobig')) - [-5, -4, -3, -2, -1, 0, 1, 2, 3, 'toobig'] - >>> list(map_if(iterable, lambda x: x >= 0, - ... lambda x: f'{sqrt(x):.2f}', lambda x: None)) - [None, None, None, None, None, '0.00', '1.00', '1.41', '1.73', '2.00'] - """ - for item in iterable: - yield func(item) if pred(item) else func_else(item) - - -def _sample_unweighted(iterable, k): - # Implementation of "Algorithm L" from the 1994 paper by Kim-Hung Li: - # "Reservoir-Sampling Algorithms of Time Complexity O(n(1+log(N/n)))". - - # Fill up the reservoir (collection of samples) with the first `k` samples - reservoir = take(k, iterable) - - # Generate random number that's the largest in a sample of k U(0,1) numbers - # Largest order statistic: https://en.wikipedia.org/wiki/Order_statistic - W = exp(log(random()) / k) - - # The number of elements to skip before changing the reservoir is a random - # number with a geometric distribution. Sample it using random() and logs. - next_index = k + floor(log(random()) / log(1 - W)) - - for index, element in enumerate(iterable, k): - - if index == next_index: - reservoir[randrange(k)] = element - # The new W is the largest in a sample of k U(0, `old_W`) numbers - W *= exp(log(random()) / k) - next_index += floor(log(random()) / log(1 - W)) + 1 - - return reservoir - - -def _sample_weighted(iterable, k, weights): - # Implementation of "A-ExpJ" from the 2006 paper by Efraimidis et al. : - # "Weighted random sampling with a reservoir". - - # Log-transform for numerical stability for weights that are small/large - weight_keys = (log(random()) / weight for weight in weights) - - # Fill up the reservoir (collection of samples) with the first `k` - # weight-keys and elements, then heapify the list. - reservoir = take(k, zip(weight_keys, iterable)) - heapify(reservoir) - - # The number of jumps before changing the reservoir is a random variable - # with an exponential distribution. Sample it using random() and logs. - smallest_weight_key, _ = reservoir[0] - weights_to_skip = log(random()) / smallest_weight_key - - for weight, element in zip(weights, iterable): - if weight >= weights_to_skip: - # The notation here is consistent with the paper, but we store - # the weight-keys in log-space for better numerical stability. 
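- # t_w is the smallest stored key, recovered from log-space and raised to - # the current item's weight; the replacement key is drawn from U(t_w, 1) - # and converted back to log-space.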
- smallest_weight_key, _ = reservoir[0] - t_w = exp(weight * smallest_weight_key) - r_2 = uniform(t_w, 1) # generate U(t_w, 1) - weight_key = log(r_2) / weight - heapreplace(reservoir, (weight_key, element)) - smallest_weight_key, _ = reservoir[0] - weights_to_skip = log(random()) / smallest_weight_key - else: - weights_to_skip -= weight - - # Equivalent to [element for weight_key, element in sorted(reservoir)] - return [heappop(reservoir)[1] for _ in range(k)] - - -def sample(iterable, k, weights=None): - """Return a *k*-length list of elements chosen (without replacement) - from the *iterable*. Like :func:`random.sample`, but works on iterables - of unknown length. - - >>> iterable = range(100) - >>> sample(iterable, 5) # doctest: +SKIP - [81, 60, 96, 16, 4] - - An iterable with *weights* may also be given: - - >>> iterable = range(100) - >>> weights = (i * i + 1 for i in range(100)) - >>> sampled = sample(iterable, 5, weights=weights) # doctest: +SKIP - [79, 67, 74, 66, 78] - - The algorithm can also be used to generate weighted random permutations. - The relative weight of each item determines the probability that it - appears late in the permutation. - - >>> data = "abcdefgh" - >>> weights = range(1, len(data) + 1) - >>> sample(data, k=len(data), weights=weights) # doctest: +SKIP - ['c', 'a', 'b', 'e', 'g', 'd', 'h', 'f'] - """ - if k == 0: - return [] - - iterable = iter(iterable) - if weights is None: - return _sample_unweighted(iterable, k) - else: - weights = iter(weights) - return _sample_weighted(iterable, k, weights) - - -def is_sorted(iterable, key=None, reverse=False, strict=False): - """Returns ``True`` if the items of iterable are in sorted order, and - ``False`` otherwise. *key* and *reverse* have the same meaning that they do - in the built-in :func:`sorted` function. - - >>> is_sorted(['1', '2', '3', '4', '5'], key=int) - True - >>> is_sorted([5, 4, 3, 1, 2], reverse=True) - False - - If *strict*, tests for strict sorting, that is, returns ``False`` if equal - elements are found: - - >>> is_sorted([1, 2, 2]) - True - >>> is_sorted([1, 2, 2], strict=True) - False - - The function returns ``False`` after encountering the first out-of-order - item. If there are no out-of-order items, the iterable is exhausted. - """ - - compare = (le if reverse else ge) if strict else (lt if reverse else gt) - it = iterable if key is None else map(key, iterable) - return not any(starmap(compare, pairwise(it))) - - -class AbortThread(BaseException): - pass - - -class callback_iter: - """Convert a function that uses callbacks to an iterator. - - Let *func* be a function that takes a `callback` keyword argument. - For example: - - >>> def func(callback=None): - ... for i, c in [(1, 'a'), (2, 'b'), (3, 'c')]: - ... if callback: - ... callback(i, c) - ... return 4 - - - Use ``with callback_iter(func)`` to get an iterator over the parameters - that are delivered to the callback. - - >>> with callback_iter(func) as it: - ... for args, kwargs in it: - ... print(args) - (1, 'a') - (2, 'b') - (3, 'c') - - The function will be called in a background thread. The ``done`` property - indicates whether it has completed execution. - - >>> it.done - True - - If it completes successfully, its return value will be available - in the ``result`` property. - - >>> it.result - 4 - - Notes: - - * If the function uses some keyword argument besides ``callback``, supply - *callback_kwd*. - * If it finished executing, but raised an exception, accessing the - ``result`` property will raise the same exception. 
- * If it hasn't finished executing, accessing the ``result`` - property from within the ``with`` block will raise ``RuntimeError``. - * If it hasn't finished executing, accessing the ``result`` property from - outside the ``with`` block will raise a - ``more_itertools.AbortThread`` exception. - * Provide *wait_seconds* to adjust how frequently it is polled for - output. - - """ - - def __init__(self, func, callback_kwd='callback', wait_seconds=0.1): - from concurrent.futures import ThreadPoolExecutor - - self._func = func - self._callback_kwd = callback_kwd - self._aborted = False - self._future = None - self._wait_seconds = wait_seconds - self._executor = ThreadPoolExecutor(max_workers=1) - self._iterator = self._reader() - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, traceback): - self._aborted = True - self._executor.shutdown() - - def __iter__(self): - return self - - def __next__(self): - return next(self._iterator) - - @property - def done(self): - if self._future is None: - return False - return self._future.done() - - @property - def result(self): - if not self.done: - raise RuntimeError('Function has not yet completed') - - return self._future.result() - - def _reader(self): - q = Queue() - - def callback(*args, **kwargs): - if self._aborted: - raise AbortThread('canceled by user') - - q.put((args, kwargs)) - - self._future = self._executor.submit( - self._func, **{self._callback_kwd: callback} - ) - - while True: - try: - item = q.get(timeout=self._wait_seconds) - except Empty: - pass - else: - q.task_done() - yield item - - if self._future.done(): - break - - remaining = [] - while True: - try: - item = q.get_nowait() - except Empty: - break - else: - q.task_done() - remaining.append(item) - q.join() - yield from remaining - - -def windowed_complete(iterable, n): - """ - Yield ``(beginning, middle, end)`` tuples, where: - - * Each ``middle`` has *n* items from *iterable* - * Each ``beginning`` has the items before the ones in ``middle`` - * Each ``end`` has the items after the ones in ``middle`` - - >>> iterable = range(7) - >>> n = 3 - >>> for beginning, middle, end in windowed_complete(iterable, n): - ... print(beginning, middle, end) - () (0, 1, 2) (3, 4, 5, 6) - (0,) (1, 2, 3) (4, 5, 6) - (0, 1) (2, 3, 4) (5, 6) - (0, 1, 2) (3, 4, 5) (6,) - (0, 1, 2, 3) (4, 5, 6) () - - Note that *n* must be at least 0 and at most equal to the length of - *iterable*. - - This function will exhaust the iterable and may require significant - storage. - """ - if n < 0: - raise ValueError('n must be >= 0') - - seq = tuple(iterable) - size = len(seq) - - if n > size: - raise ValueError('n must be <= len(seq)') - - for i in range(size - n + 1): - beginning = seq[:i] - middle = seq[i : i + n] - end = seq[i + n :] - yield beginning, middle, end - - -def all_unique(iterable, key=None): - """ - Returns ``True`` if all the elements of *iterable* are unique (no two - elements are equal). - - >>> all_unique('ABCB') - False - - If a *key* function is specified, it will be used to make comparisons. - - >>> all_unique('ABCb') - True - >>> all_unique('ABCb', str.lower) - False - - The function returns as soon as the first non-unique element is - encountered. Iterables with a mix of hashable and unhashable items can - be used, but the function will be slower for unhashable items.
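- - For example: - - >>> all_unique([[1, 2], [3, 4], [1, 2]]) # unhashable items - False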
- """ - seenset = set() - seenset_add = seenset.add - seenlist = [] - seenlist_add = seenlist.append - for element in map(key, iterable) if key else iterable: - try: - if element in seenset: - return False - seenset_add(element) - except TypeError: - if element in seenlist: - return False - seenlist_add(element) - return True - - -def nth_product(index, *args): - """Equivalent to ``list(product(*args))[index]``. - - The products of *args* can be ordered lexicographically. - :func:`nth_product` computes the product at sort position *index* without - computing the previous products. - - >>> nth_product(8, range(2), range(2), range(2), range(2)) - (1, 0, 0, 0) - - ``IndexError`` will be raised if the given *index* is invalid. - """ - pools = list(map(tuple, reversed(args))) - ns = list(map(len, pools)) - - c = reduce(mul, ns) - - if index < 0: - index += c - - if not 0 <= index < c: - raise IndexError - - result = [] - for pool, n in zip(pools, ns): - result.append(pool[index % n]) - index //= n - - return tuple(reversed(result)) - - -def nth_permutation(iterable, r, index): - """Equivalent to ``list(permutations(iterable, r))[index]``` - - The subsequences of *iterable* that are of length *r* where order is - important can be ordered lexicographically. :func:`nth_permutation` - computes the subsequence at sort position *index* directly, without - computing the previous subsequences. - - >>> nth_permutation('ghijk', 2, 5) - ('h', 'i') - - ``ValueError`` will be raised If *r* is negative or greater than the length - of *iterable*. - ``IndexError`` will be raised if the given *index* is invalid. - """ - pool = list(iterable) - n = len(pool) - - if r is None or r == n: - r, c = n, factorial(n) - elif not 0 <= r < n: - raise ValueError - else: - c = factorial(n) // factorial(n - r) - - if index < 0: - index += c - - if not 0 <= index < c: - raise IndexError - - if c == 0: - return tuple() - - result = [0] * r - q = index * factorial(n) // c if r < n else index - for d in range(1, n + 1): - q, i = divmod(q, d) - if 0 <= n - d < r: - result[n - d] = i - if q == 0: - break - - return tuple(map(pool.pop, result)) - - -def value_chain(*args): - """Yield all arguments passed to the function in the same order in which - they were passed. If an argument itself is iterable then iterate over its - values. - - >>> list(value_chain(1, 2, 3, [4, 5, 6])) - [1, 2, 3, 4, 5, 6] - - Binary and text strings are not considered iterable and are emitted - as-is: - - >>> list(value_chain('12', '34', ['56', '78'])) - ['12', '34', '56', '78'] - - - Multiple levels of nesting are not flattened. - - """ - for value in args: - if isinstance(value, (str, bytes)): - yield value - continue - try: - yield from value - except TypeError: - yield value - - -def product_index(element, *args): - """Equivalent to ``list(product(*args)).index(element)`` - - The products of *args* can be ordered lexicographically. - :func:`product_index` computes the first index of *element* without - computing the previous products. - - >>> product_index([8, 2], range(10), range(5)) - 42 - - ``ValueError`` will be raised if the given *element* isn't in the product - of *args*. 
- """ - index = 0 - - for x, pool in zip_longest(element, args, fillvalue=_marker): - if x is _marker or pool is _marker: - raise ValueError('element is not a product of args') - - pool = tuple(pool) - index = index * len(pool) + pool.index(x) - - return index - - -def combination_index(element, iterable): - """Equivalent to ``list(combinations(iterable, r)).index(element)`` - - The subsequences of *iterable* that are of length *r* can be ordered - lexicographically. :func:`combination_index` computes the index of the - first *element*, without computing the previous combinations. - - >>> combination_index('adf', 'abcdefg') - 10 - - ``ValueError`` will be raised if the given *element* isn't one of the - combinations of *iterable*. - """ - element = enumerate(element) - k, y = next(element, (None, None)) - if k is None: - return 0 - - indexes = [] - pool = enumerate(iterable) - for n, x in pool: - if x == y: - indexes.append(n) - tmp, y = next(element, (None, None)) - if tmp is None: - break - else: - k = tmp - else: - raise ValueError('element is not a combination of iterable') - - n, _ = last(pool, default=(n, None)) - - # Python versiosn below 3.8 don't have math.comb - index = 1 - for i, j in enumerate(reversed(indexes), start=1): - j = n - j - if i <= j: - index += factorial(j) // (factorial(i) * factorial(j - i)) - - return factorial(n + 1) // (factorial(k + 1) * factorial(n - k)) - index - - -def permutation_index(element, iterable): - """Equivalent to ``list(permutations(iterable, r)).index(element)``` - - The subsequences of *iterable* that are of length *r* where order is - important can be ordered lexicographically. :func:`permutation_index` - computes the index of the first *element* directly, without computing - the previous permutations. - - >>> permutation_index([1, 3, 2], range(5)) - 19 - - ``ValueError`` will be raised if the given *element* isn't one of the - permutations of *iterable*. - """ - index = 0 - pool = list(iterable) - for i, x in zip(range(len(pool), -1, -1), element): - r = pool.index(x) - index = index * i + r - del pool[r] - - return index - - -class countable: - """Wrap *iterable* and keep a count of how many items have been consumed. - - The ``items_seen`` attribute starts at ``0`` and increments as the iterable - is consumed: - - >>> iterable = map(str, range(10)) - >>> it = countable(iterable) - >>> it.items_seen - 0 - >>> next(it), next(it) - ('0', '1') - >>> list(it) - ['2', '3', '4', '5', '6', '7', '8', '9'] - >>> it.items_seen - 10 - """ - - def __init__(self, iterable): - self._it = iter(iterable) - self.items_seen = 0 - - def __iter__(self): - return self - - def __next__(self): - item = next(self._it) - self.items_seen += 1 - - return item - - -def chunked_even(iterable, n): - """Break *iterable* into lists of approximately length *n*. - Items are distributed such the lengths of the lists differ by at most - 1 item. 
-
-    >>> iterable = [1, 2, 3, 4, 5, 6, 7]
-    >>> n = 3
-    >>> list(chunked_even(iterable, n))  # List lengths: 3, 2, 2
-    [[1, 2, 3], [4, 5], [6, 7]]
-    >>> list(chunked(iterable, n))  # List lengths: 3, 3, 1
-    [[1, 2, 3], [4, 5, 6], [7]]
-
-    """
-
-    len_method = getattr(iterable, '__len__', None)
-
-    if len_method is None:
-        return _chunked_even_online(iterable, n)
-    else:
-        return _chunked_even_finite(iterable, len_method(), n)
-
-
-def _chunked_even_online(iterable, n):
-    buffer = []
-    maxbuf = n + (n - 2) * (n - 1)
-    for x in iterable:
-        buffer.append(x)
-        if len(buffer) == maxbuf:
-            yield buffer[:n]
-            buffer = buffer[n:]
-    yield from _chunked_even_finite(buffer, len(buffer), n)
-
-
-def _chunked_even_finite(iterable, N, n):
-    if N < 1:
-        return
-
-    # Lists are either size `full_size <= n` or `partial_size = full_size - 1`
-    q, r = divmod(N, n)
-    num_lists = q + (1 if r > 0 else 0)
-    q, r = divmod(N, num_lists)
-    full_size = q + (1 if r > 0 else 0)
-    partial_size = full_size - 1
-    num_full = N - partial_size * num_lists
-    num_partial = num_lists - num_full
-
-    buffer = []
-    iterator = iter(iterable)
-
-    # Yield num_full lists of full_size
-    for x in iterator:
-        buffer.append(x)
-        if len(buffer) == full_size:
-            yield buffer
-            buffer = []
-            num_full -= 1
-            if num_full <= 0:
-                break
-
-    # Yield num_partial lists of partial_size
-    for x in iterator:
-        buffer.append(x)
-        if len(buffer) == partial_size:
-            yield buffer
-            buffer = []
-            num_partial -= 1
-
-
-def zip_broadcast(*objects, scalar_types=(str, bytes), strict=False):
-    """A version of :func:`zip` that "broadcasts" any scalar
-    (i.e., non-iterable) items into output tuples.
-
-    >>> iterable_1 = [1, 2, 3]
-    >>> iterable_2 = ['a', 'b', 'c']
-    >>> scalar = '_'
-    >>> list(zip_broadcast(iterable_1, iterable_2, scalar))
-    [(1, 'a', '_'), (2, 'b', '_'), (3, 'c', '_')]
-
-    The *scalar_types* keyword argument determines what types are considered
-    scalar. It is set to ``(str, bytes)`` by default. Set it to ``None`` to
-    treat strings and byte strings as iterable:
-
-    >>> list(zip_broadcast('abc', 0, 'xyz', scalar_types=None))
-    [('a', 0, 'x'), ('b', 0, 'y'), ('c', 0, 'z')]
-
-    If the *strict* keyword argument is ``True``, then
-    ``UnequalIterablesError`` will be raised if any of the iterables have
-    different lengths.
-    """
-
-    def is_scalar(obj):
-        if scalar_types and isinstance(obj, scalar_types):
-            return True
-        try:
-            iter(obj)
-        except TypeError:
-            return True
-        else:
-            return False
-
-    size = len(objects)
-    if not size:
-        return
-
-    iterables, iterable_positions = [], []
-    scalars, scalar_positions = [], []
-    for i, obj in enumerate(objects):
-        if is_scalar(obj):
-            scalars.append(obj)
-            scalar_positions.append(i)
-        else:
-            iterables.append(iter(obj))
-            iterable_positions.append(i)
-
-    if len(scalars) == size:
-        yield tuple(objects)
-        return
-
-    zipper = _zip_equal if strict else zip
-    for item in zipper(*iterables):
-        new_item = [None] * size
-
-        for i, elem in zip(iterable_positions, item):
-            new_item[i] = elem
-
-        for i, elem in zip(scalar_positions, scalars):
-            new_item[i] = elem
-
-        yield tuple(new_item)
-
-
-def unique_in_window(iterable, n, key=None):
-    """Yield the items from *iterable* that haven't been seen recently.
-    *n* is the size of the lookback window.
-
-    >>> iterable = [0, 1, 0, 2, 3, 0]
-    >>> n = 3
-    >>> list(unique_in_window(iterable, n))
-    [0, 1, 2, 3, 0]
-
-    The *key* function, if provided, will be used to determine uniqueness:
-
-    >>> list(unique_in_window('abAcda', 3, key=lambda x: x.lower()))
-    ['a', 'b', 'c', 'd', 'a']
-
-    The items in *iterable* must be hashable.
-
-    """
-    if n <= 0:
-        raise ValueError('n must be greater than 0')
-
-    window = deque(maxlen=n)
-    uniques = set()
-    use_key = key is not None
-
-    for item in iterable:
-        k = key(item) if use_key else item
-        if k in uniques:
-            continue
-
-        if len(uniques) == n:
-            uniques.discard(window[0])
-
-        uniques.add(k)
-        window.append(k)
-
-        yield item
-
-
-def duplicates_everseen(iterable, key=None):
-    """Yield duplicate elements after their first appearance.
-
-    >>> list(duplicates_everseen('mississippi'))
-    ['s', 'i', 's', 's', 'i', 'p', 'i']
-    >>> list(duplicates_everseen('AaaBbbCccAaa', str.lower))
-    ['a', 'a', 'b', 'b', 'c', 'c', 'A', 'a', 'a']
-
-    This function is analogous to :func:`unique_everseen` and is subject to
-    the same performance considerations.
-
-    """
-    seen_set = set()
-    seen_list = []
-    use_key = key is not None
-
-    for element in iterable:
-        k = key(element) if use_key else element
-        try:
-            if k not in seen_set:
-                seen_set.add(k)
-            else:
-                yield element
-        except TypeError:
-            if k not in seen_list:
-                seen_list.append(k)
-            else:
-                yield element
-
-
-def duplicates_justseen(iterable, key=None):
-    """Yields serially-duplicate elements after their first appearance.
-
-    >>> list(duplicates_justseen('mississippi'))
-    ['s', 's', 'p']
-    >>> list(duplicates_justseen('AaaBbbCccAaa', str.lower))
-    ['a', 'a', 'b', 'b', 'c', 'c', 'a', 'a']
-
-    This function is analogous to :func:`unique_justseen`.
-
-    """
-    return flatten(
-        map(
-            lambda group_tuple: islice_extended(group_tuple[1])[1:],
-            groupby(iterable, key),
-        )
-    )
-
-
-def minmax(iterable_or_value, *others, key=None, default=_marker):
-    """Returns both the smallest and largest items in an iterable
-    or the largest of two or more arguments.
-
-    >>> minmax([3, 1, 5])
-    (1, 5)
-
-    >>> minmax(4, 2, 6)
-    (2, 6)
-
-    If a *key* function is provided, it will be used to transform the input
-    items for comparison.
-
-    >>> minmax([5, 30], key=str)  # '30' sorts before '5'
-    (30, 5)
-
-    If a *default* value is provided, it will be returned if there are no
-    input items.
-
-    >>> minmax([], default=(0, 0))
-    (0, 0)
-
-    Otherwise ``ValueError`` is raised.
-
-    This function is based on a recipe by Raymond Hettinger and takes care
-    to minimize the number of comparisons performed.
-    """
-    iterable = (iterable_or_value, *others) if others else iterable_or_value
-
-    it = iter(iterable)
-
-    try:
-        lo = hi = next(it)
-    except StopIteration as e:
-        if default is _marker:
-            raise ValueError(
-                '`minmax()` argument is an empty iterable. '
-                'Provide a `default` value to suppress this error.'
-            ) from e
-        return default
-
-    # Different branches depending on the presence of key. This saves a lot
-    # of unimportant copies which would significantly slow down the
-    # "key=None" branch.
-    if key is None:
-        for x, y in zip_longest(it, it, fillvalue=lo):
-            if y < x:
-                x, y = y, x
-            if x < lo:
-                lo = x
-            if hi < y:
-                hi = y
-
-    else:
-        lo_key = hi_key = key(lo)
-
-        for x, y in zip_longest(it, it, fillvalue=lo):
-
-            x_key, y_key = key(x), key(y)
-
-            if y_key < x_key:
-                x, y, x_key, y_key = y, x, y_key, x_key
-            if x_key < lo_key:
-                lo, lo_key = x, x_key
-            if hi_key < y_key:
-                hi, hi_key = y, y_key
-
-    return lo, hi
diff --git a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/more_itertools/recipes.py b/venv/lib/python3.10/site-packages/pkg_resources/_vendor/more_itertools/recipes.py
deleted file mode 100644
index a259642..0000000
--- a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/more_itertools/recipes.py
+++ /dev/null
@@ -1,698 +0,0 @@
-"""Imported from the recipes section of the itertools documentation.
-
-All functions taken from the recipes section of the itertools library docs
-[1]_.
-Some backward-compatible usability improvements have been made.
-
-.. [1] http://docs.python.org/library/itertools.html#recipes
-
-"""
-import warnings
-from collections import deque
-from itertools import (
-    chain,
-    combinations,
-    count,
-    cycle,
-    groupby,
-    islice,
-    repeat,
-    starmap,
-    tee,
-    zip_longest,
-)
-import operator
-from random import randrange, sample, choice
-
-__all__ = [
-    'all_equal',
-    'before_and_after',
-    'consume',
-    'convolve',
-    'dotproduct',
-    'first_true',
-    'flatten',
-    'grouper',
-    'iter_except',
-    'ncycles',
-    'nth',
-    'nth_combination',
-    'padnone',
-    'pad_none',
-    'pairwise',
-    'partition',
-    'powerset',
-    'prepend',
-    'quantify',
-    'random_combination_with_replacement',
-    'random_combination',
-    'random_permutation',
-    'random_product',
-    'repeatfunc',
-    'roundrobin',
-    'sliding_window',
-    'tabulate',
-    'tail',
-    'take',
-    'triplewise',
-    'unique_everseen',
-    'unique_justseen',
-]
-
-
-def take(n, iterable):
-    """Return first *n* items of the iterable as a list.
-
-    >>> take(3, range(10))
-    [0, 1, 2]
-
-    If there are fewer than *n* items in the iterable, all of them are
-    returned.
-
-    >>> take(10, range(3))
-    [0, 1, 2]
-
-    """
-    return list(islice(iterable, n))
-
-
-def tabulate(function, start=0):
-    """Return an iterator over the results of ``func(start)``,
-    ``func(start + 1)``, ``func(start + 2)``...
-
-    *func* should be a function that accepts one integer argument.
-
-    If *start* is not specified it defaults to 0. It will be incremented each
-    time the iterator is advanced.
-
-    >>> square = lambda x: x ** 2
-    >>> iterator = tabulate(square, -3)
-    >>> take(4, iterator)
-    [9, 4, 1, 0]
-
-    """
-    return map(function, count(start))
-
-
-def tail(n, iterable):
-    """Return an iterator over the last *n* items of *iterable*.
-
-    >>> t = tail(3, 'ABCDEFG')
-    >>> list(t)
-    ['E', 'F', 'G']
-
-    """
-    return iter(deque(iterable, maxlen=n))
-
-
-def consume(iterator, n=None):
-    """Advance *iterable* by *n* steps. If *n* is ``None``, consume it
-    entirely.
-
-    Efficiently exhausts an iterator without returning values. Defaults to
-    consuming the whole iterator, but an optional second argument may be
-    provided to limit consumption.
-
-    >>> i = (x for x in range(10))
-    >>> next(i)
-    0
-    >>> consume(i, 3)
-    >>> next(i)
-    4
-    >>> consume(i)
-    >>> next(i)
-    Traceback (most recent call last):
-      File "<stdin>", line 1, in <module>
-    StopIteration
-
-    If the iterator has fewer items remaining than the provided limit, the
-    whole iterator will be consumed.
-
-    >>> i = (x for x in range(3))
-    >>> consume(i, 5)
-    >>> next(i)
-    Traceback (most recent call last):
-      File "<stdin>", line 1, in <module>
-    StopIteration
-
-    """
-    # Use functions that consume iterators at C speed.
-    if n is None:
-        # feed the entire iterator into a zero-length deque
-        deque(iterator, maxlen=0)
-    else:
-        # advance to the empty slice starting at position n
-        next(islice(iterator, n, n), None)
-
-
-def nth(iterable, n, default=None):
-    """Returns the nth item or a default value.
-
-    >>> l = range(10)
-    >>> nth(l, 3)
-    3
-    >>> nth(l, 20, "zebra")
-    'zebra'
-
-    """
-    return next(islice(iterable, n, None), default)
-
-
-def all_equal(iterable):
-    """
-    Returns ``True`` if all the elements are equal to each other.
-
-    >>> all_equal('aaaa')
-    True
-    >>> all_equal('aaab')
-    False
-
-    """
-    g = groupby(iterable)
-    return next(g, True) and not next(g, False)
-
-
-def quantify(iterable, pred=bool):
-    """Return how many times the predicate is true.
-
-    >>> quantify([True, False, True])
-    2
-
-    """
-    return sum(map(pred, iterable))
-
-
-def pad_none(iterable):
-    """Returns the sequence of elements and then returns ``None`` indefinitely.
-
-    >>> take(5, pad_none(range(3)))
-    [0, 1, 2, None, None]
-
-    Useful for emulating the behavior of the built-in :func:`map` function.
-
-    See also :func:`padded`.
-
-    """
-    return chain(iterable, repeat(None))
-
-
-padnone = pad_none
-
-
-def ncycles(iterable, n):
-    """Returns the sequence elements *n* times
-
-    >>> list(ncycles(["a", "b"], 3))
-    ['a', 'b', 'a', 'b', 'a', 'b']
-
-    """
-    return chain.from_iterable(repeat(tuple(iterable), n))
-
-
-def dotproduct(vec1, vec2):
-    """Returns the dot product of the two iterables.
-
-    >>> dotproduct([10, 10], [20, 20])
-    400
-
-    """
-    return sum(map(operator.mul, vec1, vec2))
-
-
-def flatten(listOfLists):
-    """Return an iterator flattening one level of nesting in a list of lists.
-
-    >>> list(flatten([[0, 1], [2, 3]]))
-    [0, 1, 2, 3]
-
-    See also :func:`collapse`, which can flatten multiple levels of nesting.
-
-    """
-    return chain.from_iterable(listOfLists)
-
-
-def repeatfunc(func, times=None, *args):
-    """Call *func* with *args* repeatedly, returning an iterable over the
-    results.
-
-    If *times* is specified, the iterable will terminate after that many
-    repetitions:
-
-    >>> from operator import add
-    >>> times = 4
-    >>> args = 3, 5
-    >>> list(repeatfunc(add, times, *args))
-    [8, 8, 8, 8]
-
-    If *times* is ``None`` the iterable will not terminate:
-
-    >>> from random import randrange
-    >>> times = None
-    >>> args = 1, 11
-    >>> take(6, repeatfunc(randrange, times, *args))  # doctest:+SKIP
-    [2, 4, 8, 1, 8, 4]
-
-    """
-    if times is None:
-        return starmap(func, repeat(args))
-    return starmap(func, repeat(args, times))
-
-
-def _pairwise(iterable):
-    """Returns an iterator of paired items, overlapping, from the original
-
-    >>> take(4, pairwise(count()))
-    [(0, 1), (1, 2), (2, 3), (3, 4)]
-
-    On Python 3.10 and above, this is an alias for :func:`itertools.pairwise`.
-
-    """
-    a, b = tee(iterable)
-    next(b, None)
-    yield from zip(a, b)
-
-
-try:
-    from itertools import pairwise as itertools_pairwise
-except ImportError:
-    pairwise = _pairwise
-else:
-
-    def pairwise(iterable):
-        yield from itertools_pairwise(iterable)
-
-    pairwise.__doc__ = _pairwise.__doc__
-
-
-def grouper(iterable, n, fillvalue=None):
-    """Collect data into fixed-length chunks or blocks.
-
-    >>> list(grouper('ABCDEFG', 3, 'x'))
-    [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')]
-
-    """
-    if isinstance(iterable, int):
-        warnings.warn(
-            "grouper expects iterable as first parameter", DeprecationWarning
-        )
-        n, iterable = iterable, n
-    args = [iter(iterable)] * n
-    return zip_longest(fillvalue=fillvalue, *args)
-
-
-def roundrobin(*iterables):
-    """Yields an item from each iterable, alternating between them.
-
-    >>> list(roundrobin('ABC', 'D', 'EF'))
-    ['A', 'D', 'E', 'B', 'F', 'C']
-
-    This function produces the same output as :func:`interleave_longest`, but
-    may perform better for some inputs (in particular when the number of
-    iterables is small).
-
-    """
-    # Recipe credited to George Sakkis
-    pending = len(iterables)
-    nexts = cycle(iter(it).__next__ for it in iterables)
-    while pending:
-        try:
-            for next in nexts:
-                yield next()
-        except StopIteration:
-            pending -= 1
-            nexts = cycle(islice(nexts, pending))
-
-
-def partition(pred, iterable):
-    """
-    Returns a 2-tuple of iterables derived from the input iterable.
-    The first yields the items that have ``pred(item) == False``.
-    The second yields the items that have ``pred(item) == True``.
-
-    >>> is_odd = lambda x: x % 2 != 0
-    >>> iterable = range(10)
-    >>> even_items, odd_items = partition(is_odd, iterable)
-    >>> list(even_items), list(odd_items)
-    ([0, 2, 4, 6, 8], [1, 3, 5, 7, 9])
-
-    If *pred* is None, :func:`bool` is used.
-
-    >>> iterable = [0, 1, False, True, '', ' ']
-    >>> false_items, true_items = partition(None, iterable)
-    >>> list(false_items), list(true_items)
-    ([0, False, ''], [1, True, ' '])
-
-    """
-    if pred is None:
-        pred = bool
-
-    evaluations = ((pred(x), x) for x in iterable)
-    t1, t2 = tee(evaluations)
-    return (
-        (x for (cond, x) in t1 if not cond),
-        (x for (cond, x) in t2 if cond),
-    )
-
-
-def powerset(iterable):
-    """Yields all possible subsets of the iterable.
-
-    >>> list(powerset([1, 2, 3]))
-    [(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)]
-
-    :func:`powerset` will operate on iterables that aren't :class:`set`
-    instances, so repeated elements in the input will produce repeated elements
-    in the output. Use :func:`unique_everseen` on the input to avoid generating
-    duplicates:
-
-    >>> seq = [1, 1, 0]
-    >>> list(powerset(seq))
-    [(), (1,), (1,), (0,), (1, 1), (1, 0), (1, 0), (1, 1, 0)]
-    >>> from more_itertools import unique_everseen
-    >>> list(powerset(unique_everseen(seq)))
-    [(), (1,), (0,), (1, 0)]
-
-    """
-    s = list(iterable)
-    return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))
-
-
-def unique_everseen(iterable, key=None):
-    """
-    Yield unique elements, preserving order.
-
-    >>> list(unique_everseen('AAAABBBCCDAABBB'))
-    ['A', 'B', 'C', 'D']
-    >>> list(unique_everseen('ABBCcAD', str.lower))
-    ['A', 'B', 'C', 'D']
-
-    Sequences with a mix of hashable and unhashable items can be used.
-    The function will be slower (i.e., `O(n^2)`) for unhashable items.
-
-    Remember that ``list`` objects are unhashable - you can use the *key*
-    parameter to transform the list to a tuple (which is hashable) to
-    avoid a slowdown.
-
-    >>> iterable = ([1, 2], [2, 3], [1, 2])
-    >>> list(unique_everseen(iterable))  # Slow
-    [[1, 2], [2, 3]]
-    >>> list(unique_everseen(iterable, key=tuple))  # Faster
-    [[1, 2], [2, 3]]
-
-    Similarly, you may want to convert unhashable ``set`` objects with
-    ``key=frozenset``. For ``dict`` objects,
-    ``key=lambda x: frozenset(x.items())`` can be used.
- - """ - seenset = set() - seenset_add = seenset.add - seenlist = [] - seenlist_add = seenlist.append - use_key = key is not None - - for element in iterable: - k = key(element) if use_key else element - try: - if k not in seenset: - seenset_add(k) - yield element - except TypeError: - if k not in seenlist: - seenlist_add(k) - yield element - - -def unique_justseen(iterable, key=None): - """Yields elements in order, ignoring serial duplicates - - >>> list(unique_justseen('AAAABBBCCDAABBB')) - ['A', 'B', 'C', 'D', 'A', 'B'] - >>> list(unique_justseen('ABBCcAD', str.lower)) - ['A', 'B', 'C', 'A', 'D'] - - """ - return map(next, map(operator.itemgetter(1), groupby(iterable, key))) - - -def iter_except(func, exception, first=None): - """Yields results from a function repeatedly until an exception is raised. - - Converts a call-until-exception interface to an iterator interface. - Like ``iter(func, sentinel)``, but uses an exception instead of a sentinel - to end the loop. - - >>> l = [0, 1, 2] - >>> list(iter_except(l.pop, IndexError)) - [2, 1, 0] - - Multiple exceptions can be specified as a stopping condition: - - >>> l = [1, 2, 3, '...', 4, 5, 6] - >>> list(iter_except(lambda: 1 + l.pop(), (IndexError, TypeError))) - [7, 6, 5] - >>> list(iter_except(lambda: 1 + l.pop(), (IndexError, TypeError))) - [4, 3, 2] - >>> list(iter_except(lambda: 1 + l.pop(), (IndexError, TypeError))) - [] - - """ - try: - if first is not None: - yield first() - while 1: - yield func() - except exception: - pass - - -def first_true(iterable, default=None, pred=None): - """ - Returns the first true value in the iterable. - - If no true value is found, returns *default* - - If *pred* is not None, returns the first item for which - ``pred(item) == True`` . - - >>> first_true(range(10)) - 1 - >>> first_true(range(10), pred=lambda x: x > 5) - 6 - >>> first_true(range(10), default='missing', pred=lambda x: x > 9) - 'missing' - - """ - return next(filter(pred, iterable), default) - - -def random_product(*args, repeat=1): - """Draw an item at random from each of the input iterables. - - >>> random_product('abc', range(4), 'XYZ') # doctest:+SKIP - ('c', 3, 'Z') - - If *repeat* is provided as a keyword argument, that many items will be - drawn from each iterable. - - >>> random_product('abcd', range(4), repeat=2) # doctest:+SKIP - ('a', 2, 'd', 3) - - This equivalent to taking a random selection from - ``itertools.product(*args, **kwarg)``. - - """ - pools = [tuple(pool) for pool in args] * repeat - return tuple(choice(pool) for pool in pools) - - -def random_permutation(iterable, r=None): - """Return a random *r* length permutation of the elements in *iterable*. - - If *r* is not specified or is ``None``, then *r* defaults to the length of - *iterable*. - - >>> random_permutation(range(5)) # doctest:+SKIP - (3, 4, 0, 1, 2) - - This equivalent to taking a random selection from - ``itertools.permutations(iterable, r)``. - - """ - pool = tuple(iterable) - r = len(pool) if r is None else r - return tuple(sample(pool, r)) - - -def random_combination(iterable, r): - """Return a random *r* length subsequence of the elements in *iterable*. - - >>> random_combination(range(5), 3) # doctest:+SKIP - (2, 3, 4) - - This equivalent to taking a random selection from - ``itertools.combinations(iterable, r)``. 
- - """ - pool = tuple(iterable) - n = len(pool) - indices = sorted(sample(range(n), r)) - return tuple(pool[i] for i in indices) - - -def random_combination_with_replacement(iterable, r): - """Return a random *r* length subsequence of elements in *iterable*, - allowing individual elements to be repeated. - - >>> random_combination_with_replacement(range(3), 5) # doctest:+SKIP - (0, 0, 1, 2, 2) - - This equivalent to taking a random selection from - ``itertools.combinations_with_replacement(iterable, r)``. - - """ - pool = tuple(iterable) - n = len(pool) - indices = sorted(randrange(n) for i in range(r)) - return tuple(pool[i] for i in indices) - - -def nth_combination(iterable, r, index): - """Equivalent to ``list(combinations(iterable, r))[index]``. - - The subsequences of *iterable* that are of length *r* can be ordered - lexicographically. :func:`nth_combination` computes the subsequence at - sort position *index* directly, without computing the previous - subsequences. - - >>> nth_combination(range(5), 3, 5) - (0, 3, 4) - - ``ValueError`` will be raised If *r* is negative or greater than the length - of *iterable*. - ``IndexError`` will be raised if the given *index* is invalid. - """ - pool = tuple(iterable) - n = len(pool) - if (r < 0) or (r > n): - raise ValueError - - c = 1 - k = min(r, n - r) - for i in range(1, k + 1): - c = c * (n - k + i) // i - - if index < 0: - index += c - - if (index < 0) or (index >= c): - raise IndexError - - result = [] - while r: - c, n, r = c * r // n, n - 1, r - 1 - while index >= c: - index -= c - c, n = c * (n - r) // n, n - 1 - result.append(pool[-1 - n]) - - return tuple(result) - - -def prepend(value, iterator): - """Yield *value*, followed by the elements in *iterator*. - - >>> value = '0' - >>> iterator = ['1', '2', '3'] - >>> list(prepend(value, iterator)) - ['0', '1', '2', '3'] - - To prepend multiple values, see :func:`itertools.chain` - or :func:`value_chain`. - - """ - return chain([value], iterator) - - -def convolve(signal, kernel): - """Convolve the iterable *signal* with the iterable *kernel*. - - >>> signal = (1, 2, 3, 4, 5) - >>> kernel = [3, 2, 1] - >>> list(convolve(signal, kernel)) - [3, 8, 14, 20, 26, 14, 5] - - Note: the input arguments are not interchangeable, as the *kernel* - is immediately consumed and stored. - - """ - kernel = tuple(kernel)[::-1] - n = len(kernel) - window = deque([0], maxlen=n) * n - for x in chain(signal, repeat(0, n - 1)): - window.append(x) - yield sum(map(operator.mul, kernel, window)) - - -def before_and_after(predicate, it): - """A variant of :func:`takewhile` that allows complete access to the - remainder of the iterator. - - >>> it = iter('ABCdEfGhI') - >>> all_upper, remainder = before_and_after(str.isupper, it) - >>> ''.join(all_upper) - 'ABC' - >>> ''.join(remainder) # takewhile() would lose the 'd' - 'dEfGhI' - - Note that the first iterator must be fully consumed before the second - iterator can generate valid results. - """ - it = iter(it) - transition = [] - - def true_iterator(): - for elem in it: - if predicate(elem): - yield elem - else: - transition.append(elem) - return - - def remainder_iterator(): - yield from transition - yield from it - - return true_iterator(), remainder_iterator() - - -def triplewise(iterable): - """Return overlapping triplets from *iterable*. 
- - >>> list(triplewise('ABCDE')) - [('A', 'B', 'C'), ('B', 'C', 'D'), ('C', 'D', 'E')] - - """ - for (a, _), (b, c) in pairwise(pairwise(iterable)): - yield a, b, c - - -def sliding_window(iterable, n): - """Return a sliding window of width *n* over *iterable*. - - >>> list(sliding_window(range(6), 4)) - [(0, 1, 2, 3), (1, 2, 3, 4), (2, 3, 4, 5)] - - If *iterable* has fewer than *n* items, then nothing is yielded: - - >>> list(sliding_window(range(3), 4)) - [] - - For a variant with more features, see :func:`windowed`. - """ - it = iter(iterable) - window = deque(islice(it, n), maxlen=n) - if len(window) == n: - yield tuple(window) - for x in it: - window.append(x) - yield tuple(window) diff --git a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__about__.py b/venv/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__about__.py index 3551bc2..c359122 100644 --- a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__about__.py +++ b/venv/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__about__.py @@ -17,7 +17,7 @@ __summary__ = "Core utilities for Python packages" __uri__ = "https://github.com/pypa/packaging" -__version__ = "21.3" +__version__ = "21.2" __author__ = "Donald Stufft and individual contributors" __email__ = "donald@stufft.io" diff --git a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/_musllinux.py b/venv/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/_musllinux.py index 8ac3059..85450fa 100644 --- a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/_musllinux.py +++ b/venv/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/_musllinux.py @@ -98,7 +98,7 @@ def _get_musl_version(executable: str) -> Optional[_MuslVersion]: with contextlib.ExitStack() as stack: try: f = stack.enter_context(open(executable, "rb")) - except OSError: + except IOError: return None ld = _parse_ld_musl_from_elf(f) if not ld: diff --git a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/_structures.py b/venv/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/_structures.py index 90a6465..9515497 100644 --- a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/_structures.py +++ b/venv/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/_structures.py @@ -19,6 +19,9 @@ def __le__(self, other: object) -> bool: def __eq__(self, other: object) -> bool: return isinstance(other, self.__class__) + def __ne__(self, other: object) -> bool: + return not isinstance(other, self.__class__) + def __gt__(self, other: object) -> bool: return True @@ -48,6 +51,9 @@ def __le__(self, other: object) -> bool: def __eq__(self, other: object) -> bool: return isinstance(other, self.__class__) + def __ne__(self, other: object) -> bool: + return not isinstance(other, self.__class__) + def __gt__(self, other: object) -> bool: return False diff --git a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/specifiers.py b/venv/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/specifiers.py index 0e218a6..ce66bd4 100644 --- a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/specifiers.py +++ b/venv/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/specifiers.py @@ -57,6 +57,13 @@ def __eq__(self, other: object) -> bool: objects are equal. """ + @abc.abstractmethod + def __ne__(self, other: object) -> bool: + """ + Returns a boolean representing whether or not the two Specifier like + objects are not equal. 
+ """ + @abc.abstractproperty def prereleases(self) -> Optional[bool]: """ @@ -112,7 +119,7 @@ def __repr__(self) -> str: else "" ) - return f"<{self.__class__.__name__}({str(self)!r}{pre})>" + return "<{}({!r}{})>".format(self.__class__.__name__, str(self), pre) def __str__(self) -> str: return "{}{}".format(*self._spec) @@ -135,6 +142,17 @@ def __eq__(self, other: object) -> bool: return self._canonical_spec == other._canonical_spec + def __ne__(self, other: object) -> bool: + if isinstance(other, str): + try: + other = self.__class__(str(other)) + except InvalidSpecifier: + return NotImplemented + elif not isinstance(other, self.__class__): + return NotImplemented + + return self._spec != other._spec + def _get_operator(self, op: str) -> CallableOperator: operator_callable: CallableOperator = getattr( self, f"_compare_{self._operators[op]}" @@ -649,7 +667,7 @@ def __repr__(self) -> str: else "" ) - return f"" + return "".format(str(self), pre) def __str__(self) -> str: return ",".join(sorted(str(s) for s in self._specs)) @@ -688,6 +706,14 @@ def __eq__(self, other: object) -> bool: return self._specs == other._specs + def __ne__(self, other: object) -> bool: + if isinstance(other, (str, _IndividualSpecifier)): + other = SpecifierSet(str(other)) + elif not isinstance(other, SpecifierSet): + return NotImplemented + + return self._specs != other._specs + def __len__(self) -> int: return len(self._specs) diff --git a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/tags.py b/venv/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/tags.py index 9a3d25a..e65890a 100644 --- a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/tags.py +++ b/venv/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/tags.py @@ -90,7 +90,7 @@ def __str__(self) -> str: return f"{self._interpreter}-{self._abi}-{self._platform}" def __repr__(self) -> str: - return f"<{self} @ {id(self)}>" + return "<{self} @ {self_id}>".format(self=self, self_id=id(self)) def parse_tag(tag: str) -> FrozenSet[Tag]: @@ -192,7 +192,7 @@ def cpython_tags( if not python_version: python_version = sys.version_info[:2] - interpreter = f"cp{_version_nodot(python_version[:2])}" + interpreter = "cp{}".format(_version_nodot(python_version[:2])) if abis is None: if len(python_version) > 1: @@ -268,11 +268,11 @@ def _py_interpreter_range(py_version: PythonVersion) -> Iterator[str]: all previous versions of that major version. """ if len(py_version) > 1: - yield f"py{_version_nodot(py_version[:2])}" - yield f"py{py_version[0]}" + yield "py{version}".format(version=_version_nodot(py_version[:2])) + yield "py{major}".format(major=py_version[0]) if len(py_version) > 1: for minor in range(py_version[1] - 1, -1, -1): - yield f"py{_version_nodot((py_version[0], minor))}" + yield "py{version}".format(version=_version_nodot((py_version[0], minor))) def compatible_tags( @@ -481,7 +481,4 @@ def sys_tags(*, warn: bool = False) -> Iterator[Tag]: else: yield from generic_tags() - if interp_name == "pp": - yield from compatible_tags(interpreter="pp3") - else: - yield from compatible_tags() + yield from compatible_tags() diff --git a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/__init__.py b/venv/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/__init__.py deleted file mode 100644 index 7802ff1..0000000 --- a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/__init__.py +++ /dev/null @@ -1,331 +0,0 @@ -# module pyparsing.py -# -# Copyright (c) 2003-2022 Paul T. 
McGuire
-#
-# Permission is hereby granted, free of charge, to any person obtaining
-# a copy of this software and associated documentation files (the
-# "Software"), to deal in the Software without restriction, including
-# without limitation the rights to use, copy, modify, merge, publish,
-# distribute, sublicense, and/or sell copies of the Software, and to
-# permit persons to whom the Software is furnished to do so, subject to
-# the following conditions:
-#
-# The above copyright notice and this permission notice shall be
-# included in all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-#
-
-__doc__ = """
-pyparsing module - Classes and methods to define and execute parsing grammars
-=============================================================================
-
-The pyparsing module is an alternative approach to creating and
-executing simple grammars, vs. the traditional lex/yacc approach, or the
-use of regular expressions. With pyparsing, you don't need to learn
-a new syntax for defining grammars or matching expressions - the parsing
-module provides a library of classes that you use to construct the
-grammar directly in Python.
-
-Here is a program to parse "Hello, World!" (or any greeting of the form
-``"<salutation>, <addressee>!"``), built up using :class:`Word`,
-:class:`Literal`, and :class:`And` elements
-(the :meth:`'+'` operators create :class:`And` expressions,
-and the strings are auto-converted to :class:`Literal` expressions)::
-
-    from pyparsing import Word, alphas
-
-    # define grammar of a greeting
-    greet = Word(alphas) + "," + Word(alphas) + "!"
-
-    hello = "Hello, World!"
-    print(hello, "->", greet.parse_string(hello))
-
-The program outputs the following::
-
-    Hello, World! -> ['Hello', ',', 'World', '!']
-
-The Python representation of the grammar is quite readable, owing to the
-self-explanatory class names, and the use of :class:`'+'`,
-:class:`'|'`, :class:`'^'` and :class:`'&'` operators.
-
-The :class:`ParseResults` object returned from
-:class:`ParserElement.parseString` can be
-accessed as a nested list, a dictionary, or an object with named
-attributes.
-
-The pyparsing module handles some of the problems that are typically
-vexing when writing text parsers:
-
-  - extra or missing whitespace (the above program will also handle
-    "Hello,World!", "Hello , World !", etc.)
-  - quoted strings
-  - embedded comments
-
-
-Getting Started -
------------------
-Visit the classes :class:`ParserElement` and :class:`ParseResults` to
-see the base classes that most other pyparsing
-classes inherit from.
Use the docstrings for examples of how to: - - - construct literal match expressions from :class:`Literal` and - :class:`CaselessLiteral` classes - - construct character word-group expressions using the :class:`Word` - class - - see how to create repetitive expressions using :class:`ZeroOrMore` - and :class:`OneOrMore` classes - - use :class:`'+'`, :class:`'|'`, :class:`'^'`, - and :class:`'&'` operators to combine simple expressions into - more complex ones - - associate names with your parsed results using - :class:`ParserElement.setResultsName` - - access the parsed data, which is returned as a :class:`ParseResults` - object - - find some helpful expression short-cuts like :class:`delimitedList` - and :class:`oneOf` - - find more useful common expressions in the :class:`pyparsing_common` - namespace class -""" -from typing import NamedTuple - - -class version_info(NamedTuple): - major: int - minor: int - micro: int - releaselevel: str - serial: int - - @property - def __version__(self): - return ( - "{}.{}.{}".format(self.major, self.minor, self.micro) - + ( - "{}{}{}".format( - "r" if self.releaselevel[0] == "c" else "", - self.releaselevel[0], - self.serial, - ), - "", - )[self.releaselevel == "final"] - ) - - def __str__(self): - return "{} {} / {}".format(__name__, self.__version__, __version_time__) - - def __repr__(self): - return "{}.{}({})".format( - __name__, - type(self).__name__, - ", ".join("{}={!r}".format(*nv) for nv in zip(self._fields, self)), - ) - - -__version_info__ = version_info(3, 0, 9, "final", 0) -__version_time__ = "05 May 2022 07:02 UTC" -__version__ = __version_info__.__version__ -__versionTime__ = __version_time__ -__author__ = "Paul McGuire " - -from .util import * -from .exceptions import * -from .actions import * -from .core import __diag__, __compat__ -from .results import * -from .core import * -from .core import _builtin_exprs as core_builtin_exprs -from .helpers import * -from .helpers import _builtin_exprs as helper_builtin_exprs - -from .unicode import unicode_set, UnicodeRangeList, pyparsing_unicode as unicode -from .testing import pyparsing_test as testing -from .common import ( - pyparsing_common as common, - _builtin_exprs as common_builtin_exprs, -) - -# define backward compat synonyms -if "pyparsing_unicode" not in globals(): - pyparsing_unicode = unicode -if "pyparsing_common" not in globals(): - pyparsing_common = common -if "pyparsing_test" not in globals(): - pyparsing_test = testing - -core_builtin_exprs += common_builtin_exprs + helper_builtin_exprs - - -__all__ = [ - "__version__", - "__version_time__", - "__author__", - "__compat__", - "__diag__", - "And", - "AtLineStart", - "AtStringStart", - "CaselessKeyword", - "CaselessLiteral", - "CharsNotIn", - "Combine", - "Dict", - "Each", - "Empty", - "FollowedBy", - "Forward", - "GoToColumn", - "Group", - "IndentedBlock", - "Keyword", - "LineEnd", - "LineStart", - "Literal", - "Located", - "PrecededBy", - "MatchFirst", - "NoMatch", - "NotAny", - "OneOrMore", - "OnlyOnce", - "OpAssoc", - "Opt", - "Optional", - "Or", - "ParseBaseException", - "ParseElementEnhance", - "ParseException", - "ParseExpression", - "ParseFatalException", - "ParseResults", - "ParseSyntaxException", - "ParserElement", - "PositionToken", - "QuotedString", - "RecursiveGrammarException", - "Regex", - "SkipTo", - "StringEnd", - "StringStart", - "Suppress", - "Token", - "TokenConverter", - "White", - "Word", - "WordEnd", - "WordStart", - "ZeroOrMore", - "Char", - "alphanums", - "alphas", - "alphas8bit", - "any_close_tag", - 
"any_open_tag", - "c_style_comment", - "col", - "common_html_entity", - "counted_array", - "cpp_style_comment", - "dbl_quoted_string", - "dbl_slash_comment", - "delimited_list", - "dict_of", - "empty", - "hexnums", - "html_comment", - "identchars", - "identbodychars", - "java_style_comment", - "line", - "line_end", - "line_start", - "lineno", - "make_html_tags", - "make_xml_tags", - "match_only_at_col", - "match_previous_expr", - "match_previous_literal", - "nested_expr", - "null_debug_action", - "nums", - "one_of", - "printables", - "punc8bit", - "python_style_comment", - "quoted_string", - "remove_quotes", - "replace_with", - "replace_html_entity", - "rest_of_line", - "sgl_quoted_string", - "srange", - "string_end", - "string_start", - "trace_parse_action", - "unicode_string", - "with_attribute", - "indentedBlock", - "original_text_for", - "ungroup", - "infix_notation", - "locatedExpr", - "with_class", - "CloseMatch", - "token_map", - "pyparsing_common", - "pyparsing_unicode", - "unicode_set", - "condition_as_parse_action", - "pyparsing_test", - # pre-PEP8 compatibility names - "__versionTime__", - "anyCloseTag", - "anyOpenTag", - "cStyleComment", - "commonHTMLEntity", - "countedArray", - "cppStyleComment", - "dblQuotedString", - "dblSlashComment", - "delimitedList", - "dictOf", - "htmlComment", - "javaStyleComment", - "lineEnd", - "lineStart", - "makeHTMLTags", - "makeXMLTags", - "matchOnlyAtCol", - "matchPreviousExpr", - "matchPreviousLiteral", - "nestedExpr", - "nullDebugAction", - "oneOf", - "opAssoc", - "pythonStyleComment", - "quotedString", - "removeQuotes", - "replaceHTMLEntity", - "replaceWith", - "restOfLine", - "sglQuotedString", - "stringEnd", - "stringStart", - "traceParseAction", - "unicodeString", - "withAttribute", - "indentedBlock", - "originalTextFor", - "infixNotation", - "locatedExpr", - "withClass", - "tokenMap", - "conditionAsParseAction", - "autoname_elements", -] diff --git a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/actions.py b/venv/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/actions.py deleted file mode 100644 index f72c66e..0000000 --- a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/actions.py +++ /dev/null @@ -1,207 +0,0 @@ -# actions.py - -from .exceptions import ParseException -from .util import col - - -class OnlyOnce: - """ - Wrapper for parse actions, to ensure they are only called once. - """ - - def __init__(self, method_call): - from .core import _trim_arity - - self.callable = _trim_arity(method_call) - self.called = False - - def __call__(self, s, l, t): - if not self.called: - results = self.callable(s, l, t) - self.called = True - return results - raise ParseException(s, l, "OnlyOnce obj called multiple times w/out reset") - - def reset(self): - """ - Allow the associated parse action to be called once more. - """ - - self.called = False - - -def match_only_at_col(n): - """ - Helper method for defining parse actions that require matching at - a specific column in the input text. - """ - - def verify_col(strg, locn, toks): - if col(locn, strg) != n: - raise ParseException(strg, locn, "matched token not at column {}".format(n)) - - return verify_col - - -def replace_with(repl_str): - """ - Helper method for common parse actions that simply return - a literal value. Especially useful when used with - :class:`transform_string` (). 
-
-    Example::
-
-        num = Word(nums).set_parse_action(lambda toks: int(toks[0]))
-        na = one_of("N/A NA").set_parse_action(replace_with(math.nan))
-        term = na | num
-
-        term[1, ...].parse_string("324 234 N/A 234")  # -> [324, 234, nan, 234]
-    """
-    return lambda s, l, t: [repl_str]
-
-
-def remove_quotes(s, l, t):
-    """
-    Helper parse action for removing quotation marks from parsed
-    quoted strings.
-
-    Example::
-
-        # by default, quotation marks are included in parsed results
-        quoted_string.parse_string("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]
-
-        # use remove_quotes to strip quotation marks from parsed results
-        quoted_string.set_parse_action(remove_quotes)
-        quoted_string.parse_string("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
-    """
-    return t[0][1:-1]
-
-
-def with_attribute(*args, **attr_dict):
-    """
-    Helper to create a validating parse action to be used with start
-    tags created with :class:`make_xml_tags` or
-    :class:`make_html_tags`. Use ``with_attribute`` to qualify
-    a starting tag with a required attribute value, to avoid false
-    matches on common tags such as ``<TD>`` or ``<DIV>``.
-
-    Call ``with_attribute`` with a series of attribute names and
-    values. Specify the list of filter attributes names and values as:
-
-    - keyword arguments, as in ``(align="right")``, or
-    - as an explicit dict with ``**`` operator, when an attribute
-      name is also a Python reserved word, as in ``**{"class":"Customer", "align":"right"}``
-    - a list of name-value tuples, as in ``(("ns1:class", "Customer"), ("ns2:align", "right"))``
-
-    For attribute names with a namespace prefix, you must use the second
-    form. Attribute names are matched insensitive to upper/lower case.
-
-    If just testing for ``class`` (with or without a namespace), use
-    :class:`with_class`.
-
-    To verify that the attribute exists, but without specifying a value,
-    pass ``with_attribute.ANY_VALUE`` as the value.
-
-    Example::
-
-        html = '''
-            <div>
-            Some text
-            <div type="grid">1 4 0 1 0</div>
-            <div type="graph">1,3 2,3 1,1</div>
-            <div>this has no type</div>
-            </div>
-
-        '''
-        div,div_end = make_html_tags("div")
-
-        # only match div tag having a type attribute with value "grid"
-        div_grid = div().set_parse_action(with_attribute(type="grid"))
-        grid_expr = div_grid + SkipTo(div | div_end)("body")
-        for grid_header in grid_expr.search_string(html):
-            print(grid_header.body)
-
-        # construct a match with any div tag having a type attribute, regardless of the value
-        div_any_type = div().set_parse_action(with_attribute(type=with_attribute.ANY_VALUE))
-        div_expr = div_any_type + SkipTo(div | div_end)("body")
-        for div_header in div_expr.search_string(html):
-            print(div_header.body)
-
-    prints::
-
-        1 4 0 1 0
-
-        1 4 0 1 0
-        1,3 2,3 1,1
-    """
-    if args:
-        attrs = args[:]
-    else:
-        attrs = attr_dict.items()
-    attrs = [(k, v) for k, v in attrs]
-
-    def pa(s, l, tokens):
-        for attrName, attrValue in attrs:
-            if attrName not in tokens:
-                raise ParseException(s, l, "no matching attribute " + attrName)
-            if attrValue != with_attribute.ANY_VALUE and tokens[attrName] != attrValue:
-                raise ParseException(
-                    s,
-                    l,
-                    "attribute {!r} has value {!r}, must be {!r}".format(
-                        attrName, tokens[attrName], attrValue
-                    ),
-                )
-
-    return pa
-
-
-with_attribute.ANY_VALUE = object()
-
-
-def with_class(classname, namespace=""):
-    """
-    Simplified version of :class:`with_attribute` when
-    matching on a div class - made difficult because ``class`` is
-    a reserved word in Python.
-
-    Example::
-
-        html = '''
-            <div>
-            Some text
-            <div class="grid">1 4 0 1 0</div>
-            <div class="graph">1,3 2,3 1,1</div>
-            <div>this &lt;div&gt; has no class</div>
-            </div>
- - ''' - div,div_end = make_html_tags("div") - div_grid = div().set_parse_action(with_class("grid")) - - grid_expr = div_grid + SkipTo(div | div_end)("body") - for grid_header in grid_expr.search_string(html): - print(grid_header.body) - - div_any_type = div().set_parse_action(with_class(withAttribute.ANY_VALUE)) - div_expr = div_any_type + SkipTo(div | div_end)("body") - for div_header in div_expr.search_string(html): - print(div_header.body) - - prints:: - - 1 4 0 1 0 - - 1 4 0 1 0 - 1,3 2,3 1,1 - """ - classattr = "{}:class".format(namespace) if namespace else "class" - return with_attribute(**{classattr: classname}) - - -# pre-PEP8 compatibility symbols -replaceWith = replace_with -removeQuotes = remove_quotes -withAttribute = with_attribute -withClass = with_class -matchOnlyAtCol = match_only_at_col diff --git a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/common.py b/venv/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/common.py deleted file mode 100644 index 1859fb7..0000000 --- a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/common.py +++ /dev/null @@ -1,424 +0,0 @@ -# common.py -from .core import * -from .helpers import delimited_list, any_open_tag, any_close_tag -from datetime import datetime - - -# some other useful expressions - using lower-case class name since we are really using this as a namespace -class pyparsing_common: - """Here are some common low-level expressions that may be useful in - jump-starting parser development: - - - numeric forms (:class:`integers`, :class:`reals`, - :class:`scientific notation`) - - common :class:`programming identifiers` - - network addresses (:class:`MAC`, - :class:`IPv4`, :class:`IPv6`) - - ISO8601 :class:`dates` and - :class:`datetime` - - :class:`UUID` - - :class:`comma-separated list` - - :class:`url` - - Parse actions: - - - :class:`convertToInteger` - - :class:`convertToFloat` - - :class:`convertToDate` - - :class:`convertToDatetime` - - :class:`stripHTMLTags` - - :class:`upcaseTokens` - - :class:`downcaseTokens` - - Example:: - - pyparsing_common.number.runTests(''' - # any int or real number, returned as the appropriate type - 100 - -100 - +100 - 3.14159 - 6.02e23 - 1e-12 - ''') - - pyparsing_common.fnumber.runTests(''' - # any int or real number, returned as float - 100 - -100 - +100 - 3.14159 - 6.02e23 - 1e-12 - ''') - - pyparsing_common.hex_integer.runTests(''' - # hex numbers - 100 - FF - ''') - - pyparsing_common.fraction.runTests(''' - # fractions - 1/2 - -3/4 - ''') - - pyparsing_common.mixed_integer.runTests(''' - # mixed fractions - 1 - 1/2 - -3/4 - 1-3/4 - ''') - - import uuid - pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID)) - pyparsing_common.uuid.runTests(''' - # uuid - 12345678-1234-5678-1234-567812345678 - ''') - - prints:: - - # any int or real number, returned as the appropriate type - 100 - [100] - - -100 - [-100] - - +100 - [100] - - 3.14159 - [3.14159] - - 6.02e23 - [6.02e+23] - - 1e-12 - [1e-12] - - # any int or real number, returned as float - 100 - [100.0] - - -100 - [-100.0] - - +100 - [100.0] - - 3.14159 - [3.14159] - - 6.02e23 - [6.02e+23] - - 1e-12 - [1e-12] - - # hex numbers - 100 - [256] - - FF - [255] - - # fractions - 1/2 - [0.5] - - -3/4 - [-0.75] - - # mixed fractions - 1 - [1] - - 1/2 - [0.5] - - -3/4 - [-0.75] - - 1-3/4 - [1.75] - - # uuid - 12345678-1234-5678-1234-567812345678 - [UUID('12345678-1234-5678-1234-567812345678')] - """ - - convert_to_integer = token_map(int) - """ - Parse action for converting parsed integers to Python 
int - """ - - convert_to_float = token_map(float) - """ - Parse action for converting parsed numbers to Python float - """ - - integer = Word(nums).set_name("integer").set_parse_action(convert_to_integer) - """expression that parses an unsigned integer, returns an int""" - - hex_integer = ( - Word(hexnums).set_name("hex integer").set_parse_action(token_map(int, 16)) - ) - """expression that parses a hexadecimal integer, returns an int""" - - signed_integer = ( - Regex(r"[+-]?\d+") - .set_name("signed integer") - .set_parse_action(convert_to_integer) - ) - """expression that parses an integer with optional leading sign, returns an int""" - - fraction = ( - signed_integer().set_parse_action(convert_to_float) - + "/" - + signed_integer().set_parse_action(convert_to_float) - ).set_name("fraction") - """fractional expression of an integer divided by an integer, returns a float""" - fraction.add_parse_action(lambda tt: tt[0] / tt[-1]) - - mixed_integer = ( - fraction | signed_integer + Opt(Opt("-").suppress() + fraction) - ).set_name("fraction or mixed integer-fraction") - """mixed integer of the form 'integer - fraction', with optional leading integer, returns float""" - mixed_integer.add_parse_action(sum) - - real = ( - Regex(r"[+-]?(?:\d+\.\d*|\.\d+)") - .set_name("real number") - .set_parse_action(convert_to_float) - ) - """expression that parses a floating point number and returns a float""" - - sci_real = ( - Regex(r"[+-]?(?:\d+(?:[eE][+-]?\d+)|(?:\d+\.\d*|\.\d+)(?:[eE][+-]?\d+)?)") - .set_name("real number with scientific notation") - .set_parse_action(convert_to_float) - ) - """expression that parses a floating point number with optional - scientific notation and returns a float""" - - # streamlining this expression makes the docs nicer-looking - number = (sci_real | real | signed_integer).setName("number").streamline() - """any numeric expression, returns the corresponding Python type""" - - fnumber = ( - Regex(r"[+-]?\d+\.?\d*([eE][+-]?\d+)?") - .set_name("fnumber") - .set_parse_action(convert_to_float) - ) - """any int or real number, returned as float""" - - identifier = Word(identchars, identbodychars).set_name("identifier") - """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')""" - - ipv4_address = Regex( - r"(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}" - ).set_name("IPv4 address") - "IPv4 address (``0.0.0.0 - 255.255.255.255``)" - - _ipv6_part = Regex(r"[0-9a-fA-F]{1,4}").set_name("hex_integer") - _full_ipv6_address = (_ipv6_part + (":" + _ipv6_part) * 7).set_name( - "full IPv6 address" - ) - _short_ipv6_address = ( - Opt(_ipv6_part + (":" + _ipv6_part) * (0, 6)) - + "::" - + Opt(_ipv6_part + (":" + _ipv6_part) * (0, 6)) - ).set_name("short IPv6 address") - _short_ipv6_address.add_condition( - lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8 - ) - _mixed_ipv6_address = ("::ffff:" + ipv4_address).set_name("mixed IPv6 address") - ipv6_address = Combine( - (_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).set_name( - "IPv6 address" - ) - ).set_name("IPv6 address") - "IPv6 address (long, short, or mixed form)" - - mac_address = Regex( - r"[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}" - ).set_name("MAC address") - "MAC address xx:xx:xx:xx:xx (may also have '-' or '.' 
delimiters)" - - @staticmethod - def convert_to_date(fmt: str = "%Y-%m-%d"): - """ - Helper to create a parse action for converting parsed date string to Python datetime.date - - Params - - - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%d"``) - - Example:: - - date_expr = pyparsing_common.iso8601_date.copy() - date_expr.setParseAction(pyparsing_common.convertToDate()) - print(date_expr.parseString("1999-12-31")) - - prints:: - - [datetime.date(1999, 12, 31)] - """ - - def cvt_fn(ss, ll, tt): - try: - return datetime.strptime(tt[0], fmt).date() - except ValueError as ve: - raise ParseException(ss, ll, str(ve)) - - return cvt_fn - - @staticmethod - def convert_to_datetime(fmt: str = "%Y-%m-%dT%H:%M:%S.%f"): - """Helper to create a parse action for converting parsed - datetime string to Python datetime.datetime - - Params - - - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%dT%H:%M:%S.%f"``) - - Example:: - - dt_expr = pyparsing_common.iso8601_datetime.copy() - dt_expr.setParseAction(pyparsing_common.convertToDatetime()) - print(dt_expr.parseString("1999-12-31T23:59:59.999")) - - prints:: - - [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)] - """ - - def cvt_fn(s, l, t): - try: - return datetime.strptime(t[0], fmt) - except ValueError as ve: - raise ParseException(s, l, str(ve)) - - return cvt_fn - - iso8601_date = Regex( - r"(?P\d{4})(?:-(?P\d\d)(?:-(?P\d\d))?)?" - ).set_name("ISO8601 date") - "ISO8601 date (``yyyy-mm-dd``)" - - iso8601_datetime = Regex( - r"(?P\d{4})-(?P\d\d)-(?P\d\d)[T ](?P\d\d):(?P\d\d)(:(?P\d\d(\.\d*)?)?)?(?PZ|[+-]\d\d:?\d\d)?" - ).set_name("ISO8601 datetime") - "ISO8601 datetime (``yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)``) - trailing seconds, milliseconds, and timezone optional; accepts separating ``'T'`` or ``' '``" - - uuid = Regex(r"[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}").set_name("UUID") - "UUID (``xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx``)" - - _html_stripper = any_open_tag.suppress() | any_close_tag.suppress() - - @staticmethod - def strip_html_tags(s: str, l: int, tokens: ParseResults): - """Parse action to remove HTML tags from web page HTML source - - Example:: - - # strip HTML links from normal text - text = 'More info at the pyparsing wiki page' - td, td_end = makeHTMLTags("TD") - table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end - print(table_text.parseString(text).body) - - Prints:: - - More info at the pyparsing wiki page - """ - return pyparsing_common._html_stripper.transform_string(tokens[0]) - - _commasepitem = ( - Combine( - OneOrMore( - ~Literal(",") - + ~LineEnd() - + Word(printables, exclude_chars=",") - + Opt(White(" \t") + ~FollowedBy(LineEnd() | ",")) - ) - ) - .streamline() - .set_name("commaItem") - ) - comma_separated_list = delimited_list( - Opt(quoted_string.copy() | _commasepitem, default="") - ).set_name("comma separated list") - """Predefined expression of 1 or more printable words or quoted strings, separated by commas.""" - - upcase_tokens = staticmethod(token_map(lambda t: t.upper())) - """Parse action to convert tokens to upper case.""" - - downcase_tokens = staticmethod(token_map(lambda t: t.lower())) - """Parse action to convert tokens to lower case.""" - - # fmt: off - url = Regex( - # https://mathiasbynens.be/demo/url-regex - # https://gist.github.com/dperini/729294 - r"^" + - # protocol identifier (optional) - # short syntax // still required - r"(?:(?:(?Phttps?|ftp):)?\/\/)" + - # user:pass BasicAuth (optional) - 
r"(?:(?P\S+(?::\S*)?)@)?" + - r"(?P" + - # IP address exclusion - # private & local networks - r"(?!(?:10|127)(?:\.\d{1,3}){3})" + - r"(?!(?:169\.254|192\.168)(?:\.\d{1,3}){2})" + - r"(?!172\.(?:1[6-9]|2\d|3[0-1])(?:\.\d{1,3}){2})" + - # IP address dotted notation octets - # excludes loopback network 0.0.0.0 - # excludes reserved space >= 224.0.0.0 - # excludes network & broadcast addresses - # (first & last IP address of each class) - r"(?:[1-9]\d?|1\d\d|2[01]\d|22[0-3])" + - r"(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5])){2}" + - r"(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))" + - r"|" + - # host & domain names, may end with dot - # can be replaced by a shortest alternative - # (?![-_])(?:[-\w\u00a1-\uffff]{0,63}[^-_]\.)+ - r"(?:" + - r"(?:" + - r"[a-z0-9\u00a1-\uffff]" + - r"[a-z0-9\u00a1-\uffff_-]{0,62}" + - r")?" + - r"[a-z0-9\u00a1-\uffff]\." + - r")+" + - # TLD identifier name, may end with dot - r"(?:[a-z\u00a1-\uffff]{2,}\.?)" + - r")" + - # port number (optional) - r"(:(?P\d{2,5}))?" + - # resource path (optional) - r"(?P\/[^?# ]*)?" + - # query string (optional) - r"(\?(?P[^#]*))?" + - # fragment (optional) - r"(#(?P\S*))?" + - r"$" - ).set_name("url") - # fmt: on - - # pre-PEP8 compatibility names - convertToInteger = convert_to_integer - convertToFloat = convert_to_float - convertToDate = convert_to_date - convertToDatetime = convert_to_datetime - stripHTMLTags = strip_html_tags - upcaseTokens = upcase_tokens - downcaseTokens = downcase_tokens - - -_builtin_exprs = [ - v for v in vars(pyparsing_common).values() if isinstance(v, ParserElement) -] diff --git a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/core.py b/venv/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/core.py deleted file mode 100644 index 9acba3f..0000000 --- a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/core.py +++ /dev/null @@ -1,5814 +0,0 @@ -# -# core.py -# -import os -import typing -from typing import ( - NamedTuple, - Union, - Callable, - Any, - Generator, - Tuple, - List, - TextIO, - Set, - Sequence, -) -from abc import ABC, abstractmethod -from enum import Enum -import string -import copy -import warnings -import re -import sys -from collections.abc import Iterable -import traceback -import types -from operator import itemgetter -from functools import wraps -from threading import RLock -from pathlib import Path - -from .util import ( - _FifoCache, - _UnboundedCache, - __config_flags, - _collapse_string_to_ranges, - _escape_regex_range_chars, - _bslash, - _flatten, - LRUMemo as _LRUMemo, - UnboundedMemo as _UnboundedMemo, -) -from .exceptions import * -from .actions import * -from .results import ParseResults, _ParseResultsWithOffset -from .unicode import pyparsing_unicode - -_MAX_INT = sys.maxsize -str_type: Tuple[type, ...] = (str, bytes) - -# -# Copyright (c) 2003-2022 Paul T. McGuire -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. 
-# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -# - - -if sys.version_info >= (3, 8): - from functools import cached_property -else: - - class cached_property: - def __init__(self, func): - self._func = func - - def __get__(self, instance, owner=None): - ret = instance.__dict__[self._func.__name__] = self._func(instance) - return ret - - -class __compat__(__config_flags): - """ - A cross-version compatibility configuration for pyparsing features that will be - released in a future version. By setting values in this configuration to True, - those features can be enabled in prior versions for compatibility development - and testing. - - - ``collect_all_And_tokens`` - flag to enable fix for Issue #63 that fixes erroneous grouping - of results names when an :class:`And` expression is nested within an :class:`Or` or :class:`MatchFirst`; - maintained for compatibility, but setting to ``False`` no longer restores pre-2.3.1 - behavior - """ - - _type_desc = "compatibility" - - collect_all_And_tokens = True - - _all_names = [__ for __ in locals() if not __.startswith("_")] - _fixed_names = """ - collect_all_And_tokens - """.split() - - -class __diag__(__config_flags): - _type_desc = "diagnostic" - - warn_multiple_tokens_in_named_alternation = False - warn_ungrouped_named_tokens_in_collection = False - warn_name_set_on_empty_Forward = False - warn_on_parse_using_empty_Forward = False - warn_on_assignment_to_Forward = False - warn_on_multiple_string_args_to_oneof = False - warn_on_match_first_with_lshift_operator = False - enable_debug_on_named_expressions = False - - _all_names = [__ for __ in locals() if not __.startswith("_")] - _warning_names = [name for name in _all_names if name.startswith("warn")] - _debug_names = [name for name in _all_names if name.startswith("enable_debug")] - - @classmethod - def enable_all_warnings(cls) -> None: - for name in cls._warning_names: - cls.enable(name) - - -class Diagnostics(Enum): - """ - Diagnostic configuration (all default to disabled) - - ``warn_multiple_tokens_in_named_alternation`` - flag to enable warnings when a results - name is defined on a :class:`MatchFirst` or :class:`Or` expression with one or more :class:`And` subexpressions - - ``warn_ungrouped_named_tokens_in_collection`` - flag to enable warnings when a results - name is defined on a containing expression with ungrouped subexpressions that also - have results names - - ``warn_name_set_on_empty_Forward`` - flag to enable warnings when a :class:`Forward` is defined - with a results name, but has no contents defined - - ``warn_on_parse_using_empty_Forward`` - flag to enable warnings when a :class:`Forward` is - defined in a grammar but has never had an expression attached to it - - ``warn_on_assignment_to_Forward`` - flag to enable warnings when a :class:`Forward` is defined - but is overwritten by assigning using ``'='`` instead of ``'<<='`` or ``'<<'`` - - ``warn_on_multiple_string_args_to_oneof`` - flag to enable warnings when :class:`one_of` is - incorrectly called with multiple str arguments - - ``enable_debug_on_named_expressions`` - flag to auto-enable debug 
on all subsequent - calls to :class:`ParserElement.set_name` - - Diagnostics are enabled/disabled by calling :class:`enable_diag` and :class:`disable_diag`. - All warnings can be enabled by calling :class:`enable_all_warnings`. - """ - - warn_multiple_tokens_in_named_alternation = 0 - warn_ungrouped_named_tokens_in_collection = 1 - warn_name_set_on_empty_Forward = 2 - warn_on_parse_using_empty_Forward = 3 - warn_on_assignment_to_Forward = 4 - warn_on_multiple_string_args_to_oneof = 5 - warn_on_match_first_with_lshift_operator = 6 - enable_debug_on_named_expressions = 7 - - -def enable_diag(diag_enum: Diagnostics) -> None: - """ - Enable a global pyparsing diagnostic flag (see :class:`Diagnostics`). - """ - __diag__.enable(diag_enum.name) - - -def disable_diag(diag_enum: Diagnostics) -> None: - """ - Disable a global pyparsing diagnostic flag (see :class:`Diagnostics`). - """ - __diag__.disable(diag_enum.name) - - -def enable_all_warnings() -> None: - """ - Enable all global pyparsing diagnostic warnings (see :class:`Diagnostics`). - """ - __diag__.enable_all_warnings() - - -# hide abstract class -del __config_flags - - -def _should_enable_warnings( - cmd_line_warn_options: typing.Iterable[str], warn_env_var: typing.Optional[str] -) -> bool: - enable = bool(warn_env_var) - for warn_opt in cmd_line_warn_options: - w_action, w_message, w_category, w_module, w_line = (warn_opt + "::::").split( - ":" - )[:5] - if not w_action.lower().startswith("i") and ( - not (w_message or w_category or w_module) or w_module == "pyparsing" - ): - enable = True - elif w_action.lower().startswith("i") and w_module in ("pyparsing", ""): - enable = False - return enable - - -if _should_enable_warnings( - sys.warnoptions, os.environ.get("PYPARSINGENABLEALLWARNINGS") -): - enable_all_warnings() - - -# build list of single arg builtins, that can be used as parse actions -_single_arg_builtins = { - sum, - len, - sorted, - reversed, - list, - tuple, - set, - any, - all, - min, - max, -} - -_generatorType = types.GeneratorType -ParseAction = Union[ - Callable[[], Any], - Callable[[ParseResults], Any], - Callable[[int, ParseResults], Any], - Callable[[str, int, ParseResults], Any], -] -ParseCondition = Union[ - Callable[[], bool], - Callable[[ParseResults], bool], - Callable[[int, ParseResults], bool], - Callable[[str, int, ParseResults], bool], -] -ParseFailAction = Callable[[str, int, "ParserElement", Exception], None] -DebugStartAction = Callable[[str, int, "ParserElement", bool], None] -DebugSuccessAction = Callable[ - [str, int, int, "ParserElement", ParseResults, bool], None -] -DebugExceptionAction = Callable[[str, int, "ParserElement", Exception, bool], None] - - -alphas = string.ascii_uppercase + string.ascii_lowercase -identchars = pyparsing_unicode.Latin1.identchars -identbodychars = pyparsing_unicode.Latin1.identbodychars -nums = "0123456789" -hexnums = nums + "ABCDEFabcdef" -alphanums = alphas + nums -printables = "".join([c for c in string.printable if c not in string.whitespace]) - -_trim_arity_call_line: traceback.StackSummary = None - - -def _trim_arity(func, max_limit=3): - """decorator to trim function calls to match the arity of the target""" - global _trim_arity_call_line - - if func in _single_arg_builtins: - return lambda s, l, t: func(t) - - limit = 0 - found_arity = False - - def extract_tb(tb, limit=0): - frames = traceback.extract_tb(tb, limit=limit) - frame_summary = frames[-1] - return [frame_summary[:2]] - - # synthesize what would be returned by traceback.extract_stack at the call to - # 
user's parse action 'func', so that we don't incur call penalty at parse time - - # fmt: off - LINE_DIFF = 7 - # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND - # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!! - _trim_arity_call_line = (_trim_arity_call_line or traceback.extract_stack(limit=2)[-1]) - pa_call_line_synth = (_trim_arity_call_line[0], _trim_arity_call_line[1] + LINE_DIFF) - - def wrapper(*args): - nonlocal found_arity, limit - while 1: - try: - ret = func(*args[limit:]) - found_arity = True - return ret - except TypeError as te: - # re-raise TypeErrors if they did not come from our arity testing - if found_arity: - raise - else: - tb = te.__traceback__ - trim_arity_type_error = ( - extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth - ) - del tb - - if trim_arity_type_error: - if limit < max_limit: - limit += 1 - continue - - raise - # fmt: on - - # copy func name to wrapper for sensible debug output - # (can't use functools.wraps, since that messes with function signature) - func_name = getattr(func, "__name__", getattr(func, "__class__").__name__) - wrapper.__name__ = func_name - wrapper.__doc__ = func.__doc__ - - return wrapper - - -def condition_as_parse_action( - fn: ParseCondition, message: str = None, fatal: bool = False -) -> ParseAction: - """ - Function to convert a simple predicate function that returns ``True`` or ``False`` - into a parse action. Can be used in places when a parse action is required - and :class:`ParserElement.add_condition` cannot be used (such as when adding a condition - to an operator level in :class:`infix_notation`). - - Optional keyword arguments: - - - ``message`` - define a custom message to be used in the raised exception - - ``fatal`` - if True, will raise :class:`ParseFatalException` to stop parsing immediately; - otherwise will raise :class:`ParseException` - - """ - msg = message if message is not None else "failed user-defined condition" - exc_type = ParseFatalException if fatal else ParseException - fn = _trim_arity(fn) - - @wraps(fn) - def pa(s, l, t): - if not bool(fn(s, l, t)): - raise exc_type(s, l, msg) - - return pa - - -def _default_start_debug_action( - instring: str, loc: int, expr: "ParserElement", cache_hit: bool = False -): - cache_hit_str = "*" if cache_hit else "" - print( - ( - "{}Match {} at loc {}({},{})\n {}\n {}^".format( - cache_hit_str, - expr, - loc, - lineno(loc, instring), - col(loc, instring), - line(loc, instring), - " " * (col(loc, instring) - 1), - ) - ) - ) - - -def _default_success_debug_action( - instring: str, - startloc: int, - endloc: int, - expr: "ParserElement", - toks: ParseResults, - cache_hit: bool = False, -): - cache_hit_str = "*" if cache_hit else "" - print("{}Matched {} -> {}".format(cache_hit_str, expr, toks.as_list())) - - -def _default_exception_debug_action( - instring: str, - loc: int, - expr: "ParserElement", - exc: Exception, - cache_hit: bool = False, -): - cache_hit_str = "*" if cache_hit else "" - print( - "{}Match {} failed, {} raised: {}".format( - cache_hit_str, expr, type(exc).__name__, exc - ) - ) - - -def null_debug_action(*args): - """'Do-nothing' debug action, to suppress debugging output during parsing.""" - - -class ParserElement(ABC): - """Abstract base level parser element class.""" - - DEFAULT_WHITE_CHARS: str = " \n\t\r" - verbose_stacktrace: bool = False - _literalStringClass: typing.Optional[type] = None - - @staticmethod - def set_default_whitespace_chars(chars: str) -> None: - r""" - Overrides the default 
whitespace chars - - Example:: - - # default whitespace chars are space, and newline - Word(alphas)[1, ...].parse_string("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl'] - - # change to just treat newline as significant - ParserElement.set_default_whitespace_chars(" \t") - Word(alphas)[1, ...].parse_string("abc def\nghi jkl") # -> ['abc', 'def'] - """ - ParserElement.DEFAULT_WHITE_CHARS = chars - - # update whitespace all parse expressions defined in this module - for expr in _builtin_exprs: - if expr.copyDefaultWhiteChars: - expr.whiteChars = set(chars) - - @staticmethod - def inline_literals_using(cls: type) -> None: - """ - Set class to be used for inclusion of string literals into a parser. - - Example:: - - # default literal class used is Literal - integer = Word(nums) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - date_str.parse_string("1999/12/31") # -> ['1999', '/', '12', '/', '31'] - - - # change to Suppress - ParserElement.inline_literals_using(Suppress) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - date_str.parse_string("1999/12/31") # -> ['1999', '12', '31'] - """ - ParserElement._literalStringClass = cls - - class DebugActions(NamedTuple): - debug_try: typing.Optional[DebugStartAction] - debug_match: typing.Optional[DebugSuccessAction] - debug_fail: typing.Optional[DebugExceptionAction] - - def __init__(self, savelist: bool = False): - self.parseAction: List[ParseAction] = list() - self.failAction: typing.Optional[ParseFailAction] = None - self.customName = None - self._defaultName = None - self.resultsName = None - self.saveAsList = savelist - self.skipWhitespace = True - self.whiteChars = set(ParserElement.DEFAULT_WHITE_CHARS) - self.copyDefaultWhiteChars = True - # used when checking for left-recursion - self.mayReturnEmpty = False - self.keepTabs = False - self.ignoreExprs: List["ParserElement"] = list() - self.debug = False - self.streamlined = False - # optimize exception handling for subclasses that don't advance parse index - self.mayIndexError = True - self.errmsg = "" - # mark results names as modal (report only last) or cumulative (list all) - self.modalResults = True - # custom debug actions - self.debugActions = self.DebugActions(None, None, None) - # avoid redundant calls to preParse - self.callPreparse = True - self.callDuringTry = False - self.suppress_warnings_: List[Diagnostics] = [] - - def suppress_warning(self, warning_type: Diagnostics) -> "ParserElement": - """ - Suppress warnings emitted for a particular diagnostic on this expression. - - Example:: - - base = pp.Forward() - base.suppress_warning(Diagnostics.warn_on_parse_using_empty_Forward) - - # statement would normally raise a warning, but is now suppressed - print(base.parseString("x")) - - """ - self.suppress_warnings_.append(warning_type) - return self - - def copy(self) -> "ParserElement": - """ - Make a copy of this :class:`ParserElement`. Useful for defining - different parse actions for the same parsing pattern, using copies of - the original parse element. 
- - Example:: - - integer = Word(nums).set_parse_action(lambda toks: int(toks[0])) - integerK = integer.copy().add_parse_action(lambda toks: toks[0] * 1024) + Suppress("K") - integerM = integer.copy().add_parse_action(lambda toks: toks[0] * 1024 * 1024) + Suppress("M") - - print((integerK | integerM | integer)[1, ...].parse_string("5K 100 640K 256M")) - - prints:: - - [5120, 100, 655360, 268435456] - - Equivalent form of ``expr.copy()`` is just ``expr()``:: - - integerM = integer().add_parse_action(lambda toks: toks[0] * 1024 * 1024) + Suppress("M") - """ - cpy = copy.copy(self) - cpy.parseAction = self.parseAction[:] - cpy.ignoreExprs = self.ignoreExprs[:] - if self.copyDefaultWhiteChars: - cpy.whiteChars = set(ParserElement.DEFAULT_WHITE_CHARS) - return cpy - - def set_results_name( - self, name: str, list_all_matches: bool = False, *, listAllMatches: bool = False - ) -> "ParserElement": - """ - Define name for referencing matching tokens as a nested attribute - of the returned parse results. - - Normally, results names are assigned as you would assign keys in a dict: - any existing value is overwritten by later values. If it is necessary to - keep all values captured for a particular results name, call ``set_results_name`` - with ``list_all_matches`` = True. - - NOTE: ``set_results_name`` returns a *copy* of the original :class:`ParserElement` object; - this is so that the client can define a basic element, such as an - integer, and reference it in multiple places with different names. - - You can also set results names using the abbreviated syntax, - ``expr("name")`` in place of ``expr.set_results_name("name")`` - - see :class:`__call__`. If ``list_all_matches`` is required, use - ``expr("name*")``. - - Example:: - - date_str = (integer.set_results_name("year") + '/' - + integer.set_results_name("month") + '/' - + integer.set_results_name("day")) - - # equivalent form: - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - """ - listAllMatches = listAllMatches or list_all_matches - return self._setResultsName(name, listAllMatches) - - def _setResultsName(self, name, listAllMatches=False): - if name is None: - return self - newself = self.copy() - if name.endswith("*"): - name = name[:-1] - listAllMatches = True - newself.resultsName = name - newself.modalResults = not listAllMatches - return newself - - def set_break(self, break_flag: bool = True) -> "ParserElement": - """ - Method to invoke the Python pdb debugger when this element is - about to be parsed. Set ``break_flag`` to ``True`` to enable, ``False`` to - disable. - """ - if break_flag: - _parseMethod = self._parse - - def breaker(instring, loc, doActions=True, callPreParse=True): - import pdb - - # this call to pdb.set_trace() is intentional, not a checkin error - pdb.set_trace() - return _parseMethod(instring, loc, doActions, callPreParse) - - breaker._originalParseMethod = _parseMethod - self._parse = breaker - else: - if hasattr(self._parse, "_originalParseMethod"): - self._parse = self._parse._originalParseMethod - return self - - def set_parse_action(self, *fns: ParseAction, **kwargs) -> "ParserElement": - """ - Define one or more actions to perform when successfully matching parse element definition. - - Parse actions can be called to perform data conversions, do extra validation, - update external data structures, or enhance or replace the parsed tokens. 
- Each parse action ``fn`` is a callable method with 0-3 arguments, called as - ``fn(s, loc, toks)`` , ``fn(loc, toks)`` , ``fn(toks)`` , or just ``fn()`` , where: - - - s = the original string being parsed (see note below) - - loc = the location of the matching substring - - toks = a list of the matched tokens, packaged as a :class:`ParseResults` object - - The parsed tokens are passed to the parse action as ParseResults. They can be - modified in place using list-style append, extend, and pop operations to update - the parsed list elements; and with dictionary-style item set and del operations - to add, update, or remove any named results. If the tokens are modified in place, - it is not necessary to return them with a return statement. - - Parse actions can also completely replace the given tokens, with another ``ParseResults`` - object, or with some entirely different object (common for parse actions that perform data - conversions). A convenient way to build a new parse result is to define the values - using a dict, and then create the return value using :class:`ParseResults.from_dict`. - - If None is passed as the ``fn`` parse action, all previously added parse actions for this - expression are cleared. - - Optional keyword arguments: - - - call_during_try = (default= ``False``) indicate if parse action should be run during - lookaheads and alternate testing. For parse actions that have side effects, it is - important to only call the parse action once it is determined that it is being - called as part of a successful parse. For parse actions that perform additional - validation, then call_during_try should be passed as True, so that the validation - code is included in the preliminary "try" parses. - - Note: the default parsing behavior is to expand tabs in the input string - before starting the parsing process. See :class:`parse_string` for more - information on parsing strings containing ```` s, and suggested - methods to maintain a consistent view of the parsed string, the parse - location, and line and column positions within the parsed string. - - Example:: - - # parse dates in the form YYYY/MM/DD - - # use parse action to convert toks from str to int at parse time - def convert_to_int(toks): - return int(toks[0]) - - # use a parse action to verify that the date is a valid date - def is_valid_date(instring, loc, toks): - from datetime import date - year, month, day = toks[::2] - try: - date(year, month, day) - except ValueError: - raise ParseException(instring, loc, "invalid date given") - - integer = Word(nums) - date_str = integer + '/' + integer + '/' + integer - - # add parse actions - integer.set_parse_action(convert_to_int) - date_str.set_parse_action(is_valid_date) - - # note that integer fields are now ints, not strings - date_str.run_tests(''' - # successful parse - note that integer fields were converted to ints - 1999/12/31 - - # fail - invalid date - 1999/13/31 - ''') - """ - if list(fns) == [None]: - self.parseAction = [] - else: - if not all(callable(fn) for fn in fns): - raise TypeError("parse actions must be callable") - self.parseAction = [_trim_arity(fn) for fn in fns] - self.callDuringTry = kwargs.get( - "call_during_try", kwargs.get("callDuringTry", False) - ) - return self - - def add_parse_action(self, *fns: ParseAction, **kwargs) -> "ParserElement": - """ - Add one or more parse actions to expression's list of parse actions. See :class:`set_parse_action`. - - See examples in :class:`copy`. 
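-
-        A minimal sketch of chaining a second action onto an existing one
-        (an illustrative example; the names are hypothetical)::
-
-            integer = Word(nums).set_parse_action(lambda t: int(t[0]))
-            # the added action runs after the str-to-int conversion above
-            integer.add_parse_action(lambda t: t[0] * 2)
-            print(integer.parse_string("21"))  # -> [42]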
- """ - self.parseAction += [_trim_arity(fn) for fn in fns] - self.callDuringTry = self.callDuringTry or kwargs.get( - "call_during_try", kwargs.get("callDuringTry", False) - ) - return self - - def add_condition(self, *fns: ParseCondition, **kwargs) -> "ParserElement": - """Add a boolean predicate function to expression's list of parse actions. See - :class:`set_parse_action` for function call signatures. Unlike ``set_parse_action``, - functions passed to ``add_condition`` need to return boolean success/fail of the condition. - - Optional keyword arguments: - - - message = define a custom message to be used in the raised exception - - fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise - ParseException - - call_during_try = boolean to indicate if this method should be called during internal tryParse calls, - default=False - - Example:: - - integer = Word(nums).set_parse_action(lambda toks: int(toks[0])) - year_int = integer.copy() - year_int.add_condition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later") - date_str = year_int + '/' + integer + '/' + integer - - result = date_str.parse_string("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0), - (line:1, col:1) - """ - for fn in fns: - self.parseAction.append( - condition_as_parse_action( - fn, message=kwargs.get("message"), fatal=kwargs.get("fatal", False) - ) - ) - - self.callDuringTry = self.callDuringTry or kwargs.get( - "call_during_try", kwargs.get("callDuringTry", False) - ) - return self - - def set_fail_action(self, fn: ParseFailAction) -> "ParserElement": - """ - Define action to perform if parsing fails at this expression. - Fail acton fn is a callable function that takes the arguments - ``fn(s, loc, expr, err)`` where: - - - s = string being parsed - - loc = location where expression match was attempted and failed - - expr = the parse expression that failed - - err = the exception thrown - - The function returns no value. 
It may throw :class:`ParseFatalException` - if it is desired to stop parsing immediately.""" - self.failAction = fn - return self - - def _skipIgnorables(self, instring, loc): - exprsFound = True - while exprsFound: - exprsFound = False - for e in self.ignoreExprs: - try: - while 1: - loc, dummy = e._parse(instring, loc) - exprsFound = True - except ParseException: - pass - return loc - - def preParse(self, instring, loc): - if self.ignoreExprs: - loc = self._skipIgnorables(instring, loc) - - if self.skipWhitespace: - instrlen = len(instring) - white_chars = self.whiteChars - while loc < instrlen and instring[loc] in white_chars: - loc += 1 - - return loc - - def parseImpl(self, instring, loc, doActions=True): - return loc, [] - - def postParse(self, instring, loc, tokenlist): - return tokenlist - - # @profile - def _parseNoCache( - self, instring, loc, doActions=True, callPreParse=True - ) -> Tuple[int, ParseResults]: - TRY, MATCH, FAIL = 0, 1, 2 - debugging = self.debug # and doActions) - len_instring = len(instring) - - if debugging or self.failAction: - # print("Match {} at loc {}({}, {})".format(self, loc, lineno(loc, instring), col(loc, instring))) - try: - if callPreParse and self.callPreparse: - pre_loc = self.preParse(instring, loc) - else: - pre_loc = loc - tokens_start = pre_loc - if self.debugActions.debug_try: - self.debugActions.debug_try(instring, tokens_start, self, False) - if self.mayIndexError or pre_loc >= len_instring: - try: - loc, tokens = self.parseImpl(instring, pre_loc, doActions) - except IndexError: - raise ParseException(instring, len_instring, self.errmsg, self) - else: - loc, tokens = self.parseImpl(instring, pre_loc, doActions) - except Exception as err: - # print("Exception raised:", err) - if self.debugActions.debug_fail: - self.debugActions.debug_fail( - instring, tokens_start, self, err, False - ) - if self.failAction: - self.failAction(instring, tokens_start, self, err) - raise - else: - if callPreParse and self.callPreparse: - pre_loc = self.preParse(instring, loc) - else: - pre_loc = loc - tokens_start = pre_loc - if self.mayIndexError or pre_loc >= len_instring: - try: - loc, tokens = self.parseImpl(instring, pre_loc, doActions) - except IndexError: - raise ParseException(instring, len_instring, self.errmsg, self) - else: - loc, tokens = self.parseImpl(instring, pre_loc, doActions) - - tokens = self.postParse(instring, loc, tokens) - - ret_tokens = ParseResults( - tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults - ) - if self.parseAction and (doActions or self.callDuringTry): - if debugging: - try: - for fn in self.parseAction: - try: - tokens = fn(instring, tokens_start, ret_tokens) - except IndexError as parse_action_exc: - exc = ParseException("exception raised in parse action") - raise exc from parse_action_exc - - if tokens is not None and tokens is not ret_tokens: - ret_tokens = ParseResults( - tokens, - self.resultsName, - asList=self.saveAsList - and isinstance(tokens, (ParseResults, list)), - modal=self.modalResults, - ) - except Exception as err: - # print "Exception raised in user parse action:", err - if self.debugActions.debug_fail: - self.debugActions.debug_fail( - instring, tokens_start, self, err, False - ) - raise - else: - for fn in self.parseAction: - try: - tokens = fn(instring, tokens_start, ret_tokens) - except IndexError as parse_action_exc: - exc = ParseException("exception raised in parse action") - raise exc from parse_action_exc - - if tokens is not None and tokens is not ret_tokens: - ret_tokens = 
ParseResults( - tokens, - self.resultsName, - asList=self.saveAsList - and isinstance(tokens, (ParseResults, list)), - modal=self.modalResults, - ) - if debugging: - # print("Matched", self, "->", ret_tokens.as_list()) - if self.debugActions.debug_match: - self.debugActions.debug_match( - instring, tokens_start, loc, self, ret_tokens, False - ) - - return loc, ret_tokens - - def try_parse(self, instring: str, loc: int, raise_fatal: bool = False) -> int: - try: - return self._parse(instring, loc, doActions=False)[0] - except ParseFatalException: - if raise_fatal: - raise - raise ParseException(instring, loc, self.errmsg, self) - - def can_parse_next(self, instring: str, loc: int) -> bool: - try: - self.try_parse(instring, loc) - except (ParseException, IndexError): - return False - else: - return True - - # cache for left-recursion in Forward references - recursion_lock = RLock() - recursion_memos: typing.Dict[ - Tuple[int, "Forward", bool], Tuple[int, Union[ParseResults, Exception]] - ] = {} - - # argument cache for optimizing repeated calls when backtracking through recursive expressions - packrat_cache = ( - {} - ) # this is set later by enabled_packrat(); this is here so that reset_cache() doesn't fail - packrat_cache_lock = RLock() - packrat_cache_stats = [0, 0] - - # this method gets repeatedly called during backtracking with the same arguments - - # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression - def _parseCache( - self, instring, loc, doActions=True, callPreParse=True - ) -> Tuple[int, ParseResults]: - HIT, MISS = 0, 1 - TRY, MATCH, FAIL = 0, 1, 2 - lookup = (self, instring, loc, callPreParse, doActions) - with ParserElement.packrat_cache_lock: - cache = ParserElement.packrat_cache - value = cache.get(lookup) - if value is cache.not_in_cache: - ParserElement.packrat_cache_stats[MISS] += 1 - try: - value = self._parseNoCache(instring, loc, doActions, callPreParse) - except ParseBaseException as pe: - # cache a copy of the exception, without the traceback - cache.set(lookup, pe.__class__(*pe.args)) - raise - else: - cache.set(lookup, (value[0], value[1].copy(), loc)) - return value - else: - ParserElement.packrat_cache_stats[HIT] += 1 - if self.debug and self.debugActions.debug_try: - try: - self.debugActions.debug_try(instring, loc, self, cache_hit=True) - except TypeError: - pass - if isinstance(value, Exception): - if self.debug and self.debugActions.debug_fail: - try: - self.debugActions.debug_fail( - instring, loc, self, value, cache_hit=True - ) - except TypeError: - pass - raise value - - loc_, result, endloc = value[0], value[1].copy(), value[2] - if self.debug and self.debugActions.debug_match: - try: - self.debugActions.debug_match( - instring, loc_, endloc, self, result, cache_hit=True - ) - except TypeError: - pass - - return loc_, result - - _parse = _parseNoCache - - @staticmethod - def reset_cache() -> None: - ParserElement.packrat_cache.clear() - ParserElement.packrat_cache_stats[:] = [0] * len( - ParserElement.packrat_cache_stats - ) - ParserElement.recursion_memos.clear() - - _packratEnabled = False - _left_recursion_enabled = False - - @staticmethod - def disable_memoization() -> None: - """ - Disables active Packrat or Left Recursion parsing and their memoization - - This method also works if neither Packrat nor Left Recursion are enabled. - This makes it safe to call before activating Packrat nor Left Recursion - to clear any previous settings. 
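-
-        For example, to move from packrat parsing to bounded-recursion
-        parsing (an illustrative sketch)::
-
-            import pyparsing as pp
-
-            pp.ParserElement.enable_packrat()
-            # ... parse with packrat memoization ...
-            pp.ParserElement.disable_memoization()
-            pp.ParserElement.enable_left_recursion()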
- """ - ParserElement.reset_cache() - ParserElement._left_recursion_enabled = False - ParserElement._packratEnabled = False - ParserElement._parse = ParserElement._parseNoCache - - @staticmethod - def enable_left_recursion( - cache_size_limit: typing.Optional[int] = None, *, force=False - ) -> None: - """ - Enables "bounded recursion" parsing, which allows for both direct and indirect - left-recursion. During parsing, left-recursive :class:`Forward` elements are - repeatedly matched with a fixed recursion depth that is gradually increased - until finding the longest match. - - Example:: - - import pyparsing as pp - pp.ParserElement.enable_left_recursion() - - E = pp.Forward("E") - num = pp.Word(pp.nums) - # match `num`, or `num '+' num`, or `num '+' num '+' num`, ... - E <<= E + '+' - num | num - - print(E.parse_string("1+2+3")) - - Recursion search naturally memoizes matches of ``Forward`` elements and may - thus skip reevaluation of parse actions during backtracking. This may break - programs with parse actions which rely on strict ordering of side-effects. - - Parameters: - - - cache_size_limit - (default=``None``) - memoize at most this many - ``Forward`` elements during matching; if ``None`` (the default), - memoize all ``Forward`` elements. - - Bounded Recursion parsing works similar but not identical to Packrat parsing, - thus the two cannot be used together. Use ``force=True`` to disable any - previous, conflicting settings. - """ - if force: - ParserElement.disable_memoization() - elif ParserElement._packratEnabled: - raise RuntimeError("Packrat and Bounded Recursion are not compatible") - if cache_size_limit is None: - ParserElement.recursion_memos = _UnboundedMemo() - elif cache_size_limit > 0: - ParserElement.recursion_memos = _LRUMemo(capacity=cache_size_limit) - else: - raise NotImplementedError("Memo size of %s" % cache_size_limit) - ParserElement._left_recursion_enabled = True - - @staticmethod - def enable_packrat(cache_size_limit: int = 128, *, force: bool = False) -> None: - """ - Enables "packrat" parsing, which adds memoizing to the parsing logic. - Repeated parse attempts at the same string location (which happens - often in many complex grammars) can immediately return a cached value, - instead of re-executing parsing/validating code. Memoizing is done of - both valid results and parsing exceptions. - - Parameters: - - - cache_size_limit - (default= ``128``) - if an integer value is provided - will limit the size of the packrat cache; if None is passed, then - the cache size will be unbounded; if 0 is passed, the cache will - be effectively disabled. - - This speedup may break existing programs that use parse actions that - have side-effects. For this reason, packrat parsing is disabled when - you first import pyparsing. To activate the packrat feature, your - program must call the class method :class:`ParserElement.enable_packrat`. - For best results, call ``enable_packrat()`` immediately after - importing pyparsing. - - Example:: - - import pyparsing - pyparsing.ParserElement.enable_packrat() - - Packrat parsing works similar but not identical to Bounded Recursion parsing, - thus the two cannot be used together. Use ``force=True`` to disable any - previous, conflicting settings. 
- """ - if force: - ParserElement.disable_memoization() - elif ParserElement._left_recursion_enabled: - raise RuntimeError("Packrat and Bounded Recursion are not compatible") - if not ParserElement._packratEnabled: - ParserElement._packratEnabled = True - if cache_size_limit is None: - ParserElement.packrat_cache = _UnboundedCache() - else: - ParserElement.packrat_cache = _FifoCache(cache_size_limit) - ParserElement._parse = ParserElement._parseCache - - def parse_string( - self, instring: str, parse_all: bool = False, *, parseAll: bool = False - ) -> ParseResults: - """ - Parse a string with respect to the parser definition. This function is intended as the primary interface to the - client code. - - :param instring: The input string to be parsed. - :param parse_all: If set, the entire input string must match the grammar. - :param parseAll: retained for pre-PEP8 compatibility, will be removed in a future release. - :raises ParseException: Raised if ``parse_all`` is set and the input string does not match the whole grammar. - :returns: the parsed data as a :class:`ParseResults` object, which may be accessed as a `list`, a `dict`, or - an object with attributes if the given parser includes results names. - - If the input string is required to match the entire grammar, ``parse_all`` flag must be set to ``True``. This - is also equivalent to ending the grammar with :class:`StringEnd`(). - - To report proper column numbers, ``parse_string`` operates on a copy of the input string where all tabs are - converted to spaces (8 spaces per tab, as per the default in ``string.expandtabs``). If the input string - contains tabs and the grammar uses parse actions that use the ``loc`` argument to index into the string - being parsed, one can ensure a consistent view of the input string by doing one of the following: - - - calling ``parse_with_tabs`` on your grammar before calling ``parse_string`` (see :class:`parse_with_tabs`), - - define your parse action using the full ``(s,loc,toks)`` signature, and reference the input string using the - parse action's ``s`` argument, or - - explicitly expand the tabs in your input string before calling ``parse_string``. - - Examples: - - By default, partial matches are OK. - - >>> res = Word('a').parse_string('aaaaabaaa') - >>> print(res) - ['aaaaa'] - - The parsing behavior varies by the inheriting class of this abstract class. Please refer to the children - directly to see more examples. - - It raises an exception if parse_all flag is set and instring does not match the whole grammar. - - >>> res = Word('a').parse_string('aaaaabaaa', parse_all=True) - Traceback (most recent call last): - ... 
- pyparsing.ParseException: Expected end of text, found 'b' (at char 5), (line:1, col:6) - """ - parseAll = parse_all or parseAll - - ParserElement.reset_cache() - if not self.streamlined: - self.streamline() - for e in self.ignoreExprs: - e.streamline() - if not self.keepTabs: - instring = instring.expandtabs() - try: - loc, tokens = self._parse(instring, 0) - if parseAll: - loc = self.preParse(instring, loc) - se = Empty() + StringEnd() - se._parse(instring, loc) - except ParseBaseException as exc: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clearing out pyparsing internal stack trace - raise exc.with_traceback(None) - else: - return tokens - - def scan_string( - self, - instring: str, - max_matches: int = _MAX_INT, - overlap: bool = False, - *, - debug: bool = False, - maxMatches: int = _MAX_INT, - ) -> Generator[Tuple[ParseResults, int, int], None, None]: - """ - Scan the input string for expression matches. Each match will return the - matching tokens, start location, and end location. May be called with optional - ``max_matches`` argument, to clip scanning after 'n' matches are found. If - ``overlap`` is specified, then overlapping matches will be reported. - - Note that the start and end locations are reported relative to the string - being parsed. See :class:`parse_string` for more information on parsing - strings with embedded tabs. - - Example:: - - source = "sldjf123lsdjjkf345sldkjf879lkjsfd987" - print(source) - for tokens, start, end in Word(alphas).scan_string(source): - print(' '*start + '^'*(end-start)) - print(' '*start + tokens[0]) - - prints:: - - sldjf123lsdjjkf345sldkjf879lkjsfd987 - ^^^^^ - sldjf - ^^^^^^^ - lsdjjkf - ^^^^^^ - sldkjf - ^^^^^^ - lkjsfd - """ - maxMatches = min(maxMatches, max_matches) - if not self.streamlined: - self.streamline() - for e in self.ignoreExprs: - e.streamline() - - if not self.keepTabs: - instring = str(instring).expandtabs() - instrlen = len(instring) - loc = 0 - preparseFn = self.preParse - parseFn = self._parse - ParserElement.resetCache() - matches = 0 - try: - while loc <= instrlen and matches < maxMatches: - try: - preloc = preparseFn(instring, loc) - nextLoc, tokens = parseFn(instring, preloc, callPreParse=False) - except ParseException: - loc = preloc + 1 - else: - if nextLoc > loc: - matches += 1 - if debug: - print( - { - "tokens": tokens.asList(), - "start": preloc, - "end": nextLoc, - } - ) - yield tokens, preloc, nextLoc - if overlap: - nextloc = preparseFn(instring, loc) - if nextloc > loc: - loc = nextLoc - else: - loc += 1 - else: - loc = nextLoc - else: - loc = preloc + 1 - except ParseBaseException as exc: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clears out pyparsing internal stack trace - raise exc.with_traceback(None) - - def transform_string(self, instring: str, *, debug: bool = False) -> str: - """ - Extension to :class:`scan_string`, to modify matching text with modified tokens that may - be returned from a parse action. To use ``transform_string``, define a grammar and - attach a parse action to it that modifies the returned token list. - Invoking ``transform_string()`` on a target string will then scan for matches, - and replace the matched text patterns according to the logic in the parse - action. ``transform_string()`` returns the resulting transformed string. 
- - Example:: - - wd = Word(alphas) - wd.set_parse_action(lambda toks: toks[0].title()) - - print(wd.transform_string("now is the winter of our discontent made glorious summer by this sun of york.")) - - prints:: - - Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York. - """ - out: List[str] = [] - lastE = 0 - # force preservation of s, to minimize unwanted transformation of string, and to - # keep string locs straight between transform_string and scan_string - self.keepTabs = True - try: - for t, s, e in self.scan_string(instring, debug=debug): - out.append(instring[lastE:s]) - if t: - if isinstance(t, ParseResults): - out += t.as_list() - elif isinstance(t, Iterable) and not isinstance(t, str_type): - out.extend(t) - else: - out.append(t) - lastE = e - out.append(instring[lastE:]) - out = [o for o in out if o] - return "".join([str(s) for s in _flatten(out)]) - except ParseBaseException as exc: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clears out pyparsing internal stack trace - raise exc.with_traceback(None) - - def search_string( - self, - instring: str, - max_matches: int = _MAX_INT, - *, - debug: bool = False, - maxMatches: int = _MAX_INT, - ) -> ParseResults: - """ - Another extension to :class:`scan_string`, simplifying the access to the tokens found - to match the given parse expression. May be called with optional - ``max_matches`` argument, to clip searching after 'n' matches are found. - - Example:: - - # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters - cap_word = Word(alphas.upper(), alphas.lower()) - - print(cap_word.search_string("More than Iron, more than Lead, more than Gold I need Electricity")) - - # the sum() builtin can be used to merge results into a single ParseResults object - print(sum(cap_word.search_string("More than Iron, more than Lead, more than Gold I need Electricity"))) - - prints:: - - [['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']] - ['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity'] - """ - maxMatches = min(maxMatches, max_matches) - try: - return ParseResults( - [t for t, s, e in self.scan_string(instring, maxMatches, debug=debug)] - ) - except ParseBaseException as exc: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clears out pyparsing internal stack trace - raise exc.with_traceback(None) - - def split( - self, - instring: str, - maxsplit: int = _MAX_INT, - include_separators: bool = False, - *, - includeSeparators=False, - ) -> Generator[str, None, None]: - """ - Generator method to split a string using the given expression as a separator. - May be called with optional ``maxsplit`` argument, to limit the number of splits; - and the optional ``include_separators`` argument (default= ``False``), if the separating - matching text should be included in the split results. - - Example:: - - punc = one_of(list(".,;:/-!?")) - print(list(punc.split("This, this?, this sentence, is badly punctuated!"))) - - prints:: - - ['This', ' this', '', ' this sentence', ' is badly punctuated', ''] - """ - includeSeparators = includeSeparators or include_separators - last = 0 - for t, s, e in self.scan_string(instring, max_matches=maxsplit): - yield instring[last:s] - if includeSeparators: - yield t[0] - last = e - yield instring[last:] - - def __add__(self, other) -> "ParserElement": - """ - Implementation of ``+`` operator - returns :class:`And`. 
Adding strings to a :class:`ParserElement` - converts them to :class:`Literal`s by default. - - Example:: - - greet = Word(alphas) + "," + Word(alphas) + "!" - hello = "Hello, World!" - print(hello, "->", greet.parse_string(hello)) - - prints:: - - Hello, World! -> ['Hello', ',', 'World', '!'] - - ``...`` may be used as a parse expression as a short form of :class:`SkipTo`. - - Literal('start') + ... + Literal('end') - - is equivalent to: - - Literal('start') + SkipTo('end')("_skipped*") + Literal('end') - - Note that the skipped text is returned with '_skipped' as a results name, - and to support having multiple skips in the same parser, the value returned is - a list of all skipped text. - """ - if other is Ellipsis: - return _PendingSkip(self) - - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - raise TypeError( - "Cannot combine element of type {} with ParserElement".format( - type(other).__name__ - ) - ) - return And([self, other]) - - def __radd__(self, other) -> "ParserElement": - """ - Implementation of ``+`` operator when left operand is not a :class:`ParserElement` - """ - if other is Ellipsis: - return SkipTo(self)("_skipped*") + self - - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - raise TypeError( - "Cannot combine element of type {} with ParserElement".format( - type(other).__name__ - ) - ) - return other + self - - def __sub__(self, other) -> "ParserElement": - """ - Implementation of ``-`` operator, returns :class:`And` with error stop - """ - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - raise TypeError( - "Cannot combine element of type {} with ParserElement".format( - type(other).__name__ - ) - ) - return self + And._ErrorStop() + other - - def __rsub__(self, other) -> "ParserElement": - """ - Implementation of ``-`` operator when left operand is not a :class:`ParserElement` - """ - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - raise TypeError( - "Cannot combine element of type {} with ParserElement".format( - type(other).__name__ - ) - ) - return other - self - - def __mul__(self, other) -> "ParserElement": - """ - Implementation of ``*`` operator, allows use of ``expr * 3`` in place of - ``expr + expr + expr``. Expressions may also be multiplied by a 2-integer - tuple, similar to ``{min, max}`` multipliers in regular expressions. Tuples - may also include ``None`` as in: - - ``expr*(n, None)`` or ``expr*(n, )`` is equivalent - to ``expr*n + ZeroOrMore(expr)`` - (read as "at least n instances of ``expr``") - - ``expr*(None, n)`` is equivalent to ``expr*(0, n)`` - (read as "0 to n instances of ``expr``") - - ``expr*(None, None)`` is equivalent to ``ZeroOrMore(expr)`` - - ``expr*(1, None)`` is equivalent to ``OneOrMore(expr)`` - - Note that ``expr*(None, n)`` does not raise an exception if - more than n exprs exist in the input stream; that is, - ``expr*(None, n)`` does not enforce a maximum number of expr - occurrences. 
If this behavior is desired, then write - ``expr*(None, n) + ~expr`` - """ - if other is Ellipsis: - other = (0, None) - elif isinstance(other, tuple) and other[:1] == (Ellipsis,): - other = ((0,) + other[1:] + (None,))[:2] - - if isinstance(other, int): - minElements, optElements = other, 0 - elif isinstance(other, tuple): - other = tuple(o if o is not Ellipsis else None for o in other) - other = (other + (None, None))[:2] - if other[0] is None: - other = (0, other[1]) - if isinstance(other[0], int) and other[1] is None: - if other[0] == 0: - return ZeroOrMore(self) - if other[0] == 1: - return OneOrMore(self) - else: - return self * other[0] + ZeroOrMore(self) - elif isinstance(other[0], int) and isinstance(other[1], int): - minElements, optElements = other - optElements -= minElements - else: - raise TypeError( - "cannot multiply ParserElement and ({}) objects".format( - ",".join(type(item).__name__ for item in other) - ) - ) - else: - raise TypeError( - "cannot multiply ParserElement and {} objects".format( - type(other).__name__ - ) - ) - - if minElements < 0: - raise ValueError("cannot multiply ParserElement by negative value") - if optElements < 0: - raise ValueError( - "second tuple value must be greater or equal to first tuple value" - ) - if minElements == optElements == 0: - return And([]) - - if optElements: - - def makeOptionalList(n): - if n > 1: - return Opt(self + makeOptionalList(n - 1)) - else: - return Opt(self) - - if minElements: - if minElements == 1: - ret = self + makeOptionalList(optElements) - else: - ret = And([self] * minElements) + makeOptionalList(optElements) - else: - ret = makeOptionalList(optElements) - else: - if minElements == 1: - ret = self - else: - ret = And([self] * minElements) - return ret - - def __rmul__(self, other) -> "ParserElement": - return self.__mul__(other) - - def __or__(self, other) -> "ParserElement": - """ - Implementation of ``|`` operator - returns :class:`MatchFirst` - """ - if other is Ellipsis: - return _PendingSkip(self, must_skip=True) - - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - raise TypeError( - "Cannot combine element of type {} with ParserElement".format( - type(other).__name__ - ) - ) - return MatchFirst([self, other]) - - def __ror__(self, other) -> "ParserElement": - """ - Implementation of ``|`` operator when left operand is not a :class:`ParserElement` - """ - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - raise TypeError( - "Cannot combine element of type {} with ParserElement".format( - type(other).__name__ - ) - ) - return other | self - - def __xor__(self, other) -> "ParserElement": - """ - Implementation of ``^`` operator - returns :class:`Or` - """ - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - raise TypeError( - "Cannot combine element of type {} with ParserElement".format( - type(other).__name__ - ) - ) - return Or([self, other]) - - def __rxor__(self, other) -> "ParserElement": - """ - Implementation of ``^`` operator when left operand is not a :class:`ParserElement` - """ - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - raise TypeError( - "Cannot combine element of type {} with ParserElement".format( - type(other).__name__ - ) - ) - return other ^ self - - def __and__(self, other) -> "ParserElement": - """ - 
Implementation of ``&`` operator - returns :class:`Each` - """ - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - raise TypeError( - "Cannot combine element of type {} with ParserElement".format( - type(other).__name__ - ) - ) - return Each([self, other]) - - def __rand__(self, other) -> "ParserElement": - """ - Implementation of ``&`` operator when left operand is not a :class:`ParserElement` - """ - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - raise TypeError( - "Cannot combine element of type {} with ParserElement".format( - type(other).__name__ - ) - ) - return other & self - - def __invert__(self) -> "ParserElement": - """ - Implementation of ``~`` operator - returns :class:`NotAny` - """ - return NotAny(self) - - # disable __iter__ to override legacy use of sequential access to __getitem__ to - # iterate over a sequence - __iter__ = None - - def __getitem__(self, key): - """ - use ``[]`` indexing notation as a short form for expression repetition: - - - ``expr[n]`` is equivalent to ``expr*n`` - - ``expr[m, n]`` is equivalent to ``expr*(m, n)`` - - ``expr[n, ...]`` or ``expr[n,]`` is equivalent - to ``expr*n + ZeroOrMore(expr)`` - (read as "at least n instances of ``expr``") - - ``expr[..., n]`` is equivalent to ``expr*(0, n)`` - (read as "0 to n instances of ``expr``") - - ``expr[...]`` and ``expr[0, ...]`` are equivalent to ``ZeroOrMore(expr)`` - - ``expr[1, ...]`` is equivalent to ``OneOrMore(expr)`` - - ``None`` may be used in place of ``...``. - - Note that ``expr[..., n]`` and ``expr[m, n]``do not raise an exception - if more than ``n`` ``expr``s exist in the input stream. If this behavior is - desired, then write ``expr[..., n] + ~expr``. - """ - - # convert single arg keys to tuples - try: - if isinstance(key, str_type): - key = (key,) - iter(key) - except TypeError: - key = (key, key) - - if len(key) > 2: - raise TypeError( - "only 1 or 2 index arguments supported ({}{})".format( - key[:5], "... [{}]".format(len(key)) if len(key) > 5 else "" - ) - ) - - # clip to 2 elements - ret = self * tuple(key[:2]) - return ret - - def __call__(self, name: str = None) -> "ParserElement": - """ - Shortcut for :class:`set_results_name`, with ``list_all_matches=False``. - - If ``name`` is given with a trailing ``'*'`` character, then ``list_all_matches`` will be - passed as ``True``. - - If ``name` is omitted, same as calling :class:`copy`. - - Example:: - - # these are equivalent - userdata = Word(alphas).set_results_name("name") + Word(nums + "-").set_results_name("socsecno") - userdata = Word(alphas)("name") + Word(nums + "-")("socsecno") - """ - if name is not None: - return self._setResultsName(name) - else: - return self.copy() - - def suppress(self) -> "ParserElement": - """ - Suppresses the output of this :class:`ParserElement`; useful to keep punctuation from - cluttering up returned output. - """ - return Suppress(self) - - def ignore_whitespace(self, recursive: bool = True) -> "ParserElement": - """ - Enables the skipping of whitespace before matching the characters in the - :class:`ParserElement`'s defined pattern. 
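-
-        For example (an illustrative sketch; :class:`leave_whitespace` is the
-        inverse operation)::
-
-            w = Word(alphas).leave_whitespace()
-            w.ignore_whitespace()
-            print(w[1, ...].parse_string("ab cd"))  # -> ['ab', 'cd']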
- - :param recursive: If ``True`` (the default), also enable whitespace skipping in child elements (if any) - """ - self.skipWhitespace = True - return self - - def leave_whitespace(self, recursive: bool = True) -> "ParserElement": - """ - Disables the skipping of whitespace before matching the characters in the - :class:`ParserElement`'s defined pattern. This is normally only used internally by - the pyparsing module, but may be needed in some whitespace-sensitive grammars. - - :param recursive: If true (the default), also disable whitespace skipping in child elements (if any) - """ - self.skipWhitespace = False - return self - - def set_whitespace_chars( - self, chars: Union[Set[str], str], copy_defaults: bool = False - ) -> "ParserElement": - """ - Overrides the default whitespace chars - """ - self.skipWhitespace = True - self.whiteChars = set(chars) - self.copyDefaultWhiteChars = copy_defaults - return self - - def parse_with_tabs(self) -> "ParserElement": - """ - Overrides default behavior to expand ``<TAB>`` s to spaces before parsing the input string. - Must be called before ``parse_string`` when the input grammar contains elements that - match ``<TAB>`` characters. - """ - self.keepTabs = True - return self - - def ignore(self, other: "ParserElement") -> "ParserElement": - """ - Define expression to be ignored (e.g., comments) while doing pattern - matching; may be called repeatedly, to define multiple comment or other - ignorable patterns. - - Example:: - - patt = Word(alphas)[1, ...] - patt.parse_string('ablaj /* comment */ lskjd') - # -> ['ablaj'] - - patt.ignore(c_style_comment) - patt.parse_string('ablaj /* comment */ lskjd') - # -> ['ablaj', 'lskjd'] - """ - import typing - - if isinstance(other, str_type): - other = Suppress(other) - - if isinstance(other, Suppress): - if other not in self.ignoreExprs: - self.ignoreExprs.append(other) - else: - self.ignoreExprs.append(Suppress(other.copy())) - return self - - def set_debug_actions( - self, - start_action: DebugStartAction, - success_action: DebugSuccessAction, - exception_action: DebugExceptionAction, - ) -> "ParserElement": - """ - Customize display of debugging messages while doing pattern matching: - - - ``start_action`` - method to be called when an expression is about to be parsed; - should have the signature ``fn(input_string: str, location: int, expression: ParserElement, cache_hit: bool)`` - - - ``success_action`` - method to be called when an expression has successfully parsed; - should have the signature ``fn(input_string: str, start_location: int, end_location: int, expression: ParserElement, parsed_tokens: ParseResults, cache_hit: bool)`` - - - ``exception_action`` - method to be called when expression fails to parse; - should have the signature ``fn(input_string: str, location: int, expression: ParserElement, exception: Exception, cache_hit: bool)`` - """ - self.debugActions = self.DebugActions( - start_action or _default_start_debug_action, - success_action or _default_success_debug_action, - exception_action or _default_exception_debug_action, - ) - self.debug = True - return self - - def set_debug(self, flag: bool = True) -> "ParserElement": - """ - Enable display of debugging messages while doing pattern matching. - Set ``flag`` to ``True`` to enable, ``False`` to disable.
- - Example:: - - wd = Word(alphas).set_name("alphaword") - integer = Word(nums).set_name("numword") - term = wd | integer - - # turn on debugging for wd - wd.set_debug() - - term[1, ...].parse_string("abc 123 xyz 890") - - prints:: - - Match alphaword at loc 0(1,1) - Matched alphaword -> ['abc'] - Match alphaword at loc 3(1,4) - Exception raised:Expected alphaword (at char 4), (line:1, col:5) - Match alphaword at loc 7(1,8) - Matched alphaword -> ['xyz'] - Match alphaword at loc 11(1,12) - Exception raised:Expected alphaword (at char 12), (line:1, col:13) - Match alphaword at loc 15(1,16) - Exception raised:Expected alphaword (at char 15), (line:1, col:16) - - The output shown is that produced by the default debug actions - custom debug actions can be - specified using :class:`set_debug_actions`. Prior to attempting - to match the ``wd`` expression, the debugging message ``"Match <exprname> at loc <n>(<line>,<col>)"`` - is shown. Then if the parse succeeds, a ``"Matched"`` message is shown, or an ``"Exception raised"`` - message is shown. Also note the use of :class:`set_name` to assign a human-readable name to the expression, - which makes debugging and exception messages easier to understand - for instance, the default - name created for the :class:`Word` expression without calling ``set_name`` is ``"W:(A-Za-z)"``. - """ - if flag: - self.set_debug_actions( - _default_start_debug_action, - _default_success_debug_action, - _default_exception_debug_action, - ) - else: - self.debug = False - return self - - @property - def default_name(self) -> str: - if self._defaultName is None: - self._defaultName = self._generateDefaultName() - return self._defaultName - - @abstractmethod - def _generateDefaultName(self): - """ - Child classes must define this method, which defines how the ``default_name`` is set. - """ - - def set_name(self, name: str) -> "ParserElement": - """ - Define name for this expression, makes debugging and exception messages clearer. - Example:: - Word(nums).parse_string("ABC") # -> Exception: Expected W:(0-9) (at char 0), (line:1, col:1) - Word(nums).set_name("integer").parse_string("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1) - """ - self.customName = name - self.errmsg = "Expected " + self.name - if __diag__.enable_debug_on_named_expressions: - self.set_debug() - return self - - @property - def name(self) -> str: - # This will use a user-defined name if available, but otherwise defaults back to the auto-generated name - return self.customName if self.customName is not None else self.default_name - - def __str__(self) -> str: - return self.name - - def __repr__(self) -> str: - return str(self) - - def streamline(self) -> "ParserElement": - self.streamlined = True - self._defaultName = None - return self - - def recurse(self) -> Sequence["ParserElement"]: - return [] - - def _checkRecursion(self, parseElementList): - subRecCheckList = parseElementList[:] + [self] - for e in self.recurse(): - e._checkRecursion(subRecCheckList) - - def validate(self, validateTrace=None) -> None: - """ - Check defined expressions for valid structure, check for infinite recursive definitions. - """ - self._checkRecursion([]) - - def parse_file( - self, - file_or_filename: Union[str, Path, TextIO], - encoding: str = "utf-8", - parse_all: bool = False, - *, - parseAll: bool = False, - ) -> ParseResults: - """ - Execute the parse expression on the given file or filename. - If a filename is specified (instead of a file object), - the entire file is opened, read, and closed before parsing.
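Example (a minimal sketch; the grammar and the file name ``config.txt`` are
assumed for illustration)::

    # config.txt contains lines such as "timeout = 30"
    setting = Word(alphas)("key") + "=" + Word(nums)("value")
    result = setting.parse_file("config.txt")
    print(result["key"], result["value"])   # -> timeout 30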
- """ - parseAll = parseAll or parse_all - try: - file_contents = file_or_filename.read() - except AttributeError: - with open(file_or_filename, "r", encoding=encoding) as f: - file_contents = f.read() - try: - return self.parse_string(file_contents, parseAll) - except ParseBaseException as exc: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clears out pyparsing internal stack trace - raise exc.with_traceback(None) - - def __eq__(self, other): - if self is other: - return True - elif isinstance(other, str_type): - return self.matches(other, parse_all=True) - elif isinstance(other, ParserElement): - return vars(self) == vars(other) - return False - - def __hash__(self): - return id(self) - - def matches( - self, test_string: str, parse_all: bool = True, *, parseAll: bool = True - ) -> bool: - """ - Method for quick testing of a parser against a test string. Good for simple - inline microtests of sub expressions while building up larger parser. - - Parameters: - - ``test_string`` - to test against this expression for a match - - ``parse_all`` - (default= ``True``) - flag to pass to :class:`parse_string` when running tests - - Example:: - - expr = Word(nums) - assert expr.matches("100") - """ - parseAll = parseAll and parse_all - try: - self.parse_string(str(test_string), parse_all=parseAll) - return True - except ParseBaseException: - return False - - def run_tests( - self, - tests: Union[str, List[str]], - parse_all: bool = True, - comment: typing.Optional[Union["ParserElement", str]] = "#", - full_dump: bool = True, - print_results: bool = True, - failure_tests: bool = False, - post_parse: Callable[[str, ParseResults], str] = None, - file: typing.Optional[TextIO] = None, - with_line_numbers: bool = False, - *, - parseAll: bool = True, - fullDump: bool = True, - printResults: bool = True, - failureTests: bool = False, - postParse: Callable[[str, ParseResults], str] = None, - ) -> Tuple[bool, List[Tuple[str, Union[ParseResults, Exception]]]]: - """ - Execute the parse expression on a series of test strings, showing each - test, the parsed results or where the parse failed. Quick and easy way to - run a parse expression against a list of sample strings. 
- - Parameters: - - ``tests`` - a list of separate test strings, or a multiline string of test strings - - ``parse_all`` - (default= ``True``) - flag to pass to :class:`parse_string` when running tests - - ``comment`` - (default= ``'#'``) - expression for indicating embedded comments in the test - string; pass None to disable comment filtering - - ``full_dump`` - (default= ``True``) - dump results as list followed by results names in nested outline; - if False, only dump nested list - - ``print_results`` - (default= ``True``) prints test output to stdout - - ``failure_tests`` - (default= ``False``) indicates if these tests are expected to fail parsing - - ``post_parse`` - (default= ``None``) optional callback for successful parse results; called as - `fn(test_string, parse_results)` and returns a string to be added to the test output - - ``file`` - (default= ``None``) optional file-like object to which test output will be written; - if None, will default to ``sys.stdout`` - - ``with_line_numbers`` - default= ``False``) show test strings with line and column numbers - - Returns: a (success, results) tuple, where success indicates that all tests succeeded - (or failed if ``failure_tests`` is True), and the results contain a list of lines of each - test's output - - Example:: - - number_expr = pyparsing_common.number.copy() - - result = number_expr.run_tests(''' - # unsigned integer - 100 - # negative integer - -100 - # float with scientific notation - 6.02e23 - # integer with scientific notation - 1e-12 - ''') - print("Success" if result[0] else "Failed!") - - result = number_expr.run_tests(''' - # stray character - 100Z - # missing leading digit before '.' - -.100 - # too many '.' - 3.14.159 - ''', failure_tests=True) - print("Success" if result[0] else "Failed!") - - prints:: - - # unsigned integer - 100 - [100] - - # negative integer - -100 - [-100] - - # float with scientific notation - 6.02e23 - [6.02e+23] - - # integer with scientific notation - 1e-12 - [1e-12] - - Success - - # stray character - 100Z - ^ - FAIL: Expected end of text (at char 3), (line:1, col:4) - - # missing leading digit before '.' - -.100 - ^ - FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1) - - # too many '.' - 3.14.159 - ^ - FAIL: Expected end of text (at char 4), (line:1, col:5) - - Success - - Each test string must be on a single line. If you want to test a string that spans multiple - lines, create a test like this:: - - expr.run_tests(r"this is a test\\n of strings that spans \\n 3 lines") - - (Note that this is a raw string literal, you must include the leading ``'r'``.) 
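A ``post_parse`` callback can append computed output to each successful test
(a brief sketch; the callback shown is illustrative)::

    def show_sum(test_string, result):
        return "sum: {}".format(sum(int(tok) for tok in result))

    Word(nums)[1, ...].run_tests('''
        1 2 3
        10 20
        ''', post_parse=show_sum)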
- """ - from .testing import pyparsing_test - - parseAll = parseAll and parse_all - fullDump = fullDump and full_dump - printResults = printResults and print_results - failureTests = failureTests or failure_tests - postParse = postParse or post_parse - if isinstance(tests, str_type): - line_strip = type(tests).strip - tests = [line_strip(test_line) for test_line in tests.rstrip().splitlines()] - if isinstance(comment, str_type): - comment = Literal(comment) - if file is None: - file = sys.stdout - print_ = file.write - - result: Union[ParseResults, Exception] - allResults = [] - comments = [] - success = True - NL = Literal(r"\n").add_parse_action(replace_with("\n")).ignore(quoted_string) - BOM = "\ufeff" - for t in tests: - if comment is not None and comment.matches(t, False) or comments and not t: - comments.append( - pyparsing_test.with_line_numbers(t) if with_line_numbers else t - ) - continue - if not t: - continue - out = [ - "\n" + "\n".join(comments) if comments else "", - pyparsing_test.with_line_numbers(t) if with_line_numbers else t, - ] - comments = [] - try: - # convert newline marks to actual newlines, and strip leading BOM if present - t = NL.transform_string(t.lstrip(BOM)) - result = self.parse_string(t, parse_all=parseAll) - except ParseBaseException as pe: - fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else "" - out.append(pe.explain()) - out.append("FAIL: " + str(pe)) - if ParserElement.verbose_stacktrace: - out.extend(traceback.format_tb(pe.__traceback__)) - success = success and failureTests - result = pe - except Exception as exc: - out.append("FAIL-EXCEPTION: {}: {}".format(type(exc).__name__, exc)) - if ParserElement.verbose_stacktrace: - out.extend(traceback.format_tb(exc.__traceback__)) - success = success and failureTests - result = exc - else: - success = success and not failureTests - if postParse is not None: - try: - pp_value = postParse(t, result) - if pp_value is not None: - if isinstance(pp_value, ParseResults): - out.append(pp_value.dump()) - else: - out.append(str(pp_value)) - else: - out.append(result.dump()) - except Exception as e: - out.append(result.dump(full=fullDump)) - out.append( - "{} failed: {}: {}".format( - postParse.__name__, type(e).__name__, e - ) - ) - else: - out.append(result.dump(full=fullDump)) - out.append("") - - if printResults: - print_("\n".join(out)) - - allResults.append((t, result)) - - return success, allResults - - def create_diagram( - self, - output_html: Union[TextIO, Path, str], - vertical: int = 3, - show_results_names: bool = False, - show_groups: bool = False, - **kwargs, - ) -> None: - """ - Create a railroad diagram for the parser. - - Parameters: - - output_html (str or file-like object) - output target for generated - diagram HTML - - vertical (int) - threshold for formatting multiple alternatives vertically - instead of horizontally (default=3) - - show_results_names - bool flag whether diagram should show annotations for - defined results names - - show_groups - bool flag whether groups should be highlighted with an unlabeled surrounding box - Additional diagram-formatting keyword arguments can also be included; - see railroad.Diagram class. 
- """ - - try: - from .diagram import to_railroad, railroad_to_html - except ImportError as ie: - raise Exception( - "must ``pip install pyparsing[diagrams]`` to generate parser railroad diagrams" - ) from ie - - self.streamline() - - railroad = to_railroad( - self, - vertical=vertical, - show_results_names=show_results_names, - show_groups=show_groups, - diagram_kwargs=kwargs, - ) - if isinstance(output_html, (str, Path)): - with open(output_html, "w", encoding="utf-8") as diag_file: - diag_file.write(railroad_to_html(railroad)) - else: - # we were passed a file-like object, just write to it - output_html.write(railroad_to_html(railroad)) - - setDefaultWhitespaceChars = set_default_whitespace_chars - inlineLiteralsUsing = inline_literals_using - setResultsName = set_results_name - setBreak = set_break - setParseAction = set_parse_action - addParseAction = add_parse_action - addCondition = add_condition - setFailAction = set_fail_action - tryParse = try_parse - canParseNext = can_parse_next - resetCache = reset_cache - enableLeftRecursion = enable_left_recursion - enablePackrat = enable_packrat - parseString = parse_string - scanString = scan_string - searchString = search_string - transformString = transform_string - setWhitespaceChars = set_whitespace_chars - parseWithTabs = parse_with_tabs - setDebugActions = set_debug_actions - setDebug = set_debug - defaultName = default_name - setName = set_name - parseFile = parse_file - runTests = run_tests - ignoreWhitespace = ignore_whitespace - leaveWhitespace = leave_whitespace - - -class _PendingSkip(ParserElement): - # internal placeholder class to hold a place were '...' is added to a parser element, - # once another ParserElement is added, this placeholder will be replaced with a SkipTo - def __init__(self, expr: ParserElement, must_skip: bool = False): - super().__init__() - self.anchor = expr - self.must_skip = must_skip - - def _generateDefaultName(self): - return str(self.anchor + Empty()).replace("Empty", "...") - - def __add__(self, other) -> "ParserElement": - skipper = SkipTo(other).set_name("...")("_skipped*") - if self.must_skip: - - def must_skip(t): - if not t._skipped or t._skipped.as_list() == [""]: - del t[0] - t.pop("_skipped", None) - - def show_skip(t): - if t._skipped.as_list()[-1:] == [""]: - t.pop("_skipped") - t["_skipped"] = "missing <" + repr(self.anchor) + ">" - - return ( - self.anchor + skipper().add_parse_action(must_skip) - | skipper().add_parse_action(show_skip) - ) + other - - return self.anchor + skipper + other - - def __repr__(self): - return self.defaultName - - def parseImpl(self, *args): - raise Exception( - "use of `...` expression without following SkipTo target expression" - ) - - -class Token(ParserElement): - """Abstract :class:`ParserElement` subclass, for defining atomic - matching patterns. - """ - - def __init__(self): - super().__init__(savelist=False) - - def _generateDefaultName(self): - return type(self).__name__ - - -class Empty(Token): - """ - An empty token, will always match. - """ - - def __init__(self): - super().__init__() - self.mayReturnEmpty = True - self.mayIndexError = False - - -class NoMatch(Token): - """ - A token that will never match. - """ - - def __init__(self): - super().__init__() - self.mayReturnEmpty = True - self.mayIndexError = False - self.errmsg = "Unmatchable token" - - def parseImpl(self, instring, loc, doActions=True): - raise ParseException(instring, loc, self.errmsg, self) - - -class Literal(Token): - """ - Token to exactly match a specified string. 
- - Example:: - - Literal('blah').parse_string('blah') # -> ['blah'] - Literal('blah').parse_string('blahfooblah') # -> ['blah'] - Literal('blah').parse_string('bla') # -> Exception: Expected "blah" - - For case-insensitive matching, use :class:`CaselessLiteral`. - - For keyword matching (force word break before and after the matched string), - use :class:`Keyword` or :class:`CaselessKeyword`. - """ - - def __init__(self, match_string: str = "", *, matchString: str = ""): - super().__init__() - match_string = matchString or match_string - self.match = match_string - self.matchLen = len(match_string) - try: - self.firstMatchChar = match_string[0] - except IndexError: - raise ValueError("null string passed to Literal; use Empty() instead") - self.errmsg = "Expected " + self.name - self.mayReturnEmpty = False - self.mayIndexError = False - - # Performance tuning: modify __class__ to select - # a parseImpl optimized for single-character check - if self.matchLen == 1 and type(self) is Literal: - self.__class__ = _SingleCharLiteral - - def _generateDefaultName(self): - return repr(self.match) - - def parseImpl(self, instring, loc, doActions=True): - if instring[loc] == self.firstMatchChar and instring.startswith( - self.match, loc - ): - return loc + self.matchLen, self.match - raise ParseException(instring, loc, self.errmsg, self) - - -class _SingleCharLiteral(Literal): - def parseImpl(self, instring, loc, doActions=True): - if instring[loc] == self.firstMatchChar: - return loc + 1, self.match - raise ParseException(instring, loc, self.errmsg, self) - - -ParserElement._literalStringClass = Literal - - -class Keyword(Token): - """ - Token to exactly match a specified string as a keyword, that is, - it must be immediately followed by a non-keyword character. Compare - with :class:`Literal`: - - - ``Literal("if")`` will match the leading ``'if'`` in - ``'ifAndOnlyIf'``. - - ``Keyword("if")`` will not; it will only match the leading - ``'if'`` in ``'if x=1'``, or ``'if(y==2)'`` - - Accepts two optional constructor arguments in addition to the - keyword string: - - - ``identChars`` is a string of characters that would be valid - identifier characters, defaulting to all alphanumerics + "_" and - "$" - - ``caseless`` allows case-insensitive matching, default is ``False``. - - Example:: - - Keyword("start").parse_string("start") # -> ['start'] - Keyword("start").parse_string("starting") # -> Exception - - For case-insensitive matching, use :class:`CaselessKeyword`. 
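The ``ident_chars`` argument controls which adjacent characters invalidate a
keyword match (an illustrative sketch; the extended character set is an
assumption, not the default)::

    Keyword("if").parse_string("if-else")    # -> ['if'], '-' is not a keyword char
    Keyword("if", ident_chars=alphanums + "_-").parse_string("if-else")
    # -> Exception, '-' now counts as a keyword character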
- """ - - DEFAULT_KEYWORD_CHARS = alphanums + "_$" - - def __init__( - self, - match_string: str = "", - ident_chars: typing.Optional[str] = None, - caseless: bool = False, - *, - matchString: str = "", - identChars: typing.Optional[str] = None, - ): - super().__init__() - identChars = identChars or ident_chars - if identChars is None: - identChars = Keyword.DEFAULT_KEYWORD_CHARS - match_string = matchString or match_string - self.match = match_string - self.matchLen = len(match_string) - try: - self.firstMatchChar = match_string[0] - except IndexError: - raise ValueError("null string passed to Keyword; use Empty() instead") - self.errmsg = "Expected {} {}".format(type(self).__name__, self.name) - self.mayReturnEmpty = False - self.mayIndexError = False - self.caseless = caseless - if caseless: - self.caselessmatch = match_string.upper() - identChars = identChars.upper() - self.identChars = set(identChars) - - def _generateDefaultName(self): - return repr(self.match) - - def parseImpl(self, instring, loc, doActions=True): - errmsg = self.errmsg - errloc = loc - if self.caseless: - if instring[loc : loc + self.matchLen].upper() == self.caselessmatch: - if loc == 0 or instring[loc - 1].upper() not in self.identChars: - if ( - loc >= len(instring) - self.matchLen - or instring[loc + self.matchLen].upper() not in self.identChars - ): - return loc + self.matchLen, self.match - else: - # followed by keyword char - errmsg += ", was immediately followed by keyword character" - errloc = loc + self.matchLen - else: - # preceded by keyword char - errmsg += ", keyword was immediately preceded by keyword character" - errloc = loc - 1 - # else no match just raise plain exception - - else: - if ( - instring[loc] == self.firstMatchChar - and self.matchLen == 1 - or instring.startswith(self.match, loc) - ): - if loc == 0 or instring[loc - 1] not in self.identChars: - if ( - loc >= len(instring) - self.matchLen - or instring[loc + self.matchLen] not in self.identChars - ): - return loc + self.matchLen, self.match - else: - # followed by keyword char - errmsg += ( - ", keyword was immediately followed by keyword character" - ) - errloc = loc + self.matchLen - else: - # preceded by keyword char - errmsg += ", keyword was immediately preceded by keyword character" - errloc = loc - 1 - # else no match just raise plain exception - - raise ParseException(instring, errloc, errmsg, self) - - @staticmethod - def set_default_keyword_chars(chars) -> None: - """ - Overrides the default characters used by :class:`Keyword` expressions. - """ - Keyword.DEFAULT_KEYWORD_CHARS = chars - - setDefaultKeywordChars = set_default_keyword_chars - - -class CaselessLiteral(Literal): - """ - Token to match a specified string, ignoring case of letters. - Note: the matched results will always be in the case of the given - match string, NOT the case of the input text. - - Example:: - - CaselessLiteral("CMD")[1, ...].parse_string("cmd CMD Cmd10") - # -> ['CMD', 'CMD', 'CMD'] - - (Contrast with example for :class:`CaselessKeyword`.) - """ - - def __init__(self, match_string: str = "", *, matchString: str = ""): - match_string = matchString or match_string - super().__init__(match_string.upper()) - # Preserve the defining literal. 
- self.returnString = match_string - self.errmsg = "Expected " + self.name - - def parseImpl(self, instring, loc, doActions=True): - if instring[loc : loc + self.matchLen].upper() == self.match: - return loc + self.matchLen, self.returnString - raise ParseException(instring, loc, self.errmsg, self) - - -class CaselessKeyword(Keyword): - """ - Caseless version of :class:`Keyword`. - - Example:: - - CaselessKeyword("CMD")[1, ...].parse_string("cmd CMD Cmd10") - # -> ['CMD', 'CMD'] - - (Contrast with example for :class:`CaselessLiteral`.) - """ - - def __init__( - self, - match_string: str = "", - ident_chars: typing.Optional[str] = None, - *, - matchString: str = "", - identChars: typing.Optional[str] = None, - ): - identChars = identChars or ident_chars - match_string = matchString or match_string - super().__init__(match_string, identChars, caseless=True) - - -class CloseMatch(Token): - """A variation on :class:`Literal` which matches "close" matches, - that is, strings with at most 'n' mismatching characters. - :class:`CloseMatch` takes parameters: - - - ``match_string`` - string to be matched - - ``caseless`` - a boolean indicating whether to ignore casing when comparing characters - - ``max_mismatches`` - (``default=1``) maximum number of - mismatches allowed to count as a match - - The results from a successful parse will contain the matched text - from the input string and the following named results: - - - ``mismatches`` - a list of the positions within the - match_string where mismatches were found - - ``original`` - the original match_string used to compare - against the input string - - If ``mismatches`` is an empty list, then the match was an exact - match. - - Example:: - - patt = CloseMatch("ATCATCGAATGGA") - patt.parse_string("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']}) - patt.parse_string("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1) - - # exact match - patt.parse_string("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']}) - - # close match allowing up to 2 mismatches - patt = CloseMatch("ATCATCGAATGGA", max_mismatches=2) - patt.parse_string("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']}) - """ - - def __init__( - self, - match_string: str, - max_mismatches: int = None, - *, - maxMismatches: int = 1, - caseless=False, - ): - maxMismatches = max_mismatches if max_mismatches is not None else maxMismatches - super().__init__() - self.match_string = match_string - self.maxMismatches = maxMismatches - self.errmsg = "Expected {!r} (with up to {} mismatches)".format( - self.match_string, self.maxMismatches - ) - self.caseless = caseless - self.mayIndexError = False - self.mayReturnEmpty = False - - def _generateDefaultName(self): - return "{}:{!r}".format(type(self).__name__, self.match_string) - - def parseImpl(self, instring, loc, doActions=True): - start = loc - instrlen = len(instring) - maxloc = start + len(self.match_string) - - if maxloc <= instrlen: - match_string = self.match_string - match_stringloc = 0 - mismatches = [] - maxMismatches = self.maxMismatches - - for match_stringloc, s_m in enumerate( - zip(instring[loc:maxloc], match_string) - ): - src, mat = s_m - if self.caseless: - src, mat = src.lower(), mat.lower() - - if src != mat: - mismatches.append(match_stringloc) - if len(mismatches) > maxMismatches: - break - else: - loc = start + match_stringloc + 1 - 
results = ParseResults([instring[start:loc]]) - results["original"] = match_string - results["mismatches"] = mismatches - return loc, results - - raise ParseException(instring, loc, self.errmsg, self) - - class Word(Token): - """Token for matching words composed of allowed character sets. - Parameters: - - ``init_chars`` - string of all characters that should be used to - match as a word; "ABC" will match "AAA", "ABAB", "CBAC", etc.; - if ``body_chars`` is also specified, then this is the string of - initial characters - - ``body_chars`` - string of characters that - can be used for matching after a matched initial character as - given in ``init_chars``; if omitted, same as the initial characters - (default=``None``) - - ``min`` - minimum number of characters to match (default=1) - - ``max`` - maximum number of characters to match (default=0) - - ``exact`` - exact number of characters to match (default=0) - - ``as_keyword`` - match as a keyword (default=``False``) - - ``exclude_chars`` - characters that might be - found in the input ``body_chars`` string but which should not be - accepted for matching; useful to define a word of all - printables except for one or two characters, for instance - (default=``None``) - - :class:`srange` is useful for defining custom character set strings - for defining :class:`Word` expressions, using range notation from - regular expression character sets. - - A common mistake is to use :class:`Word` to match a specific literal - string, as in ``Word("Address")``. Remember that :class:`Word` - uses the string argument to define *sets* of matchable characters. - This expression would match "Add", "AAA", "dAred", or any other word - made up of the characters 'A', 'd', 'r', 'e', and 's'. To match an - exact literal string, use :class:`Literal` or :class:`Keyword`. - - pyparsing includes helper strings for building Words: - - - :class:`alphas` - - :class:`nums` - - :class:`alphanums` - - :class:`hexnums` - - :class:`alphas8bit` (alphabetic characters in ASCII range 128-255 - - accented, tilded, umlauted, etc.) - - :class:`punc8bit` (non-alphabetic characters in ASCII range - 128-255 - currency, symbols, superscripts, diacriticals, etc.) - - :class:`printables` (any non-whitespace character) - - ``alphas``, ``nums``, and ``printables`` are also defined in several - Unicode sets - see :class:`pyparsing_unicode`.
- - Example:: - - # a word composed of digits - integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9")) - - # a word with a leading capital, and zero or more lowercase - capital_word = Word(alphas.upper(), alphas.lower()) - - # hostnames are alphanumeric, with leading alpha, and '-' - hostname = Word(alphas, alphanums + '-') - - # roman numeral (not a strict parser, accepts invalid mix of characters) - roman = Word("IVXLCDM") - - # any string of non-whitespace characters, except for ',' - csv_value = Word(printables, exclude_chars=",") - """ - - def __init__( - self, - init_chars: str = "", - body_chars: typing.Optional[str] = None, - min: int = 1, - max: int = 0, - exact: int = 0, - as_keyword: bool = False, - exclude_chars: typing.Optional[str] = None, - *, - initChars: typing.Optional[str] = None, - bodyChars: typing.Optional[str] = None, - asKeyword: bool = False, - excludeChars: typing.Optional[str] = None, - ): - initChars = initChars or init_chars - bodyChars = bodyChars or body_chars - asKeyword = asKeyword or as_keyword - excludeChars = excludeChars or exclude_chars - super().__init__() - if not initChars: - raise ValueError( - "invalid {}, initChars cannot be empty string".format( - type(self).__name__ - ) - ) - - initChars = set(initChars) - self.initChars = initChars - if excludeChars: - excludeChars = set(excludeChars) - initChars -= excludeChars - if bodyChars: - bodyChars = set(bodyChars) - excludeChars - self.initCharsOrig = "".join(sorted(initChars)) - - if bodyChars: - self.bodyCharsOrig = "".join(sorted(bodyChars)) - self.bodyChars = set(bodyChars) - else: - self.bodyCharsOrig = "".join(sorted(initChars)) - self.bodyChars = set(initChars) - - self.maxSpecified = max > 0 - - if min < 1: - raise ValueError( - "cannot specify a minimum length < 1; use Opt(Word()) if zero-length word is permitted" - ) - - self.minLen = min - - if max > 0: - self.maxLen = max - else: - self.maxLen = _MAX_INT - - if exact > 0: - self.maxLen = exact - self.minLen = exact - - self.errmsg = "Expected " + self.name - self.mayIndexError = False - self.asKeyword = asKeyword - - # see if we can make a regex for this Word - if " " not in self.initChars | self.bodyChars and (min == 1 and exact == 0): - if self.bodyChars == self.initChars: - if max == 0: - repeat = "+" - elif max == 1: - repeat = "" - else: - repeat = "{{{},{}}}".format( - self.minLen, "" if self.maxLen == _MAX_INT else self.maxLen - ) - self.reString = "[{}]{}".format( - _collapse_string_to_ranges(self.initChars), - repeat, - ) - elif len(self.initChars) == 1: - if max == 0: - repeat = "*" - else: - repeat = "{{0,{}}}".format(max - 1) - self.reString = "{}[{}]{}".format( - re.escape(self.initCharsOrig), - _collapse_string_to_ranges(self.bodyChars), - repeat, - ) - else: - if max == 0: - repeat = "*" - elif max == 2: - repeat = "" - else: - repeat = "{{0,{}}}".format(max - 1) - self.reString = "[{}][{}]{}".format( - _collapse_string_to_ranges(self.initChars), - _collapse_string_to_ranges(self.bodyChars), - repeat, - ) - if self.asKeyword: - self.reString = r"\b" + self.reString + r"\b" - - try: - self.re = re.compile(self.reString) - except re.error: - self.re = None - else: - self.re_match = self.re.match - self.__class__ = _WordRegex - - def _generateDefaultName(self): - def charsAsStr(s): - max_repr_len = 16 - s = _collapse_string_to_ranges(s, re_escape=False) - if len(s) > max_repr_len: - return s[: max_repr_len - 3] + "..." 
- else: - return s - - if self.initChars != self.bodyChars: - base = "W:({}, {})".format( - charsAsStr(self.initChars), charsAsStr(self.bodyChars) - ) - else: - base = "W:({})".format(charsAsStr(self.initChars)) - - # add length specification - if self.minLen > 1 or self.maxLen != _MAX_INT: - if self.minLen == self.maxLen: - if self.minLen == 1: - return base[2:] - else: - return base + "{{{}}}".format(self.minLen) - elif self.maxLen == _MAX_INT: - return base + "{{{},...}}".format(self.minLen) - else: - return base + "{{{},{}}}".format(self.minLen, self.maxLen) - return base - - def parseImpl(self, instring, loc, doActions=True): - if instring[loc] not in self.initChars: - raise ParseException(instring, loc, self.errmsg, self) - - start = loc - loc += 1 - instrlen = len(instring) - bodychars = self.bodyChars - maxloc = start + self.maxLen - maxloc = min(maxloc, instrlen) - while loc < maxloc and instring[loc] in bodychars: - loc += 1 - - throwException = False - if loc - start < self.minLen: - throwException = True - elif self.maxSpecified and loc < instrlen and instring[loc] in bodychars: - throwException = True - elif self.asKeyword: - if ( - start > 0 - and instring[start - 1] in bodychars - or loc < instrlen - and instring[loc] in bodychars - ): - throwException = True - - if throwException: - raise ParseException(instring, loc, self.errmsg, self) - - return loc, instring[start:loc] - - class _WordRegex(Word): - def parseImpl(self, instring, loc, doActions=True): - result = self.re_match(instring, loc) - if not result: - raise ParseException(instring, loc, self.errmsg, self) - - loc = result.end() - return loc, result.group() - - class Char(_WordRegex): - """A short-cut class for defining :class:`Word` ``(characters, exact=1)``, - when defining a match of any single character in a string of - characters. - """ - - def __init__( - self, - charset: str, - as_keyword: bool = False, - exclude_chars: typing.Optional[str] = None, - *, - asKeyword: bool = False, - excludeChars: typing.Optional[str] = None, - ): - asKeyword = asKeyword or as_keyword - excludeChars = excludeChars or exclude_chars - super().__init__( - charset, exact=1, asKeyword=asKeyword, excludeChars=excludeChars - ) - self.reString = "[{}]".format(_collapse_string_to_ranges(self.initChars)) - if asKeyword: - self.reString = r"\b{}\b".format(self.reString) - self.re = re.compile(self.reString) - self.re_match = self.re.match - - class Regex(Token): - r"""Token for matching strings that match a given regular - expression. Defined with string specifying the regular expression in - a form recognized by the stdlib Python `re module <https://docs.python.org/3/library/re.html>`_. - If the given regex contains named groups (defined using ``(?P<name>...)``), - these will be preserved as named :class:`ParseResults`. - - If instead of the Python stdlib ``re`` module you wish to use a different RE module - (such as the ``regex`` module), you can do so by building your ``Regex`` object with - a compiled RE that was compiled using ``regex``.
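Each named group becomes a results name on the returned tokens (a brief
sketch)::

    ssn = Regex(r"(?P<area>\d{3})-(?P<group>\d{2})-(?P<serial>\d{4})")
    ssn.parse_string("123-45-6789")["area"]   # -> '123'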
- - Example:: - - realnum = Regex(r"[+-]?\d+\.\d*") - # ref: https://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression - roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})") - - # named fields in a regex will be returned as named results - date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)') - - # the Regex class will accept re's compiled using the regex module - import regex - parser = pp.Regex(regex.compile(r'[0-9]')) - """ - - def __init__( - self, - pattern: Any, - flags: Union[re.RegexFlag, int] = 0, - as_group_list: bool = False, - as_match: bool = False, - *, - asGroupList: bool = False, - asMatch: bool = False, - ): - """The parameters ``pattern`` and ``flags`` are passed - to the ``re.compile()`` function as-is. See the Python - `re module <https://docs.python.org/3/library/re.html>`_ module for an - explanation of the acceptable patterns and flags. - """ - super().__init__() - asGroupList = asGroupList or as_group_list - asMatch = asMatch or as_match - - if isinstance(pattern, str_type): - if not pattern: - raise ValueError("null string passed to Regex; use Empty() instead") - - self._re = None - self.reString = self.pattern = pattern - self.flags = flags - - elif hasattr(pattern, "pattern") and hasattr(pattern, "match"): - self._re = pattern - self.pattern = self.reString = pattern.pattern - self.flags = flags - - else: - raise TypeError( - "Regex may only be constructed with a string or a compiled RE object" - ) - - self.errmsg = "Expected " + self.name - self.mayIndexError = False - self.asGroupList = asGroupList - self.asMatch = asMatch - if self.asGroupList: - self.parseImpl = self.parseImplAsGroupList - if self.asMatch: - self.parseImpl = self.parseImplAsMatch - - @cached_property - def re(self): - if self._re: - return self._re - else: - try: - return re.compile(self.pattern, self.flags) - except re.error: - raise ValueError( - "invalid pattern ({!r}) passed to Regex".format(self.pattern) - ) - - @cached_property - def re_match(self): - return self.re.match - - @cached_property - def mayReturnEmpty(self): - return self.re_match("") is not None - - def _generateDefaultName(self): - return "Re:({})".format(repr(self.pattern).replace("\\\\", "\\")) - - def parseImpl(self, instring, loc, doActions=True): - result = self.re_match(instring, loc) - if not result: - raise ParseException(instring, loc, self.errmsg, self) - - loc = result.end() - ret = ParseResults(result.group()) - d = result.groupdict() - if d: - for k, v in d.items(): - ret[k] = v - return loc, ret - - def parseImplAsGroupList(self, instring, loc, doActions=True): - result = self.re_match(instring, loc) - if not result: - raise ParseException(instring, loc, self.errmsg, self) - - loc = result.end() - ret = result.groups() - return loc, ret - - def parseImplAsMatch(self, instring, loc, doActions=True): - result = self.re_match(instring, loc) - if not result: - raise ParseException(instring, loc, self.errmsg, self) - - loc = result.end() - ret = result - return loc, ret - - def sub(self, repl: str) -> ParserElement: - r""" - Return :class:`Regex` with an attached parse action to transform the parsed - result as if called using `re.sub(expr, repl, string) <https://docs.python.org/3/library/re.html#re.sub>`_. - - Example:: - - make_html = Regex(r"(\w+):(.*?):").sub(r"<\1>\2</\1>") - print(make_html.transform_string("h1:main title:")) - # prints "<h1>main title</h1>
" - """ - if self.asGroupList: - raise TypeError("cannot use sub() with Regex(asGroupList=True)") - - if self.asMatch and callable(repl): - raise TypeError("cannot use sub() with a callable with Regex(asMatch=True)") - - if self.asMatch: - - def pa(tokens): - return tokens[0].expand(repl) - - else: - - def pa(tokens): - return self.re.sub(repl, tokens[0]) - - return self.add_parse_action(pa) - - -class QuotedString(Token): - r""" - Token for matching strings that are delimited by quoting characters. - - Defined with the following parameters: - - - ``quote_char`` - string of one or more characters defining the - quote delimiting string - - ``esc_char`` - character to re_escape quotes, typically backslash - (default= ``None``) - - ``esc_quote`` - special quote sequence to re_escape an embedded quote - string (such as SQL's ``""`` to re_escape an embedded ``"``) - (default= ``None``) - - ``multiline`` - boolean indicating whether quotes can span - multiple lines (default= ``False``) - - ``unquote_results`` - boolean indicating whether the matched text - should be unquoted (default= ``True``) - - ``end_quote_char`` - string of one or more characters defining the - end of the quote delimited string (default= ``None`` => same as - quote_char) - - ``convert_whitespace_escapes`` - convert escaped whitespace - (``'\t'``, ``'\n'``, etc.) to actual whitespace - (default= ``True``) - - Example:: - - qs = QuotedString('"') - print(qs.search_string('lsjdf "This is the quote" sldjf')) - complex_qs = QuotedString('{{', end_quote_char='}}') - print(complex_qs.search_string('lsjdf {{This is the "quote"}} sldjf')) - sql_qs = QuotedString('"', esc_quote='""') - print(sql_qs.search_string('lsjdf "This is the quote with ""embedded"" quotes" sldjf')) - - prints:: - - [['This is the quote']] - [['This is the "quote"']] - [['This is the quote with "embedded" quotes']] - """ - ws_map = ((r"\t", "\t"), (r"\n", "\n"), (r"\f", "\f"), (r"\r", "\r")) - - def __init__( - self, - quote_char: str = "", - esc_char: typing.Optional[str] = None, - esc_quote: typing.Optional[str] = None, - multiline: bool = False, - unquote_results: bool = True, - end_quote_char: typing.Optional[str] = None, - convert_whitespace_escapes: bool = True, - *, - quoteChar: str = "", - escChar: typing.Optional[str] = None, - escQuote: typing.Optional[str] = None, - unquoteResults: bool = True, - endQuoteChar: typing.Optional[str] = None, - convertWhitespaceEscapes: bool = True, - ): - super().__init__() - escChar = escChar or esc_char - escQuote = escQuote or esc_quote - unquoteResults = unquoteResults and unquote_results - endQuoteChar = endQuoteChar or end_quote_char - convertWhitespaceEscapes = ( - convertWhitespaceEscapes and convert_whitespace_escapes - ) - quote_char = quoteChar or quote_char - - # remove white space from quote chars - wont work anyway - quote_char = quote_char.strip() - if not quote_char: - raise ValueError("quote_char cannot be the empty string") - - if endQuoteChar is None: - endQuoteChar = quote_char - else: - endQuoteChar = endQuoteChar.strip() - if not endQuoteChar: - raise ValueError("endQuoteChar cannot be the empty string") - - self.quoteChar = quote_char - self.quoteCharLen = len(quote_char) - self.firstQuoteChar = quote_char[0] - self.endQuoteChar = endQuoteChar - self.endQuoteCharLen = len(endQuoteChar) - self.escChar = escChar - self.escQuote = escQuote - self.unquoteResults = unquoteResults - self.convertWhitespaceEscapes = convertWhitespaceEscapes - - sep = "" - inner_pattern = "" - - if escQuote: - 
inner_pattern += r"{}(?:{})".format(sep, re.escape(escQuote)) - sep = "|" - - if escChar: - inner_pattern += r"{}(?:{}.)".format(sep, re.escape(escChar)) - sep = "|" - self.escCharReplacePattern = re.escape(self.escChar) + "(.)" - - if len(self.endQuoteChar) > 1: - inner_pattern += ( - "{}(?:".format(sep) - + "|".join( - "(?:{}(?!{}))".format( - re.escape(self.endQuoteChar[:i]), - re.escape(self.endQuoteChar[i:]), - ) - for i in range(len(self.endQuoteChar) - 1, 0, -1) - ) - + ")" - ) - sep = "|" - - if multiline: - self.flags = re.MULTILINE | re.DOTALL - inner_pattern += r"{}(?:[^{}{}])".format( - sep, - _escape_regex_range_chars(self.endQuoteChar[0]), - (_escape_regex_range_chars(escChar) if escChar is not None else ""), - ) - else: - self.flags = 0 - inner_pattern += r"{}(?:[^{}\n\r{}])".format( - sep, - _escape_regex_range_chars(self.endQuoteChar[0]), - (_escape_regex_range_chars(escChar) if escChar is not None else ""), - ) - - self.pattern = "".join( - [ - re.escape(self.quoteChar), - "(?:", - inner_pattern, - ")*", - re.escape(self.endQuoteChar), - ] - ) - - try: - self.re = re.compile(self.pattern, self.flags) - self.reString = self.pattern - self.re_match = self.re.match - except re.error: - raise ValueError( - "invalid pattern {!r} passed to Regex".format(self.pattern) - ) - - self.errmsg = "Expected " + self.name - self.mayIndexError = False - self.mayReturnEmpty = True - - def _generateDefaultName(self): - if self.quoteChar == self.endQuoteChar and isinstance(self.quoteChar, str_type): - return "string enclosed in {!r}".format(self.quoteChar) - - return "quoted string, starting with {} ending with {}".format( - self.quoteChar, self.endQuoteChar - ) - - def parseImpl(self, instring, loc, doActions=True): - result = ( - instring[loc] == self.firstQuoteChar - and self.re_match(instring, loc) - or None - ) - if not result: - raise ParseException(instring, loc, self.errmsg, self) - - loc = result.end() - ret = result.group() - - if self.unquoteResults: - - # strip off quotes - ret = ret[self.quoteCharLen : -self.endQuoteCharLen] - - if isinstance(ret, str_type): - # replace escaped whitespace - if "\\" in ret and self.convertWhitespaceEscapes: - for wslit, wschar in self.ws_map: - ret = ret.replace(wslit, wschar) - - # replace escaped characters - if self.escChar: - ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret) - - # replace escaped quotes - if self.escQuote: - ret = ret.replace(self.escQuote, self.endQuoteChar) - - return loc, ret - - -class CharsNotIn(Token): - """Token for matching words composed of characters *not* in a given - set (will include whitespace in matched characters if not listed in - the provided exclusion set - see example). Defined with string - containing all disallowed characters, and an optional minimum, - maximum, and/or exact length. The default value for ``min`` is - 1 (a minimum value < 1 is not valid); the default values for - ``max`` and ``exact`` are 0, meaning no maximum or exact - length restriction. 
- - Example:: - - # define a comma-separated-value as anything that is not a ',' - csv_value = CharsNotIn(',') - print(delimited_list(csv_value).parse_string("dkls,lsdkjf,s12 34,@!#,213")) - - prints:: - - ['dkls', 'lsdkjf', 's12 34', '@!#', '213'] - """ - - def __init__( - self, - not_chars: str = "", - min: int = 1, - max: int = 0, - exact: int = 0, - *, - notChars: str = "", - ): - super().__init__() - self.skipWhitespace = False - self.notChars = not_chars or notChars - self.notCharsSet = set(self.notChars) - - if min < 1: - raise ValueError( - "cannot specify a minimum length < 1; use " - "Opt(CharsNotIn()) if zero-length char group is permitted" - ) - - self.minLen = min - - if max > 0: - self.maxLen = max - else: - self.maxLen = _MAX_INT - - if exact > 0: - self.maxLen = exact - self.minLen = exact - - self.errmsg = "Expected " + self.name - self.mayReturnEmpty = self.minLen == 0 - self.mayIndexError = False - - def _generateDefaultName(self): - not_chars_str = _collapse_string_to_ranges(self.notChars) - if len(not_chars_str) > 16: - return "!W:({}...)".format(self.notChars[: 16 - 3]) - else: - return "!W:({})".format(self.notChars) - - def parseImpl(self, instring, loc, doActions=True): - notchars = self.notCharsSet - if instring[loc] in notchars: - raise ParseException(instring, loc, self.errmsg, self) - - start = loc - loc += 1 - maxlen = min(start + self.maxLen, len(instring)) - while loc < maxlen and instring[loc] not in notchars: - loc += 1 - - if loc - start < self.minLen: - raise ParseException(instring, loc, self.errmsg, self) - - return loc, instring[start:loc] - - -class White(Token): - """Special matching class for matching whitespace. Normally, - whitespace is ignored by pyparsing grammars. This class is included - when some whitespace structures are significant. Define with - a string containing the whitespace characters to be matched; default - is ``" \\t\\r\\n"``. Also takes optional ``min``, - ``max``, and ``exact`` arguments, as defined for the - :class:`Word` class. 
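Example (a minimal sketch)::

    # make trailing blanks significant instead of silently skipped
    padded = Word(printables) + White(" \t")
    padded.parse_string("value   ")   # -> ['value', '   ']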
- """ - - whiteStrs = { - " ": "", - "\t": "", - "\n": "", - "\r": "", - "\f": "", - "\u00A0": "", - "\u1680": "", - "\u180E": "", - "\u2000": "", - "\u2001": "", - "\u2002": "", - "\u2003": "", - "\u2004": "", - "\u2005": "", - "\u2006": "", - "\u2007": "", - "\u2008": "", - "\u2009": "", - "\u200A": "", - "\u200B": "", - "\u202F": "", - "\u205F": "", - "\u3000": "", - } - - def __init__(self, ws: str = " \t\r\n", min: int = 1, max: int = 0, exact: int = 0): - super().__init__() - self.matchWhite = ws - self.set_whitespace_chars( - "".join(c for c in self.whiteStrs if c not in self.matchWhite), - copy_defaults=True, - ) - # self.leave_whitespace() - self.mayReturnEmpty = True - self.errmsg = "Expected " + self.name - - self.minLen = min - - if max > 0: - self.maxLen = max - else: - self.maxLen = _MAX_INT - - if exact > 0: - self.maxLen = exact - self.minLen = exact - - def _generateDefaultName(self): - return "".join(White.whiteStrs[c] for c in self.matchWhite) - - def parseImpl(self, instring, loc, doActions=True): - if instring[loc] not in self.matchWhite: - raise ParseException(instring, loc, self.errmsg, self) - start = loc - loc += 1 - maxloc = start + self.maxLen - maxloc = min(maxloc, len(instring)) - while loc < maxloc and instring[loc] in self.matchWhite: - loc += 1 - - if loc - start < self.minLen: - raise ParseException(instring, loc, self.errmsg, self) - - return loc, instring[start:loc] - - -class PositionToken(Token): - def __init__(self): - super().__init__() - self.mayReturnEmpty = True - self.mayIndexError = False - - -class GoToColumn(PositionToken): - """Token to advance to a specific column of input text; useful for - tabular report scraping. - """ - - def __init__(self, colno: int): - super().__init__() - self.col = colno - - def preParse(self, instring, loc): - if col(loc, instring) != self.col: - instrlen = len(instring) - if self.ignoreExprs: - loc = self._skipIgnorables(instring, loc) - while ( - loc < instrlen - and instring[loc].isspace() - and col(loc, instring) != self.col - ): - loc += 1 - return loc - - def parseImpl(self, instring, loc, doActions=True): - thiscol = col(loc, instring) - if thiscol > self.col: - raise ParseException(instring, loc, "Text not in expected column", self) - newloc = loc + self.col - thiscol - ret = instring[loc:newloc] - return newloc, ret - - -class LineStart(PositionToken): - r"""Matches if current position is at the beginning of a line within - the parse string - - Example:: - - test = '''\ - AAA this line - AAA and this line - AAA but not this one - B AAA and definitely not this one - ''' - - for t in (LineStart() + 'AAA' + restOfLine).search_string(test): - print(t) - - prints:: - - ['AAA', ' this line'] - ['AAA', ' and this line'] - - """ - - def __init__(self): - super().__init__() - self.leave_whitespace() - self.orig_whiteChars = set() | self.whiteChars - self.whiteChars.discard("\n") - self.skipper = Empty().set_whitespace_chars(self.whiteChars) - self.errmsg = "Expected start of line" - - def preParse(self, instring, loc): - if loc == 0: - return loc - else: - ret = self.skipper.preParse(instring, loc) - if "\n" in self.orig_whiteChars: - while instring[ret : ret + 1] == "\n": - ret = self.skipper.preParse(instring, ret + 1) - return ret - - def parseImpl(self, instring, loc, doActions=True): - if col(loc, instring) == 1: - return loc, [] - raise ParseException(instring, loc, self.errmsg, self) - - -class LineEnd(PositionToken): - """Matches if current position is at the end of a line within the - parse string - """ - - 
def __init__(self): - super().__init__() - self.whiteChars.discard("\n") - self.set_whitespace_chars(self.whiteChars, copy_defaults=False) - self.errmsg = "Expected end of line" - - def parseImpl(self, instring, loc, doActions=True): - if loc < len(instring): - if instring[loc] == "\n": - return loc + 1, "\n" - else: - raise ParseException(instring, loc, self.errmsg, self) - elif loc == len(instring): - return loc + 1, [] - else: - raise ParseException(instring, loc, self.errmsg, self) - - -class StringStart(PositionToken): - """Matches if current position is at the beginning of the parse - string - """ - - def __init__(self): - super().__init__() - self.errmsg = "Expected start of text" - - def parseImpl(self, instring, loc, doActions=True): - if loc != 0: - # see if entire string up to here is just whitespace and ignoreables - if loc != self.preParse(instring, 0): - raise ParseException(instring, loc, self.errmsg, self) - return loc, [] - - -class StringEnd(PositionToken): - """ - Matches if current position is at the end of the parse string - """ - - def __init__(self): - super().__init__() - self.errmsg = "Expected end of text" - - def parseImpl(self, instring, loc, doActions=True): - if loc < len(instring): - raise ParseException(instring, loc, self.errmsg, self) - elif loc == len(instring): - return loc + 1, [] - elif loc > len(instring): - return loc, [] - else: - raise ParseException(instring, loc, self.errmsg, self) - - -class WordStart(PositionToken): - """Matches if the current position is at the beginning of a - :class:`Word`, and is not preceded by any character in a given - set of ``word_chars`` (default= ``printables``). To emulate the - ``\b`` behavior of regular expressions, use - ``WordStart(alphanums)``. ``WordStart`` will also match at - the beginning of the string being parsed, or at the beginning of - a line. - """ - - def __init__(self, word_chars: str = printables, *, wordChars: str = printables): - wordChars = word_chars if wordChars == printables else wordChars - super().__init__() - self.wordChars = set(wordChars) - self.errmsg = "Not at the start of a word" - - def parseImpl(self, instring, loc, doActions=True): - if loc != 0: - if ( - instring[loc - 1] in self.wordChars - or instring[loc] not in self.wordChars - ): - raise ParseException(instring, loc, self.errmsg, self) - return loc, [] - - -class WordEnd(PositionToken): - """Matches if the current position is at the end of a :class:`Word`, - and is not followed by any character in a given set of ``word_chars`` - (default= ``printables``). To emulate the ``\b`` behavior of - regular expressions, use ``WordEnd(alphanums)``. ``WordEnd`` - will also match at the end of the string being parsed, or at the end - of a line. - """ - - def __init__(self, word_chars: str = printables, *, wordChars: str = printables): - wordChars = word_chars if wordChars == printables else wordChars - super().__init__() - self.wordChars = set(wordChars) - self.skipWhitespace = False - self.errmsg = "Not at the end of a word" - - def parseImpl(self, instring, loc, doActions=True): - instrlen = len(instring) - if instrlen > 0 and loc < instrlen: - if ( - instring[loc] in self.wordChars - or instring[loc - 1] not in self.wordChars - ): - raise ParseException(instring, loc, self.errmsg, self) - return loc, [] - - -class ParseExpression(ParserElement): - """Abstract subclass of ParserElement, for combining and - post-processing parsed tokens. 
- """ - - def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = False): - super().__init__(savelist) - self.exprs: List[ParserElement] - if isinstance(exprs, _generatorType): - exprs = list(exprs) - - if isinstance(exprs, str_type): - self.exprs = [self._literalStringClass(exprs)] - elif isinstance(exprs, ParserElement): - self.exprs = [exprs] - elif isinstance(exprs, Iterable): - exprs = list(exprs) - # if sequence of strings provided, wrap with Literal - if any(isinstance(expr, str_type) for expr in exprs): - exprs = ( - self._literalStringClass(e) if isinstance(e, str_type) else e - for e in exprs - ) - self.exprs = list(exprs) - else: - try: - self.exprs = list(exprs) - except TypeError: - self.exprs = [exprs] - self.callPreparse = False - - def recurse(self) -> Sequence[ParserElement]: - return self.exprs[:] - - def append(self, other) -> ParserElement: - self.exprs.append(other) - self._defaultName = None - return self - - def leave_whitespace(self, recursive: bool = True) -> ParserElement: - """ - Extends ``leave_whitespace`` defined in base class, and also invokes ``leave_whitespace`` on - all contained expressions. - """ - super().leave_whitespace(recursive) - - if recursive: - self.exprs = [e.copy() for e in self.exprs] - for e in self.exprs: - e.leave_whitespace(recursive) - return self - - def ignore_whitespace(self, recursive: bool = True) -> ParserElement: - """ - Extends ``ignore_whitespace`` defined in base class, and also invokes ``leave_whitespace`` on - all contained expressions. - """ - super().ignore_whitespace(recursive) - if recursive: - self.exprs = [e.copy() for e in self.exprs] - for e in self.exprs: - e.ignore_whitespace(recursive) - return self - - def ignore(self, other) -> ParserElement: - if isinstance(other, Suppress): - if other not in self.ignoreExprs: - super().ignore(other) - for e in self.exprs: - e.ignore(self.ignoreExprs[-1]) - else: - super().ignore(other) - for e in self.exprs: - e.ignore(self.ignoreExprs[-1]) - return self - - def _generateDefaultName(self): - return "{}:({})".format(self.__class__.__name__, str(self.exprs)) - - def streamline(self) -> ParserElement: - if self.streamlined: - return self - - super().streamline() - - for e in self.exprs: - e.streamline() - - # collapse nested :class:`And`'s of the form ``And(And(And(a, b), c), d)`` to ``And(a, b, c, d)`` - # but only if there are no parse actions or resultsNames on the nested And's - # (likewise for :class:`Or`'s and :class:`MatchFirst`'s) - if len(self.exprs) == 2: - other = self.exprs[0] - if ( - isinstance(other, self.__class__) - and not other.parseAction - and other.resultsName is None - and not other.debug - ): - self.exprs = other.exprs[:] + [self.exprs[1]] - self._defaultName = None - self.mayReturnEmpty |= other.mayReturnEmpty - self.mayIndexError |= other.mayIndexError - - other = self.exprs[-1] - if ( - isinstance(other, self.__class__) - and not other.parseAction - and other.resultsName is None - and not other.debug - ): - self.exprs = self.exprs[:-1] + other.exprs[:] - self._defaultName = None - self.mayReturnEmpty |= other.mayReturnEmpty - self.mayIndexError |= other.mayIndexError - - self.errmsg = "Expected " + str(self) - - return self - - def validate(self, validateTrace=None) -> None: - tmp = (validateTrace if validateTrace is not None else [])[:] + [self] - for e in self.exprs: - e.validate(tmp) - self._checkRecursion([]) - - def copy(self) -> ParserElement: - ret = super().copy() - ret.exprs = [e.copy() for e in self.exprs] - return ret - - 
def _setResultsName(self, name, listAllMatches=False): - if ( - __diag__.warn_ungrouped_named_tokens_in_collection - and Diagnostics.warn_ungrouped_named_tokens_in_collection - not in self.suppress_warnings_ - ): - for e in self.exprs: - if ( - isinstance(e, ParserElement) - and e.resultsName - and Diagnostics.warn_ungrouped_named_tokens_in_collection - not in e.suppress_warnings_ - ): - warnings.warn( - "{}: setting results name {!r} on {} expression " - "collides with {!r} on contained expression".format( - "warn_ungrouped_named_tokens_in_collection", - name, - type(self).__name__, - e.resultsName, - ), - stacklevel=3, - ) - - return super()._setResultsName(name, listAllMatches) - - ignoreWhitespace = ignore_whitespace - leaveWhitespace = leave_whitespace - - -class And(ParseExpression): - """ - Requires all given :class:`ParseExpression` s to be found in the given order. - Expressions may be separated by whitespace. - May be constructed using the ``'+'`` operator. - May also be constructed using the ``'-'`` operator, which will - suppress backtracking. - - Example:: - - integer = Word(nums) - name_expr = Word(alphas)[1, ...] - - expr = And([integer("id"), name_expr("name"), integer("age")]) - # more easily written as: - expr = integer("id") + name_expr("name") + integer("age") - """ - - class _ErrorStop(Empty): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.leave_whitespace() - - def _generateDefaultName(self): - return "-" - - def __init__( - self, exprs_arg: typing.Iterable[ParserElement], savelist: bool = True - ): - exprs: List[ParserElement] = list(exprs_arg) - if exprs and Ellipsis in exprs: - tmp = [] - for i, expr in enumerate(exprs): - if expr is Ellipsis: - if i < len(exprs) - 1: - skipto_arg: ParserElement = (Empty() + exprs[i + 1]).exprs[-1] - tmp.append(SkipTo(skipto_arg)("_skipped*")) - else: - raise Exception( - "cannot construct And with sequence ending in ..." 
- ) - else: - tmp.append(expr) - exprs[:] = tmp - super().__init__(exprs, savelist) - if self.exprs: - self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) - if not isinstance(self.exprs[0], White): - self.set_whitespace_chars( - self.exprs[0].whiteChars, - copy_defaults=self.exprs[0].copyDefaultWhiteChars, - ) - self.skipWhitespace = self.exprs[0].skipWhitespace - else: - self.skipWhitespace = False - else: - self.mayReturnEmpty = True - self.callPreparse = True - - def streamline(self) -> ParserElement: - # collapse any _PendingSkip's - if self.exprs: - if any( - isinstance(e, ParseExpression) - and e.exprs - and isinstance(e.exprs[-1], _PendingSkip) - for e in self.exprs[:-1] - ): - for i, e in enumerate(self.exprs[:-1]): - if e is None: - continue - if ( - isinstance(e, ParseExpression) - and e.exprs - and isinstance(e.exprs[-1], _PendingSkip) - ): - e.exprs[-1] = e.exprs[-1] + self.exprs[i + 1] - self.exprs[i + 1] = None - self.exprs = [e for e in self.exprs if e is not None] - - super().streamline() - - # link any IndentedBlocks to the prior expression - for prev, cur in zip(self.exprs, self.exprs[1:]): - # traverse cur or any first embedded expr of cur looking for an IndentedBlock - # (but watch out for recursive grammar) - seen = set() - while cur: - if id(cur) in seen: - break - seen.add(id(cur)) - if isinstance(cur, IndentedBlock): - prev.add_parse_action( - lambda s, l, t, cur_=cur: setattr( - cur_, "parent_anchor", col(l, s) - ) - ) - break - subs = cur.recurse() - cur = next(iter(subs), None) - - self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) - return self - - def parseImpl(self, instring, loc, doActions=True): - # pass False as callPreParse arg to _parse for first element, since we already - # pre-parsed the string as part of our And pre-parsing - loc, resultlist = self.exprs[0]._parse( - instring, loc, doActions, callPreParse=False - ) - errorStop = False - for e in self.exprs[1:]: - # if isinstance(e, And._ErrorStop): - if type(e) is And._ErrorStop: - errorStop = True - continue - if errorStop: - try: - loc, exprtokens = e._parse(instring, loc, doActions) - except ParseSyntaxException: - raise - except ParseBaseException as pe: - pe.__traceback__ = None - raise ParseSyntaxException._from_exception(pe) - except IndexError: - raise ParseSyntaxException( - instring, len(instring), self.errmsg, self - ) - else: - loc, exprtokens = e._parse(instring, loc, doActions) - if exprtokens or exprtokens.haskeys(): - resultlist += exprtokens - return loc, resultlist - - def __iadd__(self, other): - if isinstance(other, str_type): - other = self._literalStringClass(other) - return self.append(other) # And([self, other]) - - def _checkRecursion(self, parseElementList): - subRecCheckList = parseElementList[:] + [self] - for e in self.exprs: - e._checkRecursion(subRecCheckList) - if not e.mayReturnEmpty: - break - - def _generateDefaultName(self): - inner = " ".join(str(e) for e in self.exprs) - # strip off redundant inner {}'s - while len(inner) > 1 and inner[0 :: len(inner) - 1] == "{}": - inner = inner[1:-1] - return "{" + inner + "}" - - -class Or(ParseExpression): - """Requires that at least one :class:`ParseExpression` is found. If - two expressions match, the expression that matches the longest - string will be used. May be constructed using the ``'^'`` - operator. - - Example:: - - # construct Or using '^' operator - - number = Word(nums) ^ Combine(Word(nums) + '.' 
+ Word(nums)) - print(number.search_string("123 3.1416 789")) - - prints:: - - [['123'], ['3.1416'], ['789']] - """ - - def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = False): - super().__init__(exprs, savelist) - if self.exprs: - self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) - self.skipWhitespace = all(e.skipWhitespace for e in self.exprs) - else: - self.mayReturnEmpty = True - - def streamline(self) -> ParserElement: - super().streamline() - if self.exprs: - self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) - self.saveAsList = any(e.saveAsList for e in self.exprs) - self.skipWhitespace = all( - e.skipWhitespace and not isinstance(e, White) for e in self.exprs - ) - else: - self.saveAsList = False - return self - - def parseImpl(self, instring, loc, doActions=True): - maxExcLoc = -1 - maxException = None - matches = [] - fatals = [] - if all(e.callPreparse for e in self.exprs): - loc = self.preParse(instring, loc) - for e in self.exprs: - try: - loc2 = e.try_parse(instring, loc, raise_fatal=True) - except ParseFatalException as pfe: - pfe.__traceback__ = None - pfe.parserElement = e - fatals.append(pfe) - maxException = None - maxExcLoc = -1 - except ParseException as err: - if not fatals: - err.__traceback__ = None - if err.loc > maxExcLoc: - maxException = err - maxExcLoc = err.loc - except IndexError: - if len(instring) > maxExcLoc: - maxException = ParseException( - instring, len(instring), e.errmsg, self - ) - maxExcLoc = len(instring) - else: - # save match among all matches, to retry longest to shortest - matches.append((loc2, e)) - - if matches: - # re-evaluate all matches in descending order of length of match, in case attached actions - # might change whether or how much they match of the input. 
- matches.sort(key=itemgetter(0), reverse=True) - - if not doActions: - # no further conditions or parse actions to change the selection of - # alternative, so the first match will be the best match - best_expr = matches[0][1] - return best_expr._parse(instring, loc, doActions) - - longest = -1, None - for loc1, expr1 in matches: - if loc1 <= longest[0]: - # already have a longer match than this one will deliver, we are done - return longest - - try: - loc2, toks = expr1._parse(instring, loc, doActions) - except ParseException as err: - err.__traceback__ = None - if err.loc > maxExcLoc: - maxException = err - maxExcLoc = err.loc - else: - if loc2 >= loc1: - return loc2, toks - # didn't match as much as before - elif loc2 > longest[0]: - longest = loc2, toks - - if longest != (-1, None): - return longest - - if fatals: - if len(fatals) > 1: - fatals.sort(key=lambda e: -e.loc) - if fatals[0].loc == fatals[1].loc: - fatals.sort(key=lambda e: (-e.loc, -len(str(e.parserElement)))) - max_fatal = fatals[0] - raise max_fatal - - if maxException is not None: - maxException.msg = self.errmsg - raise maxException - else: - raise ParseException( - instring, loc, "no defined alternatives to match", self - ) - - def __ixor__(self, other): - if isinstance(other, str_type): - other = self._literalStringClass(other) - return self.append(other) # Or([self, other]) - - def _generateDefaultName(self): - return "{" + " ^ ".join(str(e) for e in self.exprs) + "}" - - def _setResultsName(self, name, listAllMatches=False): - if ( - __diag__.warn_multiple_tokens_in_named_alternation - and Diagnostics.warn_multiple_tokens_in_named_alternation - not in self.suppress_warnings_ - ): - if any( - isinstance(e, And) - and Diagnostics.warn_multiple_tokens_in_named_alternation - not in e.suppress_warnings_ - for e in self.exprs - ): - warnings.warn( - "{}: setting results name {!r} on {} expression " - "will return a list of all parsed tokens in an And alternative, " - "in prior versions only the first token was returned; enclose " - "contained argument in Group".format( - "warn_multiple_tokens_in_named_alternation", - name, - type(self).__name__, - ), - stacklevel=3, - ) - - return super()._setResultsName(name, listAllMatches) - - -class MatchFirst(ParseExpression): - """Requires that at least one :class:`ParseExpression` is found. If - more than one expression matches, the first one listed is the one that will - match. May be constructed using the ``'|'`` operator. - - Example:: - - # construct MatchFirst using '|' operator - - # watch the order of expressions to match - number = Word(nums) | Combine(Word(nums) + '.' + Word(nums)) - print(number.search_string("123 3.1416 789")) # Fail! -> [['123'], ['3'], ['1416'], ['789']] - - # put more selective expression first - number = Combine(Word(nums) + '.' 
+ Word(nums)) | Word(nums) - print(number.search_string("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']] - """ - - def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = False): - super().__init__(exprs, savelist) - if self.exprs: - self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) - self.skipWhitespace = all(e.skipWhitespace for e in self.exprs) - else: - self.mayReturnEmpty = True - - def streamline(self) -> ParserElement: - if self.streamlined: - return self - - super().streamline() - if self.exprs: - self.saveAsList = any(e.saveAsList for e in self.exprs) - self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) - self.skipWhitespace = all( - e.skipWhitespace and not isinstance(e, White) for e in self.exprs - ) - else: - self.saveAsList = False - self.mayReturnEmpty = True - return self - - def parseImpl(self, instring, loc, doActions=True): - maxExcLoc = -1 - maxException = None - - for e in self.exprs: - try: - return e._parse( - instring, - loc, - doActions, - ) - except ParseFatalException as pfe: - pfe.__traceback__ = None - pfe.parserElement = e - raise - except ParseException as err: - if err.loc > maxExcLoc: - maxException = err - maxExcLoc = err.loc - except IndexError: - if len(instring) > maxExcLoc: - maxException = ParseException( - instring, len(instring), e.errmsg, self - ) - maxExcLoc = len(instring) - - if maxException is not None: - maxException.msg = self.errmsg - raise maxException - else: - raise ParseException( - instring, loc, "no defined alternatives to match", self - ) - - def __ior__(self, other): - if isinstance(other, str_type): - other = self._literalStringClass(other) - return self.append(other) # MatchFirst([self, other]) - - def _generateDefaultName(self): - return "{" + " | ".join(str(e) for e in self.exprs) + "}" - - def _setResultsName(self, name, listAllMatches=False): - if ( - __diag__.warn_multiple_tokens_in_named_alternation - and Diagnostics.warn_multiple_tokens_in_named_alternation - not in self.suppress_warnings_ - ): - if any( - isinstance(e, And) - and Diagnostics.warn_multiple_tokens_in_named_alternation - not in e.suppress_warnings_ - for e in self.exprs - ): - warnings.warn( - "{}: setting results name {!r} on {} expression " - "will return a list of all parsed tokens in an And alternative, " - "in prior versions only the first token was returned; enclose " - "contained argument in Group".format( - "warn_multiple_tokens_in_named_alternation", - name, - type(self).__name__, - ), - stacklevel=3, - ) - - return super()._setResultsName(name, listAllMatches) - - -class Each(ParseExpression): - """Requires all given :class:`ParseExpression` s to be found, but in - any order. Expressions may be separated by whitespace. - - May be constructed using the ``'&'`` operator. 
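# [illustrative aside - a minimal sketch, not part of the vendored file being deleted]
# The '&' operator builds an Each: all pieces must appear, in any order.
# Assumes pyparsing is installed; names are made up for the demo.
import pyparsing as pp

both = pp.Keyword("A") & pp.Keyword("B")
print(both.parse_string("B A"))  # -> ['B', 'A'] - input order is kept
# [end aside]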
- - Example:: - - color = one_of("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN") - shape_type = one_of("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON") - integer = Word(nums) - shape_attr = "shape:" + shape_type("shape") - posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn") - color_attr = "color:" + color("color") - size_attr = "size:" + integer("size") - - # use Each (using operator '&') to accept attributes in any order - # (shape and posn are required, color and size are optional) - shape_spec = shape_attr & posn_attr & Opt(color_attr) & Opt(size_attr) - - shape_spec.run_tests(''' - shape: SQUARE color: BLACK posn: 100, 120 - shape: CIRCLE size: 50 color: BLUE posn: 50,80 - color:GREEN size:20 shape:TRIANGLE posn:20,40 - ''' - ) - - prints:: - - shape: SQUARE color: BLACK posn: 100, 120 - ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']] - - color: BLACK - - posn: ['100', ',', '120'] - - x: 100 - - y: 120 - - shape: SQUARE - - - shape: CIRCLE size: 50 color: BLUE posn: 50,80 - ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']] - - color: BLUE - - posn: ['50', ',', '80'] - - x: 50 - - y: 80 - - shape: CIRCLE - - size: 50 - - - color: GREEN size: 20 shape: TRIANGLE posn: 20,40 - ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']] - - color: GREEN - - posn: ['20', ',', '40'] - - x: 20 - - y: 40 - - shape: TRIANGLE - - size: 20 - """ - - def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = True): - super().__init__(exprs, savelist) - if self.exprs: - self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) - else: - self.mayReturnEmpty = True - self.skipWhitespace = True - self.initExprGroups = True - self.saveAsList = True - - def streamline(self) -> ParserElement: - super().streamline() - if self.exprs: - self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) - else: - self.mayReturnEmpty = True - return self - - def parseImpl(self, instring, loc, doActions=True): - if self.initExprGroups: - self.opt1map = dict( - (id(e.expr), e) for e in self.exprs if isinstance(e, Opt) - ) - opt1 = [e.expr for e in self.exprs if isinstance(e, Opt)] - opt2 = [ - e - for e in self.exprs - if e.mayReturnEmpty and not isinstance(e, (Opt, Regex, ZeroOrMore)) - ] - self.optionals = opt1 + opt2 - self.multioptionals = [ - e.expr.set_results_name(e.resultsName, list_all_matches=True) - for e in self.exprs - if isinstance(e, _MultipleMatch) - ] - self.multirequired = [ - e.expr.set_results_name(e.resultsName, list_all_matches=True) - for e in self.exprs - if isinstance(e, OneOrMore) - ] - self.required = [ - e for e in self.exprs if not isinstance(e, (Opt, ZeroOrMore, OneOrMore)) - ] - self.required += self.multirequired - self.initExprGroups = False - - tmpLoc = loc - tmpReqd = self.required[:] - tmpOpt = self.optionals[:] - multis = self.multioptionals[:] - matchOrder = [] - - keepMatching = True - failed = [] - fatals = [] - while keepMatching: - tmpExprs = tmpReqd + tmpOpt + multis - failed.clear() - fatals.clear() - for e in tmpExprs: - try: - tmpLoc = e.try_parse(instring, tmpLoc, raise_fatal=True) - except ParseFatalException as pfe: - pfe.__traceback__ = None - pfe.parserElement = e - fatals.append(pfe) - failed.append(e) - except ParseException: - failed.append(e) - else: - matchOrder.append(self.opt1map.get(id(e), e)) - if e in tmpReqd: - tmpReqd.remove(e) - elif e in tmpOpt: - tmpOpt.remove(e) - if len(failed) == len(tmpExprs): - keepMatching = False - - # 
look for any ParseFatalExceptions - if fatals: - if len(fatals) > 1: - fatals.sort(key=lambda e: -e.loc) - if fatals[0].loc == fatals[1].loc: - fatals.sort(key=lambda e: (-e.loc, -len(str(e.parserElement)))) - max_fatal = fatals[0] - raise max_fatal - - if tmpReqd: - missing = ", ".join([str(e) for e in tmpReqd]) - raise ParseException( - instring, - loc, - "Missing one or more required elements ({})".format(missing), - ) - - # add any unmatched Opts, in case they have default values defined - matchOrder += [e for e in self.exprs if isinstance(e, Opt) and e.expr in tmpOpt] - - total_results = ParseResults([]) - for e in matchOrder: - loc, results = e._parse(instring, loc, doActions) - total_results += results - - return loc, total_results - - def _generateDefaultName(self): - return "{" + " & ".join(str(e) for e in self.exprs) + "}" - - -class ParseElementEnhance(ParserElement): - """Abstract subclass of :class:`ParserElement`, for combining and - post-processing parsed tokens. - """ - - def __init__(self, expr: Union[ParserElement, str], savelist: bool = False): - super().__init__(savelist) - if isinstance(expr, str_type): - if issubclass(self._literalStringClass, Token): - expr = self._literalStringClass(expr) - elif issubclass(type(self), self._literalStringClass): - expr = Literal(expr) - else: - expr = self._literalStringClass(Literal(expr)) - self.expr = expr - if expr is not None: - self.mayIndexError = expr.mayIndexError - self.mayReturnEmpty = expr.mayReturnEmpty - self.set_whitespace_chars( - expr.whiteChars, copy_defaults=expr.copyDefaultWhiteChars - ) - self.skipWhitespace = expr.skipWhitespace - self.saveAsList = expr.saveAsList - self.callPreparse = expr.callPreparse - self.ignoreExprs.extend(expr.ignoreExprs) - - def recurse(self) -> Sequence[ParserElement]: - return [self.expr] if self.expr is not None else [] - - def parseImpl(self, instring, loc, doActions=True): - if self.expr is not None: - return self.expr._parse(instring, loc, doActions, callPreParse=False) - else: - raise ParseException(instring, loc, "No expression defined", self) - - def leave_whitespace(self, recursive: bool = True) -> ParserElement: - super().leave_whitespace(recursive) - - if recursive: - self.expr = self.expr.copy() - if self.expr is not None: - self.expr.leave_whitespace(recursive) - return self - - def ignore_whitespace(self, recursive: bool = True) -> ParserElement: - super().ignore_whitespace(recursive) - - if recursive: - self.expr = self.expr.copy() - if self.expr is not None: - self.expr.ignore_whitespace(recursive) - return self - - def ignore(self, other) -> ParserElement: - if isinstance(other, Suppress): - if other not in self.ignoreExprs: - super().ignore(other) - if self.expr is not None: - self.expr.ignore(self.ignoreExprs[-1]) - else: - super().ignore(other) - if self.expr is not None: - self.expr.ignore(self.ignoreExprs[-1]) - return self - - def streamline(self) -> ParserElement: - super().streamline() - if self.expr is not None: - self.expr.streamline() - return self - - def _checkRecursion(self, parseElementList): - if self in parseElementList: - raise RecursiveGrammarException(parseElementList + [self]) - subRecCheckList = parseElementList[:] + [self] - if self.expr is not None: - self.expr._checkRecursion(subRecCheckList) - - def validate(self, validateTrace=None) -> None: - if validateTrace is None: - validateTrace = [] - tmp = validateTrace[:] + [self] - if self.expr is not None: - self.expr.validate(tmp) - self._checkRecursion([]) - - def _generateDefaultName(self): - 
return "{}:({})".format(self.__class__.__name__, str(self.expr)) - - ignoreWhitespace = ignore_whitespace - leaveWhitespace = leave_whitespace - - -class IndentedBlock(ParseElementEnhance): - """ - Expression to match one or more expressions at a given indentation level. - Useful for parsing text where structure is implied by indentation (like Python source code). - """ - - class _Indent(Empty): - def __init__(self, ref_col: int): - super().__init__() - self.errmsg = "expected indent at column {}".format(ref_col) - self.add_condition(lambda s, l, t: col(l, s) == ref_col) - - class _IndentGreater(Empty): - def __init__(self, ref_col: int): - super().__init__() - self.errmsg = "expected indent at column greater than {}".format(ref_col) - self.add_condition(lambda s, l, t: col(l, s) > ref_col) - - def __init__( - self, expr: ParserElement, *, recursive: bool = False, grouped: bool = True - ): - super().__init__(expr, savelist=True) - # if recursive: - # raise NotImplementedError("IndentedBlock with recursive is not implemented") - self._recursive = recursive - self._grouped = grouped - self.parent_anchor = 1 - - def parseImpl(self, instring, loc, doActions=True): - # advance parse position to non-whitespace by using an Empty() - # this should be the column to be used for all subsequent indented lines - anchor_loc = Empty().preParse(instring, loc) - - # see if self.expr matches at the current location - if not it will raise an exception - # and no further work is necessary - self.expr.try_parse(instring, anchor_loc, doActions) - - indent_col = col(anchor_loc, instring) - peer_detect_expr = self._Indent(indent_col) - - inner_expr = Empty() + peer_detect_expr + self.expr - if self._recursive: - sub_indent = self._IndentGreater(indent_col) - nested_block = IndentedBlock( - self.expr, recursive=self._recursive, grouped=self._grouped - ) - nested_block.set_debug(self.debug) - nested_block.parent_anchor = indent_col - inner_expr += Opt(sub_indent + nested_block) - - inner_expr.set_name(f"inner {hex(id(inner_expr))[-4:].upper()}@{indent_col}") - block = OneOrMore(inner_expr) - - trailing_undent = self._Indent(self.parent_anchor) | StringEnd() - - if self._grouped: - wrapper = Group - else: - wrapper = lambda expr: expr - return (wrapper(block) + Optional(trailing_undent)).parseImpl( - instring, anchor_loc, doActions - ) - - -class AtStringStart(ParseElementEnhance): - """Matches if expression matches at the beginning of the parse - string:: - - AtStringStart(Word(nums)).parse_string("123") - # prints ["123"] - - AtStringStart(Word(nums)).parse_string(" 123") - # raises ParseException - """ - - def __init__(self, expr: Union[ParserElement, str]): - super().__init__(expr) - self.callPreparse = False - - def parseImpl(self, instring, loc, doActions=True): - if loc != 0: - raise ParseException(instring, loc, "not found at string start") - return super().parseImpl(instring, loc, doActions) - - -class AtLineStart(ParseElementEnhance): - r"""Matches if an expression matches at the beginning of a line within - the parse string - - Example:: - - test = '''\ - AAA this line - AAA and this line - AAA but not this one - B AAA and definitely not this one - ''' - - for t in (AtLineStart('AAA') + restOfLine).search_string(test): - print(t) - - prints:: - - ['AAA', ' this line'] - ['AAA', ' and this line'] - - """ - - def __init__(self, expr: Union[ParserElement, str]): - super().__init__(expr) - self.callPreparse = False - - def parseImpl(self, instring, loc, doActions=True): - if col(loc, instring) != 1: - raise 
ParseException(instring, loc, "not found at line start") - return super().parseImpl(instring, loc, doActions) - - -class FollowedBy(ParseElementEnhance): - """Lookahead matching of the given parse expression. - ``FollowedBy`` does *not* advance the parsing position within - the input string, it only verifies that the specified parse - expression matches at the current position. ``FollowedBy`` - always returns a null token list. If any results names are defined - in the lookahead expression, those *will* be returned for access by - name. - - Example:: - - # use FollowedBy to match a label only if it is followed by a ':' - data_word = Word(alphas) - label = data_word + FollowedBy(':') - attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)) - - attr_expr[1, ...].parse_string("shape: SQUARE color: BLACK posn: upper left").pprint() - - prints:: - - [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']] - """ - - def __init__(self, expr: Union[ParserElement, str]): - super().__init__(expr) - self.mayReturnEmpty = True - - def parseImpl(self, instring, loc, doActions=True): - # by using self._expr.parse and deleting the contents of the returned ParseResults list - # we keep any named results that were defined in the FollowedBy expression - _, ret = self.expr._parse(instring, loc, doActions=doActions) - del ret[:] - - return loc, ret - - -class PrecededBy(ParseElementEnhance): - """Lookbehind matching of the given parse expression. - ``PrecededBy`` does not advance the parsing position within the - input string, it only verifies that the specified parse expression - matches prior to the current position. ``PrecededBy`` always - returns a null token list, but if a results name is defined on the - given expression, it is returned. - - Parameters: - - - expr - expression that must match prior to the current parse - location - - retreat - (default= ``None``) - (int) maximum number of characters - to lookbehind prior to the current parse location - - If the lookbehind expression is a string, :class:`Literal`, - :class:`Keyword`, or a :class:`Word` or :class:`CharsNotIn` - with a specified exact or maximum length, then the retreat - parameter is not required. Otherwise, retreat must be specified to - give a maximum number of characters to look back from - the current parse position for a lookbehind match. 
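# [illustrative aside - a minimal sketch, not part of the vendored file being deleted]
# PrecededBy with a plain string needs no retreat value: the lookbehind window
# is simply the string's own length. Assumes pyparsing is installed.
import pyparsing as pp

tagged_num = pp.PrecededBy("#") + pp.Word(pp.nums)
print(tagged_num.search_string("#123 456 #789"))  # -> [['123'], ['789']]
# [end aside]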
- - Example:: - - # VB-style variable names with type prefixes - int_var = PrecededBy("#") + pyparsing_common.identifier - str_var = PrecededBy("$") + pyparsing_common.identifier - - """ - - def __init__( - self, expr: Union[ParserElement, str], retreat: typing.Optional[int] = None - ): - super().__init__(expr) - self.expr = self.expr().leave_whitespace() - self.mayReturnEmpty = True - self.mayIndexError = False - self.exact = False - if isinstance(expr, str_type): - retreat = len(expr) - self.exact = True - elif isinstance(expr, (Literal, Keyword)): - retreat = expr.matchLen - self.exact = True - elif isinstance(expr, (Word, CharsNotIn)) and expr.maxLen != _MAX_INT: - retreat = expr.maxLen - self.exact = True - elif isinstance(expr, PositionToken): - retreat = 0 - self.exact = True - self.retreat = retreat - self.errmsg = "not preceded by " + str(expr) - self.skipWhitespace = False - self.parseAction.append(lambda s, l, t: t.__delitem__(slice(None, None))) - - def parseImpl(self, instring, loc=0, doActions=True): - if self.exact: - if loc < self.retreat: - raise ParseException(instring, loc, self.errmsg) - start = loc - self.retreat - _, ret = self.expr._parse(instring, start) - else: - # retreat specified a maximum lookbehind window, iterate - test_expr = self.expr + StringEnd() - instring_slice = instring[max(0, loc - self.retreat) : loc] - last_expr = ParseException(instring, loc, self.errmsg) - for offset in range(1, min(loc, self.retreat + 1) + 1): - try: - # print('trying', offset, instring_slice, repr(instring_slice[loc - offset:])) - _, ret = test_expr._parse( - instring_slice, len(instring_slice) - offset - ) - except ParseBaseException as pbe: - last_expr = pbe - else: - break - else: - raise last_expr - return loc, ret - - -class Located(ParseElementEnhance): - """ - Decorates a returned token with its starting and ending - locations in the input string. - - This helper adds the following results names: - - - ``locn_start`` - location where matched expression begins - - ``locn_end`` - location where matched expression ends - - ``value`` - the actual parsed results - - Be careful if the input text contains ``<TAB>`` characters, you - may want to call :class:`ParserElement.parse_with_tabs` - - Example:: - - wd = Word(alphas) - for match in Located(wd).search_string("ljsdf123lksdjjf123lkkjj1222"): - print(match) - - prints:: - - [0, ['ljsdf'], 5] - [8, ['lksdjjf'], 15] - [18, ['lkkjj'], 23] - - """ - - def parseImpl(self, instring, loc, doActions=True): - start = loc - loc, tokens = self.expr._parse(instring, start, doActions, callPreParse=False) - ret_tokens = ParseResults([start, tokens, loc]) - ret_tokens["locn_start"] = start - ret_tokens["value"] = tokens - ret_tokens["locn_end"] = loc - if self.resultsName: - # must return as a list, so that the name will be attached to the complete group - return loc, [ret_tokens] - else: - return loc, ret_tokens - - -class NotAny(ParseElementEnhance): - """ - Lookahead to disallow matching with the given parse expression. - ``NotAny`` does *not* advance the parsing position within the - input string, it only verifies that the specified parse expression - does *not* match at the current position. Also, ``NotAny`` does - *not* skip over leading whitespace. ``NotAny`` always returns - a null token list. May be constructed using the ``'~'`` operator.
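# [illustrative aside - a minimal sketch, not part of the vendored file being deleted]
# '~expr' builds a NotAny: a zero-width guard that fails wherever expr would match.
# Assumes pyparsing is installed.
import pyparsing as pp

NOT = pp.CaselessKeyword("NOT")
ident = ~NOT + pp.Word(pp.alphas)   # reject the reserved word, then match a name
print(ident.parse_string("price"))  # -> ['price']; parsing "NOT" raises ParseException
# [end aside]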
- - Example:: - - AND, OR, NOT = map(CaselessKeyword, "AND OR NOT".split()) - - # take care not to mistake keywords for identifiers - ident = ~(AND | OR | NOT) + Word(alphas) - boolean_term = Opt(NOT) + ident - - # very crude boolean expression - to support parenthesis groups and - # operation hierarchy, use infix_notation - boolean_expr = boolean_term + ((AND | OR) + boolean_term)[...] - - # integers that are followed by "." are actually floats - integer = Word(nums) + ~Char(".") - """ - - def __init__(self, expr: Union[ParserElement, str]): - super().__init__(expr) - # do NOT use self.leave_whitespace(), don't want to propagate to exprs - # self.leave_whitespace() - self.skipWhitespace = False - - self.mayReturnEmpty = True - self.errmsg = "Found unwanted token, " + str(self.expr) - - def parseImpl(self, instring, loc, doActions=True): - if self.expr.can_parse_next(instring, loc): - raise ParseException(instring, loc, self.errmsg, self) - return loc, [] - - def _generateDefaultName(self): - return "~{" + str(self.expr) + "}" - - -class _MultipleMatch(ParseElementEnhance): - def __init__( - self, - expr: ParserElement, - stop_on: typing.Optional[Union[ParserElement, str]] = None, - *, - stopOn: typing.Optional[Union[ParserElement, str]] = None, - ): - super().__init__(expr) - stopOn = stopOn or stop_on - self.saveAsList = True - ender = stopOn - if isinstance(ender, str_type): - ender = self._literalStringClass(ender) - self.stopOn(ender) - - def stopOn(self, ender) -> ParserElement: - if isinstance(ender, str_type): - ender = self._literalStringClass(ender) - self.not_ender = ~ender if ender is not None else None - return self - - def parseImpl(self, instring, loc, doActions=True): - self_expr_parse = self.expr._parse - self_skip_ignorables = self._skipIgnorables - check_ender = self.not_ender is not None - if check_ender: - try_not_ender = self.not_ender.tryParse - - # must be at least one (but first see if we are the stopOn sentinel; - # if so, fail) - if check_ender: - try_not_ender(instring, loc) - loc, tokens = self_expr_parse(instring, loc, doActions) - try: - hasIgnoreExprs = not not self.ignoreExprs - while 1: - if check_ender: - try_not_ender(instring, loc) - if hasIgnoreExprs: - preloc = self_skip_ignorables(instring, loc) - else: - preloc = loc - loc, tmptokens = self_expr_parse(instring, preloc, doActions) - if tmptokens or tmptokens.haskeys(): - tokens += tmptokens - except (ParseException, IndexError): - pass - - return loc, tokens - - def _setResultsName(self, name, listAllMatches=False): - if ( - __diag__.warn_ungrouped_named_tokens_in_collection - and Diagnostics.warn_ungrouped_named_tokens_in_collection - not in self.suppress_warnings_ - ): - for e in [self.expr] + self.expr.recurse(): - if ( - isinstance(e, ParserElement) - and e.resultsName - and Diagnostics.warn_ungrouped_named_tokens_in_collection - not in e.suppress_warnings_ - ): - warnings.warn( - "{}: setting results name {!r} on {} expression " - "collides with {!r} on contained expression".format( - "warn_ungrouped_named_tokens_in_collection", - name, - type(self).__name__, - e.resultsName, - ), - stacklevel=3, - ) - - return super()._setResultsName(name, listAllMatches) - - -class OneOrMore(_MultipleMatch): - """ - Repetition of one or more of the given expression. 
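# [illustrative aside - a minimal sketch, not part of the vendored file being deleted]
# OneOrMore and the '[1, ...]' slice notation are equivalent spellings.
# Assumes pyparsing is installed.
import pyparsing as pp

word = pp.Word(pp.alphas)
print(pp.OneOrMore(word).parse_string("a b c"))  # -> ['a', 'b', 'c']
print(word[1, ...].parse_string("a b c"))        # -> ['a', 'b', 'c']
# [end aside]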
- - Parameters: - - expr - expression that must match one or more times - - stop_on - (default= ``None``) - expression for a terminating sentinel - (only required if the sentinel would ordinarily match the repetition - expression) - - Example:: - - data_word = Word(alphas) - label = data_word + FollowedBy(':') - attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).set_parse_action(' '.join)) - - text = "shape: SQUARE posn: upper left color: BLACK" - attr_expr[1, ...].parse_string(text).pprint() # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']] - - # use stop_on attribute for OneOrMore to avoid reading label string as part of the data - attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)) - OneOrMore(attr_expr).parse_string(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']] - - # could also be written as - (attr_expr * (1,)).parse_string(text).pprint() - """ - - def _generateDefaultName(self): - return "{" + str(self.expr) + "}..." - - -class ZeroOrMore(_MultipleMatch): - """ - Optional repetition of zero or more of the given expression. - - Parameters: - - ``expr`` - expression that must match zero or more times - - ``stop_on`` - expression for a terminating sentinel - (only required if the sentinel would ordinarily match the repetition - expression) - (default= ``None``) - - Example: similar to :class:`OneOrMore` - """ - - def __init__( - self, - expr: ParserElement, - stop_on: typing.Optional[Union[ParserElement, str]] = None, - *, - stopOn: typing.Optional[Union[ParserElement, str]] = None, - ): - super().__init__(expr, stopOn=stopOn or stop_on) - self.mayReturnEmpty = True - - def parseImpl(self, instring, loc, doActions=True): - try: - return super().parseImpl(instring, loc, doActions) - except (ParseException, IndexError): - return loc, ParseResults([], name=self.resultsName) - - def _generateDefaultName(self): - return "[" + str(self.expr) + "]..." - - -class _NullToken: - def __bool__(self): - return False - - def __str__(self): - return "" - - -class Opt(ParseElementEnhance): - """ - Optional matching of the given expression. - - Parameters: - - ``expr`` - expression that may match zero or one time - - ``default`` (optional) - value to be returned if the optional expression is not found.
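# [illustrative aside - a minimal sketch, not part of the vendored file being deleted]
# When an Opt does not match, its default (if given) is returned instead.
# Assumes pyparsing is installed.
import pyparsing as pp

sign = pp.Opt(pp.one_of("+ -"), default="+")
number = sign + pp.Word(pp.nums)
print(number.parse_string("-5"))  # -> ['-', '5']
print(number.parse_string("42"))  # -> ['+', '42'] - default fills the gap
# [end aside]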
- - Example:: - - # US postal code can be a 5-digit zip, plus optional 4-digit qualifier - zip = Combine(Word(nums, exact=5) + Opt('-' + Word(nums, exact=4))) - zip.run_tests(''' - # traditional ZIP code - 12345 - - # ZIP+4 form - 12101-0001 - - # invalid ZIP - 98765- - ''') - - prints:: - - # traditional ZIP code - 12345 - ['12345'] - - # ZIP+4 form - 12101-0001 - ['12101-0001'] - - # invalid ZIP - 98765- - ^ - FAIL: Expected end of text (at char 5), (line:1, col:6) - """ - - __optionalNotMatched = _NullToken() - - def __init__( - self, expr: Union[ParserElement, str], default: Any = __optionalNotMatched - ): - super().__init__(expr, savelist=False) - self.saveAsList = self.expr.saveAsList - self.defaultValue = default - self.mayReturnEmpty = True - - def parseImpl(self, instring, loc, doActions=True): - self_expr = self.expr - try: - loc, tokens = self_expr._parse(instring, loc, doActions, callPreParse=False) - except (ParseException, IndexError): - default_value = self.defaultValue - if default_value is not self.__optionalNotMatched: - if self_expr.resultsName: - tokens = ParseResults([default_value]) - tokens[self_expr.resultsName] = default_value - else: - tokens = [default_value] - else: - tokens = [] - return loc, tokens - - def _generateDefaultName(self): - inner = str(self.expr) - # strip off redundant inner {}'s - while len(inner) > 1 and inner[0 :: len(inner) - 1] == "{}": - inner = inner[1:-1] - return "[" + inner + "]" - - -Optional = Opt - - -class SkipTo(ParseElementEnhance): - """ - Token for skipping over all undefined text until the matched - expression is found. - - Parameters: - - ``expr`` - target expression marking the end of the data to be skipped - - ``include`` - if ``True``, the target expression is also parsed - (the skipped text and target expression are returned as a 2-element - list) (default= ``False``). 
- - ``ignore`` - (default= ``None``) used to define grammars (typically quoted strings and - comments) that might contain false matches to the target expression - - ``fail_on`` - (default= ``None``) define expressions that are not allowed to be - included in the skipped text; if found before the target expression is found, - the :class:`SkipTo` is not a match - - Example:: - - report = ''' - Outstanding Issues Report - 1 Jan 2000 - - # | Severity | Description | Days Open - -----+----------+-------------------------------------------+----------- - 101 | Critical | Intermittent system crash | 6 - 94 | Cosmetic | Spelling error on Login ('log|n') | 14 - 79 | Minor | System slow when running too many reports | 47 - ''' - integer = Word(nums) - SEP = Suppress('|') - # use SkipTo to simply match everything up until the next SEP - # - ignore quoted strings, so that a '|' character inside a quoted string does not match - # - parse action will call token.strip() for each matched token, i.e., the description body - string_data = SkipTo(SEP, ignore=quoted_string) - string_data.set_parse_action(token_map(str.strip)) - ticket_expr = (integer("issue_num") + SEP - + string_data("sev") + SEP - + string_data("desc") + SEP - + integer("days_open")) - - for tkt in ticket_expr.search_string(report): - print(tkt.dump()) - - prints:: - - ['101', 'Critical', 'Intermittent system crash', '6'] - - days_open: '6' - - desc: 'Intermittent system crash' - - issue_num: '101' - - sev: 'Critical' - ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14'] - - days_open: '14' - - desc: "Spelling error on Login ('log|n')" - - issue_num: '94' - - sev: 'Cosmetic' - ['79', 'Minor', 'System slow when running too many reports', '47'] - - days_open: '47' - - desc: 'System slow when running too many reports' - - issue_num: '79' - - sev: 'Minor' - """ - - def __init__( - self, - other: Union[ParserElement, str], - include: bool = False, - ignore: typing.Optional[Union[ParserElement, str]] = None, - fail_on: typing.Optional[Union[ParserElement, str]] = None, - *, - failOn: typing.Optional[Union[ParserElement, str]] = None, - ): - super().__init__(other) - failOn = failOn or fail_on - self.ignoreExpr = ignore - self.mayReturnEmpty = True - self.mayIndexError = False - self.includeMatch = include - self.saveAsList = False - if isinstance(failOn, str_type): - self.failOn = self._literalStringClass(failOn) - else: - self.failOn = failOn - self.errmsg = "No match found for " + str(self.expr) - - def parseImpl(self, instring, loc, doActions=True): - startloc = loc - instrlen = len(instring) - self_expr_parse = self.expr._parse - self_failOn_canParseNext = ( - self.failOn.canParseNext if self.failOn is not None else None - ) - self_ignoreExpr_tryParse = ( - self.ignoreExpr.tryParse if self.ignoreExpr is not None else None - ) - - tmploc = loc - while tmploc <= instrlen: - if self_failOn_canParseNext is not None: - # break if failOn expression matches - if self_failOn_canParseNext(instring, tmploc): - break - - if self_ignoreExpr_tryParse is not None: - # advance past ignore expressions - while 1: - try: - tmploc = self_ignoreExpr_tryParse(instring, tmploc) - except ParseBaseException: - break - - try: - self_expr_parse(instring, tmploc, doActions=False, callPreParse=False) - except (ParseException, IndexError): - # no match, advance loc in string - tmploc += 1 - else: - # matched skipto expr, done - break - - else: - # ran off the end of the input string without matching skipto expr, fail - raise ParseException(instring, loc, self.errmsg, self) - - # build up return values - loc = tmploc
- skiptext = instring[startloc:loc] - skipresult = ParseResults(skiptext) - - if self.includeMatch: - loc, mat = self_expr_parse(instring, loc, doActions, callPreParse=False) - skipresult += mat - - return loc, skipresult - - -class Forward(ParseElementEnhance): - """ - Forward declaration of an expression to be defined later - - used for recursive grammars, such as algebraic infix notation. - When the expression is known, it is assigned to the ``Forward`` - variable using the ``'<<'`` operator. - - Note: take care when assigning to ``Forward`` not to overlook - precedence of operators. - - Specifically, ``'|'`` has a lower precedence than ``'<<'``, so that:: - - fwd_expr << a | b | c - - will actually be evaluated as:: - - (fwd_expr << a) | b | c - - thereby leaving b and c out as parseable alternatives. It is recommended that you - explicitly group the values inserted into the ``Forward``:: - - fwd_expr << (a | b | c) - - Converting to use the ``'<<='`` operator instead will avoid this problem. - - See :class:`ParseResults.pprint` for an example of a recursive - parser created using ``Forward``. - """ - - def __init__(self, other: typing.Optional[Union[ParserElement, str]] = None): - self.caller_frame = traceback.extract_stack(limit=2)[0] - super().__init__(other, savelist=False) - self.lshift_line = None - - def __lshift__(self, other): - if hasattr(self, "caller_frame"): - del self.caller_frame - if isinstance(other, str_type): - other = self._literalStringClass(other) - self.expr = other - self.mayIndexError = self.expr.mayIndexError - self.mayReturnEmpty = self.expr.mayReturnEmpty - self.set_whitespace_chars( - self.expr.whiteChars, copy_defaults=self.expr.copyDefaultWhiteChars - ) - self.skipWhitespace = self.expr.skipWhitespace - self.saveAsList = self.expr.saveAsList - self.ignoreExprs.extend(self.expr.ignoreExprs) - self.lshift_line = traceback.extract_stack(limit=2)[-2] - return self - - def __ilshift__(self, other): - return self << other - - def __or__(self, other): - caller_line = traceback.extract_stack(limit=2)[-2] - if ( - __diag__.warn_on_match_first_with_lshift_operator - and caller_line == self.lshift_line - and Diagnostics.warn_on_match_first_with_lshift_operator - not in self.suppress_warnings_ - ): - warnings.warn( - "using '<<' operator with '|' is probably an error, use '<<='", - stacklevel=2, - ) - ret = super().__or__(other) - return ret - - def __del__(self): - # see if we are getting dropped because of '=' reassignment of var instead of '<<=' or '<<' - if ( - self.expr is None - and __diag__.warn_on_assignment_to_Forward - and Diagnostics.warn_on_assignment_to_Forward not in self.suppress_warnings_ - ): - warnings.warn_explicit( - "Forward defined here but no expression attached later using '<<=' or '<<'", - UserWarning, - filename=self.caller_frame.filename, - lineno=self.caller_frame.lineno, - ) - - def parseImpl(self, instring, loc, doActions=True): - if ( - self.expr is None - and __diag__.warn_on_parse_using_empty_Forward - and Diagnostics.warn_on_parse_using_empty_Forward - not in self.suppress_warnings_ - ): - # walk stack until parse_string, scan_string, search_string, or transform_string is found - parse_fns = [ - "parse_string", - "scan_string", - "search_string", - "transform_string", - ] - tb = traceback.extract_stack(limit=200) - for i, frm in enumerate(reversed(tb), start=1): - if frm.name in parse_fns: - stacklevel = i + 1 - break - else: - stacklevel = 2 - warnings.warn( - "Forward expression was never assigned a value, will not parse any 
input", - stacklevel=stacklevel, - ) - if not ParserElement._left_recursion_enabled: - return super().parseImpl(instring, loc, doActions) - # ## Bounded Recursion algorithm ## - # Recursion only needs to be processed at ``Forward`` elements, since they are - # the only ones that can actually refer to themselves. The general idea is - # to handle recursion stepwise: We start at no recursion, then recurse once, - # recurse twice, ..., until more recursion offers no benefit (we hit the bound). - # - # The "trick" here is that each ``Forward`` gets evaluated in two contexts - # - to *match* a specific recursion level, and - # - to *search* the bounded recursion level - # and the two run concurrently. The *search* must *match* each recursion level - # to find the best possible match. This is handled by a memo table, which - # provides the previous match to the next level match attempt. - # - # See also "Left Recursion in Parsing Expression Grammars", Medeiros et al. - # - # There is a complication since we not only *parse* but also *transform* via - # actions: We do not want to run the actions too often while expanding. Thus, - # we expand using `doActions=False` and only run `doActions=True` if the next - # recursion level is acceptable. - with ParserElement.recursion_lock: - memo = ParserElement.recursion_memos - try: - # we are parsing at a specific recursion expansion - use it as-is - prev_loc, prev_result = memo[loc, self, doActions] - if isinstance(prev_result, Exception): - raise prev_result - return prev_loc, prev_result.copy() - except KeyError: - act_key = (loc, self, True) - peek_key = (loc, self, False) - # we are searching for the best recursion expansion - keep on improving - # both `doActions` cases must be tracked separately here! - prev_loc, prev_peek = memo[peek_key] = ( - loc - 1, - ParseException( - instring, loc, "Forward recursion without base case", self - ), - ) - if doActions: - memo[act_key] = memo[peek_key] - while True: - try: - new_loc, new_peek = super().parseImpl(instring, loc, False) - except ParseException: - # we failed before getting any match – do not hide the error - if isinstance(prev_peek, Exception): - raise - new_loc, new_peek = prev_loc, prev_peek - # the match did not get better: we are done - if new_loc <= prev_loc: - if doActions: - # replace the match for doActions=False as well, - # in case the action did backtrack - prev_loc, prev_result = memo[peek_key] = memo[act_key] - del memo[peek_key], memo[act_key] - return prev_loc, prev_result.copy() - del memo[peek_key] - return prev_loc, prev_peek.copy() - # the match did get better: see if we can improve further - else: - if doActions: - try: - memo[act_key] = super().parseImpl(instring, loc, True) - except ParseException as e: - memo[peek_key] = memo[act_key] = (new_loc, e) - raise - prev_loc, prev_peek = memo[peek_key] = new_loc, new_peek - - def leave_whitespace(self, recursive: bool = True) -> ParserElement: - self.skipWhitespace = False - return self - - def ignore_whitespace(self, recursive: bool = True) -> ParserElement: - self.skipWhitespace = True - return self - - def streamline(self) -> ParserElement: - if not self.streamlined: - self.streamlined = True - if self.expr is not None: - self.expr.streamline() - return self - - def validate(self, validateTrace=None) -> None: - if validateTrace is None: - validateTrace = [] - - if self not in validateTrace: - tmp = validateTrace[:] + [self] - if self.expr is not None: - self.expr.validate(tmp) - self._checkRecursion([]) - - def 
_generateDefaultName(self): - # Avoid infinite recursion by setting a temporary _defaultName - self._defaultName = ": ..." - - # Use the string representation of main expression. - retString = "..." - try: - if self.expr is not None: - retString = str(self.expr)[:1000] - else: - retString = "None" - finally: - return self.__class__.__name__ + ": " + retString - - def copy(self) -> ParserElement: - if self.expr is not None: - return super().copy() - else: - ret = Forward() - ret <<= self - return ret - - def _setResultsName(self, name, list_all_matches=False): - if ( - __diag__.warn_name_set_on_empty_Forward - and Diagnostics.warn_name_set_on_empty_Forward - not in self.suppress_warnings_ - ): - if self.expr is None: - warnings.warn( - "{}: setting results name {!r} on {} expression " - "that has no contained expression".format( - "warn_name_set_on_empty_Forward", name, type(self).__name__ - ), - stacklevel=3, - ) - - return super()._setResultsName(name, list_all_matches) - - ignoreWhitespace = ignore_whitespace - leaveWhitespace = leave_whitespace - - -class TokenConverter(ParseElementEnhance): - """ - Abstract subclass of :class:`ParseElementEnhance`, for converting parsed results. - """ - - def __init__(self, expr: Union[ParserElement, str], savelist=False): - super().__init__(expr) # , savelist) - self.saveAsList = False - - -class Combine(TokenConverter): - """Converter to concatenate all matching tokens to a single string. - By default, the matching patterns must also be contiguous in the - input string; this can be disabled by specifying - ``'adjacent=False'`` in the constructor. - - Example:: - - real = Word(nums) + '.' + Word(nums) - print(real.parse_string('3.1416')) # -> ['3', '.', '1416'] - # will also erroneously match the following - print(real.parse_string('3. 1416')) # -> ['3', '.', '1416'] - - real = Combine(Word(nums) + '.' + Word(nums)) - print(real.parse_string('3.1416')) # -> ['3.1416'] - # no match when there are internal spaces - print(real.parse_string('3. 1416')) # -> Exception: Expected W:(0123...) - """ - - def __init__( - self, - expr: ParserElement, - join_string: str = "", - adjacent: bool = True, - *, - joinString: typing.Optional[str] = None, - ): - super().__init__(expr) - joinString = joinString if joinString is not None else join_string - # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself - if adjacent: - self.leave_whitespace() - self.adjacent = adjacent - self.skipWhitespace = True - self.joinString = joinString - self.callPreparse = True - - def ignore(self, other) -> ParserElement: - if self.adjacent: - ParserElement.ignore(self, other) - else: - super().ignore(other) - return self - - def postParse(self, instring, loc, tokenlist): - retToks = tokenlist.copy() - del retToks[:] - retToks += ParseResults( - ["".join(tokenlist._asStringList(self.joinString))], modal=self.modalResults - ) - - if self.resultsName and retToks.haskeys(): - return [retToks] - else: - return retToks - - -class Group(TokenConverter): - """Converter to return the matched tokens as a list - useful for - returning tokens of :class:`ZeroOrMore` and :class:`OneOrMore` expressions. - - The optional ``aslist`` argument when set to True will return the - parsed tokens as a Python list instead of a pyparsing ParseResults.
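# [illustrative aside - a minimal sketch, not part of the vendored file being deleted]
# Group keeps each repetition's tokens together as a nested list.
# Assumes pyparsing is installed.
import pyparsing as pp

pair = pp.Group(pp.Word(pp.alphas) + pp.Word(pp.nums))
print(pair[1, ...].parse_string("a 1 b 2"))  # -> [['a', '1'], ['b', '2']]
# [end aside]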
- - Example:: - - ident = Word(alphas) - num = Word(nums) - term = ident | num - func = ident + Opt(delimited_list(term)) - print(func.parse_string("fn a, b, 100")) - # -> ['fn', 'a', 'b', '100'] - - func = ident + Group(Opt(delimited_list(term))) - print(func.parse_string("fn a, b, 100")) - # -> ['fn', ['a', 'b', '100']] - """ - - def __init__(self, expr: ParserElement, aslist: bool = False): - super().__init__(expr) - self.saveAsList = True - self._asPythonList = aslist - - def postParse(self, instring, loc, tokenlist): - if self._asPythonList: - return ParseResults.List( - tokenlist.asList() - if isinstance(tokenlist, ParseResults) - else list(tokenlist) - ) - else: - return [tokenlist] - - -class Dict(TokenConverter): - """Converter to return a repetitive expression as a list, but also - as a dictionary. Each element can also be referenced using the first - token in the expression as its key. Useful for tabular report - scraping when the first column can be used as an item key. - - The optional ``asdict`` argument when set to True will return the - parsed tokens as a Python dict instead of a pyparsing ParseResults. - - Example:: - - data_word = Word(alphas) - label = data_word + FollowedBy(':') - - text = "shape: SQUARE posn: upper left color: light blue texture: burlap" - attr_expr = (label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)) - - # print attributes as plain groups - print(attr_expr[1, ...].parse_string(text).dump()) - - # instead of OneOrMore(expr), parse using Dict(Group(expr)[1, ...]) - Dict will auto-assign names - result = Dict(Group(attr_expr)[1, ...]).parse_string(text) - print(result.dump()) - - # access named fields as dict entries, or output as dict - print(result['shape']) - print(result.as_dict()) - - prints:: - - ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap'] - [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']] - - color: 'light blue' - - posn: 'upper left' - - shape: 'SQUARE' - - texture: 'burlap' - SQUARE - {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'} - - See :class:`ParseResults` for more examples of accessing fields by results name. - """ - - def __init__(self, expr: ParserElement, asdict: bool = False): - super().__init__(expr) - self.saveAsList = True - self._asPythonDict = asdict - - def postParse(self, instring, loc, tokenlist): - for i, tok in enumerate(tokenlist): - if len(tok) == 0: - continue - - ikey = tok[0] - if isinstance(ikey, int): - ikey = str(ikey).strip() - - if len(tok) == 1: - tokenlist[ikey] = _ParseResultsWithOffset("", i) - - elif len(tok) == 2 and not isinstance(tok[1], ParseResults): - tokenlist[ikey] = _ParseResultsWithOffset(tok[1], i) - - else: - try: - dictvalue = tok.copy() # ParseResults(i) - except Exception: - exc = TypeError( - "could not extract dict values from parsed results" - " - Dict expression must contain Grouped expressions" - ) - raise exc from None - - del dictvalue[0] - - if len(dictvalue) != 1 or ( - isinstance(dictvalue, ParseResults) and dictvalue.haskeys() - ): - tokenlist[ikey] = _ParseResultsWithOffset(dictvalue, i) - else: - tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0], i) - - if self._asPythonDict: - return [tokenlist.as_dict()] if self.resultsName else tokenlist.as_dict() - else: - return [tokenlist] if self.resultsName else tokenlist - - -class Suppress(TokenConverter): - """Converter for ignoring the results of a parsed expression.
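# [illustrative aside - a minimal sketch, not part of the vendored file being deleted]
# Suppress matches normally but contributes no tokens to the result.
# Assumes pyparsing is installed.
import pyparsing as pp

csv_row = pp.Word(pp.alphas) + (pp.Suppress(",") + pp.Word(pp.alphas))[...]
print(csv_row.parse_string("a, b, c"))  # -> ['a', 'b', 'c'] - commas dropped
# [end aside]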
- - Example:: - - source = "a, b, c,d" - wd = Word(alphas) - wd_list1 = wd + (',' + wd)[...] - print(wd_list1.parse_string(source)) - - # often, delimiters that are useful during parsing are just in the - # way afterward - use Suppress to keep them out of the parsed output - wd_list2 = wd + (Suppress(',') + wd)[...] - print(wd_list2.parse_string(source)) - - # Skipped text (using '...') can be suppressed as well - source = "lead in START relevant text END trailing text" - start_marker = Keyword("START") - end_marker = Keyword("END") - find_body = Suppress(...) + start_marker + ... + end_marker - print(find_body.parse_string(source)) - - prints:: - - ['a', ',', 'b', ',', 'c', ',', 'd'] - ['a', 'b', 'c', 'd'] - ['START', 'relevant text ', 'END'] - - (See also :class:`delimited_list`.) - """ - - def __init__(self, expr: Union[ParserElement, str], savelist: bool = False): - if expr is ...: - expr = _PendingSkip(NoMatch()) - super().__init__(expr) - - def __add__(self, other) -> "ParserElement": - if isinstance(self.expr, _PendingSkip): - return Suppress(SkipTo(other)) + other - else: - return super().__add__(other) - - def __sub__(self, other) -> "ParserElement": - if isinstance(self.expr, _PendingSkip): - return Suppress(SkipTo(other)) - other - else: - return super().__sub__(other) - - def postParse(self, instring, loc, tokenlist): - return [] - - def suppress(self) -> ParserElement: - return self - - -def trace_parse_action(f: ParseAction) -> ParseAction: - """Decorator for debugging parse actions. - - When the parse action is called, this decorator will print - ``">> entering method-name(line:<current_source_line>, <parse_location>, <matched_tokens>)"``. - When the parse action completes, the decorator will print - ``"<<"`` followed by the returned value, or any exception that the parse action raised. - - Example:: - - wd = Word(alphas) - - @trace_parse_action - def remove_duplicate_chars(tokens): - return ''.join(sorted(set(''.join(tokens)))) - - wds = wd[1, ...].set_parse_action(remove_duplicate_chars) - print(wds.parse_string("slkdjs sld sldd sdlf sdljf")) - - prints:: - - >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {})) - <<leaving remove_duplicate_chars (ret: 'dfjkls') - ['dfjkls'] - """ - f = _trim_arity(f) - - def z(*paArgs): - thisFunc = f.__name__ - s, l, t = paArgs[-3:] - if len(paArgs) > 3: - thisFunc = paArgs[0].__class__.__name__ + "." + thisFunc - sys.stderr.write( - ">>entering {}(line: {!r}, {}, {!r})\n".format(thisFunc, line(l, s), l, t) - ) - try: - ret = f(*paArgs) - except Exception as exc: - sys.stderr.write("<<leaving {} (exception: {})\n".format(thisFunc, exc)) - raise - sys.stderr.write("<<leaving {} (ret: {!r})\n".format(thisFunc, ret)) - return ret - - return z - - -def srange(s: str) -> str: - r"""Helper to easily define string ranges for use in :class:`Word` - construction. Borrows syntax from regexp ``'[]'`` string range - definitions:: - - srange("[0-9]") -> "0123456789" - srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz" - srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_" - - The input string must be enclosed in []'s, and the returned string - is the expanded character set joined into a single string. The - values enclosed in the []'s may be: - - - a single character - - an escaped character with a leading backslash (such as ``\-`` - or ``\]``) - - an escaped hex character with a leading ``'\x'`` - (``\x21``, which is a ``'!'`` character) (``\0x##`` - is also supported for backwards compatibility) - - an escaped octal character with a leading ``'\0'`` - (``\041``, which is a ``'!'`` character) - - a range of any of the above, separated by a dash (``'a-z'``, - etc.) - - any combination of the above (``'aeiouy'``, - ``'a-zA-Z0-9_$'``, etc.)
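# [illustrative aside - a minimal sketch, not part of the vendored file being deleted]
# srange expands a regex-style character class into the literal character set,
# which is handy as a Word alphabet. Assumes pyparsing is installed.
import pyparsing as pp

print(pp.srange("[a-fA-F0-9]"))  # -> 'abcdefABCDEF0123456789'
hex_word = pp.Word(pp.srange("[0-9a-fA-F]"))
print(hex_word.parse_string("deadBEEF"))  # -> ['deadBEEF']
# [end aside]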
- """ - _expanded = ( - lambda p: p - if not isinstance(p, ParseResults) - else "".join(chr(c) for c in range(ord(p[0]), ord(p[1]) + 1)) - ) - try: - return "".join(_expanded(part) for part in _reBracketExpr.parse_string(s).body) - except Exception: - return "" - - -def token_map(func, *args) -> ParseAction: - """Helper to define a parse action by mapping a function to all - elements of a :class:`ParseResults` list. If any additional args are passed, - they are forwarded to the given function as additional arguments - after the token, as in - ``hex_integer = Word(hexnums).set_parse_action(token_map(int, 16))``, - which will convert the parsed data to an integer using base 16. - - Example (compare the last to example in :class:`ParserElement.transform_string`:: - - hex_ints = Word(hexnums)[1, ...].set_parse_action(token_map(int, 16)) - hex_ints.run_tests(''' - 00 11 22 aa FF 0a 0d 1a - ''') - - upperword = Word(alphas).set_parse_action(token_map(str.upper)) - upperword[1, ...].run_tests(''' - my kingdom for a horse - ''') - - wd = Word(alphas).set_parse_action(token_map(str.title)) - wd[1, ...].set_parse_action(' '.join).run_tests(''' - now is the winter of our discontent made glorious summer by this sun of york - ''') - - prints:: - - 00 11 22 aa FF 0a 0d 1a - [0, 17, 34, 170, 255, 10, 13, 26] - - my kingdom for a horse - ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE'] - - now is the winter of our discontent made glorious summer by this sun of york - ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York'] - """ - - def pa(s, l, t): - return [func(tokn, *args) for tokn in t] - - func_name = getattr(func, "__name__", getattr(func, "__class__").__name__) - pa.__name__ = func_name - - return pa - - -def autoname_elements() -> None: - """ - Utility to simplify mass-naming of parser elements, for - generating railroad diagram with named subdiagrams. 
- """ - for name, var in sys._getframe().f_back.f_locals.items(): - if isinstance(var, ParserElement) and not var.customName: - var.set_name(name) - - -dbl_quoted_string = Combine( - Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"' -).set_name("string enclosed in double quotes") - -sgl_quoted_string = Combine( - Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'" -).set_name("string enclosed in single quotes") - -quoted_string = Combine( - Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"' - | Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'" -).set_name("quotedString using single or double quotes") - -unicode_string = Combine("u" + quoted_string.copy()).set_name("unicode string literal") - - -alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]") -punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]") - -# build list of built-in expressions, for future reference if a global default value -# gets updated -_builtin_exprs: List[ParserElement] = [ - v for v in vars().values() if isinstance(v, ParserElement) -] - -# backward compatibility names -tokenMap = token_map -conditionAsParseAction = condition_as_parse_action -nullDebugAction = null_debug_action -sglQuotedString = sgl_quoted_string -dblQuotedString = dbl_quoted_string -quotedString = quoted_string -unicodeString = unicode_string -lineStart = line_start -lineEnd = line_end -stringStart = string_start -stringEnd = string_end -traceParseAction = trace_parse_action diff --git a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/diagram/__init__.py b/venv/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/diagram/__init__.py deleted file mode 100644 index 8986447..0000000 --- a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/diagram/__init__.py +++ /dev/null @@ -1,642 +0,0 @@ -import railroad -import pyparsing -import typing -from typing import ( - List, - NamedTuple, - Generic, - TypeVar, - Dict, - Callable, - Set, - Iterable, -) -from jinja2 import Template -from io import StringIO -import inspect - - -jinja2_template_source = """\ - - - - {% if not head %} - - {% else %} - {{ head | safe }} - {% endif %} - - -{{ body | safe }} -{% for diagram in diagrams %} -
-    <div class="railroad-group">
-        <h1 class="railroad-heading">{{ diagram.title }}</h1>
-        <div>{{ diagram.text }}</div>
-        <div class="railroad-svg">
-            {{ diagram.svg }}
-        </div>
-    </div>
-{% endfor %} - - -""" - -template = Template(jinja2_template_source) - -# Note: ideally this would be a dataclass, but we're supporting Python 3.5+ so we can't do this yet -NamedDiagram = NamedTuple( - "NamedDiagram", - [("name", str), ("diagram", typing.Optional[railroad.DiagramItem]), ("index", int)], -) -""" -A simple structure for associating a name with a railroad diagram -""" - -T = TypeVar("T") - - -class EachItem(railroad.Group): - """ - Custom railroad item to compose a: - - Group containing a - - OneOrMore containing a - - Choice of the elements in the Each - with the group label indicating that all must be matched - """ - - all_label = "[ALL]" - - def __init__(self, *items): - choice_item = railroad.Choice(len(items) - 1, *items) - one_or_more_item = railroad.OneOrMore(item=choice_item) - super().__init__(one_or_more_item, label=self.all_label) - - -class AnnotatedItem(railroad.Group): - """ - Simple subclass of Group that creates an annotation label - """ - - def __init__(self, label: str, item): - super().__init__(item=item, label="[{}]".format(label) if label else label) - - -class EditablePartial(Generic[T]): - """ - Acts like a functools.partial, but can be edited. In other words, it represents a type that hasn't yet been - constructed. - """ - - # We need this here because the railroad constructors actually transform the data, so can't be called until the - # entire tree is assembled - - def __init__(self, func: Callable[..., T], args: list, kwargs: dict): - self.func = func - self.args = args - self.kwargs = kwargs - - @classmethod - def from_call(cls, func: Callable[..., T], *args, **kwargs) -> "EditablePartial[T]": - """ - If you call this function in the same way that you would call the constructor, it will store the arguments - as you expect. For example EditablePartial.from_call(Fraction, 1, 3)() == Fraction(1, 3) - """ - return EditablePartial(func=func, args=list(args), kwargs=kwargs) - - @property - def name(self): - return self.kwargs["name"] - - def __call__(self) -> T: - """ - Evaluate the partial and return the result - """ - args = self.args.copy() - kwargs = self.kwargs.copy() - - # This is a helpful hack to allow you to specify varargs parameters (e.g. *args) as keyword args (e.g. 
- # args=['list', 'of', 'things']) - arg_spec = inspect.getfullargspec(self.func) - if arg_spec.varargs in self.kwargs: - args += kwargs.pop(arg_spec.varargs) - - return self.func(*args, **kwargs) - - -def railroad_to_html(diagrams: List[NamedDiagram], **kwargs) -> str: - """ - Given a list of NamedDiagram, produce a single HTML string that visualises those diagrams - :params kwargs: kwargs to be passed in to the template - """ - data = [] - for diagram in diagrams: - if diagram.diagram is None: - continue - io = StringIO() - diagram.diagram.writeSvg(io.write) - title = diagram.name - if diagram.index == 0: - title += " (root)" - data.append({"title": title, "text": "", "svg": io.getvalue()}) - - return template.render(diagrams=data, **kwargs) - - -def resolve_partial(partial: "EditablePartial[T]") -> T: - """ - Recursively resolves a collection of Partials into whatever type they are - """ - if isinstance(partial, EditablePartial): - partial.args = resolve_partial(partial.args) - partial.kwargs = resolve_partial(partial.kwargs) - return partial() - elif isinstance(partial, list): - return [resolve_partial(x) for x in partial] - elif isinstance(partial, dict): - return {key: resolve_partial(x) for key, x in partial.items()} - else: - return partial - - -def to_railroad( - element: pyparsing.ParserElement, - diagram_kwargs: typing.Optional[dict] = None, - vertical: int = 3, - show_results_names: bool = False, - show_groups: bool = False, -) -> List[NamedDiagram]: - """ - Convert a pyparsing element tree into a list of diagrams. This is the recommended entrypoint to diagram - creation if you want to access the Railroad tree before it is converted to HTML - :param element: base element of the parser being diagrammed - :param diagram_kwargs: kwargs to pass to the Diagram() constructor - :param vertical: (optional) - int - limit at which number of alternatives should be - shown vertically instead of horizontally - :param show_results_names - bool to indicate whether results name annotations should be - included in the diagram - :param show_groups - bool to indicate whether groups should be highlighted with an unlabeled - surrounding box - """ - # Convert the whole tree underneath the root - lookup = ConverterState(diagram_kwargs=diagram_kwargs or {}) - _to_diagram_element( - element, - lookup=lookup, - parent=None, - vertical=vertical, - show_results_names=show_results_names, - show_groups=show_groups, - ) - - root_id = id(element) - # Convert the root if it hasn't been already - if root_id in lookup: - if not element.customName: - lookup[root_id].name = "" - lookup[root_id].mark_for_extraction(root_id, lookup, force=True) - - # Now that we're finished, we can convert from intermediate structures into Railroad elements - diags = list(lookup.diagrams.values()) - if len(diags) > 1: - # collapse out duplicate diags with the same name - seen = set() - deduped_diags = [] - for d in diags: - # don't extract SkipTo elements, they are uninformative as subdiagrams - if d.name == "...": - continue - if d.name is not None and d.name not in seen: - seen.add(d.name) - deduped_diags.append(d) - resolved = [resolve_partial(partial) for partial in deduped_diags] - else: - # special case - if just one diagram, always display it, even if - # it has no name - resolved = [resolve_partial(partial) for partial in diags] - return sorted(resolved, key=lambda diag: diag.index) - - -def _should_vertical( - specification: int, exprs: Iterable[pyparsing.ParserElement] -) -> bool: - """ - Returns true if we should return a 
vertical list of elements - """ - if specification is None: - return False - else: - return len(_visible_exprs(exprs)) >= specification - - -class ElementState: - """ - State recorded for an individual pyparsing Element - """ - - # Note: this should be a dataclass, but we have to support Python 3.5 - def __init__( - self, - element: pyparsing.ParserElement, - converted: EditablePartial, - parent: EditablePartial, - number: int, - name: str = None, - parent_index: typing.Optional[int] = None, - ): - #: The pyparsing element that this represents - self.element: pyparsing.ParserElement = element - #: The name of the element - self.name: typing.Optional[str] = name - #: The output Railroad element in an unconverted state - self.converted: EditablePartial = converted - #: The parent Railroad element, which we store so that we can extract this if it's duplicated - self.parent: EditablePartial = parent - #: The order in which we found this element, used for sorting diagrams if this is extracted into a diagram - self.number: int = number - #: The index of this inside its parent - self.parent_index: typing.Optional[int] = parent_index - #: If true, we should extract this out into a subdiagram - self.extract: bool = False - #: If true, all of this element's children have been filled out - self.complete: bool = False - - def mark_for_extraction( - self, el_id: int, state: "ConverterState", name: str = None, force: bool = False - ): - """ - Called when this instance has been seen twice, and thus should eventually be extracted into a sub-diagram - :param el_id: id of the element - :param state: element/diagram state tracker - :param name: name to use for this element's text - :param force: If true, force extraction now, regardless of the state of this. Only useful for extracting the - root element when we know we're finished - """ - self.extract = True - - # Set the name - if not self.name: - if name: - # Allow forcing a custom name - self.name = name - elif self.element.customName: - self.name = self.element.customName - else: - self.name = "" - - # Just because this is marked for extraction doesn't mean we can do it yet. We may have to wait for children - # to be added - # Also, if this is just a string literal etc, don't bother extracting it - if force or (self.complete and _worth_extracting(self.element)): - state.extract_into_diagram(el_id) - - -class ConverterState: - """ - Stores some state that persists between recursions into the element tree - """ - - def __init__(self, diagram_kwargs: typing.Optional[dict] = None): - #: A dictionary mapping ParserElements to state relating to them - self._element_diagram_states: Dict[int, ElementState] = {} - #: A dictionary mapping ParserElement IDs to subdiagrams generated from them - self.diagrams: Dict[int, EditablePartial[NamedDiagram]] = {} - #: The index of the next unnamed element - self.unnamed_index: int = 1 - #: The index of the next element. 
This is used for sorting - self.index: int = 0 - #: Shared kwargs that are used to customize the construction of diagrams - self.diagram_kwargs: dict = diagram_kwargs or {} - self.extracted_diagram_names: Set[str] = set() - - def __setitem__(self, key: int, value: ElementState): - self._element_diagram_states[key] = value - - def __getitem__(self, key: int) -> ElementState: - return self._element_diagram_states[key] - - def __delitem__(self, key: int): - del self._element_diagram_states[key] - - def __contains__(self, key: int): - return key in self._element_diagram_states - - def generate_unnamed(self) -> int: - """ - Generate a number used in the name of an otherwise unnamed diagram - """ - self.unnamed_index += 1 - return self.unnamed_index - - def generate_index(self) -> int: - """ - Generate a number used to index a diagram - """ - self.index += 1 - return self.index - - def extract_into_diagram(self, el_id: int): - """ - Used when we encounter the same token twice in the same tree. When this - happens, we replace all instances of that token with a terminal, and - create a new subdiagram for the token - """ - position = self[el_id] - - # Replace the original definition of this element with a regular block - if position.parent: - ret = EditablePartial.from_call(railroad.NonTerminal, text=position.name) - if "item" in position.parent.kwargs: - position.parent.kwargs["item"] = ret - elif "items" in position.parent.kwargs: - position.parent.kwargs["items"][position.parent_index] = ret - - # If the element we're extracting is a group, skip to its content but keep the title - if position.converted.func == railroad.Group: - content = position.converted.kwargs["item"] - else: - content = position.converted - - self.diagrams[el_id] = EditablePartial.from_call( - NamedDiagram, - name=position.name, - diagram=EditablePartial.from_call( - railroad.Diagram, content, **self.diagram_kwargs - ), - index=position.number, - ) - - del self[el_id] - - -def _worth_extracting(element: pyparsing.ParserElement) -> bool: - """ - Returns true if this element is worth having its own sub-diagram. 
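(These converters back the public diagram entry point; a sketch of typical use,
assuming pyparsing 3.x's ``create_diagram`` plus the ``railroad-diagrams`` and
``jinja2`` packages are available)::

    import pyparsing as pp

    ident = pp.Word(pp.alphas).set_name("ident")
    num = pp.Word(pp.nums).set_name("num")
    call = (ident + pp.Opt(pp.delimited_list(ident | num))).set_name("call")
    call.create_diagram("parser_diagram.html", show_results_names=True)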
Simply, if any of its children - themselves have children, then its complex enough to extract - """ - children = element.recurse() - return any(child.recurse() for child in children) - - -def _apply_diagram_item_enhancements(fn): - """ - decorator to ensure enhancements to a diagram item (such as results name annotations) - get applied on return from _to_diagram_element (we do this since there are several - returns in _to_diagram_element) - """ - - def _inner( - element: pyparsing.ParserElement, - parent: typing.Optional[EditablePartial], - lookup: ConverterState = None, - vertical: int = None, - index: int = 0, - name_hint: str = None, - show_results_names: bool = False, - show_groups: bool = False, - ) -> typing.Optional[EditablePartial]: - - ret = fn( - element, - parent, - lookup, - vertical, - index, - name_hint, - show_results_names, - show_groups, - ) - - # apply annotation for results name, if present - if show_results_names and ret is not None: - element_results_name = element.resultsName - if element_results_name: - # add "*" to indicate if this is a "list all results" name - element_results_name += "" if element.modalResults else "*" - ret = EditablePartial.from_call( - railroad.Group, item=ret, label=element_results_name - ) - - return ret - - return _inner - - -def _visible_exprs(exprs: Iterable[pyparsing.ParserElement]): - non_diagramming_exprs = ( - pyparsing.ParseElementEnhance, - pyparsing.PositionToken, - pyparsing.And._ErrorStop, - ) - return [ - e - for e in exprs - if not (e.customName or e.resultsName or isinstance(e, non_diagramming_exprs)) - ] - - -@_apply_diagram_item_enhancements -def _to_diagram_element( - element: pyparsing.ParserElement, - parent: typing.Optional[EditablePartial], - lookup: ConverterState = None, - vertical: int = None, - index: int = 0, - name_hint: str = None, - show_results_names: bool = False, - show_groups: bool = False, -) -> typing.Optional[EditablePartial]: - """ - Recursively converts a PyParsing Element to a railroad Element - :param lookup: The shared converter state that keeps track of useful things - :param index: The index of this element within the parent - :param parent: The parent of this element in the output tree - :param vertical: Controls at what point we make a list of elements vertical. If this is an integer (the default), - it sets the threshold of the number of items before we go vertical. 
If True, always go vertical, if False, never - do so - :param name_hint: If provided, this will override the generated name - :param show_results_names: bool flag indicating whether to add annotations for results names - :returns: The converted version of the input element, but as a Partial that hasn't yet been constructed - :param show_groups: bool flag indicating whether to show groups using bounding box - """ - exprs = element.recurse() - name = name_hint or element.customName or element.__class__.__name__ - - # Python's id() is used to provide a unique identifier for elements - el_id = id(element) - - element_results_name = element.resultsName - - # Here we basically bypass processing certain wrapper elements if they contribute nothing to the diagram - if not element.customName: - if isinstance( - element, - ( - # pyparsing.TokenConverter, - # pyparsing.Forward, - pyparsing.Located, - ), - ): - # However, if this element has a useful custom name, and its child does not, we can pass it on to the child - if exprs: - if not exprs[0].customName: - propagated_name = name - else: - propagated_name = None - - return _to_diagram_element( - element.expr, - parent=parent, - lookup=lookup, - vertical=vertical, - index=index, - name_hint=propagated_name, - show_results_names=show_results_names, - show_groups=show_groups, - ) - - # If the element isn't worth extracting, we always treat it as the first time we say it - if _worth_extracting(element): - if el_id in lookup: - # If we've seen this element exactly once before, we are only just now finding out that it's a duplicate, - # so we have to extract it into a new diagram. - looked_up = lookup[el_id] - looked_up.mark_for_extraction(el_id, lookup, name=name_hint) - ret = EditablePartial.from_call(railroad.NonTerminal, text=looked_up.name) - return ret - - elif el_id in lookup.diagrams: - # If we have seen the element at least twice before, and have already extracted it into a subdiagram, we - # just put in a marker element that refers to the sub-diagram - ret = EditablePartial.from_call( - railroad.NonTerminal, text=lookup.diagrams[el_id].kwargs["name"] - ) - return ret - - # Recursively convert child elements - # Here we find the most relevant Railroad element for matching pyparsing Element - # We use ``items=[]`` here to hold the place for where the child elements will go once created - if isinstance(element, pyparsing.And): - # detect And's created with ``expr*N`` notation - for these use a OneOrMore with a repeat - # (all will have the same name, and resultsName) - if not exprs: - return None - if len(set((e.name, e.resultsName) for e in exprs)) == 1: - ret = EditablePartial.from_call( - railroad.OneOrMore, item="", repeat=str(len(exprs)) - ) - elif _should_vertical(vertical, exprs): - ret = EditablePartial.from_call(railroad.Stack, items=[]) - else: - ret = EditablePartial.from_call(railroad.Sequence, items=[]) - elif isinstance(element, (pyparsing.Or, pyparsing.MatchFirst)): - if not exprs: - return None - if _should_vertical(vertical, exprs): - ret = EditablePartial.from_call(railroad.Choice, 0, items=[]) - else: - ret = EditablePartial.from_call(railroad.HorizontalChoice, items=[]) - elif isinstance(element, pyparsing.Each): - if not exprs: - return None - ret = EditablePartial.from_call(EachItem, items=[]) - elif isinstance(element, pyparsing.NotAny): - ret = EditablePartial.from_call(AnnotatedItem, label="NOT", item="") - elif isinstance(element, pyparsing.FollowedBy): - ret = EditablePartial.from_call(AnnotatedItem, label="LOOKAHEAD", 
item="") - elif isinstance(element, pyparsing.PrecededBy): - ret = EditablePartial.from_call(AnnotatedItem, label="LOOKBEHIND", item="") - elif isinstance(element, pyparsing.Group): - if show_groups: - ret = EditablePartial.from_call(AnnotatedItem, label="", item="") - else: - ret = EditablePartial.from_call(railroad.Group, label="", item="") - elif isinstance(element, pyparsing.TokenConverter): - ret = EditablePartial.from_call( - AnnotatedItem, label=type(element).__name__.lower(), item="" - ) - elif isinstance(element, pyparsing.Opt): - ret = EditablePartial.from_call(railroad.Optional, item="") - elif isinstance(element, pyparsing.OneOrMore): - ret = EditablePartial.from_call(railroad.OneOrMore, item="") - elif isinstance(element, pyparsing.ZeroOrMore): - ret = EditablePartial.from_call(railroad.ZeroOrMore, item="") - elif isinstance(element, pyparsing.Group): - ret = EditablePartial.from_call( - railroad.Group, item=None, label=element_results_name - ) - elif isinstance(element, pyparsing.Empty) and not element.customName: - # Skip unnamed "Empty" elements - ret = None - elif len(exprs) > 1: - ret = EditablePartial.from_call(railroad.Sequence, items=[]) - elif len(exprs) > 0 and not element_results_name: - ret = EditablePartial.from_call(railroad.Group, item="", label=name) - else: - terminal = EditablePartial.from_call(railroad.Terminal, element.defaultName) - ret = terminal - - if ret is None: - return - - # Indicate this element's position in the tree so we can extract it if necessary - lookup[el_id] = ElementState( - element=element, - converted=ret, - parent=parent, - parent_index=index, - number=lookup.generate_index(), - ) - if element.customName: - lookup[el_id].mark_for_extraction(el_id, lookup, element.customName) - - i = 0 - for expr in exprs: - # Add a placeholder index in case we have to extract the child before we even add it to the parent - if "items" in ret.kwargs: - ret.kwargs["items"].insert(i, None) - - item = _to_diagram_element( - expr, - parent=ret, - lookup=lookup, - vertical=vertical, - index=i, - show_results_names=show_results_names, - show_groups=show_groups, - ) - - # Some elements don't need to be shown in the diagram - if item is not None: - if "item" in ret.kwargs: - ret.kwargs["item"] = item - elif "items" in ret.kwargs: - # If we've already extracted the child, don't touch this index, since it's occupied by a nonterminal - ret.kwargs["items"][i] = item - i += 1 - elif "items" in ret.kwargs: - # If we're supposed to skip this element, remove it from the parent - del ret.kwargs["items"][i] - - # If all this items children are none, skip this item - if ret and ( - ("items" in ret.kwargs and len(ret.kwargs["items"]) == 0) - or ("item" in ret.kwargs and ret.kwargs["item"] is None) - ): - ret = EditablePartial.from_call(railroad.Terminal, name) - - # Mark this element as "complete", ie it has all of its children - if el_id in lookup: - lookup[el_id].complete = True - - if el_id in lookup and lookup[el_id].extract and lookup[el_id].complete: - lookup.extract_into_diagram(el_id) - if ret is not None: - ret = EditablePartial.from_call( - railroad.NonTerminal, text=lookup.diagrams[el_id].kwargs["name"] - ) - - return ret diff --git a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/exceptions.py b/venv/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/exceptions.py deleted file mode 100644 index a38447b..0000000 --- a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/exceptions.py +++ /dev/null @@ -1,267 +0,0 @@ -# 
exceptions.py - -import re -import sys -import typing - -from .util import col, line, lineno, _collapse_string_to_ranges -from .unicode import pyparsing_unicode as ppu - - -class ExceptionWordUnicode(ppu.Latin1, ppu.LatinA, ppu.LatinB, ppu.Greek, ppu.Cyrillic): - pass - - -_extract_alphanums = _collapse_string_to_ranges(ExceptionWordUnicode.alphanums) -_exception_word_extractor = re.compile("([" + _extract_alphanums + "]{1,16})|.") - - -class ParseBaseException(Exception): - """base exception class for all parsing runtime exceptions""" - - # Performance tuning: we construct a *lot* of these, so keep this - # constructor as small and fast as possible - def __init__( - self, - pstr: str, - loc: int = 0, - msg: typing.Optional[str] = None, - elem=None, - ): - self.loc = loc - if msg is None: - self.msg = pstr - self.pstr = "" - else: - self.msg = msg - self.pstr = pstr - self.parser_element = self.parserElement = elem - self.args = (pstr, loc, msg) - - @staticmethod - def explain_exception(exc, depth=16): - """ - Method to take an exception and translate the Python internal traceback into a list - of the pyparsing expressions that caused the exception to be raised. - - Parameters: - - - exc - exception raised during parsing (need not be a ParseException, in support - of Python exceptions that might be raised in a parse action) - - depth (default=16) - number of levels back in the stack trace to list expression - and function names; if None, the full stack trace names will be listed; if 0, only - the failing input line, marker, and exception string will be shown - - Returns a multi-line string listing the ParserElements and/or function names in the - exception's stack trace. - """ - import inspect - from .core import ParserElement - - if depth is None: - depth = sys.getrecursionlimit() - ret = [] - if isinstance(exc, ParseBaseException): - ret.append(exc.line) - ret.append(" " * (exc.column - 1) + "^") - ret.append("{}: {}".format(type(exc).__name__, exc)) - - if depth > 0: - callers = inspect.getinnerframes(exc.__traceback__, context=depth) - seen = set() - for i, ff in enumerate(callers[-depth:]): - frm = ff[0] - - f_self = frm.f_locals.get("self", None) - if isinstance(f_self, ParserElement): - if frm.f_code.co_name not in ("parseImpl", "_parseNoCache"): - continue - if id(f_self) in seen: - continue - seen.add(id(f_self)) - - self_type = type(f_self) - ret.append( - "{}.{} - {}".format( - self_type.__module__, self_type.__name__, f_self - ) - ) - - elif f_self is not None: - self_type = type(f_self) - ret.append("{}.{}".format(self_type.__module__, self_type.__name__)) - - else: - code = frm.f_code - if code.co_name in ("wrapper", ""): - continue - - ret.append("{}".format(code.co_name)) - - depth -= 1 - if not depth: - break - - return "\n".join(ret) - - @classmethod - def _from_exception(cls, pe): - """ - internal factory method to simplify creating one type of ParseException - from another - avoids having __init__ signature conflicts among subclasses - """ - return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement) - - @property - def line(self) -> str: - """ - Return the line of text where the exception occurred. - """ - return line(self.loc, self.pstr) - - @property - def lineno(self) -> int: - """ - Return the 1-based line number of text where the exception occurred. - """ - return lineno(self.loc, self.pstr) - - @property - def col(self) -> int: - """ - Return the 1-based column on the line of text where the exception occurred. 
- """ - return col(self.loc, self.pstr) - - @property - def column(self) -> int: - """ - Return the 1-based column on the line of text where the exception occurred. - """ - return col(self.loc, self.pstr) - - def __str__(self) -> str: - if self.pstr: - if self.loc >= len(self.pstr): - foundstr = ", found end of text" - else: - # pull out next word at error location - found_match = _exception_word_extractor.match(self.pstr, self.loc) - if found_match is not None: - found = found_match.group(0) - else: - found = self.pstr[self.loc : self.loc + 1] - foundstr = (", found %r" % found).replace(r"\\", "\\") - else: - foundstr = "" - return "{}{} (at char {}), (line:{}, col:{})".format( - self.msg, foundstr, self.loc, self.lineno, self.column - ) - - def __repr__(self): - return str(self) - - def mark_input_line(self, marker_string: str = None, *, markerString=">!<") -> str: - """ - Extracts the exception line from the input string, and marks - the location of the exception with a special symbol. - """ - markerString = marker_string if marker_string is not None else markerString - line_str = self.line - line_column = self.column - 1 - if markerString: - line_str = "".join( - (line_str[:line_column], markerString, line_str[line_column:]) - ) - return line_str.strip() - - def explain(self, depth=16) -> str: - """ - Method to translate the Python internal traceback into a list - of the pyparsing expressions that caused the exception to be raised. - - Parameters: - - - depth (default=16) - number of levels back in the stack trace to list expression - and function names; if None, the full stack trace names will be listed; if 0, only - the failing input line, marker, and exception string will be shown - - Returns a multi-line string listing the ParserElements and/or function names in the - exception's stack trace. - - Example:: - - expr = pp.Word(pp.nums) * 3 - try: - expr.parse_string("123 456 A789") - except pp.ParseException as pe: - print(pe.explain(depth=0)) - - prints:: - - 123 456 A789 - ^ - ParseException: Expected W:(0-9), found 'A' (at char 8), (line:1, col:9) - - Note: the diagnostic output will include string representations of the expressions - that failed to parse. These representations will be more helpful if you use `set_name` to - give identifiable names to your expressions. Otherwise they will use the default string - forms, which may be cryptic to read. - - Note: pyparsing's default truncation of exception tracebacks may also truncate the - stack of expressions that are displayed in the ``explain`` output. 
To get the full listing - of parser expressions, you may have to set ``ParserElement.verbose_stacktrace = True`` - """ - return self.explain_exception(self, depth) - - markInputline = mark_input_line - - -class ParseException(ParseBaseException): - """ - Exception thrown when a parse expression doesn't match the input string - - Example:: - - try: - Word(nums).set_name("integer").parse_string("ABC") - except ParseException as pe: - print(pe) - print("column: {}".format(pe.column)) - - prints:: - - Expected integer (at char 0), (line:1, col:1) - column: 1 - - """ - - -class ParseFatalException(ParseBaseException): - """ - User-throwable exception thrown when inconsistent parse content - is found; stops all parsing immediately - """ - - -class ParseSyntaxException(ParseFatalException): - """ - Just like :class:`ParseFatalException`, but thrown internally - when an :class:`ErrorStop` ('-' operator) indicates - that parsing is to stop immediately because an unbacktrackable - syntax error has been found. - """ - - -class RecursiveGrammarException(Exception): - """ - Exception thrown by :class:`ParserElement.validate` if the - grammar could be left-recursive; parser may need to enable - left recursion using :class:`ParserElement.enable_left_recursion` - """ - - def __init__(self, parseElementList): - self.parseElementTrace = parseElementList - - def __str__(self) -> str: - return "RecursiveGrammarException: {}".format(self.parseElementTrace) diff --git a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/helpers.py b/venv/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/helpers.py deleted file mode 100644 index 9588b3b..0000000 --- a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/helpers.py +++ /dev/null @@ -1,1088 +0,0 @@ -# helpers.py -import html.entities -import re -import typing - -from . import __diag__ -from .core import * -from .util import _bslash, _flatten, _escape_regex_range_chars - - -# -# global helpers -# -def delimited_list( - expr: Union[str, ParserElement], - delim: Union[str, ParserElement] = ",", - combine: bool = False, - min: typing.Optional[int] = None, - max: typing.Optional[int] = None, - *, - allow_trailing_delim: bool = False, -) -> ParserElement: - """Helper to define a delimited list of expressions - the delimiter - defaults to ','. By default, the list elements and delimiters can - have intervening whitespace, and comments, but this can be - overridden by passing ``combine=True`` in the constructor. If - ``combine`` is set to ``True``, the matching tokens are - returned as a single token string, with the delimiters included; - otherwise, the matching tokens are returned as a list of tokens, - with the delimiters suppressed. - - If ``allow_trailing_delim`` is set to True, then the list may end with - a delimiter. 
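(A sketch of the trailing-delimiter behavior just described; names are illustrative)::

    import pyparsing as pp

    row = pp.delimited_list(pp.Word(pp.alphas), allow_trailing_delim=True)
    print(row.parse_string("aa,bb,cc,"))  # -> ['aa', 'bb', 'cc']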
- - Example:: - - delimited_list(Word(alphas)).parse_string("aa,bb,cc") # -> ['aa', 'bb', 'cc'] - delimited_list(Word(hexnums), delim=':', combine=True).parse_string("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE'] - """ - if isinstance(expr, str_type): - expr = ParserElement._literalStringClass(expr) - - dlName = "{expr} [{delim} {expr}]...{end}".format( - expr=str(expr.copy().streamline()), - delim=str(delim), - end=" [{}]".format(str(delim)) if allow_trailing_delim else "", - ) - - if not combine: - delim = Suppress(delim) - - if min is not None: - if min < 1: - raise ValueError("min must be greater than 0") - min -= 1 - if max is not None: - if min is not None and max <= min: - raise ValueError("max must be greater than, or equal to min") - max -= 1 - delimited_list_expr = expr + (delim + expr)[min, max] - - if allow_trailing_delim: - delimited_list_expr += Opt(delim) - - if combine: - return Combine(delimited_list_expr).set_name(dlName) - else: - return delimited_list_expr.set_name(dlName) - - -def counted_array( - expr: ParserElement, - int_expr: typing.Optional[ParserElement] = None, - *, - intExpr: typing.Optional[ParserElement] = None, -) -> ParserElement: - """Helper to define a counted list of expressions. - - This helper defines a pattern of the form:: - - integer expr expr expr... - - where the leading integer tells how many expr expressions follow. - The matched tokens returns the array of expr tokens as a list - the - leading count token is suppressed. - - If ``int_expr`` is specified, it should be a pyparsing expression - that produces an integer value. - - Example:: - - counted_array(Word(alphas)).parse_string('2 ab cd ef') # -> ['ab', 'cd'] - - # in this parser, the leading integer value is given in binary, - # '10' indicating that 2 values are in the array - binary_constant = Word('01').set_parse_action(lambda t: int(t[0], 2)) - counted_array(Word(alphas), int_expr=binary_constant).parse_string('10 ab cd ef') # -> ['ab', 'cd'] - - # if other fields must be parsed after the count but before the - # list items, give the fields results names and they will - # be preserved in the returned ParseResults: - count_with_metadata = integer + Word(alphas)("type") - typed_array = counted_array(Word(alphanums), int_expr=count_with_metadata)("items") - result = typed_array.parse_string("3 bool True True False") - print(result.dump()) - - # prints - # ['True', 'True', 'False'] - # - items: ['True', 'True', 'False'] - # - type: 'bool' - """ - intExpr = intExpr or int_expr - array_expr = Forward() - - def count_field_parse_action(s, l, t): - nonlocal array_expr - n = t[0] - array_expr <<= (expr * n) if n else Empty() - # clear list contents, but keep any named results - del t[:] - - if intExpr is None: - intExpr = Word(nums).set_parse_action(lambda t: int(t[0])) - else: - intExpr = intExpr.copy() - intExpr.set_name("arrayLen") - intExpr.add_parse_action(count_field_parse_action, call_during_try=True) - return (intExpr + array_expr).set_name("(len) " + str(expr) + "...") - - -def match_previous_literal(expr: ParserElement) -> ParserElement: - """Helper to define an expression that is indirectly defined from - the tokens matched in a previous expression, that is, it looks for - a 'repeat' of a previous expression. For example:: - - first = Word(nums) - second = match_previous_literal(first) - match_expr = first + ":" + second - - will match ``"1:1"``, but not ``"1:2"``. Because this - matches a previous literal, will also match the leading - ``"1:1"`` in ``"1:10"``. 
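(Runnable form of the repeat-matching behavior described above)::

    import pyparsing as pp

    first = pp.Word(pp.nums)
    second = pp.match_previous_literal(first)
    match_expr = first + ":" + second

    print(match_expr.parse_string("1:1"))  # -> ['1', ':', '1']
    # match_expr.parse_string("1:2") raises ParseException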
If this is not desired, use - :class:`match_previous_expr`. Do *not* use with packrat parsing - enabled. - """ - rep = Forward() - - def copy_token_to_repeater(s, l, t): - if t: - if len(t) == 1: - rep << t[0] - else: - # flatten t tokens - tflat = _flatten(t.as_list()) - rep << And(Literal(tt) for tt in tflat) - else: - rep << Empty() - - expr.add_parse_action(copy_token_to_repeater, callDuringTry=True) - rep.set_name("(prev) " + str(expr)) - return rep - - -def match_previous_expr(expr: ParserElement) -> ParserElement: - """Helper to define an expression that is indirectly defined from - the tokens matched in a previous expression, that is, it looks for - a 'repeat' of a previous expression. For example:: - - first = Word(nums) - second = match_previous_expr(first) - match_expr = first + ":" + second - - will match ``"1:1"``, but not ``"1:2"``. Because this - matches by expressions, will *not* match the leading ``"1:1"`` - in ``"1:10"``; the expressions are evaluated first, and then - compared, so ``"1"`` is compared with ``"10"``. Do *not* use - with packrat parsing enabled. - """ - rep = Forward() - e2 = expr.copy() - rep <<= e2 - - def copy_token_to_repeater(s, l, t): - matchTokens = _flatten(t.as_list()) - - def must_match_these_tokens(s, l, t): - theseTokens = _flatten(t.as_list()) - if theseTokens != matchTokens: - raise ParseException( - s, l, "Expected {}, found{}".format(matchTokens, theseTokens) - ) - - rep.set_parse_action(must_match_these_tokens, callDuringTry=True) - - expr.add_parse_action(copy_token_to_repeater, callDuringTry=True) - rep.set_name("(prev) " + str(expr)) - return rep - - -def one_of( - strs: Union[typing.Iterable[str], str], - caseless: bool = False, - use_regex: bool = True, - as_keyword: bool = False, - *, - useRegex: bool = True, - asKeyword: bool = False, -) -> ParserElement: - """Helper to quickly define a set of alternative :class:`Literal` s, - and makes sure to do longest-first testing when there is a conflict, - regardless of the input order, but returns - a :class:`MatchFirst` for best performance. 
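(A sketch of the caseless/keyword options described above; ``bool_lit`` is illustrative)::

    import pyparsing as pp

    bool_lit = pp.one_of("true false", caseless=True, as_keyword=True)
    # caseless matches are returned in the casing given to one_of
    print(bool_lit.parse_string("TRUE"))  # -> ['true']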
- - Parameters: - - - ``strs`` - a string of space-delimited literals, or a collection of - string literals - - ``caseless`` - treat all literals as caseless - (default= ``False``) - - ``use_regex`` - as an optimization, will - generate a :class:`Regex` object; otherwise, will generate - a :class:`MatchFirst` object (if ``caseless=True`` or ``asKeyword=True``, or if - creating a :class:`Regex` raises an exception) - (default= ``True``) - - ``as_keyword`` - enforce :class:`Keyword`-style matching on the - generated expressions - (default= ``False``) - - ``asKeyword`` and ``useRegex`` are retained for pre-PEP8 compatibility, - but will be removed in a future release - - Example:: - - comp_oper = one_of("< = > <= >= !=") - var = Word(alphas) - number = Word(nums) - term = var | number - comparison_expr = term + comp_oper + term - print(comparison_expr.search_string("B = 12 AA=23 B<=AA AA>12")) - - prints:: - - [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']] - """ - asKeyword = asKeyword or as_keyword - useRegex = useRegex and use_regex - - if ( - isinstance(caseless, str_type) - and __diag__.warn_on_multiple_string_args_to_oneof - ): - warnings.warn( - "More than one string argument passed to one_of, pass" - " choices as a list or space-delimited string", - stacklevel=2, - ) - - if caseless: - isequal = lambda a, b: a.upper() == b.upper() - masks = lambda a, b: b.upper().startswith(a.upper()) - parseElementClass = CaselessKeyword if asKeyword else CaselessLiteral - else: - isequal = lambda a, b: a == b - masks = lambda a, b: b.startswith(a) - parseElementClass = Keyword if asKeyword else Literal - - symbols: List[str] = [] - if isinstance(strs, str_type): - symbols = strs.split() - elif isinstance(strs, Iterable): - symbols = list(strs) - else: - raise TypeError("Invalid argument to one_of, expected string or iterable") - if not symbols: - return NoMatch() - - # reorder given symbols to take care to avoid masking longer choices with shorter ones - # (but only if the given symbols are not just single characters) - if any(len(sym) > 1 for sym in symbols): - i = 0 - while i < len(symbols) - 1: - cur = symbols[i] - for j, other in enumerate(symbols[i + 1 :]): - if isequal(other, cur): - del symbols[i + j + 1] - break - elif masks(cur, other): - del symbols[i + j + 1] - symbols.insert(i, other) - break - else: - i += 1 - - if useRegex: - re_flags: int = re.IGNORECASE if caseless else 0 - - try: - if all(len(sym) == 1 for sym in symbols): - # symbols are just single characters, create range regex pattern - patt = "[{}]".format( - "".join(_escape_regex_range_chars(sym) for sym in symbols) - ) - else: - patt = "|".join(re.escape(sym) for sym in symbols) - - # wrap with \b word break markers if defining as keywords - if asKeyword: - patt = r"\b(?:{})\b".format(patt) - - ret = Regex(patt, flags=re_flags).set_name(" | ".join(symbols)) - - if caseless: - # add parse action to return symbols as specified, not in random - # casing as found in input string - symbol_map = {sym.lower(): sym for sym in symbols} - ret.add_parse_action(lambda s, l, t: symbol_map[t[0].lower()]) - - return ret - - except re.error: - warnings.warn( - "Exception creating Regex for one_of, building MatchFirst", stacklevel=2 - ) - - # last resort, just use MatchFirst - return MatchFirst(parseElementClass(sym) for sym in symbols).set_name( - " | ".join(symbols) - ) - - -def dict_of(key: ParserElement, value: ParserElement) -> ParserElement: - """Helper to easily and clearly define a dictionary by specifying 
- the respective patterns for the key and value. Takes care of - defining the :class:`Dict`, :class:`ZeroOrMore`, and - :class:`Group` tokens in the proper order. The key pattern - can include delimiting markers or punctuation, as long as they are - suppressed, thereby leaving the significant key text. The value - pattern can include named results, so that the :class:`Dict` results - can include named token fields. - - Example:: - - text = "shape: SQUARE posn: upper left color: light blue texture: burlap" - attr_expr = (label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)) - print(attr_expr[1, ...].parse_string(text).dump()) - - attr_label = label - attr_value = Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join) - - # similar to Dict, but simpler call format - result = dict_of(attr_label, attr_value).parse_string(text) - print(result.dump()) - print(result['shape']) - print(result.shape) # object attribute access works too - print(result.as_dict()) - - prints:: - - [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']] - - color: 'light blue' - - posn: 'upper left' - - shape: 'SQUARE' - - texture: 'burlap' - SQUARE - SQUARE - {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'} - """ - return Dict(OneOrMore(Group(key + value))) - - -def original_text_for( - expr: ParserElement, as_string: bool = True, *, asString: bool = True -) -> ParserElement: - """Helper to return the original, untokenized text for a given - expression. Useful to restore the parsed fields of an HTML start - tag into the raw tag text itself, or to revert separate tokens with - intervening whitespace back to the original matching input text. By - default, returns astring containing the original parsed text. - - If the optional ``as_string`` argument is passed as - ``False``, then the return value is - a :class:`ParseResults` containing any results names that - were originally matched, and a single token containing the original - matched text from the input string. So if the expression passed to - :class:`original_text_for` contains expressions with defined - results names, you must set ``as_string`` to ``False`` if you - want to preserve those results name values. - - The ``asString`` pre-PEP8 argument is retained for compatibility, - but will be removed in a future release. - - Example:: - - src = "this is test bold text normal text " - for tag in ("b", "i"): - opener, closer = make_html_tags(tag) - patt = original_text_for(opener + SkipTo(closer) + closer) - print(patt.search_string(src)[0]) - - prints:: - - [' bold text '] - ['text'] - """ - asString = asString and as_string - - locMarker = Empty().set_parse_action(lambda s, loc, t: loc) - endlocMarker = locMarker.copy() - endlocMarker.callPreparse = False - matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end") - if asString: - extractText = lambda s, l, t: s[t._original_start : t._original_end] - else: - - def extractText(s, l, t): - t[:] = [s[t.pop("_original_start") : t.pop("_original_end")]] - - matchExpr.set_parse_action(extractText) - matchExpr.ignoreExprs = expr.ignoreExprs - matchExpr.suppress_warning(Diagnostics.warn_ungrouped_named_tokens_in_collection) - return matchExpr - - -def ungroup(expr: ParserElement) -> ParserElement: - """Helper to undo pyparsing's default grouping of And expressions, - even if all but one are non-empty. 
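(For contrast with ``Group``, a sketch of ``ungroup``; names are illustrative)::

    import pyparsing as pp

    grouped = pp.Group(pp.Word(pp.alphas) + pp.Word(pp.nums))
    print(grouped.parse_string("ab 12"))              # -> [['ab', '12']]
    print(pp.ungroup(grouped).parse_string("ab 12"))  # -> ['ab', '12']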
- """ - return TokenConverter(expr).add_parse_action(lambda t: t[0]) - - -def locatedExpr(expr: ParserElement) -> ParserElement: - """ - (DEPRECATED - future code should use the Located class) - Helper to decorate a returned token with its starting and ending - locations in the input string. - - This helper adds the following results names: - - - ``locn_start`` - location where matched expression begins - - ``locn_end`` - location where matched expression ends - - ``value`` - the actual parsed results - - Be careful if the input text contains ```` characters, you - may want to call :class:`ParserElement.parseWithTabs` - - Example:: - - wd = Word(alphas) - for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"): - print(match) - - prints:: - - [[0, 'ljsdf', 5]] - [[8, 'lksdjjf', 15]] - [[18, 'lkkjj', 23]] - """ - locator = Empty().set_parse_action(lambda ss, ll, tt: ll) - return Group( - locator("locn_start") - + expr("value") - + locator.copy().leaveWhitespace()("locn_end") - ) - - -def nested_expr( - opener: Union[str, ParserElement] = "(", - closer: Union[str, ParserElement] = ")", - content: typing.Optional[ParserElement] = None, - ignore_expr: ParserElement = quoted_string(), - *, - ignoreExpr: ParserElement = quoted_string(), -) -> ParserElement: - """Helper method for defining nested lists enclosed in opening and - closing delimiters (``"("`` and ``")"`` are the default). - - Parameters: - - ``opener`` - opening character for a nested list - (default= ``"("``); can also be a pyparsing expression - - ``closer`` - closing character for a nested list - (default= ``")"``); can also be a pyparsing expression - - ``content`` - expression for items within the nested lists - (default= ``None``) - - ``ignore_expr`` - expression for ignoring opening and closing delimiters - (default= :class:`quoted_string`) - - ``ignoreExpr`` - this pre-PEP8 argument is retained for compatibility - but will be removed in a future release - - If an expression is not provided for the content argument, the - nested expression will capture all whitespace-delimited content - between delimiters as a list of separate values. - - Use the ``ignore_expr`` argument to define expressions that may - contain opening or closing characters that should not be treated as - opening or closing characters for nesting, such as quoted_string or - a comment expression. Specify multiple expressions using an - :class:`Or` or :class:`MatchFirst`. The default is - :class:`quoted_string`, but if no expressions are to be ignored, then - pass ``None`` for this argument. 
- - Example:: - - data_type = one_of("void int short long char float double") - decl_data_type = Combine(data_type + Opt(Word('*'))) - ident = Word(alphas+'_', alphanums+'_') - number = pyparsing_common.number - arg = Group(decl_data_type + ident) - LPAR, RPAR = map(Suppress, "()") - - code_body = nested_expr('{', '}', ignore_expr=(quoted_string | c_style_comment)) - - c_function = (decl_data_type("type") - + ident("name") - + LPAR + Opt(delimited_list(arg), [])("args") + RPAR - + code_body("body")) - c_function.ignore(c_style_comment) - - source_code = ''' - int is_odd(int x) { - return (x%2); - } - - int dec_to_hex(char hchar) { - if (hchar >= '0' && hchar <= '9') { - return (ord(hchar)-ord('0')); - } else { - return (10+ord(hchar)-ord('A')); - } - } - ''' - for func in c_function.search_string(source_code): - print("%(name)s (%(type)s) args: %(args)s" % func) - - - prints:: - - is_odd (int) args: [['int', 'x']] - dec_to_hex (int) args: [['char', 'hchar']] - """ - if ignoreExpr != ignore_expr: - ignoreExpr = ignore_expr if ignoreExpr == quoted_string() else ignoreExpr - if opener == closer: - raise ValueError("opening and closing strings cannot be the same") - if content is None: - if isinstance(opener, str_type) and isinstance(closer, str_type): - if len(opener) == 1 and len(closer) == 1: - if ignoreExpr is not None: - content = Combine( - OneOrMore( - ~ignoreExpr - + CharsNotIn( - opener + closer + ParserElement.DEFAULT_WHITE_CHARS, - exact=1, - ) - ) - ).set_parse_action(lambda t: t[0].strip()) - else: - content = empty.copy() + CharsNotIn( - opener + closer + ParserElement.DEFAULT_WHITE_CHARS - ).set_parse_action(lambda t: t[0].strip()) - else: - if ignoreExpr is not None: - content = Combine( - OneOrMore( - ~ignoreExpr - + ~Literal(opener) - + ~Literal(closer) - + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1) - ) - ).set_parse_action(lambda t: t[0].strip()) - else: - content = Combine( - OneOrMore( - ~Literal(opener) - + ~Literal(closer) - + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1) - ) - ).set_parse_action(lambda t: t[0].strip()) - else: - raise ValueError( - "opening and closing arguments must be strings if no content expression is given" - ) - ret = Forward() - if ignoreExpr is not None: - ret <<= Group( - Suppress(opener) + ZeroOrMore(ignoreExpr | ret | content) + Suppress(closer) - ) - else: - ret <<= Group(Suppress(opener) + ZeroOrMore(ret | content) + Suppress(closer)) - ret.set_name("nested %s%s expression" % (opener, closer)) - return ret - - -def _makeTags(tagStr, xml, suppress_LT=Suppress("<"), suppress_GT=Suppress(">")): - """Internal helper to construct opening and closing tag expressions, given a tag name""" - if isinstance(tagStr, str_type): - resname = tagStr - tagStr = Keyword(tagStr, caseless=not xml) - else: - resname = tagStr.name - - tagAttrName = Word(alphas, alphanums + "_-:") - if xml: - tagAttrValue = dbl_quoted_string.copy().set_parse_action(remove_quotes) - openTag = ( - suppress_LT - + tagStr("tag") - + Dict(ZeroOrMore(Group(tagAttrName + Suppress("=") + tagAttrValue))) - + Opt("/", default=[False])("empty").set_parse_action( - lambda s, l, t: t[0] == "/" - ) - + suppress_GT - ) - else: - tagAttrValue = quoted_string.copy().set_parse_action(remove_quotes) | Word( - printables, exclude_chars=">" - ) - openTag = ( - suppress_LT - + tagStr("tag") - + Dict( - ZeroOrMore( - Group( - tagAttrName.set_parse_action(lambda t: t[0].lower()) - + Opt(Suppress("=") + tagAttrValue) - ) - ) - ) - + Opt("/", 
default=[False])("empty").set_parse_action( - lambda s, l, t: t[0] == "/" - ) - + suppress_GT - ) - closeTag = Combine(Literal("", adjacent=False) - - openTag.set_name("<%s>" % resname) - # add start results name in parse action now that ungrouped names are not reported at two levels - openTag.add_parse_action( - lambda t: t.__setitem__( - "start" + "".join(resname.replace(":", " ").title().split()), t.copy() - ) - ) - closeTag = closeTag( - "end" + "".join(resname.replace(":", " ").title().split()) - ).set_name("" % resname) - openTag.tag = resname - closeTag.tag = resname - openTag.tag_body = SkipTo(closeTag()) - return openTag, closeTag - - -def make_html_tags( - tag_str: Union[str, ParserElement] -) -> Tuple[ParserElement, ParserElement]: - """Helper to construct opening and closing tag expressions for HTML, - given a tag name. Matches tags in either upper or lower case, - attributes with namespaces and with quoted or unquoted values. - - Example:: - - text = 'More info at the pyparsing wiki page' - # make_html_tags returns pyparsing expressions for the opening and - # closing tags as a 2-tuple - a, a_end = make_html_tags("A") - link_expr = a + SkipTo(a_end)("link_text") + a_end - - for link in link_expr.search_string(text): - # attributes in the tag (like "href" shown here) are - # also accessible as named results - print(link.link_text, '->', link.href) - - prints:: - - pyparsing -> https://github.com/pyparsing/pyparsing/wiki - """ - return _makeTags(tag_str, False) - - -def make_xml_tags( - tag_str: Union[str, ParserElement] -) -> Tuple[ParserElement, ParserElement]: - """Helper to construct opening and closing tag expressions for XML, - given a tag name. Matches tags only in the given upper/lower case. - - Example: similar to :class:`make_html_tags` - """ - return _makeTags(tag_str, True) - - -any_open_tag: ParserElement -any_close_tag: ParserElement -any_open_tag, any_close_tag = make_html_tags( - Word(alphas, alphanums + "_:").set_name("any tag") -) - -_htmlEntityMap = {k.rstrip(";"): v for k, v in html.entities.html5.items()} -common_html_entity = Regex("&(?P" + "|".join(_htmlEntityMap) + ");").set_name( - "common HTML entity" -) - - -def replace_html_entity(t): - """Helper parser action to replace common HTML entities with their special characters""" - return _htmlEntityMap.get(t.entity) - - -class OpAssoc(Enum): - LEFT = 1 - RIGHT = 2 - - -InfixNotationOperatorArgType = Union[ - ParserElement, str, Tuple[Union[ParserElement, str], Union[ParserElement, str]] -] -InfixNotationOperatorSpec = Union[ - Tuple[ - InfixNotationOperatorArgType, - int, - OpAssoc, - typing.Optional[ParseAction], - ], - Tuple[ - InfixNotationOperatorArgType, - int, - OpAssoc, - ], -] - - -def infix_notation( - base_expr: ParserElement, - op_list: List[InfixNotationOperatorSpec], - lpar: Union[str, ParserElement] = Suppress("("), - rpar: Union[str, ParserElement] = Suppress(")"), -) -> ParserElement: - """Helper method for constructing grammars of expressions made up of - operators working in a precedence hierarchy. Operators may be unary - or binary, left- or right-associative. Parse actions can also be - attached to operator expressions. The generated parser will also - recognize the use of parentheses to override operator precedences - (see example below). - - Note: if you define a deep operator list, you may see performance - issues when using infix_notation. See - :class:`ParserElement.enable_packrat` for a mechanism to potentially - improve your parser performance. 
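(Per the performance note above, a sketch of enabling packrat memoization;
call it once, before any parsing is done)::

    import pyparsing as pp

    pp.ParserElement.enable_packrat()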
- - Parameters: - - ``base_expr`` - expression representing the most basic operand to - be used in the expression - - ``op_list`` - list of tuples, one for each operator precedence level - in the expression grammar; each tuple is of the form ``(op_expr, - num_operands, right_left_assoc, (optional)parse_action)``, where: - - - ``op_expr`` is the pyparsing expression for the operator; may also - be a string, which will be converted to a Literal; if ``num_operands`` - is 3, ``op_expr`` is a tuple of two expressions, for the two - operators separating the 3 terms - - ``num_operands`` is the number of terms for this operator (must be 1, - 2, or 3) - - ``right_left_assoc`` is the indicator whether the operator is right - or left associative, using the pyparsing-defined constants - ``OpAssoc.RIGHT`` and ``OpAssoc.LEFT``. - - ``parse_action`` is the parse action to be associated with - expressions matching this operator expression (the parse action - tuple member may be omitted); if the parse action is passed - a tuple or list of functions, this is equivalent to calling - ``set_parse_action(*fn)`` - (:class:`ParserElement.set_parse_action`) - - ``lpar`` - expression for matching left-parentheses; if passed as a - str, then will be parsed as Suppress(lpar). If lpar is passed as - an expression (such as ``Literal('(')``), then it will be kept in - the parsed results, and grouped with them. (default= ``Suppress('(')``) - - ``rpar`` - expression for matching right-parentheses; if passed as a - str, then will be parsed as Suppress(rpar). If rpar is passed as - an expression (such as ``Literal(')')``), then it will be kept in - the parsed results, and grouped with them. (default= ``Suppress(')')``) - - Example:: - - # simple example of four-function arithmetic with ints and - # variable names - integer = pyparsing_common.signed_integer - varname = pyparsing_common.identifier - - arith_expr = infix_notation(integer | varname, - [ - ('-', 1, OpAssoc.RIGHT), - (one_of('* /'), 2, OpAssoc.LEFT), - (one_of('+ -'), 2, OpAssoc.LEFT), - ]) - - arith_expr.run_tests(''' - 5+3*6 - (5+3)*6 - -2--11 - ''', full_dump=False) - - prints:: - - 5+3*6 - [[5, '+', [3, '*', 6]]] - - (5+3)*6 - [[[5, '+', 3], '*', 6]] - - -2--11 - [[['-', 2], '-', ['-', 11]]] - """ - # captive version of FollowedBy that does not do parse actions or capture results names - class _FB(FollowedBy): - def parseImpl(self, instring, loc, doActions=True): - self.expr.try_parse(instring, loc) - return loc, [] - - _FB.__name__ = "FollowedBy>" - - ret = Forward() - if isinstance(lpar, str): - lpar = Suppress(lpar) - if isinstance(rpar, str): - rpar = Suppress(rpar) - - # if lpar and rpar are not suppressed, wrap in group - if not (isinstance(rpar, Suppress) and isinstance(rpar, Suppress)): - lastExpr = base_expr | Group(lpar + ret + rpar) - else: - lastExpr = base_expr | (lpar + ret + rpar) - - for i, operDef in enumerate(op_list): - opExpr, arity, rightLeftAssoc, pa = (operDef + (None,))[:4] - if isinstance(opExpr, str_type): - opExpr = ParserElement._literalStringClass(opExpr) - if arity == 3: - if not isinstance(opExpr, (tuple, list)) or len(opExpr) != 2: - raise ValueError( - "if numterms=3, opExpr must be a tuple or list of two expressions" - ) - opExpr1, opExpr2 = opExpr - term_name = "{}{} term".format(opExpr1, opExpr2) - else: - term_name = "{} term".format(opExpr) - - if not 1 <= arity <= 3: - raise ValueError("operator must be unary (1), binary (2), or ternary (3)") - - if rightLeftAssoc not in (OpAssoc.LEFT, OpAssoc.RIGHT): - raise 
ValueError("operator must indicate right or left associativity") - - thisExpr: Forward = Forward().set_name(term_name) - if rightLeftAssoc is OpAssoc.LEFT: - if arity == 1: - matchExpr = _FB(lastExpr + opExpr) + Group(lastExpr + opExpr[1, ...]) - elif arity == 2: - if opExpr is not None: - matchExpr = _FB(lastExpr + opExpr + lastExpr) + Group( - lastExpr + (opExpr + lastExpr)[1, ...] - ) - else: - matchExpr = _FB(lastExpr + lastExpr) + Group(lastExpr[2, ...]) - elif arity == 3: - matchExpr = _FB( - lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr - ) + Group(lastExpr + OneOrMore(opExpr1 + lastExpr + opExpr2 + lastExpr)) - elif rightLeftAssoc is OpAssoc.RIGHT: - if arity == 1: - # try to avoid LR with this extra test - if not isinstance(opExpr, Opt): - opExpr = Opt(opExpr) - matchExpr = _FB(opExpr.expr + thisExpr) + Group(opExpr + thisExpr) - elif arity == 2: - if opExpr is not None: - matchExpr = _FB(lastExpr + opExpr + thisExpr) + Group( - lastExpr + (opExpr + thisExpr)[1, ...] - ) - else: - matchExpr = _FB(lastExpr + thisExpr) + Group( - lastExpr + thisExpr[1, ...] - ) - elif arity == 3: - matchExpr = _FB( - lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr - ) + Group(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) - if pa: - if isinstance(pa, (tuple, list)): - matchExpr.set_parse_action(*pa) - else: - matchExpr.set_parse_action(pa) - thisExpr <<= (matchExpr | lastExpr).setName(term_name) - lastExpr = thisExpr - ret <<= lastExpr - return ret - - -def indentedBlock(blockStatementExpr, indentStack, indent=True, backup_stacks=[]): - """ - (DEPRECATED - use IndentedBlock class instead) - Helper method for defining space-delimited indentation blocks, - such as those used to define block statements in Python source code. - - Parameters: - - - ``blockStatementExpr`` - expression defining syntax of statement that - is repeated within the indented block - - ``indentStack`` - list created by caller to manage indentation stack - (multiple ``statementWithIndentedBlock`` expressions within a single - grammar should share a common ``indentStack``) - - ``indent`` - boolean indicating whether block must be indented beyond - the current level; set to ``False`` for block of left-most statements - (default= ``True``) - - A valid block must contain at least one ``blockStatement``. - - (Note that indentedBlock uses internal parse actions which make it - incompatible with packrat parsing.) - - Example:: - - data = ''' - def A(z): - A1 - B = 100 - G = A2 - A2 - A3 - B - def BB(a,b,c): - BB1 - def BBA(): - bba1 - bba2 - bba3 - C - D - def spam(x,y): - def eggs(z): - pass - ''' - - - indentStack = [1] - stmt = Forward() - - identifier = Word(alphas, alphanums) - funcDecl = ("def" + identifier + Group("(" + Opt(delimitedList(identifier)) + ")") + ":") - func_body = indentedBlock(stmt, indentStack) - funcDef = Group(funcDecl + func_body) - - rvalue = Forward() - funcCall = Group(identifier + "(" + Opt(delimitedList(rvalue)) + ")") - rvalue << (funcCall | identifier | Word(nums)) - assignment = Group(identifier + "=" + rvalue) - stmt << (funcDef | assignment | identifier) - - module_body = stmt[1, ...] 
- - parseTree = module_body.parseString(data) - parseTree.pprint() - - prints:: - - [['def', - 'A', - ['(', 'z', ')'], - ':', - [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]], - 'B', - ['def', - 'BB', - ['(', 'a', 'b', 'c', ')'], - ':', - [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]], - 'C', - 'D', - ['def', - 'spam', - ['(', 'x', 'y', ')'], - ':', - [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]] - """ - backup_stacks.append(indentStack[:]) - - def reset_stack(): - indentStack[:] = backup_stacks[-1] - - def checkPeerIndent(s, l, t): - if l >= len(s): - return - curCol = col(l, s) - if curCol != indentStack[-1]: - if curCol > indentStack[-1]: - raise ParseException(s, l, "illegal nesting") - raise ParseException(s, l, "not a peer entry") - - def checkSubIndent(s, l, t): - curCol = col(l, s) - if curCol > indentStack[-1]: - indentStack.append(curCol) - else: - raise ParseException(s, l, "not a subentry") - - def checkUnindent(s, l, t): - if l >= len(s): - return - curCol = col(l, s) - if not (indentStack and curCol in indentStack): - raise ParseException(s, l, "not an unindent") - if curCol < indentStack[-1]: - indentStack.pop() - - NL = OneOrMore(LineEnd().set_whitespace_chars("\t ").suppress()) - INDENT = (Empty() + Empty().set_parse_action(checkSubIndent)).set_name("INDENT") - PEER = Empty().set_parse_action(checkPeerIndent).set_name("") - UNDENT = Empty().set_parse_action(checkUnindent).set_name("UNINDENT") - if indent: - smExpr = Group( - Opt(NL) - + INDENT - + OneOrMore(PEER + Group(blockStatementExpr) + Opt(NL)) - + UNDENT - ) - else: - smExpr = Group( - Opt(NL) - + OneOrMore(PEER + Group(blockStatementExpr) + Opt(NL)) - + Opt(UNDENT) - ) - - # add a parse action to remove backup_stack from list of backups - smExpr.add_parse_action( - lambda: backup_stacks.pop(-1) and None if backup_stacks else None - ) - smExpr.set_fail_action(lambda a, b, c, d: reset_stack()) - blockStatementExpr.ignore(_bslash + LineEnd()) - return smExpr.set_name("indented block") - - -# it's easy to get these comment structures wrong - they're very common, so may as well make them available -c_style_comment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + "*/").set_name( - "C style comment" -) -"Comment of the form ``/* ... */``" - -html_comment = Regex(r"").set_name("HTML comment") -"Comment of the form ````" - -rest_of_line = Regex(r".*").leave_whitespace().set_name("rest of line") -dbl_slash_comment = Regex(r"//(?:\\\n|[^\n])*").set_name("// comment") -"Comment of the form ``// ... (to end of line)``" - -cpp_style_comment = Combine( - Regex(r"/\*(?:[^*]|\*(?!/))*") + "*/" | dbl_slash_comment -).set_name("C++ style comment") -"Comment of either form :class:`c_style_comment` or :class:`dbl_slash_comment`" - -java_style_comment = cpp_style_comment -"Same as :class:`cpp_style_comment`" - -python_style_comment = Regex(r"#.*").set_name("Python style comment") -"Comment of the form ``# ... 
(to end of line)``" - - -# build list of built-in expressions, for future reference if a global default value -# gets updated -_builtin_exprs: List[ParserElement] = [ - v for v in vars().values() if isinstance(v, ParserElement) -] - - -# pre-PEP8 compatible names -delimitedList = delimited_list -countedArray = counted_array -matchPreviousLiteral = match_previous_literal -matchPreviousExpr = match_previous_expr -oneOf = one_of -dictOf = dict_of -originalTextFor = original_text_for -nestedExpr = nested_expr -makeHTMLTags = make_html_tags -makeXMLTags = make_xml_tags -anyOpenTag, anyCloseTag = any_open_tag, any_close_tag -commonHTMLEntity = common_html_entity -replaceHTMLEntity = replace_html_entity -opAssoc = OpAssoc -infixNotation = infix_notation -cStyleComment = c_style_comment -htmlComment = html_comment -restOfLine = rest_of_line -dblSlashComment = dbl_slash_comment -cppStyleComment = cpp_style_comment -javaStyleComment = java_style_comment -pythonStyleComment = python_style_comment diff --git a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/results.py b/venv/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/results.py deleted file mode 100644 index 00c9421..0000000 --- a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/results.py +++ /dev/null @@ -1,760 +0,0 @@ -# results.py -from collections.abc import MutableMapping, Mapping, MutableSequence, Iterator -import pprint -from weakref import ref as wkref -from typing import Tuple, Any - -str_type: Tuple[type, ...] = (str, bytes) -_generator_type = type((_ for _ in ())) - - -class _ParseResultsWithOffset: - __slots__ = ["tup"] - - def __init__(self, p1, p2): - self.tup = (p1, p2) - - def __getitem__(self, i): - return self.tup[i] - - def __getstate__(self): - return self.tup - - def __setstate__(self, *args): - self.tup = args[0] - - -class ParseResults: - """Structured parse results, to provide multiple means of access to - the parsed data: - - - as a list (``len(results)``) - - by list index (``results[0], results[1]``, etc.) - - by attribute (``results.`` - see :class:`ParserElement.set_results_name`) - - Example:: - - integer = Word(nums) - date_str = (integer.set_results_name("year") + '/' - + integer.set_results_name("month") + '/' - + integer.set_results_name("day")) - # equivalent form: - # date_str = (integer("year") + '/' - # + integer("month") + '/' - # + integer("day")) - - # parse_string returns a ParseResults object - result = date_str.parse_string("1999/12/31") - - def test(s, fn=repr): - print("{} -> {}".format(s, fn(eval(s)))) - test("list(result)") - test("result[0]") - test("result['month']") - test("result.day") - test("'month' in result") - test("'minutes' in result") - test("result.dump()", str) - - prints:: - - list(result) -> ['1999', '/', '12', '/', '31'] - result[0] -> '1999' - result['month'] -> '12' - result.day -> '31' - 'month' in result -> True - 'minutes' in result -> False - result.dump() -> ['1999', '/', '12', '/', '31'] - - day: '31' - - month: '12' - - year: '1999' - """ - - _null_values: Tuple[Any, ...] 
= (None, [], "", ()) - - __slots__ = [ - "_name", - "_parent", - "_all_names", - "_modal", - "_toklist", - "_tokdict", - "__weakref__", - ] - - class List(list): - """ - Simple wrapper class to distinguish parsed list results that should be preserved - as actual Python lists, instead of being converted to :class:`ParseResults`: - - LBRACK, RBRACK = map(pp.Suppress, "[]") - element = pp.Forward() - item = ppc.integer - element_list = LBRACK + pp.delimited_list(element) + RBRACK - - # add parse actions to convert from ParseResults to actual Python collection types - def as_python_list(t): - return pp.ParseResults.List(t.as_list()) - element_list.add_parse_action(as_python_list) - - element <<= item | element_list - - element.run_tests(''' - 100 - [2,3,4] - [[2, 1],3,4] - [(2, 1),3,4] - (2,3,4) - ''', post_parse=lambda s, r: (r[0], type(r[0]))) - - prints: - - 100 - (100, ) - - [2,3,4] - ([2, 3, 4], ) - - [[2, 1],3,4] - ([[2, 1], 3, 4], ) - - (Used internally by :class:`Group` when `aslist=True`.) - """ - - def __new__(cls, contained=None): - if contained is None: - contained = [] - - if not isinstance(contained, list): - raise TypeError( - "{} may only be constructed with a list," - " not {}".format(cls.__name__, type(contained).__name__) - ) - - return list.__new__(cls) - - def __new__(cls, toklist=None, name=None, **kwargs): - if isinstance(toklist, ParseResults): - return toklist - self = object.__new__(cls) - self._name = None - self._parent = None - self._all_names = set() - - if toklist is None: - self._toklist = [] - elif isinstance(toklist, (list, _generator_type)): - self._toklist = ( - [toklist[:]] - if isinstance(toklist, ParseResults.List) - else list(toklist) - ) - else: - self._toklist = [toklist] - self._tokdict = dict() - return self - - # Performance tuning: we construct a *lot* of these, so keep this - # constructor as small and fast as possible - def __init__( - self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance - ): - self._modal = modal - if name is not None and name != "": - if isinstance(name, int): - name = str(name) - if not modal: - self._all_names = {name} - self._name = name - if toklist not in self._null_values: - if isinstance(toklist, (str_type, type)): - toklist = [toklist] - if asList: - if isinstance(toklist, ParseResults): - self[name] = _ParseResultsWithOffset( - ParseResults(toklist._toklist), 0 - ) - else: - self[name] = _ParseResultsWithOffset( - ParseResults(toklist[0]), 0 - ) - self[name]._name = name - else: - try: - self[name] = toklist[0] - except (KeyError, TypeError, IndexError): - if toklist is not self: - self[name] = toklist - else: - self._name = name - - def __getitem__(self, i): - if isinstance(i, (int, slice)): - return self._toklist[i] - else: - if i not in self._all_names: - return self._tokdict[i][-1][0] - else: - return ParseResults([v[0] for v in self._tokdict[i]]) - - def __setitem__(self, k, v, isinstance=isinstance): - if isinstance(v, _ParseResultsWithOffset): - self._tokdict[k] = self._tokdict.get(k, list()) + [v] - sub = v[0] - elif isinstance(k, (int, slice)): - self._toklist[k] = v - sub = v - else: - self._tokdict[k] = self._tokdict.get(k, list()) + [ - _ParseResultsWithOffset(v, 0) - ] - sub = v - if isinstance(sub, ParseResults): - sub._parent = wkref(self) - - def __delitem__(self, i): - if isinstance(i, (int, slice)): - mylen = len(self._toklist) - del self._toklist[i] - - # convert int to slice - if isinstance(i, int): - if i < 0: - i += mylen - i = slice(i, i + 1) - # get removed indices - 
removed = list(range(*i.indices(mylen))) - removed.reverse() - # fixup indices in token dictionary - for name, occurrences in self._tokdict.items(): - for j in removed: - for k, (value, position) in enumerate(occurrences): - occurrences[k] = _ParseResultsWithOffset( - value, position - (position > j) - ) - else: - del self._tokdict[i] - - def __contains__(self, k) -> bool: - return k in self._tokdict - - def __len__(self) -> int: - return len(self._toklist) - - def __bool__(self) -> bool: - return not not (self._toklist or self._tokdict) - - def __iter__(self) -> Iterator: - return iter(self._toklist) - - def __reversed__(self) -> Iterator: - return iter(self._toklist[::-1]) - - def keys(self): - return iter(self._tokdict) - - def values(self): - return (self[k] for k in self.keys()) - - def items(self): - return ((k, self[k]) for k in self.keys()) - - def haskeys(self) -> bool: - """ - Since ``keys()`` returns an iterator, this method is helpful in bypassing - code that looks for the existence of any defined results names.""" - return bool(self._tokdict) - - def pop(self, *args, **kwargs): - """ - Removes and returns item at specified index (default= ``last``). - Supports both ``list`` and ``dict`` semantics for ``pop()``. If - passed no argument or an integer argument, it will use ``list`` - semantics and pop tokens from the list of parsed tokens. If passed - a non-integer argument (most likely a string), it will use ``dict`` - semantics and pop the corresponding value from any defined results - names. A second default return value argument is supported, just as in - ``dict.pop()``. - - Example:: - - numlist = Word(nums)[...] - print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321'] - - def remove_first(tokens): - tokens.pop(0) - numlist.add_parse_action(remove_first) - print(numlist.parse_string("0 123 321")) # -> ['123', '321'] - - label = Word(alphas) - patt = label("LABEL") + Word(nums)[1, ...] - print(patt.parse_string("AAB 123 321").dump()) - - # Use pop() in a parse action to remove named result (note that corresponding value is not - # removed from list form of results) - def remove_LABEL(tokens): - tokens.pop("LABEL") - return tokens - patt.add_parse_action(remove_LABEL) - print(patt.parse_string("AAB 123 321").dump()) - - prints:: - - ['AAB', '123', '321'] - - LABEL: 'AAB' - - ['AAB', '123', '321'] - """ - if not args: - args = [-1] - for k, v in kwargs.items(): - if k == "default": - args = (args[0], v) - else: - raise TypeError( - "pop() got an unexpected keyword argument {!r}".format(k) - ) - if isinstance(args[0], int) or len(args) == 1 or args[0] in self: - index = args[0] - ret = self[index] - del self[index] - return ret - else: - defaultvalue = args[1] - return defaultvalue - - def get(self, key, default_value=None): - """ - Returns named result matching the given key, or if there is no - such name, then returns the given ``default_value`` or ``None`` if no - ``default_value`` is specified. - - Similar to ``dict.get()``. - - Example:: - - integer = Word(nums) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - result = date_str.parse_string("1999/12/31") - print(result.get("year")) # -> '1999' - print(result.get("hour", "not specified")) # -> 'not specified' - print(result.get("hour")) # -> None - """ - if key in self: - return self[key] - else: - return default_value - - def insert(self, index, ins_string): - """ - Inserts new element at location index in the list of parsed tokens. - - Similar to ``list.insert()``. 
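Before the ``insert`` example below, a short sketch of how the list- and dict-style protocols above combine in practice, again assuming the public ``pyparsing`` 3.x package::

    import pyparsing as pp

    integer = pp.Word(pp.nums)
    date_str = integer("year") + "/" + integer("month") + "/" + integer("day")
    result = date_str.parse_string("1999/12/31")

    print(len(result), result[0])     # list semantics    -> 5 1999
    print(list(result.keys()))        # dict semantics    -> ['year', 'month', 'day']
    print(result.get("hour", "n/a"))  # dict-style default -> n/a
    print(result.haskeys())           # any results names defined? -> True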
- - Example:: - - numlist = Word(nums)[...] - print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321'] - - # use a parse action to insert the parse location in the front of the parsed results - def insert_locn(locn, tokens): - tokens.insert(0, locn) - numlist.add_parse_action(insert_locn) - print(numlist.parse_string("0 123 321")) # -> [0, '0', '123', '321'] - """ - self._toklist.insert(index, ins_string) - # fixup indices in token dictionary - for name, occurrences in self._tokdict.items(): - for k, (value, position) in enumerate(occurrences): - occurrences[k] = _ParseResultsWithOffset( - value, position + (position > index) - ) - - def append(self, item): - """ - Add single element to end of ``ParseResults`` list of elements. - - Example:: - - numlist = Word(nums)[...] - print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321'] - - # use a parse action to compute the sum of the parsed integers, and add it to the end - def append_sum(tokens): - tokens.append(sum(map(int, tokens))) - numlist.add_parse_action(append_sum) - print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321', 444] - """ - self._toklist.append(item) - - def extend(self, itemseq): - """ - Add sequence of elements to end of ``ParseResults`` list of elements. - - Example:: - - patt = Word(alphas)[1, ...] - - # use a parse action to append the reverse of the matched strings, to make a palindrome - def make_palindrome(tokens): - tokens.extend(reversed([t[::-1] for t in tokens])) - return ''.join(tokens) - patt.add_parse_action(make_palindrome) - print(patt.parse_string("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl' - """ - if isinstance(itemseq, ParseResults): - self.__iadd__(itemseq) - else: - self._toklist.extend(itemseq) - - def clear(self): - """ - Clear all elements and results names. - """ - del self._toklist[:] - self._tokdict.clear() - - def __getattr__(self, name): - try: - return self[name] - except KeyError: - if name.startswith("__"): - raise AttributeError(name) - return "" - - def __add__(self, other) -> "ParseResults": - ret = self.copy() - ret += other - return ret - - def __iadd__(self, other) -> "ParseResults": - if other._tokdict: - offset = len(self._toklist) - addoffset = lambda a: offset if a < 0 else a + offset - otheritems = other._tokdict.items() - otherdictitems = [ - (k, _ParseResultsWithOffset(v[0], addoffset(v[1]))) - for k, vlist in otheritems - for v in vlist - ] - for k, v in otherdictitems: - self[k] = v - if isinstance(v[0], ParseResults): - v[0]._parent = wkref(self) - - self._toklist += other._toklist - self._all_names |= other._all_names - return self - - def __radd__(self, other) -> "ParseResults": - if isinstance(other, int) and other == 0: - # useful for merging many ParseResults using sum() builtin - return self.copy() - else: - # this may raise a TypeError - so be it - return other + self - - def __repr__(self) -> str: - return "{}({!r}, {})".format(type(self).__name__, self._toklist, self.as_dict()) - - def __str__(self) -> str: - return ( - "[" - + ", ".join( - [ - str(i) if isinstance(i, ParseResults) else repr(i) - for i in self._toklist - ] - ) - + "]" - ) - - def _asStringList(self, sep=""): - out = [] - for item in self._toklist: - if out and sep: - out.append(sep) - if isinstance(item, ParseResults): - out += item._asStringList() - else: - out.append(str(item)) - return out - - def as_list(self) -> list: - """ - Returns the parse results as a nested list of matching tokens, all converted to strings. 
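An aside on ``__radd__`` above: returning a copy for ``0 + self`` is what lets the built-in ``sum()`` merge many results, as in this sketch against the public package (the ``as_list`` example then continues below)::

    import pyparsing as pp

    word = pp.Word(pp.alphas)
    pieces = [word.parse_string(s) for s in ("ab", "cd", "ef")]

    # sum() starts from 0; __radd__ turns 0 + ParseResults into a copy,
    # after which ordinary ParseResults addition concatenates the tokens
    merged = sum(pieces)
    print(merged.as_list())  # -> ['ab', 'cd', 'ef']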
- - Example:: - - patt = Word(alphas)[1, ...] - result = patt.parse_string("sldkj lsdkj sldkj") - # even though the result prints in string-like form, it is actually a pyparsing ParseResults - print(type(result), result) # -> ['sldkj', 'lsdkj', 'sldkj'] - - # Use as_list() to create an actual list - result_list = result.as_list() - print(type(result_list), result_list) # -> ['sldkj', 'lsdkj', 'sldkj'] - """ - return [ - res.as_list() if isinstance(res, ParseResults) else res - for res in self._toklist - ] - - def as_dict(self) -> dict: - """ - Returns the named parse results as a nested dictionary. - - Example:: - - integer = Word(nums) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - result = date_str.parse_string('12/31/1999') - print(type(result), repr(result)) # -> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]}) - - result_dict = result.as_dict() - print(type(result_dict), repr(result_dict)) # -> {'day': '1999', 'year': '12', 'month': '31'} - - # even though a ParseResults supports dict-like access, sometime you just need to have a dict - import json - print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable - print(json.dumps(result.as_dict())) # -> {"month": "31", "day": "1999", "year": "12"} - """ - - def to_item(obj): - if isinstance(obj, ParseResults): - return obj.as_dict() if obj.haskeys() else [to_item(v) for v in obj] - else: - return obj - - return dict((k, to_item(v)) for k, v in self.items()) - - def copy(self) -> "ParseResults": - """ - Returns a new copy of a :class:`ParseResults` object. - """ - ret = ParseResults(self._toklist) - ret._tokdict = self._tokdict.copy() - ret._parent = self._parent - ret._all_names |= self._all_names - ret._name = self._name - return ret - - def get_name(self): - r""" - Returns the results name for this token expression. Useful when several - different expressions might match at a particular location. - - Example:: - - integer = Word(nums) - ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d") - house_number_expr = Suppress('#') + Word(nums, alphanums) - user_data = (Group(house_number_expr)("house_number") - | Group(ssn_expr)("ssn") - | Group(integer)("age")) - user_info = user_data[1, ...] - - result = user_info.parse_string("22 111-22-3333 #221B") - for item in result: - print(item.get_name(), ':', item[0]) - - prints:: - - age : 22 - ssn : 111-22-3333 - house_number : 221B - """ - if self._name: - return self._name - elif self._parent: - par = self._parent() - - def find_in_parent(sub): - return next( - ( - k - for k, vlist in par._tokdict.items() - for v, loc in vlist - if sub is v - ), - None, - ) - - return find_in_parent(self) if par else None - elif ( - len(self) == 1 - and len(self._tokdict) == 1 - and next(iter(self._tokdict.values()))[0][1] in (0, -1) - ): - return next(iter(self._tokdict.keys())) - else: - return None - - def dump(self, indent="", full=True, include_list=True, _depth=0) -> str: - """ - Diagnostic method for listing out the contents of - a :class:`ParseResults`. Accepts an optional ``indent`` argument so - that this string can be embedded in a nested display of other data. 
- - Example:: - - integer = Word(nums) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - result = date_str.parse_string('1999/12/31') - print(result.dump()) - - prints:: - - ['1999', '/', '12', '/', '31'] - - day: '31' - - month: '12' - - year: '1999' - """ - out = [] - NL = "\n" - out.append(indent + str(self.as_list()) if include_list else "") - - if full: - if self.haskeys(): - items = sorted((str(k), v) for k, v in self.items()) - for k, v in items: - if out: - out.append(NL) - out.append("{}{}- {}: ".format(indent, (" " * _depth), k)) - if isinstance(v, ParseResults): - if v: - out.append( - v.dump( - indent=indent, - full=full, - include_list=include_list, - _depth=_depth + 1, - ) - ) - else: - out.append(str(v)) - else: - out.append(repr(v)) - if any(isinstance(vv, ParseResults) for vv in self): - v = self - for i, vv in enumerate(v): - if isinstance(vv, ParseResults): - out.append( - "\n{}{}[{}]:\n{}{}{}".format( - indent, - (" " * (_depth)), - i, - indent, - (" " * (_depth + 1)), - vv.dump( - indent=indent, - full=full, - include_list=include_list, - _depth=_depth + 1, - ), - ) - ) - else: - out.append( - "\n%s%s[%d]:\n%s%s%s" - % ( - indent, - (" " * (_depth)), - i, - indent, - (" " * (_depth + 1)), - str(vv), - ) - ) - - return "".join(out) - - def pprint(self, *args, **kwargs): - """ - Pretty-printer for parsed results as a list, using the - `pprint `_ module. - Accepts additional positional or keyword args as defined for - `pprint.pprint `_ . - - Example:: - - ident = Word(alphas, alphanums) - num = Word(nums) - func = Forward() - term = ident | num | Group('(' + func + ')') - func <<= ident + Group(Optional(delimited_list(term))) - result = func.parse_string("fna a,b,(fnb c,d,200),100") - result.pprint(width=40) - - prints:: - - ['fna', - ['a', - 'b', - ['(', 'fnb', ['c', 'd', '200'], ')'], - '100']] - """ - pprint.pprint(self.as_list(), *args, **kwargs) - - # add support for pickle protocol - def __getstate__(self): - return ( - self._toklist, - ( - self._tokdict.copy(), - self._parent is not None and self._parent() or None, - self._all_names, - self._name, - ), - ) - - def __setstate__(self, state): - self._toklist, (self._tokdict, par, inAccumNames, self._name) = state - self._all_names = set(inAccumNames) - if par is not None: - self._parent = wkref(par) - else: - self._parent = None - - def __getnewargs__(self): - return self._toklist, self._name - - def __dir__(self): - return dir(type(self)) + list(self.keys()) - - @classmethod - def from_dict(cls, other, name=None) -> "ParseResults": - """ - Helper classmethod to construct a ``ParseResults`` from a ``dict``, preserving the - name-value relations as results names. If an optional ``name`` argument is - given, a nested ``ParseResults`` will be returned. 
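Since ``__getstate__``/``__setstate__`` above implement the pickle protocol, a round-trip works as sketched here (public package assumed)::

    import pickle

    import pyparsing as pp

    integer = pp.Word(pp.nums)
    result = (integer("year") + "/" + integer("month")).parse_string("1999/12")

    restored = pickle.loads(pickle.dumps(result))
    print(restored.as_list())  # -> ['1999', '/', '12']
    print(restored["month"])   # results names survive the round trip -> 12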
- """ - - def is_iterable(obj): - try: - iter(obj) - except Exception: - return False - else: - return not isinstance(obj, str_type) - - ret = cls([]) - for k, v in other.items(): - if isinstance(v, Mapping): - ret += cls.from_dict(v, name=k) - else: - ret += cls([v], name=k, asList=is_iterable(v)) - if name is not None: - ret = cls([ret], name=name) - return ret - - asList = as_list - asDict = as_dict - getName = get_name - - -MutableMapping.register(ParseResults) -MutableSequence.register(ParseResults) diff --git a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/testing.py b/venv/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/testing.py deleted file mode 100644 index 84a0ef1..0000000 --- a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/testing.py +++ /dev/null @@ -1,331 +0,0 @@ -# testing.py - -from contextlib import contextmanager -import typing - -from .core import ( - ParserElement, - ParseException, - Keyword, - __diag__, - __compat__, -) - - -class pyparsing_test: - """ - namespace class for classes useful in writing unit tests - """ - - class reset_pyparsing_context: - """ - Context manager to be used when writing unit tests that modify pyparsing config values: - - packrat parsing - - bounded recursion parsing - - default whitespace characters. - - default keyword characters - - literal string auto-conversion class - - __diag__ settings - - Example:: - - with reset_pyparsing_context(): - # test that literals used to construct a grammar are automatically suppressed - ParserElement.inlineLiteralsUsing(Suppress) - - term = Word(alphas) | Word(nums) - group = Group('(' + term[...] + ')') - - # assert that the '()' characters are not included in the parsed tokens - self.assertParseAndCheckList(group, "(abc 123 def)", ['abc', '123', 'def']) - - # after exiting context manager, literals are converted to Literal expressions again - """ - - def __init__(self): - self._save_context = {} - - def save(self): - self._save_context["default_whitespace"] = ParserElement.DEFAULT_WHITE_CHARS - self._save_context["default_keyword_chars"] = Keyword.DEFAULT_KEYWORD_CHARS - - self._save_context[ - "literal_string_class" - ] = ParserElement._literalStringClass - - self._save_context["verbose_stacktrace"] = ParserElement.verbose_stacktrace - - self._save_context["packrat_enabled"] = ParserElement._packratEnabled - if ParserElement._packratEnabled: - self._save_context[ - "packrat_cache_size" - ] = ParserElement.packrat_cache.size - else: - self._save_context["packrat_cache_size"] = None - self._save_context["packrat_parse"] = ParserElement._parse - self._save_context[ - "recursion_enabled" - ] = ParserElement._left_recursion_enabled - - self._save_context["__diag__"] = { - name: getattr(__diag__, name) for name in __diag__._all_names - } - - self._save_context["__compat__"] = { - "collect_all_And_tokens": __compat__.collect_all_And_tokens - } - - return self - - def restore(self): - # reset pyparsing global state - if ( - ParserElement.DEFAULT_WHITE_CHARS - != self._save_context["default_whitespace"] - ): - ParserElement.set_default_whitespace_chars( - self._save_context["default_whitespace"] - ) - - ParserElement.verbose_stacktrace = self._save_context["verbose_stacktrace"] - - Keyword.DEFAULT_KEYWORD_CHARS = self._save_context["default_keyword_chars"] - ParserElement.inlineLiteralsUsing( - self._save_context["literal_string_class"] - ) - - for name, value in self._save_context["__diag__"].items(): - (__diag__.enable if value else __diag__.disable)(name) 
- - ParserElement._packratEnabled = False - if self._save_context["packrat_enabled"]: - ParserElement.enable_packrat(self._save_context["packrat_cache_size"]) - else: - ParserElement._parse = self._save_context["packrat_parse"] - ParserElement._left_recursion_enabled = self._save_context[ - "recursion_enabled" - ] - - __compat__.collect_all_And_tokens = self._save_context["__compat__"] - - return self - - def copy(self): - ret = type(self)() - ret._save_context.update(self._save_context) - return ret - - def __enter__(self): - return self.save() - - def __exit__(self, *args): - self.restore() - - class TestParseResultsAsserts: - """ - A mixin class to add parse results assertion methods to normal unittest.TestCase classes. - """ - - def assertParseResultsEquals( - self, result, expected_list=None, expected_dict=None, msg=None - ): - """ - Unit test assertion to compare a :class:`ParseResults` object with an optional ``expected_list``, - and compare any defined results names with an optional ``expected_dict``. - """ - if expected_list is not None: - self.assertEqual(expected_list, result.as_list(), msg=msg) - if expected_dict is not None: - self.assertEqual(expected_dict, result.as_dict(), msg=msg) - - def assertParseAndCheckList( - self, expr, test_string, expected_list, msg=None, verbose=True - ): - """ - Convenience wrapper assert to test a parser element and input string, and assert that - the resulting ``ParseResults.asList()`` is equal to the ``expected_list``. - """ - result = expr.parse_string(test_string, parse_all=True) - if verbose: - print(result.dump()) - else: - print(result.as_list()) - self.assertParseResultsEquals(result, expected_list=expected_list, msg=msg) - - def assertParseAndCheckDict( - self, expr, test_string, expected_dict, msg=None, verbose=True - ): - """ - Convenience wrapper assert to test a parser element and input string, and assert that - the resulting ``ParseResults.asDict()`` is equal to the ``expected_dict``. - """ - result = expr.parse_string(test_string, parseAll=True) - if verbose: - print(result.dump()) - else: - print(result.as_list()) - self.assertParseResultsEquals(result, expected_dict=expected_dict, msg=msg) - - def assertRunTestResults( - self, run_tests_report, expected_parse_results=None, msg=None - ): - """ - Unit test assertion to evaluate output of ``ParserElement.runTests()``. If a list of - list-dict tuples is given as the ``expected_parse_results`` argument, then these are zipped - with the report tuples returned by ``runTests`` and evaluated using ``assertParseResultsEquals``. - Finally, asserts that the overall ``runTests()`` success value is ``True``. 
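A compact sketch of wiring the mixin into a standard ``unittest`` case, assuming the mixin is imported from the public ``pyparsing.testing`` module rather than this vendored copy::

    import unittest

    import pyparsing as pp
    from pyparsing.testing import pyparsing_test as ppt

    class TestDateParser(ppt.TestParseResultsAsserts, unittest.TestCase):
        def test_date(self):
            integer = pp.Word(pp.nums)
            date_expr = integer("year") + "/" + integer("month") + "/" + integer("day")
            # parses with parse_all=True and compares against as_list()
            self.assertParseAndCheckList(
                date_expr,
                "1999/12/31",
                ["1999", "/", "12", "/", "31"],
            )

    if __name__ == "__main__":
        unittest.main()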
- - :param run_tests_report: tuple(bool, [tuple(str, ParseResults or Exception)]) returned from runTests - :param expected_parse_results (optional): [tuple(str, list, dict, Exception)] - """ - run_test_success, run_test_results = run_tests_report - - if expected_parse_results is not None: - merged = [ - (*rpt, expected) - for rpt, expected in zip(run_test_results, expected_parse_results) - ] - for test_string, result, expected in merged: - # expected should be a tuple containing a list and/or a dict or an exception, - # and optional failure message string - # an empty tuple will skip any result validation - fail_msg = next( - (exp for exp in expected if isinstance(exp, str)), None - ) - expected_exception = next( - ( - exp - for exp in expected - if isinstance(exp, type) and issubclass(exp, Exception) - ), - None, - ) - if expected_exception is not None: - with self.assertRaises( - expected_exception=expected_exception, msg=fail_msg or msg - ): - if isinstance(result, Exception): - raise result - else: - expected_list = next( - (exp for exp in expected if isinstance(exp, list)), None - ) - expected_dict = next( - (exp for exp in expected if isinstance(exp, dict)), None - ) - if (expected_list, expected_dict) != (None, None): - self.assertParseResultsEquals( - result, - expected_list=expected_list, - expected_dict=expected_dict, - msg=fail_msg or msg, - ) - else: - # warning here maybe? - print("no validation for {!r}".format(test_string)) - - # do this last, in case some specific test results can be reported instead - self.assertTrue( - run_test_success, msg=msg if msg is not None else "failed runTests" - ) - - @contextmanager - def assertRaisesParseException(self, exc_type=ParseException, msg=None): - with self.assertRaises(exc_type, msg=msg): - yield - - @staticmethod - def with_line_numbers( - s: str, - start_line: typing.Optional[int] = None, - end_line: typing.Optional[int] = None, - expand_tabs: bool = True, - eol_mark: str = "|", - mark_spaces: typing.Optional[str] = None, - mark_control: typing.Optional[str] = None, - ) -> str: - """ - Helpful method for debugging a parser - prints a string with line and column numbers. - (Line and column numbers are 1-based.) 
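For instance (public package assumed), trailing spaces and tabs become visible like this::

    from pyparsing.testing import pyparsing_test as ppt

    data = "abc \n\tdef"
    # expand_tabs mirrors pyparsing's default tab handling; eol_mark makes
    # the trailing space after "abc" visible at the end of line 1
    print(ppt.with_line_numbers(data, eol_mark="|", mark_spaces="."))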
- - :param s: tuple(bool, str - string to be printed with line and column numbers - :param start_line: int - (optional) starting line number in s to print (default=1) - :param end_line: int - (optional) ending line number in s to print (default=len(s)) - :param expand_tabs: bool - (optional) expand tabs to spaces, to match the pyparsing default - :param eol_mark: str - (optional) string to mark the end of lines, helps visualize trailing spaces (default="|") - :param mark_spaces: str - (optional) special character to display in place of spaces - :param mark_control: str - (optional) convert non-printing control characters to a placeholding - character; valid values: - - "unicode" - replaces control chars with Unicode symbols, such as "␍" and "␊" - - any single character string - replace control characters with given string - - None (default) - string is displayed as-is - - :return: str - input string with leading line numbers and column number headers - """ - if expand_tabs: - s = s.expandtabs() - if mark_control is not None: - if mark_control == "unicode": - tbl = str.maketrans( - {c: u for c, u in zip(range(0, 33), range(0x2400, 0x2433))} - | {127: 0x2421} - ) - eol_mark = "" - else: - tbl = str.maketrans( - {c: mark_control for c in list(range(0, 32)) + [127]} - ) - s = s.translate(tbl) - if mark_spaces is not None and mark_spaces != " ": - if mark_spaces == "unicode": - tbl = str.maketrans({9: 0x2409, 32: 0x2423}) - s = s.translate(tbl) - else: - s = s.replace(" ", mark_spaces) - if start_line is None: - start_line = 1 - if end_line is None: - end_line = len(s) - end_line = min(end_line, len(s)) - start_line = min(max(1, start_line), end_line) - - if mark_control != "unicode": - s_lines = s.splitlines()[start_line - 1 : end_line] - else: - s_lines = [line + "␊" for line in s.split("␊")[start_line - 1 : end_line]] - if not s_lines: - return "" - - lineno_width = len(str(end_line)) - max_line_len = max(len(line) for line in s_lines) - lead = " " * (lineno_width + 1) - if max_line_len >= 99: - header0 = ( - lead - + "".join( - "{}{}".format(" " * 99, (i + 1) % 100) - for i in range(max(max_line_len // 100, 1)) - ) - + "\n" - ) - else: - header0 = "" - header1 = ( - header0 - + lead - + "".join( - " {}".format((i + 1) % 10) - for i in range(-(-max_line_len // 10)) - ) - + "\n" - ) - header2 = lead + "1234567890" * (-(-max_line_len // 10)) + "\n" - return ( - header1 - + header2 - + "\n".join( - "{:{}d}:{}{}".format(i, lineno_width, line, eol_mark) - for i, line in enumerate(s_lines, start=start_line) - ) - + "\n" - ) diff --git a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/unicode.py b/venv/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/unicode.py deleted file mode 100644 index 0652620..0000000 --- a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/unicode.py +++ /dev/null @@ -1,352 +0,0 @@ -# unicode.py - -import sys -from itertools import filterfalse -from typing import List, Tuple, Union - - -class _lazyclassproperty: - def __init__(self, fn): - self.fn = fn - self.__doc__ = fn.__doc__ - self.__name__ = fn.__name__ - - def __get__(self, obj, cls): - if cls is None: - cls = type(obj) - if not hasattr(cls, "_intern") or any( - cls._intern is getattr(superclass, "_intern", []) - for superclass in cls.__mro__[1:] - ): - cls._intern = {} - attrname = self.fn.__name__ - if attrname not in cls._intern: - cls._intern[attrname] = self.fn(cls) - return cls._intern[attrname] - - -UnicodeRangeList = List[Union[Tuple[int, int], Tuple[int]]] - - 
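The ``_lazyclassproperty`` descriptor above computes each character-class string once per class and memoizes it in ``cls._intern``, so repeated attribute access is cheap; this is observable from the public package::

    import pyparsing as pp

    ppu = pp.pyparsing_unicode
    first = ppu.Greek.alphas          # first access builds the string from _ranges
    assert first is ppu.Greek.alphas  # later accesses return the cached object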
-class unicode_set: - """ - A set of Unicode characters, for language-specific strings for - ``alphas``, ``nums``, ``alphanums``, and ``printables``. - A unicode_set is defined by a list of ranges in the Unicode character - set, in a class attribute ``_ranges``. Ranges can be specified using - 2-tuples or a 1-tuple, such as:: - - _ranges = [ - (0x0020, 0x007e), - (0x00a0, 0x00ff), - (0x0100,), - ] - - Ranges are left- and right-inclusive. A 1-tuple of (x,) is treated as (x, x). - - A unicode set can also be defined using multiple inheritance of other unicode sets:: - - class CJK(Chinese, Japanese, Korean): - pass - """ - - _ranges: UnicodeRangeList = [] - - @_lazyclassproperty - def _chars_for_ranges(cls): - ret = [] - for cc in cls.__mro__: - if cc is unicode_set: - break - for rr in getattr(cc, "_ranges", ()): - ret.extend(range(rr[0], rr[-1] + 1)) - return [chr(c) for c in sorted(set(ret))] - - @_lazyclassproperty - def printables(cls): - "all non-whitespace characters in this range" - return "".join(filterfalse(str.isspace, cls._chars_for_ranges)) - - @_lazyclassproperty - def alphas(cls): - "all alphabetic characters in this range" - return "".join(filter(str.isalpha, cls._chars_for_ranges)) - - @_lazyclassproperty - def nums(cls): - "all numeric digit characters in this range" - return "".join(filter(str.isdigit, cls._chars_for_ranges)) - - @_lazyclassproperty - def alphanums(cls): - "all alphanumeric characters in this range" - return cls.alphas + cls.nums - - @_lazyclassproperty - def identchars(cls): - "all characters in this range that are valid identifier characters, plus underscore '_'" - return "".join( - sorted( - set( - "".join(filter(str.isidentifier, cls._chars_for_ranges)) - + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzªµº" - + "ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ" - + "_" - ) - ) - ) - - @_lazyclassproperty - def identbodychars(cls): - """ - all characters in this range that are valid identifier body characters, - plus the digits 0-9 - """ - return "".join( - sorted( - set( - cls.identchars - + "0123456789" - + "".join( - [c for c in cls._chars_for_ranges if ("_" + c).isidentifier()] - ) - ) - ) - ) - - -class pyparsing_unicode(unicode_set): - """ - A namespace class for defining common language unicode_sets. 
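In practice these sets feed character-class arguments such as ``Word(...)``, e.g. with the ``Greek`` range defined below (public package assumed)::

    import pyparsing as pp

    greek_word = pp.Word(pp.pyparsing_unicode.Greek.alphas)
    print(greek_word.parse_string("αβγ δε"))  # matches the leading word -> ['αβγ']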
- """ - - # fmt: off - - # define ranges in language character sets - _ranges: UnicodeRangeList = [ - (0x0020, sys.maxunicode), - ] - - class BasicMultilingualPlane(unicode_set): - "Unicode set for the Basic Multilingual Plane" - _ranges: UnicodeRangeList = [ - (0x0020, 0xFFFF), - ] - - class Latin1(unicode_set): - "Unicode set for Latin-1 Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x0020, 0x007E), - (0x00A0, 0x00FF), - ] - - class LatinA(unicode_set): - "Unicode set for Latin-A Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x0100, 0x017F), - ] - - class LatinB(unicode_set): - "Unicode set for Latin-B Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x0180, 0x024F), - ] - - class Greek(unicode_set): - "Unicode set for Greek Unicode Character Ranges" - _ranges: UnicodeRangeList = [ - (0x0342, 0x0345), - (0x0370, 0x0377), - (0x037A, 0x037F), - (0x0384, 0x038A), - (0x038C,), - (0x038E, 0x03A1), - (0x03A3, 0x03E1), - (0x03F0, 0x03FF), - (0x1D26, 0x1D2A), - (0x1D5E,), - (0x1D60,), - (0x1D66, 0x1D6A), - (0x1F00, 0x1F15), - (0x1F18, 0x1F1D), - (0x1F20, 0x1F45), - (0x1F48, 0x1F4D), - (0x1F50, 0x1F57), - (0x1F59,), - (0x1F5B,), - (0x1F5D,), - (0x1F5F, 0x1F7D), - (0x1F80, 0x1FB4), - (0x1FB6, 0x1FC4), - (0x1FC6, 0x1FD3), - (0x1FD6, 0x1FDB), - (0x1FDD, 0x1FEF), - (0x1FF2, 0x1FF4), - (0x1FF6, 0x1FFE), - (0x2129,), - (0x2719, 0x271A), - (0xAB65,), - (0x10140, 0x1018D), - (0x101A0,), - (0x1D200, 0x1D245), - (0x1F7A1, 0x1F7A7), - ] - - class Cyrillic(unicode_set): - "Unicode set for Cyrillic Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x0400, 0x052F), - (0x1C80, 0x1C88), - (0x1D2B,), - (0x1D78,), - (0x2DE0, 0x2DFF), - (0xA640, 0xA672), - (0xA674, 0xA69F), - (0xFE2E, 0xFE2F), - ] - - class Chinese(unicode_set): - "Unicode set for Chinese Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x2E80, 0x2E99), - (0x2E9B, 0x2EF3), - (0x31C0, 0x31E3), - (0x3400, 0x4DB5), - (0x4E00, 0x9FEF), - (0xA700, 0xA707), - (0xF900, 0xFA6D), - (0xFA70, 0xFAD9), - (0x16FE2, 0x16FE3), - (0x1F210, 0x1F212), - (0x1F214, 0x1F23B), - (0x1F240, 0x1F248), - (0x20000, 0x2A6D6), - (0x2A700, 0x2B734), - (0x2B740, 0x2B81D), - (0x2B820, 0x2CEA1), - (0x2CEB0, 0x2EBE0), - (0x2F800, 0x2FA1D), - ] - - class Japanese(unicode_set): - "Unicode set for Japanese Unicode Character Range, combining Kanji, Hiragana, and Katakana ranges" - _ranges: UnicodeRangeList = [] - - class Kanji(unicode_set): - "Unicode set for Kanji Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x4E00, 0x9FBF), - (0x3000, 0x303F), - ] - - class Hiragana(unicode_set): - "Unicode set for Hiragana Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x3041, 0x3096), - (0x3099, 0x30A0), - (0x30FC,), - (0xFF70,), - (0x1B001,), - (0x1B150, 0x1B152), - (0x1F200,), - ] - - class Katakana(unicode_set): - "Unicode set for Katakana Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x3099, 0x309C), - (0x30A0, 0x30FF), - (0x31F0, 0x31FF), - (0x32D0, 0x32FE), - (0xFF65, 0xFF9F), - (0x1B000,), - (0x1B164, 0x1B167), - (0x1F201, 0x1F202), - (0x1F213,), - ] - - class Hangul(unicode_set): - "Unicode set for Hangul (Korean) Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x1100, 0x11FF), - (0x302E, 0x302F), - (0x3131, 0x318E), - (0x3200, 0x321C), - (0x3260, 0x327B), - (0x327E,), - (0xA960, 0xA97C), - (0xAC00, 0xD7A3), - (0xD7B0, 0xD7C6), - (0xD7CB, 0xD7FB), - (0xFFA0, 0xFFBE), - (0xFFC2, 0xFFC7), - (0xFFCA, 0xFFCF), - (0xFFD2, 0xFFD7), - (0xFFDA, 0xFFDC), - ] - - Korean = Hangul - - class 
CJK(Chinese, Japanese, Hangul): - "Unicode set for combined Chinese, Japanese, and Korean (CJK) Unicode Character Range" - - class Thai(unicode_set): - "Unicode set for Thai Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x0E01, 0x0E3A), - (0x0E3F, 0x0E5B) - ] - - class Arabic(unicode_set): - "Unicode set for Arabic Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x0600, 0x061B), - (0x061E, 0x06FF), - (0x0700, 0x077F), - ] - - class Hebrew(unicode_set): - "Unicode set for Hebrew Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x0591, 0x05C7), - (0x05D0, 0x05EA), - (0x05EF, 0x05F4), - (0xFB1D, 0xFB36), - (0xFB38, 0xFB3C), - (0xFB3E,), - (0xFB40, 0xFB41), - (0xFB43, 0xFB44), - (0xFB46, 0xFB4F), - ] - - class Devanagari(unicode_set): - "Unicode set for Devanagari Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x0900, 0x097F), - (0xA8E0, 0xA8FF) - ] - - # fmt: on - - -pyparsing_unicode.Japanese._ranges = ( - pyparsing_unicode.Japanese.Kanji._ranges - + pyparsing_unicode.Japanese.Hiragana._ranges - + pyparsing_unicode.Japanese.Katakana._ranges -) - -pyparsing_unicode.BMP = pyparsing_unicode.BasicMultilingualPlane - -# add language identifiers using language Unicode -pyparsing_unicode.العربية = pyparsing_unicode.Arabic -pyparsing_unicode.中文 = pyparsing_unicode.Chinese -pyparsing_unicode.кириллица = pyparsing_unicode.Cyrillic -pyparsing_unicode.Ελληνικά = pyparsing_unicode.Greek -pyparsing_unicode.עִברִית = pyparsing_unicode.Hebrew -pyparsing_unicode.日本語 = pyparsing_unicode.Japanese -pyparsing_unicode.Japanese.漢字 = pyparsing_unicode.Japanese.Kanji -pyparsing_unicode.Japanese.カタカナ = pyparsing_unicode.Japanese.Katakana -pyparsing_unicode.Japanese.ひらがな = pyparsing_unicode.Japanese.Hiragana -pyparsing_unicode.한국어 = pyparsing_unicode.Korean -pyparsing_unicode.ไทย = pyparsing_unicode.Thai -pyparsing_unicode.देवनागरी = pyparsing_unicode.Devanagari diff --git a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/util.py b/venv/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/util.py deleted file mode 100644 index 34ce092..0000000 --- a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/util.py +++ /dev/null @@ -1,235 +0,0 @@ -# util.py -import warnings -import types -import collections -import itertools -from functools import lru_cache -from typing import List, Union, Iterable - -_bslash = chr(92) - - -class __config_flags: - """Internal class for defining compatibility and debugging flags""" - - _all_names: List[str] = [] - _fixed_names: List[str] = [] - _type_desc = "configuration" - - @classmethod - def _set(cls, dname, value): - if dname in cls._fixed_names: - warnings.warn( - "{}.{} {} is {} and cannot be overridden".format( - cls.__name__, - dname, - cls._type_desc, - str(getattr(cls, dname)).upper(), - ) - ) - return - if dname in cls._all_names: - setattr(cls, dname, value) - else: - raise ValueError("no such {} {!r}".format(cls._type_desc, dname)) - - enable = classmethod(lambda cls, name: cls._set(name, True)) - disable = classmethod(lambda cls, name: cls._set(name, False)) - - -@lru_cache(maxsize=128) -def col(loc: int, strg: str) -> int: - """ - Returns current column within a string, counting newlines as line separators. - The first column is number 1. - - Note: the default parsing behavior is to expand tabs in the input string - before starting the parsing process. 
See - :class:`ParserElement.parseString` for more - information on parsing strings containing ```` s, and suggested - methods to maintain a consistent view of the parsed string, the parse - location, and line and column positions within the parsed string. - """ - s = strg - return 1 if 0 < loc < len(s) and s[loc - 1] == "\n" else loc - s.rfind("\n", 0, loc) - - -@lru_cache(maxsize=128) -def lineno(loc: int, strg: str) -> int: - """Returns current line number within a string, counting newlines as line separators. - The first line is number 1. - - Note - the default parsing behavior is to expand tabs in the input string - before starting the parsing process. See :class:`ParserElement.parseString` - for more information on parsing strings containing ```` s, and - suggested methods to maintain a consistent view of the parsed string, the - parse location, and line and column positions within the parsed string. - """ - return strg.count("\n", 0, loc) + 1 - - -@lru_cache(maxsize=128) -def line(loc: int, strg: str) -> str: - """ - Returns the line of text containing loc within a string, counting newlines as line separators. - """ - last_cr = strg.rfind("\n", 0, loc) - next_cr = strg.find("\n", loc) - return strg[last_cr + 1 : next_cr] if next_cr >= 0 else strg[last_cr + 1 :] - - -class _UnboundedCache: - def __init__(self): - cache = {} - cache_get = cache.get - self.not_in_cache = not_in_cache = object() - - def get(_, key): - return cache_get(key, not_in_cache) - - def set_(_, key, value): - cache[key] = value - - def clear(_): - cache.clear() - - self.size = None - self.get = types.MethodType(get, self) - self.set = types.MethodType(set_, self) - self.clear = types.MethodType(clear, self) - - -class _FifoCache: - def __init__(self, size): - self.not_in_cache = not_in_cache = object() - cache = collections.OrderedDict() - cache_get = cache.get - - def get(_, key): - return cache_get(key, not_in_cache) - - def set_(_, key, value): - cache[key] = value - while len(cache) > size: - cache.popitem(last=False) - - def clear(_): - cache.clear() - - self.size = size - self.get = types.MethodType(get, self) - self.set = types.MethodType(set_, self) - self.clear = types.MethodType(clear, self) - - -class LRUMemo: - """ - A memoizing mapping that retains `capacity` deleted items - - The memo tracks retained items by their access order; once `capacity` items - are retained, the least recently used item is discarded. 
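The ``col``/``lineno``/``line`` helpers above surface in the public package as module-level functions, typically used inside parse actions; a small sketch::

    import pyparsing as pp

    def note_location(s, loc, toks):
        # record the 1-based line and column of the match, per the helpers above
        toks["line"] = pp.lineno(loc, s)
        toks["col"] = pp.col(loc, s)

    word = pp.Word(pp.alphas).add_parse_action(note_location)
    result = word.parse_string("hello")
    print(result["line"], result["col"])  # -> 1 1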
- """ - - def __init__(self, capacity): - self._capacity = capacity - self._active = {} - self._memory = collections.OrderedDict() - - def __getitem__(self, key): - try: - return self._active[key] - except KeyError: - self._memory.move_to_end(key) - return self._memory[key] - - def __setitem__(self, key, value): - self._memory.pop(key, None) - self._active[key] = value - - def __delitem__(self, key): - try: - value = self._active.pop(key) - except KeyError: - pass - else: - while len(self._memory) >= self._capacity: - self._memory.popitem(last=False) - self._memory[key] = value - - def clear(self): - self._active.clear() - self._memory.clear() - - -class UnboundedMemo(dict): - """ - A memoizing mapping that retains all deleted items - """ - - def __delitem__(self, key): - pass - - -def _escape_regex_range_chars(s: str) -> str: - # escape these chars: ^-[] - for c in r"\^-[]": - s = s.replace(c, _bslash + c) - s = s.replace("\n", r"\n") - s = s.replace("\t", r"\t") - return str(s) - - -def _collapse_string_to_ranges( - s: Union[str, Iterable[str]], re_escape: bool = True -) -> str: - def is_consecutive(c): - c_int = ord(c) - is_consecutive.prev, prev = c_int, is_consecutive.prev - if c_int - prev > 1: - is_consecutive.value = next(is_consecutive.counter) - return is_consecutive.value - - is_consecutive.prev = 0 - is_consecutive.counter = itertools.count() - is_consecutive.value = -1 - - def escape_re_range_char(c): - return "\\" + c if c in r"\^-][" else c - - def no_escape_re_range_char(c): - return c - - if not re_escape: - escape_re_range_char = no_escape_re_range_char - - ret = [] - s = "".join(sorted(set(s))) - if len(s) > 3: - for _, chars in itertools.groupby(s, key=is_consecutive): - first = last = next(chars) - last = collections.deque( - itertools.chain(iter([last]), chars), maxlen=1 - ).pop() - if first == last: - ret.append(escape_re_range_char(first)) - else: - sep = "" if ord(last) == ord(first) + 1 else "-" - ret.append( - "{}{}{}".format( - escape_re_range_char(first), sep, escape_re_range_char(last) - ) - ) - else: - ret = [escape_re_range_char(c) for c in s] - - return "".join(ret) - - -def _flatten(ll: list) -> list: - ret = [] - for i in ll: - if isinstance(i, list): - ret.extend(_flatten(i)) - else: - ret.append(i) - return ret diff --git a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/zipp.py b/venv/lib/python3.10/site-packages/pkg_resources/_vendor/zipp.py deleted file mode 100644 index 26b723c..0000000 --- a/venv/lib/python3.10/site-packages/pkg_resources/_vendor/zipp.py +++ /dev/null @@ -1,329 +0,0 @@ -import io -import posixpath -import zipfile -import itertools -import contextlib -import sys -import pathlib - -if sys.version_info < (3, 7): - from collections import OrderedDict -else: - OrderedDict = dict - - -__all__ = ['Path'] - - -def _parents(path): - """ - Given a path with elements separated by - posixpath.sep, generate all parents of that path. 
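``_collapse_string_to_ranges`` above is what compresses large character sets into compact regex ranges; a hypothetical direct call (it is a private helper, importable in the public package as ``pyparsing.util._collapse_string_to_ranges``)::

    from pyparsing.util import _collapse_string_to_ranges

    # consecutive runs collapse to regex ranges: a-d and f-h
    print(_collapse_string_to_ranges("abcdfgh"))  # -> a-df-h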
- - >>> list(_parents('b/d')) - ['b'] - >>> list(_parents('/b/d/')) - ['/b'] - >>> list(_parents('b/d/f/')) - ['b/d', 'b'] - >>> list(_parents('b')) - [] - >>> list(_parents('')) - [] - """ - return itertools.islice(_ancestry(path), 1, None) - - -def _ancestry(path): - """ - Given a path with elements separated by - posixpath.sep, generate all elements of that path - - >>> list(_ancestry('b/d')) - ['b/d', 'b'] - >>> list(_ancestry('/b/d/')) - ['/b/d', '/b'] - >>> list(_ancestry('b/d/f/')) - ['b/d/f', 'b/d', 'b'] - >>> list(_ancestry('b')) - ['b'] - >>> list(_ancestry('')) - [] - """ - path = path.rstrip(posixpath.sep) - while path and path != posixpath.sep: - yield path - path, tail = posixpath.split(path) - - -_dedupe = OrderedDict.fromkeys -"""Deduplicate an iterable in original order""" - - -def _difference(minuend, subtrahend): - """ - Return items in minuend not in subtrahend, retaining order - with O(1) lookup. - """ - return itertools.filterfalse(set(subtrahend).__contains__, minuend) - - -class CompleteDirs(zipfile.ZipFile): - """ - A ZipFile subclass that ensures that implied directories - are always included in the namelist. - """ - - @staticmethod - def _implied_dirs(names): - parents = itertools.chain.from_iterable(map(_parents, names)) - as_dirs = (p + posixpath.sep for p in parents) - return _dedupe(_difference(as_dirs, names)) - - def namelist(self): - names = super(CompleteDirs, self).namelist() - return names + list(self._implied_dirs(names)) - - def _name_set(self): - return set(self.namelist()) - - def resolve_dir(self, name): - """ - If the name represents a directory, return that name - as a directory (with the trailing slash). - """ - names = self._name_set() - dirname = name + '/' - dir_match = name not in names and dirname in names - return dirname if dir_match else name - - @classmethod - def make(cls, source): - """ - Given a source (filename or zipfile), return an - appropriate CompleteDirs subclass. - """ - if isinstance(source, CompleteDirs): - return source - - if not isinstance(source, zipfile.ZipFile): - return cls(_pathlib_compat(source)) - - # Only allow for FastLookup when supplied zipfile is read-only - if 'r' not in source.mode: - cls = CompleteDirs - - source.__class__ = cls - return source - - -class FastLookup(CompleteDirs): - """ - ZipFile subclass to ensure implicit - dirs exist and are resolved rapidly. - """ - - def namelist(self): - with contextlib.suppress(AttributeError): - return self.__names - self.__names = super(FastLookup, self).namelist() - return self.__names - - def _name_set(self): - with contextlib.suppress(AttributeError): - return self.__lookup - self.__lookup = super(FastLookup, self)._name_set() - return self.__lookup - - -def _pathlib_compat(path): - """ - For path-like objects, convert to a filename for compatibility - on Python 3.6.1 and earlier. - """ - try: - return path.__fspath__() - except AttributeError: - return str(path) - - -class Path: - """ - A pathlib-compatible interface for zip files. - - Consider a zip file with this structure:: - - . - ├── a.txt - └── b - ├── c.txt - └── d - └── e.txt - - >>> data = io.BytesIO() - >>> zf = zipfile.ZipFile(data, 'w') - >>> zf.writestr('a.txt', 'content of a') - >>> zf.writestr('b/c.txt', 'content of c') - >>> zf.writestr('b/d/e.txt', 'content of e') - >>> zf.filename = 'mem/abcde.zip' - - Path accepts the zipfile object itself or a filename - - >>> root = Path(zf) - - From there, several path operations are available. 
- - Directory iteration (including the zip file itself): - - >>> a, b = root.iterdir() - >>> a - Path('mem/abcde.zip', 'a.txt') - >>> b - Path('mem/abcde.zip', 'b/') - - name property: - - >>> b.name - 'b' - - join with divide operator: - - >>> c = b / 'c.txt' - >>> c - Path('mem/abcde.zip', 'b/c.txt') - >>> c.name - 'c.txt' - - Read text: - - >>> c.read_text() - 'content of c' - - existence: - - >>> c.exists() - True - >>> (b / 'missing.txt').exists() - False - - Coercion to string: - - >>> import os - >>> str(c).replace(os.sep, posixpath.sep) - 'mem/abcde.zip/b/c.txt' - - At the root, ``name``, ``filename``, and ``parent`` - resolve to the zipfile. Note these attributes are not - valid and will raise a ``ValueError`` if the zipfile - has no filename. - - >>> root.name - 'abcde.zip' - >>> str(root.filename).replace(os.sep, posixpath.sep) - 'mem/abcde.zip' - >>> str(root.parent) - 'mem' - """ - - __repr = "{self.__class__.__name__}({self.root.filename!r}, {self.at!r})" - - def __init__(self, root, at=""): - """ - Construct a Path from a ZipFile or filename. - - Note: When the source is an existing ZipFile object, - its type (__class__) will be mutated to a - specialized type. If the caller wishes to retain the - original type, the caller should either create a - separate ZipFile object or pass a filename. - """ - self.root = FastLookup.make(root) - self.at = at - - def open(self, mode='r', *args, pwd=None, **kwargs): - """ - Open this entry as text or binary following the semantics - of ``pathlib.Path.open()`` by passing arguments through - to io.TextIOWrapper(). - """ - if self.is_dir(): - raise IsADirectoryError(self) - zip_mode = mode[0] - if not self.exists() and zip_mode == 'r': - raise FileNotFoundError(self) - stream = self.root.open(self.at, zip_mode, pwd=pwd) - if 'b' in mode: - if args or kwargs: - raise ValueError("encoding args invalid for binary operation") - return stream - return io.TextIOWrapper(stream, *args, **kwargs) - - @property - def name(self): - return pathlib.Path(self.at).name or self.filename.name - - @property - def suffix(self): - return pathlib.Path(self.at).suffix or self.filename.suffix - - @property - def suffixes(self): - return pathlib.Path(self.at).suffixes or self.filename.suffixes - - @property - def stem(self): - return pathlib.Path(self.at).stem or self.filename.stem - - @property - def filename(self): - return pathlib.Path(self.root.filename).joinpath(self.at) - - def read_text(self, *args, **kwargs): - with self.open('r', *args, **kwargs) as strm: - return strm.read() - - def read_bytes(self): - with self.open('rb') as strm: - return strm.read() - - def _is_child(self, path): - return posixpath.dirname(path.at.rstrip("/")) == self.at.rstrip("/") - - def _next(self, at): - return self.__class__(self.root, at) - - def is_dir(self): - return not self.at or self.at.endswith("/") - - def is_file(self): - return self.exists() and not self.is_dir() - - def exists(self): - return self.at in self.root._name_set() - - def iterdir(self): - if not self.is_dir(): - raise ValueError("Can't listdir a file") - subs = map(self._next, self.root.namelist()) - return filter(self._is_child, subs) - - def __str__(self): - return posixpath.join(self.root.filename, self.at) - - def __repr__(self): - return self.__repr.format(self=self) - - def joinpath(self, *other): - next = posixpath.join(self.at, *map(_pathlib_compat, other)) - return self._next(self.root.resolve_dir(next)) - - __truediv__ = joinpath - - @property - def parent(self): - if not self.at: - return 
self.filename.parent - parent_at = posixpath.dirname(self.at.rstrip('/')) - if parent_at: - parent_at += '/' - return self._next(parent_at) diff --git a/venv/lib/python3.10/site-packages/pkg_resources/extern/__init__.py b/venv/lib/python3.10/site-packages/pkg_resources/extern/__init__.py index 70897ee..fed5929 100644 --- a/venv/lib/python3.10/site-packages/pkg_resources/extern/__init__.py +++ b/venv/lib/python3.10/site-packages/pkg_resources/extern/__init__.py @@ -69,8 +69,5 @@ def install(self): sys.meta_path.append(self) -names = ( - 'packaging', 'pyparsing', 'appdirs', 'jaraco', 'importlib_resources', - 'more_itertools', -) +names = 'packaging', 'pyparsing', 'appdirs' VendorImporter(__name__, names).install() diff --git a/venv/lib/python3.10/site-packages/platformdirs-3.10.0.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/platformdirs-3.10.0.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/venv/lib/python3.10/site-packages/platformdirs-3.10.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/lib/python3.10/site-packages/platformdirs-3.10.0.dist-info/METADATA b/venv/lib/python3.10/site-packages/platformdirs-3.10.0.dist-info/METADATA new file mode 100644 index 0000000..a8090d3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/platformdirs-3.10.0.dist-info/METADATA @@ -0,0 +1,319 @@ +Metadata-Version: 2.1 +Name: platformdirs +Version: 3.10.0 +Summary: A small Python package for determining appropriate platform-specific dirs, e.g. a "user data dir". +Project-URL: Documentation, https://platformdirs.readthedocs.io +Project-URL: Homepage, https://github.com/platformdirs/platformdirs +Project-URL: Source, https://github.com/platformdirs/platformdirs +Project-URL: Tracker, https://github.com/platformdirs/platformdirs/issues +Maintainer-email: Bernát Gábor , Julian Berman , Ofek Lev , Ronny Pfannschmidt +License-Expression: MIT +License-File: LICENSE +Keywords: appdirs,application,cache,directory,log,user +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Requires-Python: >=3.7 +Requires-Dist: typing-extensions>=4.7.1; python_version < '3.8' +Provides-Extra: docs +Requires-Dist: furo>=2023.7.26; extra == 'docs' +Requires-Dist: proselint>=0.13; extra == 'docs' +Requires-Dist: sphinx-autodoc-typehints>=1.24; extra == 'docs' +Requires-Dist: sphinx>=7.1.1; extra == 'docs' +Provides-Extra: test +Requires-Dist: appdirs==1.4.4; extra == 'test' +Requires-Dist: covdefaults>=2.3; extra == 'test' +Requires-Dist: pytest-cov>=4.1; extra == 'test' +Requires-Dist: pytest-mock>=3.11.1; extra == 'test' +Requires-Dist: pytest>=7.4; extra == 'test' +Description-Content-Type: text/x-rst + +The problem +=========== + +.. 
image:: https://github.com/platformdirs/platformdirs/workflows/Test/badge.svg
+   :target: https://github.com/platformdirs/platformdirs/actions?query=workflow%3ATest
+
+When writing desktop applications, finding the right location to store user data
+and configuration varies per platform. Even for single-platform apps, there
+may be plenty of nuances in figuring out the right location.
+
+For example, if running on macOS, you should use::
+
+    ~/Library/Application Support/<AppName>
+
+If on Windows (at least English Win) that should be::
+
+    C:\Documents and Settings\<User>\Application Data\Local Settings\<AppAuthor>\<AppName>
+
+or possibly::
+
+    C:\Documents and Settings\<User>\Application Data\<AppAuthor>\<AppName>
+
+for `roaming profiles `_ but that is another story.
+
+On Linux (and other Unices), according to the `XDG Basedir Spec`_, it should be::
+
+    ~/.local/share/<AppName>
+
+.. _XDG Basedir Spec: https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html
+
+``platformdirs`` to the rescue
+==============================
+
+This kind of thing is what the ``platformdirs`` package is for.
+``platformdirs`` will help you choose an appropriate:
+
+- user data dir (``user_data_dir``)
+- user config dir (``user_config_dir``)
+- user cache dir (``user_cache_dir``)
+- site data dir (``site_data_dir``)
+- site config dir (``site_config_dir``)
+- user log dir (``user_log_dir``)
+- user documents dir (``user_documents_dir``)
+- user downloads dir (``user_downloads_dir``)
+- user pictures dir (``user_pictures_dir``)
+- user videos dir (``user_videos_dir``)
+- user music dir (``user_music_dir``)
+- user desktop dir (``user_desktop_dir``)
+- user runtime dir (``user_runtime_dir``)
+
+And also:
+
+- Is slightly opinionated on the directory names used. Look for "OPINION" in
+  documentation and code for when an opinion is being applied.
+
+Example output
+==============
+
+On macOS:
+
+.. code-block:: pycon
+
+    >>> from platformdirs import *
+    >>> appname = "SuperApp"
+    >>> appauthor = "Acme"
+    >>> user_data_dir(appname, appauthor)
+    '/Users/trentm/Library/Application Support/SuperApp'
+    >>> site_data_dir(appname, appauthor)
+    '/Library/Application Support/SuperApp'
+    >>> user_cache_dir(appname, appauthor)
+    '/Users/trentm/Library/Caches/SuperApp'
+    >>> user_log_dir(appname, appauthor)
+    '/Users/trentm/Library/Logs/SuperApp'
+    >>> user_documents_dir()
+    '/Users/trentm/Documents'
+    >>> user_downloads_dir()
+    '/Users/trentm/Downloads'
+    >>> user_pictures_dir()
+    '/Users/trentm/Pictures'
+    >>> user_videos_dir()
+    '/Users/trentm/Movies'
+    >>> user_music_dir()
+    '/Users/trentm/Music'
+    >>> user_desktop_dir()
+    '/Users/trentm/Desktop'
+    >>> user_runtime_dir(appname, appauthor)
+    '/Users/trentm/Library/Caches/TemporaryItems/SuperApp'
+
+On Windows:
+
+..
code-block:: pycon
+
+    >>> from platformdirs import *
+    >>> appname = "SuperApp"
+    >>> appauthor = "Acme"
+    >>> user_data_dir(appname, appauthor)
+    'C:\\Users\\trentm\\AppData\\Local\\Acme\\SuperApp'
+    >>> user_data_dir(appname, appauthor, roaming=True)
+    'C:\\Users\\trentm\\AppData\\Roaming\\Acme\\SuperApp'
+    >>> user_cache_dir(appname, appauthor)
+    'C:\\Users\\trentm\\AppData\\Local\\Acme\\SuperApp\\Cache'
+    >>> user_log_dir(appname, appauthor)
+    'C:\\Users\\trentm\\AppData\\Local\\Acme\\SuperApp\\Logs'
+    >>> user_documents_dir()
+    'C:\\Users\\trentm\\Documents'
+    >>> user_downloads_dir()
+    'C:\\Users\\trentm\\Downloads'
+    >>> user_pictures_dir()
+    'C:\\Users\\trentm\\Pictures'
+    >>> user_videos_dir()
+    'C:\\Users\\trentm\\Videos'
+    >>> user_music_dir()
+    'C:\\Users\\trentm\\Music'
+    >>> user_desktop_dir()
+    'C:\\Users\\trentm\\Desktop'
+    >>> user_runtime_dir(appname, appauthor)
+    'C:\\Users\\trentm\\AppData\\Local\\Temp\\Acme\\SuperApp'
+
+On Linux:
+
+.. code-block:: pycon
+
+    >>> from platformdirs import *
+    >>> appname = "SuperApp"
+    >>> appauthor = "Acme"
+    >>> user_data_dir(appname, appauthor)
+    '/home/trentm/.local/share/SuperApp'
+    >>> site_data_dir(appname, appauthor)
+    '/usr/local/share/SuperApp'
+    >>> site_data_dir(appname, appauthor, multipath=True)
+    '/usr/local/share/SuperApp:/usr/share/SuperApp'
+    >>> user_cache_dir(appname, appauthor)
+    '/home/trentm/.cache/SuperApp'
+    >>> user_log_dir(appname, appauthor)
+    '/home/trentm/.cache/SuperApp/log'
+    >>> user_config_dir(appname)
+    '/home/trentm/.config/SuperApp'
+    >>> user_documents_dir()
+    '/home/trentm/Documents'
+    >>> user_downloads_dir()
+    '/home/trentm/Downloads'
+    >>> user_pictures_dir()
+    '/home/trentm/Pictures'
+    >>> user_videos_dir()
+    '/home/trentm/Videos'
+    >>> user_music_dir()
+    '/home/trentm/Music'
+    >>> user_desktop_dir()
+    '/home/trentm/Desktop'
+    >>> user_runtime_dir(appname, appauthor)
+    '/run/user/{os.getuid()}/SuperApp'
+    >>> site_config_dir(appname)
+    '/etc/xdg/SuperApp'
+    >>> os.environ["XDG_CONFIG_DIRS"] = "/etc:/usr/local/etc"
+    >>> site_config_dir(appname, multipath=True)
+    '/etc/SuperApp:/usr/local/etc/SuperApp'
+
+On Android::
+
+    >>> from platformdirs import *
+    >>> appname = "SuperApp"
+    >>> appauthor = "Acme"
+    >>> user_data_dir(appname, appauthor)
+    '/data/data/com.myApp/files/SuperApp'
+    >>> user_cache_dir(appname, appauthor)
+    '/data/data/com.myApp/cache/SuperApp'
+    >>> user_log_dir(appname, appauthor)
+    '/data/data/com.myApp/cache/SuperApp/log'
+    >>> user_config_dir(appname)
+    '/data/data/com.myApp/shared_prefs/SuperApp'
+    >>> user_documents_dir()
+    '/storage/emulated/0/Documents'
+    >>> user_downloads_dir()
+    '/storage/emulated/0/Downloads'
+    >>> user_pictures_dir()
+    '/storage/emulated/0/Pictures'
+    >>> user_videos_dir()
+    '/storage/emulated/0/DCIM/Camera'
+    >>> user_music_dir()
+    '/storage/emulated/0/Music'
+    >>> user_desktop_dir()
+    '/storage/emulated/0/Desktop'
+    >>> user_runtime_dir(appname, appauthor)
+    '/data/data/com.myApp/cache/SuperApp/tmp'
+
+Note: Some Android apps, such as Termux and Pydroid, are used as shells. End
+users use these apps to emulate a Linux environment. Platformdirs uses the
+presence of the ``SHELL`` environment variable to differentiate between
+general Android apps and Android apps used as shells. Shell Android apps also
+support ``XDG_*`` environment variables.
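+
+For instance (an illustrative Termux-style session, not library documentation;
+the exact prefix and home directory vary by app), with ``SHELL`` present the
+Unix backend is selected, so the usual XDG defaults and overrides apply:
+
+.. code-block:: pycon
+
+    >>> import os
+    >>> os.environ["SHELL"]  # set by shell apps such as Termux
+    '/data/data/com.termux/files/usr/bin/bash'
+    >>> from platformdirs import user_data_dir
+    >>> user_data_dir("SuperApp", "Acme")
+    '/data/data/com.termux/files/home/.local/share/SuperApp'
+
+
+``PlatformDirs`` for convenience
+================================
+
+..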
code-block:: pycon
+
+    >>> from platformdirs import PlatformDirs
+    >>> dirs = PlatformDirs("SuperApp", "Acme")
+    >>> dirs.user_data_dir
+    '/Users/trentm/Library/Application Support/SuperApp'
+    >>> dirs.site_data_dir
+    '/Library/Application Support/SuperApp'
+    >>> dirs.user_cache_dir
+    '/Users/trentm/Library/Caches/SuperApp'
+    >>> dirs.user_log_dir
+    '/Users/trentm/Library/Logs/SuperApp'
+    >>> dirs.user_documents_dir
+    '/Users/trentm/Documents'
+    >>> dirs.user_downloads_dir
+    '/Users/trentm/Downloads'
+    >>> dirs.user_pictures_dir
+    '/Users/trentm/Pictures'
+    >>> dirs.user_videos_dir
+    '/Users/trentm/Movies'
+    >>> dirs.user_music_dir
+    '/Users/trentm/Music'
+    >>> dirs.user_desktop_dir
+    '/Users/trentm/Desktop'
+    >>> dirs.user_runtime_dir
+    '/Users/trentm/Library/Caches/TemporaryItems/SuperApp'
+
+Per-version isolation
+=====================
+
+If you have multiple versions of your app in use that you want to be
+able to run side-by-side, then you may want version isolation for these
+dirs::
+
+    >>> from platformdirs import PlatformDirs
+    >>> dirs = PlatformDirs("SuperApp", "Acme", version="1.0")
+    >>> dirs.user_data_dir
+    '/Users/trentm/Library/Application Support/SuperApp/1.0'
+    >>> dirs.site_data_dir
+    '/Library/Application Support/SuperApp/1.0'
+    >>> dirs.user_cache_dir
+    '/Users/trentm/Library/Caches/SuperApp/1.0'
+    >>> dirs.user_log_dir
+    '/Users/trentm/Library/Logs/SuperApp/1.0'
+    >>> dirs.user_documents_dir
+    '/Users/trentm/Documents'
+    >>> dirs.user_downloads_dir
+    '/Users/trentm/Downloads'
+    >>> dirs.user_pictures_dir
+    '/Users/trentm/Pictures'
+    >>> dirs.user_videos_dir
+    '/Users/trentm/Movies'
+    >>> dirs.user_music_dir
+    '/Users/trentm/Music'
+    >>> dirs.user_desktop_dir
+    '/Users/trentm/Desktop'
+    >>> dirs.user_runtime_dir
+    '/Users/trentm/Library/Caches/TemporaryItems/SuperApp/1.0'
+
+Be wary of using this for configuration files though; you'll need to handle
+migrating configuration files manually.
+
+Why this Fork?
+==============
+
+This repository is a friendly fork of the wonderful work started by
+`ActiveState `_ who created
+``appdirs``, this package's ancestor.
+
+Maintaining an open source project is no easy task, particularly
+from within an organization, and the Python community is indebted
+to ``appdirs`` (and to Trent Mick and Jeff Rouse in particular) for
+creating an incredibly useful, simple module, as evidenced by the large
+number of users it has attracted over the years.
+
+Nonetheless, given the number of long-standing open issues
+and pull requests, and no clear path towards `ensuring
+that maintenance of the package would continue or grow
+`_, this fork was
+created.
+
+Contributions are most welcome.
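+
+As a follow-up to the per-version isolation caveat above, a minimal migration
+sketch (the ``migrate_config`` helper and the version strings are hypothetical,
+not part of platformdirs):
+
+.. code-block:: python
+
+    import shutil
+    from pathlib import Path
+
+    from platformdirs import PlatformDirs
+
+    def migrate_config(app: str, author: str, old: str, new: str) -> None:
+        """Copy the previous version's config dir if the new one is absent."""
+        old_dir = Path(PlatformDirs(app, author, version=old).user_config_dir)
+        new_dir = Path(PlatformDirs(app, author, version=new).user_config_dir)
+        if old_dir.is_dir() and not new_dir.exists():
+            shutil.copytree(old_dir, new_dir)
+
+    migrate_config("SuperApp", "Acme", old="1.0", new="2.0")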
diff --git a/venv/lib/python3.10/site-packages/platformdirs-3.10.0.dist-info/RECORD b/venv/lib/python3.10/site-packages/platformdirs-3.10.0.dist-info/RECORD new file mode 100644 index 0000000..e7a4b44 --- /dev/null +++ b/venv/lib/python3.10/site-packages/platformdirs-3.10.0.dist-info/RECORD @@ -0,0 +1,22 @@ +platformdirs-3.10.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +platformdirs-3.10.0.dist-info/METADATA,sha256=q18lpTNH4xbbBxuo2TnArGAHsmoWizXjy_8o5l4fqB4,11482 +platformdirs-3.10.0.dist-info/RECORD,, +platformdirs-3.10.0.dist-info/WHEEL,sha256=9QBuHhg6FNW7lppboF2vKVbCGTVzsFykgRQjjlajrhA,87 +platformdirs-3.10.0.dist-info/licenses/LICENSE,sha256=KeD9YukphQ6G6yjD_czwzv30-pSHkBHP-z0NS-1tTbY,1089 +platformdirs/__init__.py,sha256=9u_kqpChtOnpF_1hqFn8hEFNsIOkle0eDWjaWO3qHG8,22285 +platformdirs/__main__.py,sha256=u6HYqz1l2RHhRXeUCYpDxoRPaqJlY_81wsUakfwM-ps,1488 +platformdirs/__pycache__/__init__.cpython-310.pyc,, +platformdirs/__pycache__/__main__.cpython-310.pyc,, +platformdirs/__pycache__/android.cpython-310.pyc,, +platformdirs/__pycache__/api.cpython-310.pyc,, +platformdirs/__pycache__/macos.cpython-310.pyc,, +platformdirs/__pycache__/unix.cpython-310.pyc,, +platformdirs/__pycache__/version.cpython-310.pyc,, +platformdirs/__pycache__/windows.cpython-310.pyc,, +platformdirs/android.py,sha256=68BOo0S0GlblgtD46JWtaLs4yXFTuxnNDGPpsRv4Kp4,7581 +platformdirs/api.py,sha256=wG7Y6uuk6vgGvuJ53zh3e7p3h0LHxvheiTpE8uU58UQ,7686 +platformdirs/macos.py,sha256=PVgRkjfPzGz2IzP1F-cm8nkSUt65LjWAVqFJVs6Ogtw,4049 +platformdirs/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +platformdirs/unix.py,sha256=ocgaYzR0RscUwFU3_6E_C5Y88fwvIHsICkOnmrm_L5Q,10042 +platformdirs/version.py,sha256=o0lWC4c2oLf8b_SD3RzZENKYMC2oAa3ILKAWdi5XiBQ,162 +platformdirs/windows.py,sha256=PeoPtOhy0hub3H6tH0ZRrLFRUZUeowl0UCTGyV05a_w,10006 diff --git a/venv/lib/python3.10/site-packages/platformdirs-3.10.0.dist-info/WHEEL b/venv/lib/python3.10/site-packages/platformdirs-3.10.0.dist-info/WHEEL new file mode 100644 index 0000000..ba1a8af --- /dev/null +++ b/venv/lib/python3.10/site-packages/platformdirs-3.10.0.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: hatchling 1.18.0 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/venv/lib/python3.10/site-packages/platformdirs-3.10.0.dist-info/licenses/LICENSE b/venv/lib/python3.10/site-packages/platformdirs-3.10.0.dist-info/licenses/LICENSE new file mode 100644 index 0000000..f35fed9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/platformdirs-3.10.0.dist-info/licenses/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2010-202x The platformdirs developers + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
diff --git a/venv/lib/python3.10/site-packages/platformdirs/__init__.py b/venv/lib/python3.10/site-packages/platformdirs/__init__.py
new file mode 100644
index 0000000..3d5a5bd
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/platformdirs/__init__.py
@@ -0,0 +1,628 @@
+"""
+Utilities for determining application-specific dirs.
+See <https://github.com/platformdirs/platformdirs> for details and usage.
+"""
+from __future__ import annotations
+
+import os
+import sys
+from typing import TYPE_CHECKING
+
+from .api import PlatformDirsABC
+from .version import __version__
+from .version import __version_tuple__ as __version_info__
+
+if TYPE_CHECKING:
+    from pathlib import Path
+
+    if sys.version_info >= (3, 8):  # pragma: no cover (py38+)
+        from typing import Literal
+    else:  # pragma: no cover (py38+)
+        from typing_extensions import Literal
+
+
+def _set_platform_dir_class() -> type[PlatformDirsABC]:
+    if sys.platform == "win32":
+        from platformdirs.windows import Windows as Result
+    elif sys.platform == "darwin":
+        from platformdirs.macos import MacOS as Result
+    else:
+        from platformdirs.unix import Unix as Result
+
+    if os.getenv("ANDROID_DATA") == "/data" and os.getenv("ANDROID_ROOT") == "/system":
+        if os.getenv("SHELL") or os.getenv("PREFIX"):
+            return Result
+
+        from platformdirs.android import _android_folder
+
+        if _android_folder() is not None:
+            from platformdirs.android import Android
+
+            return Android  # return to avoid redefinition of result
+
+    return Result
+
+
+PlatformDirs = _set_platform_dir_class()  #: Currently active platform
+AppDirs = PlatformDirs  #: Backwards compatibility with appdirs
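+
+# Illustrative check (a hypothetical session, not part of the library; the
+# resolved class depends on the host OS, e.g. 'Unix' on a typical Linux box):
+#
+#     >>> import platformdirs
+#     >>> platformdirs.PlatformDirs.__name__
+#     'Unix'
+
+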
+def user_data_dir(
+    appname: str | None = None,
+    appauthor: str | None | Literal[False] = None,
+    version: str | None = None,
+    roaming: bool = False,  # noqa: FBT001, FBT002
+    ensure_exists: bool = False,  # noqa: FBT001, FBT002
+) -> str:
+    """
+    :param appname: See `appname `.
+    :param appauthor: See `appauthor `.
+    :param version: See `version `.
+    :param roaming: See `roaming `.
+    :param ensure_exists: See `ensure_exists `.
+    :returns: data directory tied to the user
+    """
+    return PlatformDirs(
+        appname=appname,
+        appauthor=appauthor,
+        version=version,
+        roaming=roaming,
+        ensure_exists=ensure_exists,
+    ).user_data_dir
+
+
+def site_data_dir(
+    appname: str | None = None,
+    appauthor: str | None | Literal[False] = None,
+    version: str | None = None,
+    multipath: bool = False,  # noqa: FBT001, FBT002
+    ensure_exists: bool = False,  # noqa: FBT001, FBT002
+) -> str:
+    """
+    :param appname: See `appname `.
+    :param appauthor: See `appauthor `.
+    :param version: See `version `.
+    :param multipath: See `multipath `.
+    :param ensure_exists: See `ensure_exists `.
+    :returns: data directory shared by users
+    """
+    return PlatformDirs(
+        appname=appname,
+        appauthor=appauthor,
+        version=version,
+        multipath=multipath,
+        ensure_exists=ensure_exists,
+    ).site_data_dir
+
+
+def user_config_dir(
+    appname: str | None = None,
+    appauthor: str | None | Literal[False] = None,
+    version: str | None = None,
+    roaming: bool = False,  # noqa: FBT001, FBT002
+    ensure_exists: bool = False,  # noqa: FBT001, FBT002
+) -> str:
+    """
+    :param appname: See `appname `.
+    :param appauthor: See `appauthor `.
+    :param version: See `version `.
+    :param roaming: See `roaming `.
+    :param ensure_exists: See `ensure_exists `.
+    :returns: config directory tied to the user
+    """
+    return PlatformDirs(
+        appname=appname,
+        appauthor=appauthor,
+        version=version,
+        roaming=roaming,
+        ensure_exists=ensure_exists,
+    ).user_config_dir
+
+
+def site_config_dir(
+    appname: str | None = None,
+    appauthor: str | None | Literal[False] = None,
+    version: str | None = None,
+    multipath: bool = False,  # noqa: FBT001, FBT002
+    ensure_exists: bool = False,  # noqa: FBT001, FBT002
+) -> str:
+    """
+    :param appname: See `appname `.
+    :param appauthor: See `appauthor `.
+    :param version: See `version `.
+    :param multipath: See `multipath `.
+    :param ensure_exists: See `ensure_exists `.
+    :returns: config directory shared by the users
+    """
+    return PlatformDirs(
+        appname=appname,
+        appauthor=appauthor,
+        version=version,
+        multipath=multipath,
+        ensure_exists=ensure_exists,
+    ).site_config_dir
+
+
+def user_cache_dir(
+    appname: str | None = None,
+    appauthor: str | None | Literal[False] = None,
+    version: str | None = None,
+    opinion: bool = True,  # noqa: FBT001, FBT002
+    ensure_exists: bool = False,  # noqa: FBT001, FBT002
+) -> str:
+    """
+    :param appname: See `appname `.
+    :param appauthor: See `appauthor `.
+    :param version: See `version `.
+    :param opinion: See `opinion `.
+    :param ensure_exists: See `ensure_exists `.
+    :returns: cache directory tied to the user
+    """
+    return PlatformDirs(
+        appname=appname,
+        appauthor=appauthor,
+        version=version,
+        opinion=opinion,
+        ensure_exists=ensure_exists,
+    ).user_cache_dir
+
+
+def site_cache_dir(
+    appname: str | None = None,
+    appauthor: str | None | Literal[False] = None,
+    version: str | None = None,
+    opinion: bool = True,  # noqa: FBT001, FBT002
+    ensure_exists: bool = False,  # noqa: FBT001, FBT002
+) -> str:
+    """
+    :param appname: See `appname `.
+    :param appauthor: See `appauthor `.
+    :param version: See `version `.
+    :param opinion: See `opinion `.
+    :param ensure_exists: See `ensure_exists `.
+    :returns: cache directory shared by users
+    """
+    return PlatformDirs(
+        appname=appname,
+        appauthor=appauthor,
+        version=version,
+        opinion=opinion,
+        ensure_exists=ensure_exists,
+    ).site_cache_dir
+
+
+def user_state_dir(
+    appname: str | None = None,
+    appauthor: str | None | Literal[False] = None,
+    version: str | None = None,
+    roaming: bool = False,  # noqa: FBT001, FBT002
+    ensure_exists: bool = False,  # noqa: FBT001, FBT002
+) -> str:
+    """
+    :param appname: See `appname `.
+    :param appauthor: See `appauthor `.
+    :param version: See `version `.
+    :param roaming: See `roaming `.
+    :param ensure_exists: See `ensure_exists `.
+    :returns: state directory tied to the user
+    """
+    return PlatformDirs(
+        appname=appname,
+        appauthor=appauthor,
+        version=version,
+        roaming=roaming,
+        ensure_exists=ensure_exists,
+    ).user_state_dir
+
+
+def user_log_dir(
+    appname: str | None = None,
+    appauthor: str | None | Literal[False] = None,
+    version: str | None = None,
+    opinion: bool = True,  # noqa: FBT001, FBT002
+    ensure_exists: bool = False,  # noqa: FBT001, FBT002
+) -> str:
+    """
+    :param appname: See `appname `.
+    :param appauthor: See `appauthor `.
+    :param version: See `version `.
+    :param opinion: See `opinion `.
+    :param ensure_exists: See `ensure_exists `.
+ :returns: log directory tied to the user + """ + return PlatformDirs( + appname=appname, + appauthor=appauthor, + version=version, + opinion=opinion, + ensure_exists=ensure_exists, + ).user_log_dir + + +def user_documents_dir() -> str: + """:returns: documents directory tied to the user""" + return PlatformDirs().user_documents_dir + + +def user_downloads_dir() -> str: + """:returns: downloads directory tied to the user""" + return PlatformDirs().user_downloads_dir + + +def user_pictures_dir() -> str: + """:returns: pictures directory tied to the user""" + return PlatformDirs().user_pictures_dir + + +def user_videos_dir() -> str: + """:returns: videos directory tied to the user""" + return PlatformDirs().user_videos_dir + + +def user_music_dir() -> str: + """:returns: music directory tied to the user""" + return PlatformDirs().user_music_dir + + +def user_desktop_dir() -> str: + """:returns: desktop directory tied to the user""" + return PlatformDirs().user_desktop_dir + + +def user_runtime_dir( + appname: str | None = None, + appauthor: str | None | Literal[False] = None, + version: str | None = None, + opinion: bool = True, # noqa: FBT001, FBT002 + ensure_exists: bool = False, # noqa: FBT001, FBT002 +) -> str: + """ + :param appname: See `appname `. + :param appauthor: See `appauthor `. + :param version: See `version `. + :param opinion: See `opinion `. + :param ensure_exists: See `ensure_exists `. + :returns: runtime directory tied to the user + """ + return PlatformDirs( + appname=appname, + appauthor=appauthor, + version=version, + opinion=opinion, + ensure_exists=ensure_exists, + ).user_runtime_dir + + +def site_runtime_dir( + appname: str | None = None, + appauthor: str | None | Literal[False] = None, + version: str | None = None, + opinion: bool = True, # noqa: FBT001, FBT002 + ensure_exists: bool = False, # noqa: FBT001, FBT002 +) -> str: + """ + :param appname: See `appname `. + :param appauthor: See `appauthor `. + :param version: See `version `. + :param opinion: See `opinion `. + :param ensure_exists: See `ensure_exists `. + :returns: runtime directory shared by users + """ + return PlatformDirs( + appname=appname, + appauthor=appauthor, + version=version, + opinion=opinion, + ensure_exists=ensure_exists, + ).site_runtime_dir + + +def user_data_path( + appname: str | None = None, + appauthor: str | None | Literal[False] = None, + version: str | None = None, + roaming: bool = False, # noqa: FBT001, FBT002 + ensure_exists: bool = False, # noqa: FBT001, FBT002 +) -> Path: + """ + :param appname: See `appname `. + :param appauthor: See `appauthor `. + :param version: See `version `. + :param roaming: See `roaming `. + :param ensure_exists: See `ensure_exists `. + :returns: data path tied to the user + """ + return PlatformDirs( + appname=appname, + appauthor=appauthor, + version=version, + roaming=roaming, + ensure_exists=ensure_exists, + ).user_data_path + + +def site_data_path( + appname: str | None = None, + appauthor: str | None | Literal[False] = None, + version: str | None = None, + multipath: bool = False, # noqa: FBT001, FBT002 + ensure_exists: bool = False, # noqa: FBT001, FBT002 +) -> Path: + """ + :param appname: See `appname `. + :param appauthor: See `appauthor `. + :param version: See `version `. + :param multipath: See `multipath `. + :param ensure_exists: See `ensure_exists `. 
+    :returns: data path shared by users
+    """
+    return PlatformDirs(
+        appname=appname,
+        appauthor=appauthor,
+        version=version,
+        multipath=multipath,
+        ensure_exists=ensure_exists,
+    ).site_data_path
+
+
+def user_config_path(
+    appname: str | None = None,
+    appauthor: str | None | Literal[False] = None,
+    version: str | None = None,
+    roaming: bool = False,  # noqa: FBT001, FBT002
+    ensure_exists: bool = False,  # noqa: FBT001, FBT002
+) -> Path:
+    """
+    :param appname: See `appname `.
+    :param appauthor: See `appauthor `.
+    :param version: See `version `.
+    :param roaming: See `roaming `.
+    :param ensure_exists: See `ensure_exists `.
+    :returns: config path tied to the user
+    """
+    return PlatformDirs(
+        appname=appname,
+        appauthor=appauthor,
+        version=version,
+        roaming=roaming,
+        ensure_exists=ensure_exists,
+    ).user_config_path
+
+
+def site_config_path(
+    appname: str | None = None,
+    appauthor: str | None | Literal[False] = None,
+    version: str | None = None,
+    multipath: bool = False,  # noqa: FBT001, FBT002
+    ensure_exists: bool = False,  # noqa: FBT001, FBT002
+) -> Path:
+    """
+    :param appname: See `appname `.
+    :param appauthor: See `appauthor `.
+    :param version: See `version `.
+    :param multipath: See `multipath `.
+    :param ensure_exists: See `ensure_exists `.
+    :returns: config path shared by the users
+    """
+    return PlatformDirs(
+        appname=appname,
+        appauthor=appauthor,
+        version=version,
+        multipath=multipath,
+        ensure_exists=ensure_exists,
+    ).site_config_path
+
+
+def site_cache_path(
+    appname: str | None = None,
+    appauthor: str | None | Literal[False] = None,
+    version: str | None = None,
+    opinion: bool = True,  # noqa: FBT001, FBT002
+    ensure_exists: bool = False,  # noqa: FBT001, FBT002
+) -> Path:
+    """
+    :param appname: See `appname `.
+    :param appauthor: See `appauthor `.
+    :param version: See `version `.
+    :param opinion: See `opinion `.
+    :param ensure_exists: See `ensure_exists `.
+    :returns: cache path shared by users
+    """
+    return PlatformDirs(
+        appname=appname,
+        appauthor=appauthor,
+        version=version,
+        opinion=opinion,
+        ensure_exists=ensure_exists,
+    ).site_cache_path
+
+
+def user_cache_path(
+    appname: str | None = None,
+    appauthor: str | None | Literal[False] = None,
+    version: str | None = None,
+    opinion: bool = True,  # noqa: FBT001, FBT002
+    ensure_exists: bool = False,  # noqa: FBT001, FBT002
+) -> Path:
+    """
+    :param appname: See `appname `.
+    :param appauthor: See `appauthor `.
+    :param version: See `version `.
+    :param opinion: See `opinion `.
+    :param ensure_exists: See `ensure_exists `.
+    :returns: cache path tied to the user
+    """
+    return PlatformDirs(
+        appname=appname,
+        appauthor=appauthor,
+        version=version,
+        opinion=opinion,
+        ensure_exists=ensure_exists,
+    ).user_cache_path
+
+
+def user_state_path(
+    appname: str | None = None,
+    appauthor: str | None | Literal[False] = None,
+    version: str | None = None,
+    roaming: bool = False,  # noqa: FBT001, FBT002
+    ensure_exists: bool = False,  # noqa: FBT001, FBT002
+) -> Path:
+    """
+    :param appname: See `appname `.
+    :param appauthor: See `appauthor `.
+    :param version: See `version `.
+    :param roaming: See `roaming `.
+    :param ensure_exists: See `ensure_exists `.
+    :returns: state path tied to the user
+    """
+    return PlatformDirs(
+        appname=appname,
+        appauthor=appauthor,
+        version=version,
+        roaming=roaming,
+        ensure_exists=ensure_exists,
+    ).user_state_path
+
+
+def user_log_path(
+    appname: str | None = None,
+    appauthor: str | None | Literal[False] = None,
+    version: str | None = None,
+    opinion: bool = True,  # noqa: FBT001, FBT002
+    ensure_exists: bool = False,  # noqa: FBT001, FBT002
+) -> Path:
+    """
+    :param appname: See `appname `.
+    :param appauthor: See `appauthor `.
+    :param version: See `version `.
+    :param opinion: See `opinion `.
+    :param ensure_exists: See `ensure_exists `.
+    :returns: log path tied to the user
+    """
+    return PlatformDirs(
+        appname=appname,
+        appauthor=appauthor,
+        version=version,
+        opinion=opinion,
+        ensure_exists=ensure_exists,
+    ).user_log_path
+
+
+def user_documents_path() -> Path:
+    """:returns: documents path tied to the user"""
+    return PlatformDirs().user_documents_path
+
+
+def user_downloads_path() -> Path:
+    """:returns: downloads path tied to the user"""
+    return PlatformDirs().user_downloads_path
+
+
+def user_pictures_path() -> Path:
+    """:returns: pictures path tied to the user"""
+    return PlatformDirs().user_pictures_path
+
+
+def user_videos_path() -> Path:
+    """:returns: videos path tied to the user"""
+    return PlatformDirs().user_videos_path
+
+
+def user_music_path() -> Path:
+    """:returns: music path tied to the user"""
+    return PlatformDirs().user_music_path
+
+
+def user_desktop_path() -> Path:
+    """:returns: desktop path tied to the user"""
+    return PlatformDirs().user_desktop_path
+
+
+def user_runtime_path(
+    appname: str | None = None,
+    appauthor: str | None | Literal[False] = None,
+    version: str | None = None,
+    opinion: bool = True,  # noqa: FBT001, FBT002
+    ensure_exists: bool = False,  # noqa: FBT001, FBT002
+) -> Path:
+    """
+    :param appname: See `appname `.
+    :param appauthor: See `appauthor `.
+    :param version: See `version `.
+    :param opinion: See `opinion `.
+    :param ensure_exists: See `ensure_exists `.
+    :returns: runtime path tied to the user
+    """
+    return PlatformDirs(
+        appname=appname,
+        appauthor=appauthor,
+        version=version,
+        opinion=opinion,
+        ensure_exists=ensure_exists,
+    ).user_runtime_path
+
+
+def site_runtime_path(
+    appname: str | None = None,
+    appauthor: str | None | Literal[False] = None,
+    version: str | None = None,
+    opinion: bool = True,  # noqa: FBT001, FBT002
+    ensure_exists: bool = False,  # noqa: FBT001, FBT002
+) -> Path:
+    """
+    :param appname: See `appname `.
+    :param appauthor: See `appauthor `.
+    :param version: See `version `.
+    :param opinion: See `opinion `.
+    :param ensure_exists: See `ensure_exists `.
+ :returns: runtime path shared by users + """ + return PlatformDirs( + appname=appname, + appauthor=appauthor, + version=version, + opinion=opinion, + ensure_exists=ensure_exists, + ).site_runtime_path + + +__all__ = [ + "__version__", + "__version_info__", + "PlatformDirs", + "AppDirs", + "PlatformDirsABC", + "user_data_dir", + "user_config_dir", + "user_cache_dir", + "user_state_dir", + "user_log_dir", + "user_documents_dir", + "user_downloads_dir", + "user_pictures_dir", + "user_videos_dir", + "user_music_dir", + "user_desktop_dir", + "user_runtime_dir", + "site_data_dir", + "site_config_dir", + "site_cache_dir", + "site_runtime_dir", + "user_data_path", + "user_config_path", + "user_cache_path", + "user_state_path", + "user_log_path", + "user_documents_path", + "user_downloads_path", + "user_pictures_path", + "user_videos_path", + "user_music_path", + "user_desktop_path", + "user_runtime_path", + "site_data_path", + "site_config_path", + "site_cache_path", + "site_runtime_path", +] diff --git a/venv/lib/python3.10/site-packages/platformdirs/__main__.py b/venv/lib/python3.10/site-packages/platformdirs/__main__.py new file mode 100644 index 0000000..3cefedb --- /dev/null +++ b/venv/lib/python3.10/site-packages/platformdirs/__main__.py @@ -0,0 +1,54 @@ +"""Main entry point.""" +from __future__ import annotations + +from platformdirs import PlatformDirs, __version__ + +PROPS = ( + "user_data_dir", + "user_config_dir", + "user_cache_dir", + "user_state_dir", + "user_log_dir", + "user_documents_dir", + "user_downloads_dir", + "user_pictures_dir", + "user_videos_dir", + "user_music_dir", + "user_runtime_dir", + "site_data_dir", + "site_config_dir", + "site_cache_dir", + "site_runtime_dir", +) + + +def main() -> None: + """Run main entry point.""" + app_name = "MyApp" + app_author = "MyCompany" + + print(f"-- platformdirs {__version__} --") # noqa: T201 + + print("-- app dirs (with optional 'version')") # noqa: T201 + dirs = PlatformDirs(app_name, app_author, version="1.0") + for prop in PROPS: + print(f"{prop}: {getattr(dirs, prop)}") # noqa: T201 + + print("\n-- app dirs (without optional 'version')") # noqa: T201 + dirs = PlatformDirs(app_name, app_author) + for prop in PROPS: + print(f"{prop}: {getattr(dirs, prop)}") # noqa: T201 + + print("\n-- app dirs (without optional 'appauthor')") # noqa: T201 + dirs = PlatformDirs(app_name) + for prop in PROPS: + print(f"{prop}: {getattr(dirs, prop)}") # noqa: T201 + + print("\n-- app dirs (with disabled 'appauthor')") # noqa: T201 + dirs = PlatformDirs(app_name, appauthor=False) + for prop in PROPS: + print(f"{prop}: {getattr(dirs, prop)}") # noqa: T201 + + +if __name__ == "__main__": + main() diff --git a/venv/lib/python3.10/site-packages/platformdirs/android.py b/venv/lib/python3.10/site-packages/platformdirs/android.py new file mode 100644 index 0000000..572559f --- /dev/null +++ b/venv/lib/python3.10/site-packages/platformdirs/android.py @@ -0,0 +1,220 @@ +"""Android.""" +from __future__ import annotations + +import os +import re +import sys +from functools import lru_cache +from typing import cast + +from .api import PlatformDirsABC + + +class Android(PlatformDirsABC): + """ + Follows the guidance `from here `_. Makes use of the + `appname `, + `version `, + `ensure_exists `. + """ + + @property + def user_data_dir(self) -> str: + """:return: data directory tied to the user, e.g. 
``/data/user/<userid>/<packagename>/files/<AppName>``"""
+        return self._append_app_name_and_version(cast(str, _android_folder()), "files")
+
+    @property
+    def site_data_dir(self) -> str:
+        """:return: data directory shared by users, same as `user_data_dir`"""
+        return self.user_data_dir
+
+    @property
+    def user_config_dir(self) -> str:
+        """
+        :return: config directory tied to the user, e.g. \
+        ``/data/user/<userid>/<packagename>/shared_prefs/<AppName>``
+        """
+        return self._append_app_name_and_version(cast(str, _android_folder()), "shared_prefs")
+
+    @property
+    def site_config_dir(self) -> str:
+        """:return: config directory shared by the users, same as `user_config_dir`"""
+        return self.user_config_dir
+
+    @property
+    def user_cache_dir(self) -> str:
+        """:return: cache directory tied to the user, e.g. ``/data/user/<userid>/<packagename>/cache/<AppName>``"""
+        return self._append_app_name_and_version(cast(str, _android_folder()), "cache")
+
+    @property
+    def site_cache_dir(self) -> str:
+        """:return: cache directory shared by users, same as `user_cache_dir`"""
+        return self.user_cache_dir
+
+    @property
+    def user_state_dir(self) -> str:
+        """:return: state directory tied to the user, same as `user_data_dir`"""
+        return self.user_data_dir
+
+    @property
+    def user_log_dir(self) -> str:
+        """
+        :return: log directory tied to the user, same as `user_cache_dir` if not opinionated else ``log`` in it,
+          e.g. ``/data/user/<userid>/<packagename>/cache/<AppName>/log``
+        """
+        path = self.user_cache_dir
+        if self.opinion:
+            path = os.path.join(path, "log")  # noqa: PTH118
+        return path
+
+    @property
+    def user_documents_dir(self) -> str:
+        """:return: documents directory tied to the user e.g. ``/storage/emulated/0/Documents``"""
+        return _android_documents_folder()
+
+    @property
+    def user_downloads_dir(self) -> str:
+        """:return: downloads directory tied to the user e.g. ``/storage/emulated/0/Downloads``"""
+        return _android_downloads_folder()
+
+    @property
+    def user_pictures_dir(self) -> str:
+        """:return: pictures directory tied to the user e.g. ``/storage/emulated/0/Pictures``"""
+        return _android_pictures_folder()
+
+    @property
+    def user_videos_dir(self) -> str:
+        """:return: videos directory tied to the user e.g. ``/storage/emulated/0/DCIM/Camera``"""
+        return _android_videos_folder()
+
+    @property
+    def user_music_dir(self) -> str:
+        """:return: music directory tied to the user e.g. ``/storage/emulated/0/Music``"""
+        return _android_music_folder()
+
+    @property
+    def user_desktop_dir(self) -> str:
+        """:return: desktop directory tied to the user e.g. ``/storage/emulated/0/Desktop``"""
+        return "/storage/emulated/0/Desktop"
+
+    @property
+    def user_runtime_dir(self) -> str:
+        """
+        :return: runtime directory tied to the user, same as `user_cache_dir` if not opinionated else ``tmp`` in it,
+          e.g.
``/data/user/<userid>/<packagename>/cache/<AppName>/tmp``
+        """
+        path = self.user_cache_dir
+        if self.opinion:
+            path = os.path.join(path, "tmp")  # noqa: PTH118
+        return path
+
+    @property
+    def site_runtime_dir(self) -> str:
+        """:return: runtime directory shared by users, same as `user_runtime_dir`"""
+        return self.user_runtime_dir
+
+
+@lru_cache(maxsize=1)
+def _android_folder() -> str | None:
+    """:return: base folder for the Android OS or None if it cannot be found"""
+    try:
+        # First try to get the path to the Android app via pyjnius
+        from jnius import autoclass
+
+        context = autoclass("android.content.Context")
+        result: str | None = context.getFilesDir().getParentFile().getAbsolutePath()
+    except Exception:  # noqa: BLE001
+        # if that fails, look for an Android-style folder path on sys.path
+        pattern = re.compile(r"/data/(data|user/\d+)/(.+)/files")
+        for path in sys.path:
+            if pattern.match(path):
+                result = path.split("/files")[0]
+                break
+        else:
+            result = None
+    return result
+
+
+@lru_cache(maxsize=1)
+def _android_documents_folder() -> str:
+    """:return: documents folder for the Android OS"""
+    # Get directories with pyjnius
+    try:
+        from jnius import autoclass
+
+        context = autoclass("android.content.Context")
+        environment = autoclass("android.os.Environment")
+        documents_dir: str = context.getExternalFilesDir(environment.DIRECTORY_DOCUMENTS).getAbsolutePath()
+    except Exception:  # noqa: BLE001
+        documents_dir = "/storage/emulated/0/Documents"
+
+    return documents_dir
+
+
+@lru_cache(maxsize=1)
+def _android_downloads_folder() -> str:
+    """:return: downloads folder for the Android OS"""
+    # Get directories with pyjnius
+    try:
+        from jnius import autoclass
+
+        context = autoclass("android.content.Context")
+        environment = autoclass("android.os.Environment")
+        downloads_dir: str = context.getExternalFilesDir(environment.DIRECTORY_DOWNLOADS).getAbsolutePath()
+    except Exception:  # noqa: BLE001
+        downloads_dir = "/storage/emulated/0/Downloads"
+
+    return downloads_dir
+
+
+@lru_cache(maxsize=1)
+def _android_pictures_folder() -> str:
+    """:return: pictures folder for the Android OS"""
+    # Get directories with pyjnius
+    try:
+        from jnius import autoclass
+
+        context = autoclass("android.content.Context")
+        environment = autoclass("android.os.Environment")
+        pictures_dir: str = context.getExternalFilesDir(environment.DIRECTORY_PICTURES).getAbsolutePath()
+    except Exception:  # noqa: BLE001
+        pictures_dir = "/storage/emulated/0/Pictures"
+
+    return pictures_dir
+
+
+@lru_cache(maxsize=1)
+def _android_videos_folder() -> str:
+    """:return: videos folder for the Android OS"""
+    # Get directories with pyjnius
+    try:
+        from jnius import autoclass
+
+        context = autoclass("android.content.Context")
+        environment = autoclass("android.os.Environment")
+        videos_dir: str = context.getExternalFilesDir(environment.DIRECTORY_DCIM).getAbsolutePath()
+    except Exception:  # noqa: BLE001
+        videos_dir = "/storage/emulated/0/DCIM/Camera"
+
+    return videos_dir
+
+
+@lru_cache(maxsize=1)
+def _android_music_folder() -> str:
+    """:return: music folder for the Android OS"""
+    # Get directories with pyjnius
+    try:
+        from jnius import autoclass
+
+        context = autoclass("android.content.Context")
+        environment = autoclass("android.os.Environment")
+        music_dir: str = context.getExternalFilesDir(environment.DIRECTORY_MUSIC).getAbsolutePath()
+    except Exception:  # noqa: BLE001
+        music_dir = "/storage/emulated/0/Music"
+
+    return music_dir
+
+
+__all__ = [
+    "Android",
+]
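+
+# A quick illustration of the sys.path fallback in `_android_folder` above
+# (a hypothetical doctest; the app package name is made up):
+#
+#     >>> import re
+#     >>> pattern = re.compile(r"/data/(data|user/\d+)/(.+)/files")
+#     >>> bool(pattern.match("/data/user/0/com.example.app/files"))
+#     True
diff --git a/venv/lib/python3.10/site-packages/platformdirs/api.py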
b/venv/lib/python3.10/site-packages/platformdirs/api.py
new file mode 100644
index 0000000..1315799
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/platformdirs/api.py
@@ -0,0 +1,243 @@
+"""Base API."""
+from __future__ import annotations
+
+import os
+from abc import ABC, abstractmethod
+from pathlib import Path
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    import sys
+
+    if sys.version_info >= (3, 8):  # pragma: no cover (py38+)
+        from typing import Literal
+    else:  # pragma: no cover (py38+)
+        from typing_extensions import Literal
+
+
+class PlatformDirsABC(ABC):
+    """Abstract base class for platform directories."""
+
+    def __init__(  # noqa: PLR0913
+        self,
+        appname: str | None = None,
+        appauthor: str | None | Literal[False] = None,
+        version: str | None = None,
+        roaming: bool = False,  # noqa: FBT001, FBT002
+        multipath: bool = False,  # noqa: FBT001, FBT002
+        opinion: bool = True,  # noqa: FBT001, FBT002
+        ensure_exists: bool = False,  # noqa: FBT001, FBT002
+    ) -> None:
+        """
+        Create a new platform directory.
+
+        :param appname: See `appname`.
+        :param appauthor: See `appauthor`.
+        :param version: See `version`.
+        :param roaming: See `roaming`.
+        :param multipath: See `multipath`.
+        :param opinion: See `opinion`.
+        :param ensure_exists: See `ensure_exists`.
+        """
+        self.appname = appname  #: The name of the application.
+        self.appauthor = appauthor
+        """
+        The name of the app author or distributing body for this application. Typically, it is the owning company name.
+        Defaults to `appname`. You may pass ``False`` to disable it.
+        """
+        self.version = version
+        """
+        An optional version path element to append to the path. You might want to use this if you want multiple versions
+        of your app to be able to run independently. If used, this would typically be ``<major>.<minor>``.
+        """
+        self.roaming = roaming
+        """
+        Whether to use the roaming appdata directory on Windows. That means that for users on a Windows network setup
+        for roaming profiles, this user data will be synced on login (see
+        `here `_).
+        """
+        self.multipath = multipath
+        """
+        An optional parameter only applicable to Unix/Linux which indicates that the entire list of data dirs should be
+        returned. By default, only the first item is returned.
+        """
+        self.opinion = opinion  #: A flag indicating whether to use opinionated values.
+        self.ensure_exists = ensure_exists
+        """
+        Optionally create the directory (and any missing parents) upon access if it does not exist.
+        By default, no directories are created.
+ """ + + def _append_app_name_and_version(self, *base: str) -> str: + params = list(base[1:]) + if self.appname: + params.append(self.appname) + if self.version: + params.append(self.version) + path = os.path.join(base[0], *params) # noqa: PTH118 + self._optionally_create_directory(path) + return path + + def _optionally_create_directory(self, path: str) -> None: + if self.ensure_exists: + Path(path).mkdir(parents=True, exist_ok=True) + + @property + @abstractmethod + def user_data_dir(self) -> str: + """:return: data directory tied to the user""" + + @property + @abstractmethod + def site_data_dir(self) -> str: + """:return: data directory shared by users""" + + @property + @abstractmethod + def user_config_dir(self) -> str: + """:return: config directory tied to the user""" + + @property + @abstractmethod + def site_config_dir(self) -> str: + """:return: config directory shared by the users""" + + @property + @abstractmethod + def user_cache_dir(self) -> str: + """:return: cache directory tied to the user""" + + @property + @abstractmethod + def site_cache_dir(self) -> str: + """:return: cache directory shared by users""" + + @property + @abstractmethod + def user_state_dir(self) -> str: + """:return: state directory tied to the user""" + + @property + @abstractmethod + def user_log_dir(self) -> str: + """:return: log directory tied to the user""" + + @property + @abstractmethod + def user_documents_dir(self) -> str: + """:return: documents directory tied to the user""" + + @property + @abstractmethod + def user_downloads_dir(self) -> str: + """:return: downloads directory tied to the user""" + + @property + @abstractmethod + def user_pictures_dir(self) -> str: + """:return: pictures directory tied to the user""" + + @property + @abstractmethod + def user_videos_dir(self) -> str: + """:return: videos directory tied to the user""" + + @property + @abstractmethod + def user_music_dir(self) -> str: + """:return: music directory tied to the user""" + + @property + @abstractmethod + def user_desktop_dir(self) -> str: + """:return: desktop directory tied to the user""" + + @property + @abstractmethod + def user_runtime_dir(self) -> str: + """:return: runtime directory tied to the user""" + + @property + @abstractmethod + def site_runtime_dir(self) -> str: + """:return: runtime directory shared by users""" + + @property + def user_data_path(self) -> Path: + """:return: data path tied to the user""" + return Path(self.user_data_dir) + + @property + def site_data_path(self) -> Path: + """:return: data path shared by users""" + return Path(self.site_data_dir) + + @property + def user_config_path(self) -> Path: + """:return: config path tied to the user""" + return Path(self.user_config_dir) + + @property + def site_config_path(self) -> Path: + """:return: config path shared by the users""" + return Path(self.site_config_dir) + + @property + def user_cache_path(self) -> Path: + """:return: cache path tied to the user""" + return Path(self.user_cache_dir) + + @property + def site_cache_path(self) -> Path: + """:return: cache path shared by users""" + return Path(self.site_cache_dir) + + @property + def user_state_path(self) -> Path: + """:return: state path tied to the user""" + return Path(self.user_state_dir) + + @property + def user_log_path(self) -> Path: + """:return: log path tied to the user""" + return Path(self.user_log_dir) + + @property + def user_documents_path(self) -> Path: + """:return: documents path tied to the user""" + return Path(self.user_documents_dir) + + @property + def 
user_downloads_path(self) -> Path: + """:return: downloads path tied to the user""" + return Path(self.user_downloads_dir) + + @property + def user_pictures_path(self) -> Path: + """:return: pictures path tied to the user""" + return Path(self.user_pictures_dir) + + @property + def user_videos_path(self) -> Path: + """:return: videos path tied to the user""" + return Path(self.user_videos_dir) + + @property + def user_music_path(self) -> Path: + """:return: music path tied to the user""" + return Path(self.user_music_dir) + + @property + def user_desktop_path(self) -> Path: + """:return: desktop path tied to the user""" + return Path(self.user_desktop_dir) + + @property + def user_runtime_path(self) -> Path: + """:return: runtime path tied to the user""" + return Path(self.user_runtime_dir) + + @property + def site_runtime_path(self) -> Path: + """:return: runtime path shared by users""" + return Path(self.site_runtime_dir) diff --git a/venv/lib/python3.10/site-packages/platformdirs/macos.py b/venv/lib/python3.10/site-packages/platformdirs/macos.py new file mode 100644 index 0000000..7800fe1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/platformdirs/macos.py @@ -0,0 +1,101 @@ +"""macOS.""" +from __future__ import annotations + +import os.path + +from .api import PlatformDirsABC + + +class MacOS(PlatformDirsABC): + """ + Platform directories for the macOS operating system. Follows the guidance from `Apple documentation + `_. + Makes use of the `appname `, + `version `, + `ensure_exists `. + """ + + @property + def user_data_dir(self) -> str: + """:return: data directory tied to the user, e.g. ``~/Library/Application Support/$appname/$version``""" + return self._append_app_name_and_version(os.path.expanduser("~/Library/Application Support")) # noqa: PTH111 + + @property + def site_data_dir(self) -> str: + """:return: data directory shared by users, e.g. ``/Library/Application Support/$appname/$version``""" + return self._append_app_name_and_version("/Library/Application Support") + + @property + def user_config_dir(self) -> str: + """:return: config directory tied to the user, same as `user_data_dir`""" + return self.user_data_dir + + @property + def site_config_dir(self) -> str: + """:return: config directory shared by the users, same as `site_data_dir`""" + return self.site_data_dir + + @property + def user_cache_dir(self) -> str: + """:return: cache directory tied to the user, e.g. ``~/Library/Caches/$appname/$version``""" + return self._append_app_name_and_version(os.path.expanduser("~/Library/Caches")) # noqa: PTH111 + + @property + def site_cache_dir(self) -> str: + """:return: cache directory shared by users, e.g. ``/Library/Caches/$appname/$version``""" + return self._append_app_name_and_version("/Library/Caches") + + @property + def user_state_dir(self) -> str: + """:return: state directory tied to the user, same as `user_data_dir`""" + return self.user_data_dir + + @property + def user_log_dir(self) -> str: + """:return: log directory tied to the user, e.g. ``~/Library/Logs/$appname/$version``""" + return self._append_app_name_and_version(os.path.expanduser("~/Library/Logs")) # noqa: PTH111 + + @property + def user_documents_dir(self) -> str: + """:return: documents directory tied to the user, e.g. ``~/Documents``""" + return os.path.expanduser("~/Documents") # noqa: PTH111 + + @property + def user_downloads_dir(self) -> str: + """:return: downloads directory tied to the user, e.g. 
``~/Downloads``"""
+        return os.path.expanduser("~/Downloads")  # noqa: PTH111
+
+    @property
+    def user_pictures_dir(self) -> str:
+        """:return: pictures directory tied to the user, e.g. ``~/Pictures``"""
+        return os.path.expanduser("~/Pictures")  # noqa: PTH111
+
+    @property
+    def user_videos_dir(self) -> str:
+        """:return: videos directory tied to the user, e.g. ``~/Movies``"""
+        return os.path.expanduser("~/Movies")  # noqa: PTH111
+
+    @property
+    def user_music_dir(self) -> str:
+        """:return: music directory tied to the user, e.g. ``~/Music``"""
+        return os.path.expanduser("~/Music")  # noqa: PTH111
+
+    @property
+    def user_desktop_dir(self) -> str:
+        """:return: desktop directory tied to the user, e.g. ``~/Desktop``"""
+        return os.path.expanduser("~/Desktop")  # noqa: PTH111
+
+    @property
+    def user_runtime_dir(self) -> str:
+        """:return: runtime directory tied to the user, e.g. ``~/Library/Caches/TemporaryItems/$appname/$version``"""
+        return self._append_app_name_and_version(os.path.expanduser("~/Library/Caches/TemporaryItems"))  # noqa: PTH111
+
+    @property
+    def site_runtime_dir(self) -> str:
+        """:return: runtime directory shared by users, same as `user_runtime_dir`"""
+        return self.user_runtime_dir
+
+
+__all__ = [
+    "MacOS",
+]
diff --git a/venv/lib/python3.10/site-packages/platformdirs/py.typed b/venv/lib/python3.10/site-packages/platformdirs/py.typed
new file mode 100644
index 0000000..e69de29
diff --git a/venv/lib/python3.10/site-packages/platformdirs/unix.py b/venv/lib/python3.10/site-packages/platformdirs/unix.py
new file mode 100644
index 0000000..de4573e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/platformdirs/unix.py
@@ -0,0 +1,251 @@
+"""Unix."""
+from __future__ import annotations
+
+import os
+import sys
+from configparser import ConfigParser
+from pathlib import Path
+
+from .api import PlatformDirsABC
+
+if sys.platform == "win32":
+
+    def getuid() -> int:
+        msg = "should only be used on Unix"
+        raise RuntimeError(msg)
+
+else:
+    from os import getuid
+
+
+class Unix(PlatformDirsABC):
+    """
+    On Unix/Linux, we follow the
+    `XDG Basedir Spec `_. The spec allows
+    overriding directories with environment variables. The examples shown are the default values, alongside the
+    name of the environment variable that overrides them. Makes use of the
+    `appname `,
+    `version `,
+    `multipath `,
+    `opinion `,
+    `ensure_exists `.
+    """
+
+    @property
+    def user_data_dir(self) -> str:
+        """
+        :return: data directory tied to the user, e.g. ``~/.local/share/$appname/$version`` or
+         ``$XDG_DATA_HOME/$appname/$version``
+        """
+        path = os.environ.get("XDG_DATA_HOME", "")
+        if not path.strip():
+            path = os.path.expanduser("~/.local/share")  # noqa: PTH111
+        return self._append_app_name_and_version(path)
+
+    @property
+    def site_data_dir(self) -> str:
+        """
+        :return: data directories shared by users (if `multipath ` is
+         enabled and ``XDG_DATA_DIRS`` is set and contains multiple paths, the returned value is also a
+         multi-path string separated by the OS path separator), e.g.
``/usr/local/share/$appname/$version`` or ``/usr/share/$appname/$version``
+        """
+        # XDG default for $XDG_DATA_DIRS; only first, if multipath is False
+        path = os.environ.get("XDG_DATA_DIRS", "")
+        if not path.strip():
+            path = f"/usr/local/share{os.pathsep}/usr/share"
+        return self._with_multi_path(path)
+
+    def _with_multi_path(self, path: str) -> str:
+        path_list = path.split(os.pathsep)
+        if not self.multipath:
+            path_list = path_list[0:1]
+        path_list = [self._append_app_name_and_version(os.path.expanduser(p)) for p in path_list]  # noqa: PTH111
+        return os.pathsep.join(path_list)
+
+    @property
+    def user_config_dir(self) -> str:
+        """
+        :return: config directory tied to the user, e.g. ``~/.config/$appname/$version`` or
+         ``$XDG_CONFIG_HOME/$appname/$version``
+        """
+        path = os.environ.get("XDG_CONFIG_HOME", "")
+        if not path.strip():
+            path = os.path.expanduser("~/.config")  # noqa: PTH111
+        return self._append_app_name_and_version(path)
+
+    @property
+    def site_config_dir(self) -> str:
+        """
+        :return: config directories shared by users (if `multipath `
+         is enabled and ``XDG_CONFIG_DIRS`` is set and contains multiple paths, the returned value is also a
+         multi-path string separated by the OS path separator), e.g. ``/etc/xdg/$appname/$version``
+        """
+        # XDG default for $XDG_CONFIG_DIRS; only first, if multipath is False
+        path = os.environ.get("XDG_CONFIG_DIRS", "")
+        if not path.strip():
+            path = "/etc/xdg"
+        return self._with_multi_path(path)
+
+    @property
+    def user_cache_dir(self) -> str:
+        """
+        :return: cache directory tied to the user, e.g. ``~/.cache/$appname/$version`` or
+         ``$XDG_CACHE_HOME/$appname/$version``
+        """
+        path = os.environ.get("XDG_CACHE_HOME", "")
+        if not path.strip():
+            path = os.path.expanduser("~/.cache")  # noqa: PTH111
+        return self._append_app_name_and_version(path)
+
+    @property
+    def site_cache_dir(self) -> str:
+        """:return: cache directory shared by users, e.g. ``/var/tmp/$appname/$version``"""
+        return self._append_app_name_and_version("/var/tmp")  # noqa: S108
+
+    @property
+    def user_state_dir(self) -> str:
+        """
+        :return: state directory tied to the user, e.g. ``~/.local/state/$appname/$version`` or
+         ``$XDG_STATE_HOME/$appname/$version``
+        """
+        path = os.environ.get("XDG_STATE_HOME", "")
+        if not path.strip():
+            path = os.path.expanduser("~/.local/state")  # noqa: PTH111
+        return self._append_app_name_and_version(path)
+
+    @property
+    def user_log_dir(self) -> str:
+        """:return: log directory tied to the user, same as `user_state_dir` if not opinionated else ``log`` in it"""
+        path = self.user_state_dir
+        if self.opinion:
+            path = os.path.join(path, "log")  # noqa: PTH118
+            self._optionally_create_directory(path)
+        return path
+
+    @property
+    def user_documents_dir(self) -> str:
+        """:return: documents directory tied to the user, e.g. ``~/Documents``"""
+        return _get_user_media_dir("XDG_DOCUMENTS_DIR", "~/Documents")
+
+    @property
+    def user_downloads_dir(self) -> str:
+        """:return: downloads directory tied to the user, e.g. ``~/Downloads``"""
+        return _get_user_media_dir("XDG_DOWNLOAD_DIR", "~/Downloads")
+
+    @property
+    def user_pictures_dir(self) -> str:
+        """:return: pictures directory tied to the user, e.g. ``~/Pictures``"""
+        return _get_user_media_dir("XDG_PICTURES_DIR", "~/Pictures")
+
+    @property
+    def user_videos_dir(self) -> str:
+        """:return: videos directory tied to the user, e.g. ``~/Videos``"""
+        return _get_user_media_dir("XDG_VIDEOS_DIR", "~/Videos")
+
+    @property
+    def user_music_dir(self) -> str:
+        """:return: music directory tied to the user, e.g.
``~/Music``""" + return _get_user_media_dir("XDG_MUSIC_DIR", "~/Music") + + @property + def user_desktop_dir(self) -> str: + """:return: desktop directory tied to the user, e.g. ``~/Desktop``""" + return _get_user_media_dir("XDG_DESKTOP_DIR", "~/Desktop") + + @property + def user_runtime_dir(self) -> str: + """ + :return: runtime directory tied to the user, e.g. ``/run/user/$(id -u)/$appname/$version`` or + ``$XDG_RUNTIME_DIR/$appname/$version``. + + For FreeBSD/OpenBSD/NetBSD, it would return ``/var/run/user/$(id -u)/$appname/$version`` if + exists, otherwise ``/tmp/runtime-$(id -u)/$appname/$version``, if``$XDG_RUNTIME_DIR`` + is not set. + """ + path = os.environ.get("XDG_RUNTIME_DIR", "") + if not path.strip(): + if sys.platform.startswith(("freebsd", "openbsd", "netbsd")): + path = f"/var/run/user/{getuid()}" + if not Path(path).exists(): + path = f"/tmp/runtime-{getuid()}" # noqa: S108 + else: + path = f"/run/user/{getuid()}" + return self._append_app_name_and_version(path) + + @property + def site_runtime_dir(self) -> str: + """ + :return: runtime directory shared by users, e.g. ``/run/$appname/$version`` or + ``$XDG_RUNTIME_DIR/$appname/$version``. + + Note that this behaves almost exactly like `user_runtime_dir` if ``$XDG_RUNTIME_DIR`` is set, but will + fallback to paths associated to the root user instead of a regular logged-in user if it's not set. + + If you wish to ensure that a logged-in root user path is returned e.g. ``/run/user/0``, use `user_runtime_dir` + instead. + + For FreeBSD/OpenBSD/NetBSD, it would return ``/var/run/$appname/$version`` if ``$XDG_RUNTIME_DIR`` is not set. + """ + path = os.environ.get("XDG_RUNTIME_DIR", "") + if not path.strip(): + if sys.platform.startswith(("freebsd", "openbsd", "netbsd")): + path = "/var/run" + else: + path = "/run" + return self._append_app_name_and_version(path) + + @property + def site_data_path(self) -> Path: + """:return: data path shared by users. Only return first item, even if ``multipath`` is set to ``True``""" + return self._first_item_as_path_if_multipath(self.site_data_dir) + + @property + def site_config_path(self) -> Path: + """:return: config path shared by the users. Only return first item, even if ``multipath`` is set to ``True``""" + return self._first_item_as_path_if_multipath(self.site_config_dir) + + @property + def site_cache_path(self) -> Path: + """:return: cache path shared by users. Only return first item, even if ``multipath`` is set to ``True``""" + return self._first_item_as_path_if_multipath(self.site_cache_dir) + + def _first_item_as_path_if_multipath(self, directory: str) -> Path: + if self.multipath: + # If multipath is True, the first path is returned. + directory = directory.split(os.pathsep)[0] + return Path(directory) + + +def _get_user_media_dir(env_var: str, fallback_tilde_path: str) -> str: + media_dir = _get_user_dirs_folder(env_var) + if media_dir is None: + media_dir = os.environ.get(env_var, "").strip() + if not media_dir: + media_dir = os.path.expanduser(fallback_tilde_path) # noqa: PTH111 + + return media_dir + + +def _get_user_dirs_folder(key: str) -> str | None: + """Return directory from user-dirs.dirs config file. 
See https://freedesktop.org/wiki/Software/xdg-user-dirs/.""" + user_dirs_config_path = Path(Unix().user_config_dir) / "user-dirs.dirs" + if user_dirs_config_path.exists(): + parser = ConfigParser() + + with user_dirs_config_path.open() as stream: + # Add fake section header, so ConfigParser doesn't complain + parser.read_string(f"[top]\n{stream.read()}") + + if key not in parser["top"]: + return None + + path = parser["top"][key].strip('"') + # Handle relative home paths + return path.replace("$HOME", os.path.expanduser("~")) # noqa: PTH111 + + return None + + +__all__ = [ + "Unix", +] diff --git a/venv/lib/python3.10/site-packages/platformdirs/version.py b/venv/lib/python3.10/site-packages/platformdirs/version.py new file mode 100644 index 0000000..2352f2f --- /dev/null +++ b/venv/lib/python3.10/site-packages/platformdirs/version.py @@ -0,0 +1,4 @@ +# file generated by setuptools_scm +# don't change, don't track in version control +__version__ = version = '3.10.0' +__version_tuple__ = version_tuple = (3, 10, 0) diff --git a/venv/lib/python3.10/site-packages/platformdirs/windows.py b/venv/lib/python3.10/site-packages/platformdirs/windows.py new file mode 100644 index 0000000..0ba3d27 --- /dev/null +++ b/venv/lib/python3.10/site-packages/platformdirs/windows.py @@ -0,0 +1,266 @@ +"""Windows.""" +from __future__ import annotations + +import ctypes +import os +import sys +from functools import lru_cache +from typing import TYPE_CHECKING + +from .api import PlatformDirsABC + +if TYPE_CHECKING: + from collections.abc import Callable + + +class Windows(PlatformDirsABC): + """ + `MSDN on where to store app data files + `_. + Makes use of the + `appname `, + `appauthor `, + `version `, + `roaming `, + `opinion `, + `ensure_exists `. + """ + + @property + def user_data_dir(self) -> str: + """ + :return: data directory tied to the user, e.g. + ``%USERPROFILE%\\AppData\\Local\\$appauthor\\$appname`` (not roaming) or + ``%USERPROFILE%\\AppData\\Roaming\\$appauthor\\$appname`` (roaming) + """ + const = "CSIDL_APPDATA" if self.roaming else "CSIDL_LOCAL_APPDATA" + path = os.path.normpath(get_win_folder(const)) + return self._append_parts(path) + + def _append_parts(self, path: str, *, opinion_value: str | None = None) -> str: + params = [] + if self.appname: + if self.appauthor is not False: + author = self.appauthor or self.appname + params.append(author) + params.append(self.appname) + if opinion_value is not None and self.opinion: + params.append(opinion_value) + if self.version: + params.append(self.version) + path = os.path.join(path, *params) # noqa: PTH118 + self._optionally_create_directory(path) + return path + + @property + def site_data_dir(self) -> str: + """:return: data directory shared by users, e.g. ``C:\\ProgramData\\$appauthor\\$appname``""" + path = os.path.normpath(get_win_folder("CSIDL_COMMON_APPDATA")) + return self._append_parts(path) + + @property + def user_config_dir(self) -> str: + """:return: config directory tied to the user, same as `user_data_dir`""" + return self.user_data_dir + + @property + def site_config_dir(self) -> str: + """:return: config directory shared by the users, same as `site_data_dir`""" + return self.site_data_dir + + @property + def user_cache_dir(self) -> str: + """ + :return: cache directory tied to the user (if opinionated with ``Cache`` folder within ``$appname``) e.g. 
+ ``%USERPROFILE%\\AppData\\Local\\$appauthor\\$appname\\Cache\\$version`` + """ + path = os.path.normpath(get_win_folder("CSIDL_LOCAL_APPDATA")) + return self._append_parts(path, opinion_value="Cache") + + @property + def site_cache_dir(self) -> str: + """:return: cache directory shared by users, e.g. ``C:\\ProgramData\\$appauthor\\$appname\\Cache\\$version``""" + path = os.path.normpath(get_win_folder("CSIDL_COMMON_APPDATA")) + return self._append_parts(path, opinion_value="Cache") + + @property + def user_state_dir(self) -> str: + """:return: state directory tied to the user, same as `user_data_dir`""" + return self.user_data_dir + + @property + def user_log_dir(self) -> str: + """:return: log directory tied to the user, same as `user_data_dir` if not opinionated else ``Logs`` in it""" + path = self.user_data_dir + if self.opinion: + path = os.path.join(path, "Logs") # noqa: PTH118 + self._optionally_create_directory(path) + return path + + @property + def user_documents_dir(self) -> str: + """:return: documents directory tied to the user e.g. ``%USERPROFILE%\\Documents``""" + return os.path.normpath(get_win_folder("CSIDL_PERSONAL")) + + @property + def user_downloads_dir(self) -> str: + """:return: downloads directory tied to the user e.g. ``%USERPROFILE%\\Downloads``""" + return os.path.normpath(get_win_folder("CSIDL_DOWNLOADS")) + + @property + def user_pictures_dir(self) -> str: + """:return: pictures directory tied to the user e.g. ``%USERPROFILE%\\Pictures``""" + return os.path.normpath(get_win_folder("CSIDL_MYPICTURES")) + + @property + def user_videos_dir(self) -> str: + """:return: videos directory tied to the user e.g. ``%USERPROFILE%\\Videos``""" + return os.path.normpath(get_win_folder("CSIDL_MYVIDEO")) + + @property + def user_music_dir(self) -> str: + """:return: music directory tied to the user e.g. ``%USERPROFILE%\\Music``""" + return os.path.normpath(get_win_folder("CSIDL_MYMUSIC")) + + @property + def user_desktop_dir(self) -> str: + """:return: desktop directory tied to the user, e.g. ``%USERPROFILE%\\Desktop``""" + return os.path.normpath(get_win_folder("CSIDL_DESKTOPDIRECTORY")) + + @property + def user_runtime_dir(self) -> str: + """ + :return: runtime directory tied to the user, e.g. 
+ ``%USERPROFILE%\\AppData\\Local\\Temp\\$appauthor\\$appname`` + """ + path = os.path.normpath(os.path.join(get_win_folder("CSIDL_LOCAL_APPDATA"), "Temp")) # noqa: PTH118 + return self._append_parts(path) + + @property + def site_runtime_dir(self) -> str: + """:return: runtime directory shared by users, same as `user_runtime_dir`""" + return self.user_runtime_dir + + +def get_win_folder_from_env_vars(csidl_name: str) -> str: + """Get folder from environment variables.""" + result = get_win_folder_if_csidl_name_not_env_var(csidl_name) + if result is not None: + return result + + env_var_name = { + "CSIDL_APPDATA": "APPDATA", + "CSIDL_COMMON_APPDATA": "ALLUSERSPROFILE", + "CSIDL_LOCAL_APPDATA": "LOCALAPPDATA", + }.get(csidl_name) + if env_var_name is None: + msg = f"Unknown CSIDL name: {csidl_name}" + raise ValueError(msg) + result = os.environ.get(env_var_name) + if result is None: + msg = f"Unset environment variable: {env_var_name}" + raise ValueError(msg) + return result + + +def get_win_folder_if_csidl_name_not_env_var(csidl_name: str) -> str | None: + """Get folder for a CSIDL name that does not exist as an environment variable.""" + if csidl_name == "CSIDL_PERSONAL": + return os.path.join(os.path.normpath(os.environ["USERPROFILE"]), "Documents") # noqa: PTH118 + + if csidl_name == "CSIDL_DOWNLOADS": + return os.path.join(os.path.normpath(os.environ["USERPROFILE"]), "Downloads") # noqa: PTH118 + + if csidl_name == "CSIDL_MYPICTURES": + return os.path.join(os.path.normpath(os.environ["USERPROFILE"]), "Pictures") # noqa: PTH118 + + if csidl_name == "CSIDL_MYVIDEO": + return os.path.join(os.path.normpath(os.environ["USERPROFILE"]), "Videos") # noqa: PTH118 + + if csidl_name == "CSIDL_MYMUSIC": + return os.path.join(os.path.normpath(os.environ["USERPROFILE"]), "Music") # noqa: PTH118 + return None + + +def get_win_folder_from_registry(csidl_name: str) -> str: + """ + Get folder from the registry. + + This is a fallback technique at best. I'm not sure if using the registry for these guarantees us the correct answer + for all CSIDL_* names. + """ + shell_folder_name = { + "CSIDL_APPDATA": "AppData", + "CSIDL_COMMON_APPDATA": "Common AppData", + "CSIDL_LOCAL_APPDATA": "Local AppData", + "CSIDL_PERSONAL": "Personal", + "CSIDL_DOWNLOADS": "{374DE290-123F-4565-9164-39C4925E467B}", + "CSIDL_MYPICTURES": "My Pictures", + "CSIDL_MYVIDEO": "My Video", + "CSIDL_MYMUSIC": "My Music", + }.get(csidl_name) + if shell_folder_name is None: + msg = f"Unknown CSIDL name: {csidl_name}" + raise ValueError(msg) + if sys.platform != "win32": # only needed for mypy type checker to know that this code runs only on Windows + raise NotImplementedError + import winreg + + key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders") + directory, _ = winreg.QueryValueEx(key, shell_folder_name) + return str(directory) + + +def get_win_folder_via_ctypes(csidl_name: str) -> str: + """Get folder with ctypes.""" + # There is no 'CSIDL_DOWNLOADS'. + # Use 'CSIDL_PROFILE' (40) and append the default folder 'Downloads' instead. 
+ # https://learn.microsoft.com/en-us/windows/win32/shell/knownfolderid + + csidl_const = { + "CSIDL_APPDATA": 26, + "CSIDL_COMMON_APPDATA": 35, + "CSIDL_LOCAL_APPDATA": 28, + "CSIDL_PERSONAL": 5, + "CSIDL_MYPICTURES": 39, + "CSIDL_MYVIDEO": 14, + "CSIDL_MYMUSIC": 13, + "CSIDL_DOWNLOADS": 40, + "CSIDL_DESKTOPDIRECTORY": 16, + }.get(csidl_name) + if csidl_const is None: + msg = f"Unknown CSIDL name: {csidl_name}" + raise ValueError(msg) + + buf = ctypes.create_unicode_buffer(1024) + windll = getattr(ctypes, "windll") # noqa: B009 # using getattr to avoid false positive with mypy type checker + windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf) + + # Downgrade to short path name if it has highbit chars. + if any(ord(c) > 255 for c in buf): # noqa: PLR2004 + buf2 = ctypes.create_unicode_buffer(1024) + if windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024): + buf = buf2 + + if csidl_name == "CSIDL_DOWNLOADS": + return os.path.join(buf.value, "Downloads") # noqa: PTH118 + + return buf.value + + +def _pick_get_win_folder() -> Callable[[str], str]: + if hasattr(ctypes, "windll"): + return get_win_folder_via_ctypes + try: + import winreg # noqa: F401 + except ImportError: + return get_win_folder_from_env_vars + else: + return get_win_folder_from_registry + + +get_win_folder = lru_cache(maxsize=None)(_pick_get_win_folder()) + +__all__ = [ + "Windows", +] diff --git a/venv/lib/python3.10/site-packages/pluggy-1.3.0.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/pluggy-1.3.0.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/venv/lib/python3.10/site-packages/pluggy-1.3.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/lib/python3.10/site-packages/pluggy-1.3.0.dist-info/LICENSE b/venv/lib/python3.10/site-packages/pluggy-1.3.0.dist-info/LICENSE new file mode 100644 index 0000000..85f4dd6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pluggy-1.3.0.dist-info/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 holger krekel (rather uses bitbucket/hpk42) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
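
A quick orientation note before the next vendored file: the three platform backends added
above (``Unix``, ``MacOS``, ``Windows``) are normally reached through the ``PlatformDirs``
front-end that platformdirs exports from its package ``__init__`` (not shown in this hunk).
A minimal usage sketch, with an illustrative ``MyApp``/``1.0`` app name and version::

    from platformdirs import PlatformDirs

    dirs = PlatformDirs(appname="MyApp", version="1.0")
    # Each attribute resolves against the conventions of the running OS:
    print(dirs.user_data_dir)    # e.g. ~/.local/share/MyApp/1.0 on Linux
    print(dirs.user_cache_dir)   # e.g. ~/Library/Caches/MyApp/1.0 on macOS
    print(dirs.site_config_dir)  # e.g. /etc/xdg/MyApp/1.0 on Linux
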
diff --git a/venv/lib/python3.10/site-packages/pluggy-1.3.0.dist-info/METADATA b/venv/lib/python3.10/site-packages/pluggy-1.3.0.dist-info/METADATA new file mode 100644 index 0000000..684704f --- /dev/null +++ b/venv/lib/python3.10/site-packages/pluggy-1.3.0.dist-info/METADATA @@ -0,0 +1,140 @@ +Metadata-Version: 2.1 +Name: pluggy +Version: 1.3.0 +Summary: plugin and hook calling mechanisms for python +Home-page: https://github.com/pytest-dev/pluggy +Author: Holger Krekel +Author-email: holger@merlinux.eu +License: MIT +Platform: unix +Platform: linux +Platform: osx +Platform: win32 +Classifier: Development Status :: 6 - Mature +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: POSIX +Classifier: Operating System :: Microsoft :: Windows +Classifier: Operating System :: MacOS :: MacOS X +Classifier: Topic :: Software Development :: Testing +Classifier: Topic :: Software Development :: Libraries +Classifier: Topic :: Utilities +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Requires-Python: >=3.8 +Description-Content-Type: text/x-rst +License-File: LICENSE +Provides-Extra: dev +Requires-Dist: pre-commit ; extra == 'dev' +Requires-Dist: tox ; extra == 'dev' +Provides-Extra: testing +Requires-Dist: pytest ; extra == 'testing' +Requires-Dist: pytest-benchmark ; extra == 'testing' + +==================================================== +pluggy - A minimalist production ready plugin system +==================================================== + +|pypi| |conda-forge| |versions| |github-actions| |gitter| |black| |codecov| + +This is the core framework used by the `pytest`_, `tox`_, and `devpi`_ projects. + +Please `read the docs`_ to learn more! + +A definitive example +==================== +.. code-block:: python + + import pluggy + + hookspec = pluggy.HookspecMarker("myproject") + hookimpl = pluggy.HookimplMarker("myproject") + + + class MySpec: + """A hook specification namespace.""" + + @hookspec + def myhook(self, arg1, arg2): + """My special little hook that you can customize.""" + + + class Plugin_1: + """A hook implementation namespace.""" + + @hookimpl + def myhook(self, arg1, arg2): + print("inside Plugin_1.myhook()") + return arg1 + arg2 + + + class Plugin_2: + """A 2nd hook implementation namespace.""" + + @hookimpl + def myhook(self, arg1, arg2): + print("inside Plugin_2.myhook()") + return arg1 - arg2 + + + # create a manager and add the spec + pm = pluggy.PluginManager("myproject") + pm.add_hookspecs(MySpec) + + # register plugins + pm.register(Plugin_1()) + pm.register(Plugin_2()) + + # call our ``myhook`` hook + results = pm.hook.myhook(arg1=1, arg2=2) + print(results) + + +Running this directly gets us:: + + $ python docs/examples/toy-example.py + inside Plugin_2.myhook() + inside Plugin_1.myhook() + [-1, 3] + + +.. badges + +.. |pypi| image:: https://img.shields.io/pypi/v/pluggy.svg + :target: https://pypi.org/pypi/pluggy + +.. |versions| image:: https://img.shields.io/pypi/pyversions/pluggy.svg + :target: https://pypi.org/pypi/pluggy + +.. 
|github-actions| image:: https://github.com/pytest-dev/pluggy/workflows/main/badge.svg + :target: https://github.com/pytest-dev/pluggy/actions + +.. |conda-forge| image:: https://img.shields.io/conda/vn/conda-forge/pluggy.svg + :target: https://anaconda.org/conda-forge/pytest + +.. |gitter| image:: https://badges.gitter.im/pytest-dev/pluggy.svg + :alt: Join the chat at https://gitter.im/pytest-dev/pluggy + :target: https://gitter.im/pytest-dev/pluggy?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge + +.. |black| image:: https://img.shields.io/badge/code%20style-black-000000.svg + :target: https://github.com/ambv/black + +.. |codecov| image:: https://codecov.io/gh/pytest-dev/pluggy/branch/master/graph/badge.svg + :target: https://codecov.io/gh/pytest-dev/pluggy + :alt: Code coverage Status + +.. links +.. _pytest: + http://pytest.org +.. _tox: + https://tox.readthedocs.org +.. _devpi: + http://doc.devpi.net +.. _read the docs: + https://pluggy.readthedocs.io/en/latest/ diff --git a/venv/lib/python3.10/site-packages/pluggy-1.3.0.dist-info/RECORD b/venv/lib/python3.10/site-packages/pluggy-1.3.0.dist-info/RECORD new file mode 100644 index 0000000..39c4f13 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pluggy-1.3.0.dist-info/RECORD @@ -0,0 +1,21 @@ +pluggy-1.3.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +pluggy-1.3.0.dist-info/LICENSE,sha256=1rZebCE6XQtXeRHTTW5ZSbn1nXbCOMUHGi8_wWz7JgY,1110 +pluggy-1.3.0.dist-info/METADATA,sha256=cewZlldDixhBWMw_A8PNP1-9HyUgDq7jncO9GtQOiOs,4277 +pluggy-1.3.0.dist-info/RECORD,, +pluggy-1.3.0.dist-info/WHEEL,sha256=yQN5g4mg4AybRjkgi-9yy4iQEFibGQmlz78Pik5Or-A,92 +pluggy-1.3.0.dist-info/top_level.txt,sha256=xKSCRhai-v9MckvMuWqNz16c1tbsmOggoMSwTgcpYHE,7 +pluggy/__init__.py,sha256=CjD3L_WaOpbeQEKrl4RYsMUSta_anw0gmKjaM1SaXho,714 +pluggy/__pycache__/__init__.cpython-310.pyc,, +pluggy/__pycache__/_callers.cpython-310.pyc,, +pluggy/__pycache__/_hooks.cpython-310.pyc,, +pluggy/__pycache__/_manager.cpython-310.pyc,, +pluggy/__pycache__/_result.cpython-310.pyc,, +pluggy/__pycache__/_tracing.cpython-310.pyc,, +pluggy/__pycache__/_version.cpython-310.pyc,, +pluggy/_callers.py,sha256=ijxhDDj5SAIpe_dc3DNQ7Buu2sy7TesG8SkOF_VcGh0,6233 +pluggy/_hooks.py,sha256=TiGLtCACb0bT77yt_Koze3evTAZmTE3Xg_T8AtBaA9U,23877 +pluggy/_manager.py,sha256=fcYU7VER0CplRym4jAJ7RCFYl6cfDSeVM589YHHx9uA,19517 +pluggy/_result.py,sha256=CMpNNTgyptUnoYigRZHuJs11gKt9OwTOteDYhY3wAFM,3238 +pluggy/_tracing.py,sha256=ui2w1xQpsjn67ISaEmizvgojXXAUmRL2nwlV-VXmemc,2088 +pluggy/_version.py,sha256=foCZ2hOsQWwUKwjI0-E5-fQh5s9IVn0hjo8sXE-FKwQ,160 +pluggy/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 diff --git a/venv/lib/python3.10/site-packages/setuptools-65.5.1.dist-info/WHEEL b/venv/lib/python3.10/site-packages/pluggy-1.3.0.dist-info/WHEEL similarity index 65% rename from venv/lib/python3.10/site-packages/setuptools-65.5.1.dist-info/WHEEL rename to venv/lib/python3.10/site-packages/pluggy-1.3.0.dist-info/WHEEL index 7faec15..7e68873 100644 --- a/venv/lib/python3.10/site-packages/setuptools-65.5.1.dist-info/WHEEL +++ b/venv/lib/python3.10/site-packages/pluggy-1.3.0.dist-info/WHEEL @@ -1,5 +1,5 @@ Wheel-Version: 1.0 -Generator: bdist_wheel (0.38.1) +Generator: bdist_wheel (0.41.2) Root-Is-Purelib: true Tag: py3-none-any diff --git a/venv/lib/python3.10/site-packages/pluggy-1.3.0.dist-info/top_level.txt b/venv/lib/python3.10/site-packages/pluggy-1.3.0.dist-info/top_level.txt new file mode 100644 index 0000000..11bdb5c --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/pluggy-1.3.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+pluggy
diff --git a/venv/lib/python3.10/site-packages/pluggy/__init__.py b/venv/lib/python3.10/site-packages/pluggy/__init__.py
new file mode 100644
index 0000000..9d9e873
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pluggy/__init__.py
@@ -0,0 +1,33 @@
+try:
+    from ._version import version as __version__
+except ImportError:
+    # broken installation, we don't even try
+    # unknown only works because we do a poor man's version compare
+    __version__ = "unknown"
+
+__all__ = [
+    "__version__",
+    "PluginManager",
+    "PluginValidationError",
+    "HookCaller",
+    "HookCallError",
+    "HookspecOpts",
+    "HookimplOpts",
+    "HookImpl",
+    "HookRelay",
+    "HookspecMarker",
+    "HookimplMarker",
+    "Result",
+]
+
+from ._manager import PluginManager, PluginValidationError
+from ._result import HookCallError, Result
+from ._hooks import (
+    HookspecMarker,
+    HookimplMarker,
+    HookCaller,
+    HookRelay,
+    HookspecOpts,
+    HookimplOpts,
+    HookImpl,
+)
diff --git a/venv/lib/python3.10/site-packages/pluggy/_callers.py b/venv/lib/python3.10/site-packages/pluggy/_callers.py
new file mode 100644
index 0000000..6498eae
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pluggy/_callers.py
@@ -0,0 +1,152 @@
+"""
+Call loop machinery
+"""
+from __future__ import annotations
+
+from typing import cast
+from typing import Generator
+from typing import Mapping
+from typing import Sequence
+from typing import Tuple
+from typing import Union
+
+from ._hooks import HookImpl
+from ._result import _raise_wrapfail
+from ._result import HookCallError
+from ._result import Result
+
+
+# Need to distinguish between old- and new-style hook wrappers.
+# Wrapping one in a singleton tuple is the fastest type-safe way I found to do it.
+Teardown = Union[
+    Tuple[Generator[None, Result[object], None]],
+    Generator[None, object, object],
+]
+
+
+def _multicall(
+    hook_name: str,
+    hook_impls: Sequence[HookImpl],
+    caller_kwargs: Mapping[str, object],
+    firstresult: bool,
+) -> object | list[object]:
+    """Execute a call into multiple python functions/methods and return the
+    result(s).
+
+    ``caller_kwargs`` comes from HookCaller.__call__().
+    """
+    __tracebackhide__ = True
+    results: list[object] = []
+    exception = None
+    only_new_style_wrappers = True
+    try:  # run impl and wrapper setup functions in a loop
+        teardowns: list[Teardown] = []
+        try:
+            for hook_impl in reversed(hook_impls):
+                try:
+                    args = [caller_kwargs[argname] for argname in hook_impl.argnames]
+                except KeyError:
+                    for argname in hook_impl.argnames:
+                        if argname not in caller_kwargs:
+                            raise HookCallError(
+                                f"hook call must provide argument {argname!r}"
+                            )
+
+                if hook_impl.hookwrapper:
+                    only_new_style_wrappers = False
+                    try:
+                        # If this cast is not valid, a type error is raised below,
+                        # which is the desired response.
+                        res = hook_impl.function(*args)
+                        wrapper_gen = cast(Generator[None, Result[object], None], res)
+                        next(wrapper_gen)  # first yield
+                        teardowns.append((wrapper_gen,))
+                    except StopIteration:
+                        _raise_wrapfail(wrapper_gen, "did not yield")
+                elif hook_impl.wrapper:
+                    try:
+                        # If this cast is not valid, a type error is raised below,
+                        # which is the desired response.
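+                        # Calling the hookimpl returns a generator (new-style
+                        # wrappers are generator functions); next() advances it to
+                        # its yield, and the live generator is kept so its
+                        # post-yield code can run in the teardown phase below.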
+ res = hook_impl.function(*args) + function_gen = cast(Generator[None, object, object], res) + next(function_gen) # first yield + teardowns.append(function_gen) + except StopIteration: + _raise_wrapfail(function_gen, "did not yield") + else: + res = hook_impl.function(*args) + if res is not None: + results.append(res) + if firstresult: # halt further impl calls + break + except BaseException as exc: + exception = exc + finally: + # Fast path - only new-style wrappers, no Result. + if only_new_style_wrappers: + if firstresult: # first result hooks return a single value + result = results[0] if results else None + else: + result = results + + # run all wrapper post-yield blocks + for teardown in reversed(teardowns): + try: + if exception is not None: + teardown.throw(exception) # type: ignore[union-attr] + else: + teardown.send(result) # type: ignore[union-attr] + # Following is unreachable for a well behaved hook wrapper. + # Try to force finalizers otherwise postponed till GC action. + # Note: close() may raise if generator handles GeneratorExit. + teardown.close() # type: ignore[union-attr] + except StopIteration as si: + result = si.value + exception = None + continue + except BaseException as e: + exception = e + continue + _raise_wrapfail(teardown, "has second yield") # type: ignore[arg-type] + + if exception is not None: + raise exception.with_traceback(exception.__traceback__) + else: + return result + + # Slow path - need to support old-style wrappers. + else: + if firstresult: # first result hooks return a single value + outcome: Result[object | list[object]] = Result( + results[0] if results else None, exception + ) + else: + outcome = Result(results, exception) + + # run all wrapper post-yield blocks + for teardown in reversed(teardowns): + if isinstance(teardown, tuple): + try: + teardown[0].send(outcome) + _raise_wrapfail(teardown[0], "has second yield") + except StopIteration: + pass + else: + try: + if outcome._exception is not None: + teardown.throw(outcome._exception) + else: + teardown.send(outcome._result) + # Following is unreachable for a well behaved hook wrapper. + # Try to force finalizers otherwise postponed till GC action. + # Note: close() may raise if generator handles GeneratorExit. + teardown.close() + except StopIteration as si: + outcome.force_result(si.value) + continue + except BaseException as e: + outcome.force_exception(e) + continue + _raise_wrapfail(teardown, "has second yield") + + return outcome.get_result() diff --git a/venv/lib/python3.10/site-packages/pluggy/_hooks.py b/venv/lib/python3.10/site-packages/pluggy/_hooks.py new file mode 100644 index 0000000..916ca70 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pluggy/_hooks.py @@ -0,0 +1,691 @@ +""" +Internal hook annotation, representation and calling machinery. 
+""" +from __future__ import annotations + +import inspect +import sys +import warnings +from types import ModuleType +from typing import AbstractSet +from typing import Any +from typing import Callable +from typing import Final +from typing import final +from typing import Generator +from typing import List +from typing import Mapping +from typing import Optional +from typing import overload +from typing import Sequence +from typing import Tuple +from typing import TYPE_CHECKING +from typing import TypedDict +from typing import TypeVar +from typing import Union + +from ._result import Result + + +_T = TypeVar("_T") +_F = TypeVar("_F", bound=Callable[..., object]) +_Namespace = Union[ModuleType, type] +_Plugin = object +_HookExec = Callable[ + [str, Sequence["HookImpl"], Mapping[str, object], bool], + Union[object, List[object]], +] +_HookImplFunction = Callable[..., Union[_T, Generator[None, Result[_T], None]]] + + +class HookspecOpts(TypedDict): + """Options for a hook specification.""" + + #: Whether the hook is :ref:`first result only `. + firstresult: bool + #: Whether the hook is :ref:`historic `. + historic: bool + #: Whether the hook :ref:`warns when implemented `. + warn_on_impl: Warning | None + + +class HookimplOpts(TypedDict): + """Options for a hook implementation.""" + + #: Whether the hook implementation is a :ref:`wrapper `. + wrapper: bool + #: Whether the hook implementation is an :ref:`old-style wrapper + #: `. + hookwrapper: bool + #: Whether validation against a hook specification is :ref:`optional + #: `. + optionalhook: bool + #: Whether to try to order this hook implementation :ref:`first + #: `. + tryfirst: bool + #: Whether to try to order this hook implementation :ref:`last + #: `. + trylast: bool + #: The name of the hook specification to match, see :ref:`specname`. + specname: str | None + + +@final +class HookspecMarker: + """Decorator for marking functions as hook specifications. + + Instantiate it with a project_name to get a decorator. + Calling :meth:`PluginManager.add_hookspecs` later will discover all marked + functions if the :class:`PluginManager` uses the same project name. + """ + + __slots__ = ("project_name",) + + def __init__(self, project_name: str) -> None: + self.project_name: Final = project_name + + @overload + def __call__( + self, + function: _F, + firstresult: bool = False, + historic: bool = False, + warn_on_impl: Warning | None = None, + ) -> _F: + ... + + @overload # noqa: F811 + def __call__( # noqa: F811 + self, + function: None = ..., + firstresult: bool = ..., + historic: bool = ..., + warn_on_impl: Warning | None = ..., + ) -> Callable[[_F], _F]: + ... + + def __call__( # noqa: F811 + self, + function: _F | None = None, + firstresult: bool = False, + historic: bool = False, + warn_on_impl: Warning | None = None, + ) -> _F | Callable[[_F], _F]: + """If passed a function, directly sets attributes on the function + which will make it discoverable to :meth:`PluginManager.add_hookspecs`. + + If passed no function, returns a decorator which can be applied to a + function later using the attributes supplied. + + :param firstresult: + If ``True``, the 1:N hook call (N being the number of registered + hook implementation functions) will stop at I<=N when the I'th + function returns a non-``None`` result. See :ref:`firstresult`. + + :param historic: + If ``True``, every call to the hook will be memorized and replayed + on plugins registered after the call was made. See :ref:`historic`. 
+ + :param warn_on_impl: + If given, every implementation of this hook will trigger the given + warning. See :ref:`warn_on_impl`. + """ + + def setattr_hookspec_opts(func: _F) -> _F: + if historic and firstresult: + raise ValueError("cannot have a historic firstresult hook") + opts: HookspecOpts = { + "firstresult": firstresult, + "historic": historic, + "warn_on_impl": warn_on_impl, + } + setattr(func, self.project_name + "_spec", opts) + return func + + if function is not None: + return setattr_hookspec_opts(function) + else: + return setattr_hookspec_opts + + +@final +class HookimplMarker: + """Decorator for marking functions as hook implementations. + + Instantiate it with a ``project_name`` to get a decorator. + Calling :meth:`PluginManager.register` later will discover all marked + functions if the :class:`PluginManager` uses the same project name. + """ + + __slots__ = ("project_name",) + + def __init__(self, project_name: str) -> None: + self.project_name: Final = project_name + + @overload + def __call__( + self, + function: _F, + hookwrapper: bool = ..., + optionalhook: bool = ..., + tryfirst: bool = ..., + trylast: bool = ..., + specname: str | None = ..., + wrapper: bool = ..., + ) -> _F: + ... + + @overload # noqa: F811 + def __call__( # noqa: F811 + self, + function: None = ..., + hookwrapper: bool = ..., + optionalhook: bool = ..., + tryfirst: bool = ..., + trylast: bool = ..., + specname: str | None = ..., + wrapper: bool = ..., + ) -> Callable[[_F], _F]: + ... + + def __call__( # noqa: F811 + self, + function: _F | None = None, + hookwrapper: bool = False, + optionalhook: bool = False, + tryfirst: bool = False, + trylast: bool = False, + specname: str | None = None, + wrapper: bool = False, + ) -> _F | Callable[[_F], _F]: + """If passed a function, directly sets attributes on the function + which will make it discoverable to :meth:`PluginManager.register`. + + If passed no function, returns a decorator which can be applied to a + function later using the attributes supplied. + + :param optionalhook: + If ``True``, a missing matching hook specification will not result + in an error (by default it is an error if no matching spec is + found). See :ref:`optionalhook`. + + :param tryfirst: + If ``True``, this hook implementation will run as early as possible + in the chain of N hook implementations for a specification. See + :ref:`callorder`. + + :param trylast: + If ``True``, this hook implementation will run as late as possible + in the chain of N hook implementations for a specification. See + :ref:`callorder`. + + :param wrapper: + If ``True`` ("new-style hook wrapper"), the hook implementation + needs to execute exactly one ``yield``. The code before the + ``yield`` is run early before any non-hook-wrapper function is run. + The code after the ``yield`` is run after all non-hook-wrapper + functions have run. The ``yield`` receives the result value of the + inner calls, or raises the exception of inner calls (including + earlier hook wrapper calls). The return value of the function + becomes the return value of the hook, and a raised exception becomes + the exception of the hook. See :ref:`hookwrapper`. + + :param hookwrapper: + If ``True`` ("old-style hook wrapper"), the hook implementation + needs to execute exactly one ``yield``. The code before the + ``yield`` is run early before any non-hook-wrapper function is run. 
+            The code after the ``yield`` is run after all non-hook-wrapper
+            functions have run. The ``yield`` receives a :class:`Result` object
+            representing the exception or result outcome of the inner calls
+            (including earlier hook wrapper calls). This option is mutually
+            exclusive with ``wrapper``. See :ref:`old_style_hookwrapper`.
+
+        :param specname:
+            If provided, the given name will be used instead of the function
+            name when matching this hook implementation to a hook specification
+            during registration. See :ref:`specname`.
+
+        .. versionadded:: 1.2.0
+            The ``wrapper`` parameter.
+        """
+
+        def setattr_hookimpl_opts(func: _F) -> _F:
+            opts: HookimplOpts = {
+                "wrapper": wrapper,
+                "hookwrapper": hookwrapper,
+                "optionalhook": optionalhook,
+                "tryfirst": tryfirst,
+                "trylast": trylast,
+                "specname": specname,
+            }
+            setattr(func, self.project_name + "_impl", opts)
+            return func
+
+        if function is None:
+            return setattr_hookimpl_opts
+        else:
+            return setattr_hookimpl_opts(function)
+
+
+def normalize_hookimpl_opts(opts: HookimplOpts) -> None:
+    opts.setdefault("tryfirst", False)
+    opts.setdefault("trylast", False)
+    opts.setdefault("wrapper", False)
+    opts.setdefault("hookwrapper", False)
+    opts.setdefault("optionalhook", False)
+    opts.setdefault("specname", None)
+
+
+_PYPY = hasattr(sys, "pypy_version_info")
+
+
+def varnames(func: object) -> tuple[tuple[str, ...], tuple[str, ...]]:
+    """Return tuple of positional and keyword argument names for a function,
+    method, class or callable.
+
+    In case of a class, its ``__init__`` method is considered.
+    For methods the ``self`` parameter is not included.
+    """
+    if inspect.isclass(func):
+        try:
+            func = func.__init__
+        except AttributeError:
+            return (), ()
+    elif not inspect.isroutine(func):  # callable object?
+        try:
+            func = getattr(func, "__call__", func)
+        except Exception:
+            return (), ()
+
+    try:
+        # func MUST be a function or method here or we won't parse any args.
+        sig = inspect.signature(
+            func.__func__ if inspect.ismethod(func) else func  # type:ignore[arg-type]
+        )
+    except TypeError:
+        return (), ()
+
+    _valid_param_kinds = (
+        inspect.Parameter.POSITIONAL_ONLY,
+        inspect.Parameter.POSITIONAL_OR_KEYWORD,
+    )
+    _valid_params = {
+        name: param
+        for name, param in sig.parameters.items()
+        if param.kind in _valid_param_kinds
+    }
+    args = tuple(_valid_params)
+    defaults = (
+        tuple(
+            param.default
+            for param in _valid_params.values()
+            if param.default is not param.empty
+        )
+        or None
+    )
+
+    if defaults:
+        index = -len(defaults)
+        args, kwargs = args[:index], tuple(args[index:])
+    else:
+        kwargs = ()
+
+    # strip any implicit instance arg
+    # pypy3 uses "obj" instead of "self" for default dunder methods
+    if not _PYPY:
+        implicit_names: tuple[str, ...] = ("self",)
+    else:
+        implicit_names = ("self", "obj")
+    if args:
+        qualname: str = getattr(func, "__qualname__", "")
+        if inspect.ismethod(func) or ("." in qualname and args[0] in implicit_names):
+            args = args[1:]
+
+    return args, kwargs
+
+
+@final
+class HookRelay:
+    """Hook holder object for performing 1:N hook calls where N is the number
+    of registered plugins."""
+
+    __slots__ = ("__dict__",)
+
+    def __init__(self) -> None:
+        """:meta private:"""
+
+    if TYPE_CHECKING:
+
+        def __getattr__(self, name: str) -> HookCaller:
+            ...
+
+
+# Historical name (pluggy<=1.2), kept for backward compatibility.
+_HookRelay = HookRelay
+
+
+_CallHistory = List[Tuple[Mapping[str, object], Optional[Callable[[Any], None]]]]
+
+
+class HookCaller:
+    """A caller of all registered implementations of a hook specification."""
+
+    __slots__ = (
+        "name",
+        "spec",
+        "_hookexec",
+        "_hookimpls",
+        "_call_history",
+    )
+
+    def __init__(
+        self,
+        name: str,
+        hook_execute: _HookExec,
+        specmodule_or_class: _Namespace | None = None,
+        spec_opts: HookspecOpts | None = None,
+    ) -> None:
+        """:meta private:"""
+        #: Name of the hook getting called.
+        self.name: Final = name
+        self._hookexec: Final = hook_execute
+        self._hookimpls: Final[list[HookImpl]] = []
+        self._call_history: _CallHistory | None = None
+        # TODO: Document, or make private.
+        self.spec: HookSpec | None = None
+        if specmodule_or_class is not None:
+            assert spec_opts is not None
+            self.set_specification(specmodule_or_class, spec_opts)
+
+    # TODO: Document, or make private.
+    def has_spec(self) -> bool:
+        return self.spec is not None
+
+    # TODO: Document, or make private.
+    def set_specification(
+        self,
+        specmodule_or_class: _Namespace,
+        spec_opts: HookspecOpts,
+    ) -> None:
+        if self.spec is not None:
+            raise ValueError(
+                f"Hook {self.spec.name!r} is already registered "
+                f"within namespace {self.spec.namespace}"
+            )
+        self.spec = HookSpec(specmodule_or_class, self.name, spec_opts)
+        if spec_opts.get("historic"):
+            self._call_history = []
+
+    def is_historic(self) -> bool:
+        """Whether this caller is :ref:`historic `."""
+        return self._call_history is not None
+
+    def _remove_plugin(self, plugin: _Plugin) -> None:
+        for i, method in enumerate(self._hookimpls):
+            if method.plugin == plugin:
+                del self._hookimpls[i]
+                return
+        raise ValueError(f"plugin {plugin!r} not found")
+
+    def get_hookimpls(self) -> list[HookImpl]:
+        """Get all registered hook implementations for this hook."""
+        return self._hookimpls.copy()
+
+    def _add_hookimpl(self, hookimpl: HookImpl) -> None:
+        """Add an implementation to the callback chain."""
+        for i, method in enumerate(self._hookimpls):
+            if method.hookwrapper or method.wrapper:
+                splitpoint = i
+                break
+        else:
+            splitpoint = len(self._hookimpls)
+        if hookimpl.hookwrapper or hookimpl.wrapper:
+            start, end = splitpoint, len(self._hookimpls)
+        else:
+            start, end = 0, splitpoint
+
+        if hookimpl.trylast:
+            self._hookimpls.insert(start, hookimpl)
+        elif hookimpl.tryfirst:
+            self._hookimpls.insert(end, hookimpl)
+        else:
+            # find last non-tryfirst method
+            i = end - 1
+            while i >= start and self._hookimpls[i].tryfirst:
+                i -= 1
+            self._hookimpls.insert(i + 1, hookimpl)
+
+    def __repr__(self) -> str:
+        return f"<HookCaller {self.name!r}>"
+
+    def _verify_all_args_are_provided(self, kwargs: Mapping[str, object]) -> None:
+        # This is written to avoid expensive operations when not needed.
+        if self.spec:
+            for argname in self.spec.argnames:
+                if argname not in kwargs:
+                    notincall = ", ".join(
+                        repr(argname)
+                        for argname in self.spec.argnames
+                        # Avoid self.spec.argnames - kwargs.keys() - doesn't preserve order.
+                        if argname not in kwargs.keys()
+                    )
+                    warnings.warn(
+                        "Argument(s) {} which are declared in the hookspec "
+                        "cannot be found in this hook call".format(notincall),
+                        stacklevel=2,
+                    )
+                    break
+
+    def __call__(self, **kwargs: object) -> Any:
+        """Call the hook.
+
+        Only accepts keyword arguments, which should match the hook
+        specification.
+
+        Returns the result(s) of calling all registered plugins, see
+        :ref:`calling`.
+        """
+        assert (
+            not self.is_historic()
+        ), "Cannot directly call a historic hook - use call_historic instead."
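+        # Validation below only warns on missing spec arguments; the actual
+        # dispatch happens in self._hookexec (normally _multicall), with
+        # firstresult taken from the hook specification when one is present.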
+        self._verify_all_args_are_provided(kwargs)
+        firstresult = self.spec.opts.get("firstresult", False) if self.spec else False
+        return self._hookexec(self.name, self._hookimpls, kwargs, firstresult)
+
+    def call_historic(
+        self,
+        result_callback: Callable[[Any], None] | None = None,
+        kwargs: Mapping[str, object] | None = None,
+    ) -> None:
+        """Call the hook with given ``kwargs`` for all registered plugins and
+        for all plugins which will be registered afterwards, see
+        :ref:`historic`.
+
+        :param result_callback:
+            If provided, will be called for each non-``None`` result obtained
+            from a hook implementation.
+        """
+        assert self._call_history is not None
+        kwargs = kwargs or {}
+        self._verify_all_args_are_provided(kwargs)
+        self._call_history.append((kwargs, result_callback))
+        # Historizing hooks don't return results.
+        # Remember firstresult isn't compatible with historic.
+        res = self._hookexec(self.name, self._hookimpls, kwargs, False)
+        if result_callback is None:
+            return
+        if isinstance(res, list):
+            for x in res:
+                result_callback(x)
+
+    def call_extra(
+        self, methods: Sequence[Callable[..., object]], kwargs: Mapping[str, object]
+    ) -> Any:
+        """Call the hook with some additional temporarily participating
+        methods using the specified ``kwargs`` as call parameters, see
+        :ref:`call_extra`."""
+        assert (
+            not self.is_historic()
+        ), "Cannot directly call a historic hook - use call_historic instead."
+        self._verify_all_args_are_provided(kwargs)
+        opts: HookimplOpts = {
+            "wrapper": False,
+            "hookwrapper": False,
+            "optionalhook": False,
+            "trylast": False,
+            "tryfirst": False,
+            "specname": None,
+        }
+        hookimpls = self._hookimpls.copy()
+        for method in methods:
+            hookimpl = HookImpl(None, "", method, opts)
+            # Find last non-tryfirst nonwrapper method.
+            i = len(hookimpls) - 1
+            while (
+                i >= 0
+                and hookimpls[i].tryfirst
+                and not (hookimpls[i].hookwrapper or hookimpls[i].wrapper)
+            ):
+                i -= 1
+            hookimpls.insert(i + 1, hookimpl)
+        firstresult = self.spec.opts.get("firstresult", False) if self.spec else False
+        return self._hookexec(self.name, hookimpls, kwargs, firstresult)
+
+    def _maybe_apply_history(self, method: HookImpl) -> None:
+        """Apply call history to a new hookimpl if it is marked as historic."""
+        if self.is_historic():
+            assert self._call_history is not None
+            for kwargs, result_callback in self._call_history:
+                res = self._hookexec(self.name, [method], kwargs, False)
+                if res and result_callback is not None:
+                    # XXX: remember firstresult isn't compat with historic
+                    assert isinstance(res, list)
+                    result_callback(res[0])
+
+
+# Historical name (pluggy<=1.2), kept for backward compatibility.
+_HookCaller = HookCaller
+
+
+class _SubsetHookCaller(HookCaller):
+    """A proxy to another HookCaller which manages calls to all registered
+    plugins except the ones from remove_plugins."""
+
+    # This class is unusual: it inherits from `HookCaller` so all of
+    # the *code* runs in the class, but it delegates all underlying *data*
+    # to the original HookCaller.
+    # `subset_hook_caller` used to be implemented by creating a full-fledged
+    # HookCaller, copying all hookimpls from the original. This had problems
+    # with memory leaks (#346) and historic calls (#347), which make a proxy
+    # approach better.
+    # An alternative implementation is to use a `__getattr__`/`__getattribute__`
+    # proxy, however that adds more overhead and is more tricky to implement.
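+    # The data delegation happens in the overriding properties below:
+    # _hookimpls is recomputed from the original caller on every access,
+    # filtering out the removed plugins.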
+
+    __slots__ = (
+        "_orig",
+        "_remove_plugins",
+    )
+
+    def __init__(self, orig: HookCaller, remove_plugins: AbstractSet[_Plugin]) -> None:
+        self._orig = orig
+        self._remove_plugins = remove_plugins
+        self.name = orig.name  # type: ignore[misc]
+        self._hookexec = orig._hookexec  # type: ignore[misc]
+
+    @property  # type: ignore[misc]
+    def _hookimpls(self) -> list[HookImpl]:
+        return [
+            impl
+            for impl in self._orig._hookimpls
+            if impl.plugin not in self._remove_plugins
+        ]
+
+    @property
+    def spec(self) -> HookSpec | None:  # type: ignore[override]
+        return self._orig.spec
+
+    @property
+    def _call_history(self) -> _CallHistory | None:  # type: ignore[override]
+        return self._orig._call_history
+
+    def __repr__(self) -> str:
+        return f"<_SubsetHookCaller {self.name!r}>"
+
+
+@final
+class HookImpl:
+    """A hook implementation in a :class:`HookCaller`."""
+
+    __slots__ = (
+        "function",
+        "argnames",
+        "kwargnames",
+        "plugin",
+        "opts",
+        "plugin_name",
+        "wrapper",
+        "hookwrapper",
+        "optionalhook",
+        "tryfirst",
+        "trylast",
+    )
+
+    def __init__(
+        self,
+        plugin: _Plugin,
+        plugin_name: str,
+        function: _HookImplFunction[object],
+        hook_impl_opts: HookimplOpts,
+    ) -> None:
+        """:meta private:"""
+        #: The hook implementation function.
+        self.function: Final = function
+        argnames, kwargnames = varnames(self.function)
+        #: The positional parameter names of ``function``.
+        self.argnames: Final = argnames
+        #: The keyword parameter names of ``function``.
+        self.kwargnames: Final = kwargnames
+        #: The plugin which defined this hook implementation.
+        self.plugin: Final = plugin
+        #: The :class:`HookimplOpts` used to configure this hook implementation.
+        self.opts: Final = hook_impl_opts
+        #: The name of the plugin which defined this hook implementation.
+        self.plugin_name: Final = plugin_name
+        #: Whether the hook implementation is a :ref:`wrapper `.
+        self.wrapper: Final = hook_impl_opts["wrapper"]
+        #: Whether the hook implementation is an :ref:`old-style wrapper
+        #: `.
+        self.hookwrapper: Final = hook_impl_opts["hookwrapper"]
+        #: Whether validation against a hook specification is :ref:`optional
+        #: `.
+        self.optionalhook: Final = hook_impl_opts["optionalhook"]
+        #: Whether to try to order this hook implementation :ref:`first
+        #: `.
+        self.tryfirst: Final = hook_impl_opts["tryfirst"]
+        #: Whether to try to order this hook implementation :ref:`last
+        #: `.
+        self.trylast: Final = hook_impl_opts["trylast"]
+
+    def __repr__(self) -> str:
+        return f"<HookImpl plugin_name={self.plugin_name!r}, plugin={self.plugin!r}>"
+
+
+@final
+class HookSpec:
+    __slots__ = (
+        "namespace",
+        "function",
+        "name",
+        "argnames",
+        "kwargnames",
+        "opts",
+        "warn_on_impl",
+    )
+
+    def __init__(self, namespace: _Namespace, name: str, opts: HookspecOpts) -> None:
+        self.namespace = namespace
+        self.function: Callable[..., object] = getattr(namespace, name)
+        self.name = name
+        self.argnames, self.kwargnames = varnames(self.function)
+        self.opts = opts
+        self.warn_on_impl = opts.get("warn_on_impl")
diff --git a/venv/lib/python3.10/site-packages/pluggy/_manager.py b/venv/lib/python3.10/site-packages/pluggy/_manager.py
new file mode 100644
index 0000000..84717e6
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pluggy/_manager.py
@@ -0,0 +1,505 @@
+from __future__ import annotations
+
+import importlib.metadata
+import inspect
+import types
+import warnings
+from typing import Any
+from typing import Callable
+from typing import cast
+from typing import Final
+from typing import Iterable
+from typing import Mapping
+from typing import Sequence
+
+from .
import _tracing +from ._callers import _multicall +from ._hooks import _HookImplFunction +from ._hooks import _Namespace +from ._hooks import _Plugin +from ._hooks import _SubsetHookCaller +from ._hooks import HookCaller +from ._hooks import HookImpl +from ._hooks import HookimplOpts +from ._hooks import HookRelay +from ._hooks import HookspecOpts +from ._hooks import normalize_hookimpl_opts +from ._result import Result + + +_BeforeTrace = Callable[[str, Sequence[HookImpl], Mapping[str, Any]], None] +_AfterTrace = Callable[[Result[Any], str, Sequence[HookImpl], Mapping[str, Any]], None] + + +def _warn_for_function(warning: Warning, function: Callable[..., object]) -> None: + func = cast(types.FunctionType, function) + warnings.warn_explicit( + warning, + type(warning), + lineno=func.__code__.co_firstlineno, + filename=func.__code__.co_filename, + ) + + +class PluginValidationError(Exception): + """Plugin failed validation. + + :param plugin: The plugin which failed validation. + :param message: Error message. + """ + + def __init__(self, plugin: _Plugin, message: str) -> None: + super().__init__(message) + #: The plugin which failed validation. + self.plugin = plugin + + +class DistFacade: + """Emulate a pkg_resources Distribution""" + + def __init__(self, dist: importlib.metadata.Distribution) -> None: + self._dist = dist + + @property + def project_name(self) -> str: + name: str = self.metadata["name"] + return name + + def __getattr__(self, attr: str, default=None): + return getattr(self._dist, attr, default) + + def __dir__(self) -> list[str]: + return sorted(dir(self._dist) + ["_dist", "project_name"]) + + +class PluginManager: + """Core class which manages registration of plugin objects and 1:N hook + calling. + + You can register new hooks by calling :meth:`add_hookspecs(module_or_class) + `. + + You can register plugin objects (which contain hook implementations) by + calling :meth:`register(plugin) `. + + For debugging purposes you can call :meth:`PluginManager.enable_tracing` + which will subsequently send debug information to the trace helper. + + :param project_name: + The short project name. Prefer snake case. Make sure it's unique! + """ + + def __init__(self, project_name: str) -> None: + #: The project name. + self.project_name: Final = project_name + self._name2plugin: Final[dict[str, _Plugin]] = {} + self._plugin_distinfo: Final[list[tuple[_Plugin, DistFacade]]] = [] + #: The "hook relay", used to call a hook on all registered plugins. + #: See :ref:`calling`. + self.hook: Final = HookRelay() + #: The tracing entry point. See :ref:`tracing`. + self.trace: Final[_tracing.TagTracerSub] = _tracing.TagTracer().get( + "pluginmanage" + ) + self._inner_hookexec = _multicall + + def _hookexec( + self, + hook_name: str, + methods: Sequence[HookImpl], + kwargs: Mapping[str, object], + firstresult: bool, + ) -> object | list[object]: + # called from all hookcaller instances. + # enable_tracing will set its own wrapping function at self._inner_hookexec + return self._inner_hookexec(hook_name, methods, kwargs, firstresult) + + def register(self, plugin: _Plugin, name: str | None = None) -> str | None: + """Register a plugin and return its name. + + :param name: + The name under which to register the plugin. If not specified, a + name is generated using :func:`get_canonical_name`. + + :returns: + The plugin name. If the name is blocked from registering, returns + ``None``. + + If the plugin is already registered, raises a :exc:`ValueError`. 
+ """ + plugin_name = name or self.get_canonical_name(plugin) + + if plugin_name in self._name2plugin: + if self._name2plugin.get(plugin_name, -1) is None: + return None # blocked plugin, return None to indicate no registration + raise ValueError( + "Plugin name already registered: %s=%s\n%s" + % (plugin_name, plugin, self._name2plugin) + ) + + if plugin in self._name2plugin.values(): + raise ValueError( + "Plugin already registered under a different name: %s=%s\n%s" + % (plugin_name, plugin, self._name2plugin) + ) + + # XXX if an error happens we should make sure no state has been + # changed at point of return + self._name2plugin[plugin_name] = plugin + + # register matching hook implementations of the plugin + for name in dir(plugin): + hookimpl_opts = self.parse_hookimpl_opts(plugin, name) + if hookimpl_opts is not None: + normalize_hookimpl_opts(hookimpl_opts) + method: _HookImplFunction[object] = getattr(plugin, name) + hookimpl = HookImpl(plugin, plugin_name, method, hookimpl_opts) + name = hookimpl_opts.get("specname") or name + hook: HookCaller | None = getattr(self.hook, name, None) + if hook is None: + hook = HookCaller(name, self._hookexec) + setattr(self.hook, name, hook) + elif hook.has_spec(): + self._verify_hook(hook, hookimpl) + hook._maybe_apply_history(hookimpl) + hook._add_hookimpl(hookimpl) + return plugin_name + + def parse_hookimpl_opts(self, plugin: _Plugin, name: str) -> HookimplOpts | None: + """Try to obtain a hook implementation from an item with the given name + in the given plugin which is being searched for hook impls. + + :returns: + The parsed hookimpl options, or None to skip the given item. + + This method can be overridden by ``PluginManager`` subclasses to + customize how hook implementation are picked up. By default, returns the + options for items decorated with :class:`HookimplMarker`. + """ + method: object = getattr(plugin, name) + if not inspect.isroutine(method): + return None + try: + res: HookimplOpts | None = getattr( + method, self.project_name + "_impl", None + ) + except Exception: + res = {} # type: ignore[assignment] + if res is not None and not isinstance(res, dict): + # false positive + res = None # type:ignore[unreachable] + return res + + def unregister( + self, plugin: _Plugin | None = None, name: str | None = None + ) -> Any | None: + """Unregister a plugin and all of its hook implementations. + + The plugin can be specified either by the plugin object or the plugin + name. If both are specified, they must agree. + + Returns the unregistered plugin, or ``None`` if not found. 
+ """ + if name is None: + assert plugin is not None, "one of name or plugin needs to be specified" + name = self.get_name(plugin) + assert name is not None, "plugin is not registered" + + if plugin is None: + plugin = self.get_plugin(name) + if plugin is None: + return None + + hookcallers = self.get_hookcallers(plugin) + if hookcallers: + for hookcaller in hookcallers: + hookcaller._remove_plugin(plugin) + + # if self._name2plugin[name] == None registration was blocked: ignore + if self._name2plugin.get(name): + assert name is not None + del self._name2plugin[name] + + return plugin + + def set_blocked(self, name: str) -> None: + """Block registrations of the given name, unregister if already registered.""" + self.unregister(name=name) + self._name2plugin[name] = None + + def is_blocked(self, name: str) -> bool: + """Return whether the given plugin name is blocked.""" + return name in self._name2plugin and self._name2plugin[name] is None + + def add_hookspecs(self, module_or_class: _Namespace) -> None: + """Add new hook specifications defined in the given ``module_or_class``. + + Functions are recognized as hook specifications if they have been + decorated with a matching :class:`HookspecMarker`. + """ + names = [] + for name in dir(module_or_class): + spec_opts = self.parse_hookspec_opts(module_or_class, name) + if spec_opts is not None: + hc: HookCaller | None = getattr(self.hook, name, None) + if hc is None: + hc = HookCaller(name, self._hookexec, module_or_class, spec_opts) + setattr(self.hook, name, hc) + else: + # Plugins registered this hook without knowing the spec. + hc.set_specification(module_or_class, spec_opts) + for hookfunction in hc.get_hookimpls(): + self._verify_hook(hc, hookfunction) + names.append(name) + + if not names: + raise ValueError( + f"did not find any {self.project_name!r} hooks in {module_or_class!r}" + ) + + def parse_hookspec_opts( + self, module_or_class: _Namespace, name: str + ) -> HookspecOpts | None: + """Try to obtain a hook specification from an item with the given name + in the given module or class which is being searched for hook specs. + + :returns: + The parsed hookspec options for defining a hook, or None to skip the + given item. + + This method can be overridden by ``PluginManager`` subclasses to + customize how hook specifications are picked up. By default, returns the + options for items decorated with :class:`HookspecMarker`. + """ + method = getattr(module_or_class, name) + opts: HookspecOpts | None = getattr(method, self.project_name + "_spec", None) + return opts + + def get_plugins(self) -> set[Any]: + """Return a set of all registered plugin objects.""" + return set(self._name2plugin.values()) + + def is_registered(self, plugin: _Plugin) -> bool: + """Return whether the plugin is already registered.""" + return any(plugin == val for val in self._name2plugin.values()) + + def get_canonical_name(self, plugin: _Plugin) -> str: + """Return a canonical name for a plugin object. + + Note that a plugin may be registered under a different name + specified by the caller of :meth:`register(plugin, name) `. + To obtain the name of a registered plugin use :meth:`get_name(plugin) + ` instead. 
+ """ + name: str | None = getattr(plugin, "__name__", None) + return name or str(id(plugin)) + + def get_plugin(self, name: str) -> Any | None: + """Return the plugin registered under the given name, if any.""" + return self._name2plugin.get(name) + + def has_plugin(self, name: str) -> bool: + """Return whether a plugin with the given name is registered.""" + return self.get_plugin(name) is not None + + def get_name(self, plugin: _Plugin) -> str | None: + """Return the name the plugin is registered under, or ``None`` if + is isn't.""" + for name, val in self._name2plugin.items(): + if plugin == val: + return name + return None + + def _verify_hook(self, hook: HookCaller, hookimpl: HookImpl) -> None: + if hook.is_historic() and (hookimpl.hookwrapper or hookimpl.wrapper): + raise PluginValidationError( + hookimpl.plugin, + "Plugin %r\nhook %r\nhistoric incompatible with yield/wrapper/hookwrapper" + % (hookimpl.plugin_name, hook.name), + ) + + assert hook.spec is not None + if hook.spec.warn_on_impl: + _warn_for_function(hook.spec.warn_on_impl, hookimpl.function) + + # positional arg checking + notinspec = set(hookimpl.argnames) - set(hook.spec.argnames) + if notinspec: + raise PluginValidationError( + hookimpl.plugin, + "Plugin %r for hook %r\nhookimpl definition: %s\n" + "Argument(s) %s are declared in the hookimpl but " + "can not be found in the hookspec" + % ( + hookimpl.plugin_name, + hook.name, + _formatdef(hookimpl.function), + notinspec, + ), + ) + + if ( + hookimpl.wrapper or hookimpl.hookwrapper + ) and not inspect.isgeneratorfunction(hookimpl.function): + raise PluginValidationError( + hookimpl.plugin, + "Plugin %r for hook %r\nhookimpl definition: %s\n" + "Declared as wrapper=True or hookwrapper=True " + "but function is not a generator function" + % (hookimpl.plugin_name, hook.name, _formatdef(hookimpl.function)), + ) + + if hookimpl.wrapper and hookimpl.hookwrapper: + raise PluginValidationError( + hookimpl.plugin, + "Plugin %r for hook %r\nhookimpl definition: %s\n" + "The wrapper=True and hookwrapper=True options are mutually exclusive" + % (hookimpl.plugin_name, hook.name, _formatdef(hookimpl.function)), + ) + + def check_pending(self) -> None: + """Verify that all hooks which have not been verified against a + hook specification are optional, otherwise raise + :exc:`PluginValidationError`.""" + for name in self.hook.__dict__: + if name[0] != "_": + hook: HookCaller = getattr(self.hook, name) + if not hook.has_spec(): + for hookimpl in hook.get_hookimpls(): + if not hookimpl.optionalhook: + raise PluginValidationError( + hookimpl.plugin, + "unknown hook %r in plugin %r" + % (name, hookimpl.plugin), + ) + + def load_setuptools_entrypoints(self, group: str, name: str | None = None) -> int: + """Load modules from querying the specified setuptools ``group``. + + :param group: + Entry point group to load plugins. + :param name: + If given, loads only plugins with the given ``name``. + + :return: + The number of plugins loaded by this call. 
+ """ + count = 0 + for dist in list(importlib.metadata.distributions()): + for ep in dist.entry_points: + if ( + ep.group != group + or (name is not None and ep.name != name) + # already registered + or self.get_plugin(ep.name) + or self.is_blocked(ep.name) + ): + continue + plugin = ep.load() + self.register(plugin, name=ep.name) + self._plugin_distinfo.append((plugin, DistFacade(dist))) + count += 1 + return count + + def list_plugin_distinfo(self) -> list[tuple[_Plugin, DistFacade]]: + """Return a list of (plugin, distinfo) pairs for all + setuptools-registered plugins.""" + return list(self._plugin_distinfo) + + def list_name_plugin(self) -> list[tuple[str, _Plugin]]: + """Return a list of (name, plugin) pairs for all registered plugins.""" + return list(self._name2plugin.items()) + + def get_hookcallers(self, plugin: _Plugin) -> list[HookCaller] | None: + """Get all hook callers for the specified plugin. + + :returns: + The hook callers, or ``None`` if ``plugin`` is not registered in + this plugin manager. + """ + if self.get_name(plugin) is None: + return None + hookcallers = [] + for hookcaller in self.hook.__dict__.values(): + for hookimpl in hookcaller.get_hookimpls(): + if hookimpl.plugin is plugin: + hookcallers.append(hookcaller) + return hookcallers + + def add_hookcall_monitoring( + self, before: _BeforeTrace, after: _AfterTrace + ) -> Callable[[], None]: + """Add before/after tracing functions for all hooks. + + Returns an undo function which, when called, removes the added tracers. + + ``before(hook_name, hook_impls, kwargs)`` will be called ahead + of all hook calls and receive a hookcaller instance, a list + of HookImpl instances and the keyword arguments for the hook call. + + ``after(outcome, hook_name, hook_impls, kwargs)`` receives the + same arguments as ``before`` but also a :class:`~pluggy.Result` object + which represents the result of the overall hook call. + """ + oldcall = self._inner_hookexec + + def traced_hookexec( + hook_name: str, + hook_impls: Sequence[HookImpl], + caller_kwargs: Mapping[str, object], + firstresult: bool, + ) -> object | list[object]: + before(hook_name, hook_impls, caller_kwargs) + outcome = Result.from_call( + lambda: oldcall(hook_name, hook_impls, caller_kwargs, firstresult) + ) + after(outcome, hook_name, hook_impls, caller_kwargs) + return outcome.get_result() + + self._inner_hookexec = traced_hookexec + + def undo() -> None: + self._inner_hookexec = oldcall + + return undo + + def enable_tracing(self) -> Callable[[], None]: + """Enable tracing of hook calls. + + Returns an undo function which, when called, removes the added tracing. 
+ """ + hooktrace = self.trace.root.get("hook") + + def before( + hook_name: str, methods: Sequence[HookImpl], kwargs: Mapping[str, object] + ) -> None: + hooktrace.root.indent += 1 + hooktrace(hook_name, kwargs) + + def after( + outcome: Result[object], + hook_name: str, + methods: Sequence[HookImpl], + kwargs: Mapping[str, object], + ) -> None: + if outcome.exception is None: + hooktrace("finish", hook_name, "-->", outcome.get_result()) + hooktrace.root.indent -= 1 + + return self.add_hookcall_monitoring(before, after) + + def subset_hook_caller( + self, name: str, remove_plugins: Iterable[_Plugin] + ) -> HookCaller: + """Return a proxy :class:`~pluggy.HookCaller` instance for the named + method which manages calls to all registered plugins except the ones + from remove_plugins.""" + orig: HookCaller = getattr(self.hook, name) + plugins_to_remove = {plug for plug in remove_plugins if hasattr(plug, name)} + if plugins_to_remove: + return _SubsetHookCaller(orig, plugins_to_remove) + return orig + + +def _formatdef(func: Callable[..., object]) -> str: + return f"{func.__name__}{inspect.signature(func)}" diff --git a/venv/lib/python3.10/site-packages/pluggy/_result.py b/venv/lib/python3.10/site-packages/pluggy/_result.py new file mode 100644 index 0000000..29859eb --- /dev/null +++ b/venv/lib/python3.10/site-packages/pluggy/_result.py @@ -0,0 +1,118 @@ +""" +Hook wrapper "result" utilities. +""" +from __future__ import annotations + +from types import TracebackType +from typing import Callable +from typing import cast +from typing import final +from typing import Generator +from typing import Generic +from typing import NoReturn +from typing import Optional +from typing import Tuple +from typing import Type +from typing import TypeVar + + +_ExcInfo = Tuple[Type[BaseException], BaseException, Optional[TracebackType]] +ResultType = TypeVar("ResultType") + + +def _raise_wrapfail( + wrap_controller: ( + Generator[None, Result[ResultType], None] | Generator[None, object, object] + ), + msg: str, +) -> NoReturn: + co = wrap_controller.gi_code + raise RuntimeError( + "wrap_controller at %r %s:%d %s" + % (co.co_name, co.co_filename, co.co_firstlineno, msg) + ) + + +class HookCallError(Exception): + """Hook was called incorrectly.""" + + +@final +class Result(Generic[ResultType]): + """An object used to inspect and set the result in a :ref:`hook wrapper + `.""" + + __slots__ = ("_result", "_exception") + + def __init__( + self, + result: ResultType | None, + exception: BaseException | None, + ) -> None: + """:meta private:""" + self._result = result + self._exception = exception + + @property + def excinfo(self) -> _ExcInfo | None: + """:meta private:""" + exc = self._exception + if exc is None: + return None + else: + return (type(exc), exc, exc.__traceback__) + + @property + def exception(self) -> BaseException | None: + """:meta private:""" + return self._exception + + @classmethod + def from_call(cls, func: Callable[[], ResultType]) -> Result[ResultType]: + """:meta private:""" + __tracebackhide__ = True + result = exception = None + try: + result = func() + except BaseException as exc: + exception = exc + return cls(result, exception) + + def force_result(self, result: ResultType) -> None: + """Force the result(s) to ``result``. + + If the hook was marked as a ``firstresult`` a single value should + be set, otherwise set a (modified) list of results. Any exceptions + found during invocation will be deleted. + + This overrides any previous result or exception. 
+ """ + self._result = result + self._exception = None + + def force_exception(self, exception: BaseException) -> None: + """Force the result to fail with ``exception``. + + This overrides any previous result or exception. + + .. versionadded:: 1.1.0 + """ + self._result = None + self._exception = exception + + def get_result(self) -> ResultType: + """Get the result(s) for this hook call. + + If the hook was marked as a ``firstresult`` only a single value + will be returned, otherwise a list of results. + """ + __tracebackhide__ = True + exc = self._exception + if exc is None: + return cast(ResultType, self._result) + else: + raise exc.with_traceback(exc.__traceback__) + + +# Historical name (pluggy<=1.2), kept for backward compatibility. +_Result = Result diff --git a/venv/lib/python3.10/site-packages/pluggy/_tracing.py b/venv/lib/python3.10/site-packages/pluggy/_tracing.py new file mode 100644 index 0000000..de1e13a --- /dev/null +++ b/venv/lib/python3.10/site-packages/pluggy/_tracing.py @@ -0,0 +1,72 @@ +""" +Tracing utils +""" +from __future__ import annotations + +from typing import Any +from typing import Callable +from typing import Sequence +from typing import Tuple + + +_Writer = Callable[[str], object] +_Processor = Callable[[Tuple[str, ...], Tuple[Any, ...]], object] + + +class TagTracer: + def __init__(self) -> None: + self._tags2proc: dict[tuple[str, ...], _Processor] = {} + self._writer: _Writer | None = None + self.indent = 0 + + def get(self, name: str) -> TagTracerSub: + return TagTracerSub(self, (name,)) + + def _format_message(self, tags: Sequence[str], args: Sequence[object]) -> str: + if isinstance(args[-1], dict): + extra = args[-1] + args = args[:-1] + else: + extra = {} + + content = " ".join(map(str, args)) + indent = " " * self.indent + + lines = ["{}{} [{}]\n".format(indent, content, ":".join(tags))] + + for name, value in extra.items(): + lines.append(f"{indent} {name}: {value}\n") + + return "".join(lines) + + def _processmessage(self, tags: tuple[str, ...], args: tuple[object, ...]) -> None: + if self._writer is not None and args: + self._writer(self._format_message(tags, args)) + try: + processor = self._tags2proc[tags] + except KeyError: + pass + else: + processor(tags, args) + + def setwriter(self, writer: _Writer | None) -> None: + self._writer = writer + + def setprocessor(self, tags: str | tuple[str, ...], processor: _Processor) -> None: + if isinstance(tags, str): + tags = tuple(tags.split(":")) + else: + assert isinstance(tags, tuple) + self._tags2proc[tags] = processor + + +class TagTracerSub: + def __init__(self, root: TagTracer, tags: tuple[str, ...]) -> None: + self.root = root + self.tags = tags + + def __call__(self, *args: object) -> None: + self.root._processmessage(self.tags, args) + + def get(self, name: str) -> TagTracerSub: + return self.__class__(self.root, self.tags + (name,)) diff --git a/venv/lib/python3.10/site-packages/pluggy/_version.py b/venv/lib/python3.10/site-packages/pluggy/_version.py new file mode 100644 index 0000000..4a742d2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pluggy/_version.py @@ -0,0 +1,4 @@ +# file generated by setuptools_scm +# don't change, don't track in version control +__version__ = version = '1.3.0' +__version_tuple__ = version_tuple = (1, 3, 0) diff --git a/venv/lib/python3.10/site-packages/pluggy/py.typed b/venv/lib/python3.10/site-packages/pluggy/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/venv/lib/python3.10/site-packages/py-1.11.0.dist-info/INSTALLER 
b/venv/lib/python3.10/site-packages/py-1.11.0.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/venv/lib/python3.10/site-packages/py-1.11.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/lib/python3.10/site-packages/py-1.11.0.dist-info/LICENSE b/venv/lib/python3.10/site-packages/py-1.11.0.dist-info/LICENSE new file mode 100644 index 0000000..31ecdfb --- /dev/null +++ b/venv/lib/python3.10/site-packages/py-1.11.0.dist-info/LICENSE @@ -0,0 +1,19 @@ + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. + diff --git a/venv/lib/python3.10/site-packages/py-1.11.0.dist-info/METADATA b/venv/lib/python3.10/site-packages/py-1.11.0.dist-info/METADATA new file mode 100644 index 0000000..a14febe --- /dev/null +++ b/venv/lib/python3.10/site-packages/py-1.11.0.dist-info/METADATA @@ -0,0 +1,69 @@ +Metadata-Version: 2.1 +Name: py +Version: 1.11.0 +Summary: library with cross-python path, ini-parsing, io, code, log facilities +Home-page: https://py.readthedocs.io/ +Author: holger krekel, Ronny Pfannschmidt, Benjamin Peterson and others +Author-email: pytest-dev@python.org +License: MIT license +Platform: unix +Platform: linux +Platform: osx +Platform: cygwin +Platform: win32 +Classifier: Development Status :: 6 - Mature +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: POSIX +Classifier: Operating System :: Microsoft :: Windows +Classifier: Operating System :: MacOS :: MacOS X +Classifier: Topic :: Software Development :: Testing +Classifier: Topic :: Software Development :: Libraries +Classifier: Topic :: Utilities +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.* + +.. image:: https://img.shields.io/pypi/v/py.svg + :target: https://pypi.org/project/py + +.. 
image:: https://img.shields.io/conda/vn/conda-forge/py.svg + :target: https://anaconda.org/conda-forge/py + +.. image:: https://img.shields.io/pypi/pyversions/py.svg + :target: https://pypi.org/project/py + +.. image:: https://github.com/pytest-dev/py/workflows/build/badge.svg + :target: https://github.com/pytest-dev/py/actions + + +**NOTE**: this library is in **maintenance mode** and should not be used in new code. + +The py lib is a Python development support library featuring +the following tools and modules: + +* ``py.path``: uniform local and svn path objects -> please use pathlib/pathlib2 instead +* ``py.apipkg``: explicit API control and lazy-importing -> please use the standalone package instead +* ``py.iniconfig``: easy parsing of .ini files -> please use the standalone package instead +* ``py.code``: dynamic code generation and introspection (deprecated, moved to ``pytest`` as a implementation detail). + +**NOTE**: prior to the 1.4 release this distribution used to +contain py.test which is now its own package, see https://docs.pytest.org + +For questions and more information please visit https://py.readthedocs.io + +Bugs and issues: https://github.com/pytest-dev/py + +Authors: Holger Krekel and others, 2004-2017 + + diff --git a/venv/lib/python3.10/site-packages/py-1.11.0.dist-info/RECORD b/venv/lib/python3.10/site-packages/py-1.11.0.dist-info/RECORD new file mode 100644 index 0000000..1b64d39 --- /dev/null +++ b/venv/lib/python3.10/site-packages/py-1.11.0.dist-info/RECORD @@ -0,0 +1,101 @@ +py-1.11.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +py-1.11.0.dist-info/LICENSE,sha256=KvaAw570k_uCgwNW0dPfGstaBgM8ui3sehniHKp3qGY,1061 +py-1.11.0.dist-info/METADATA,sha256=j1AvLZH7HqTO06dYJbYYGypPxkhP9IZjlTPSOY82ehM,2811 +py-1.11.0.dist-info/RECORD,, +py-1.11.0.dist-info/WHEEL,sha256=WzZ8cwjh8l0jtULNjYq1Hpr-WCqCRgPr--TX4P5I1Wo,110 +py-1.11.0.dist-info/top_level.txt,sha256=rwh8_ukTaGscjyhGkBVcsGOMdc-Cfdz2QH7BKGENv-4,3 +py/__init__.py,sha256=56vBwkYKqNTj2StRTFjqa-p51_y6qVkvoyj10NyThtY,6022 +py/__init__.pyi,sha256=J0oNF3G0rcZL521oXyfWg7T053Spb2DmB5eDe40LcpY,341 +py/__metainfo.py,sha256=-APUcNtmuKgbYF8JfzlEyULMfp67uDDnRFKiu9nmxD0,55 +py/__pycache__/__init__.cpython-310.pyc,, +py/__pycache__/__metainfo.cpython-310.pyc,, +py/__pycache__/_builtin.cpython-310.pyc,, +py/__pycache__/_error.cpython-310.pyc,, +py/__pycache__/_std.cpython-310.pyc,, +py/__pycache__/_version.cpython-310.pyc,, +py/__pycache__/_xmlgen.cpython-310.pyc,, +py/__pycache__/test.cpython-310.pyc,, +py/_builtin.py,sha256=c9wCmZ0nsZtFARJoZ5Ia7RxJuBo1Bp7IHjLC5uQvIug,4021 +py/_code/__init__.py,sha256=PsNXpJtPfle_IbAgLXQTO5YJyHi8N1xR8YtetmLs1Ac,46 +py/_code/__pycache__/__init__.cpython-310.pyc,, +py/_code/__pycache__/_assertionnew.cpython-310.pyc,, +py/_code/__pycache__/_assertionold.cpython-310.pyc,, +py/_code/__pycache__/_py2traceback.cpython-310.pyc,, +py/_code/__pycache__/assertion.cpython-310.pyc,, +py/_code/__pycache__/code.cpython-310.pyc,, +py/_code/__pycache__/source.cpython-310.pyc,, +py/_code/_assertionnew.py,sha256=52ADFyZkW2aks5iFFKStINwz_2fFTomBBz40AplZ4vI,11450 +py/_code/_assertionold.py,sha256=HaDKP9esnh95ZUTZRH2gUcjGFHK4MAHi8Bk18rFBycA,17869 +py/_code/_py2traceback.py,sha256=QdRC-rUpHkhtfRq5EuBub-y6Tna_Z5BlXqBYtvf0-hE,2765 +py/_code/assertion.py,sha256=UgPH8qihF0qOIWGK-DR-usrJZztz-Njj-0cBuuqwjug,3174 +py/_code/code.py,sha256=5fTjcWOdqd8Xm37g82knNL2uK4ymp9yLpnmQrc9uWzI,27492 +py/_code/source.py,sha256=hZIzxUbKhgOElxeaiYlxEisxOevfg_OgxugXxpbMmGA,14050 
+py/_error.py,sha256=59i7uYaoQlEB1QyRakvuIHh09fKGAOC521R4Rb1KVcI,2917 +py/_io/__init__.py,sha256=mroFkl-vhr0GhoOU33DR8CW5a23AmWEMkYd0Xkrn9gQ,29 +py/_io/__pycache__/__init__.cpython-310.pyc,, +py/_io/__pycache__/capture.cpython-310.pyc,, +py/_io/__pycache__/saferepr.cpython-310.pyc,, +py/_io/__pycache__/terminalwriter.cpython-310.pyc,, +py/_io/capture.py,sha256=UD23HRIjE9sZs70RaPJj5Zk8XlKSqJpqMR8-AqlOv80,11652 +py/_io/saferepr.py,sha256=vPzOq5XoGYzdTf5-zn3_2ib6w4IdPP2URwenkDkMO8s,2483 +py/_io/terminalwriter.py,sha256=bKN8Gnd5gKZeXALbCLZkfzkjF-jGbXPyID_lxCGezX4,14714 +py/_log/__init__.py,sha256=2GE1ao7mud57-K6VXgmItZJsMDJBR500Xj7_-ou_jY4,74 +py/_log/__pycache__/__init__.cpython-310.pyc,, +py/_log/__pycache__/log.cpython-310.pyc,, +py/_log/__pycache__/warning.cpython-310.pyc,, +py/_log/log.py,sha256=arQ8lvZUIPlwDo6ffg6lNvAQ0x8U1yPRhkMLtHUQKx8,6003 +py/_log/warning.py,sha256=wufxpNU8YBXKNNcCZsZnaJaaNuKEjuvsIa1V-HE6YIk,2565 +py/_path/__init__.py,sha256=uBkaNhYAPiTOe8cj8WWD7rpM06XR6H0E3KghK6MgBpA,32 +py/_path/__pycache__/__init__.cpython-310.pyc,, +py/_path/__pycache__/cacheutil.cpython-310.pyc,, +py/_path/__pycache__/common.cpython-310.pyc,, +py/_path/__pycache__/local.cpython-310.pyc,, +py/_path/__pycache__/svnurl.cpython-310.pyc,, +py/_path/__pycache__/svnwc.cpython-310.pyc,, +py/_path/cacheutil.py,sha256=jQ0wk4Goqr_bIE8wGdr-CTiMD6dpcgdqyngGmMO48pY,3333 +py/_path/common.py,sha256=EC18Pl6zYGGMzHkDgGNbLC2W23ajtDwHMJOm3jNMdOA,14818 +py/_path/local.py,sha256=-QdTI95H2gtAPAfE4WhvRQq_2qMjlNBVRSp6_reg7kE,36759 +py/_path/svnurl.py,sha256=OC0w9p_pNpSncwgvD61Pcr4r2NrFztYb-OngD8RzNi8,14715 +py/_path/svnwc.py,sha256=IKJkzNwevB7zxxW9OIhH5n4wesAnJQcJTgxjxdgcqUk,43825 +py/_process/__init__.py,sha256=e7LQPDo7Q-LR9VjcRithvT4UoszeZ80NEeUvc9j4H-o,40 +py/_process/__pycache__/__init__.cpython-310.pyc,, +py/_process/__pycache__/cmdexec.cpython-310.pyc,, +py/_process/__pycache__/forkedfunc.cpython-310.pyc,, +py/_process/__pycache__/killproc.cpython-310.pyc,, +py/_process/cmdexec.py,sha256=bTtnRydYMvW5w-K_qzRRRgycU8p4IfWQB5ymzLXMdkU,1814 +py/_process/forkedfunc.py,sha256=ZTGHp8kp5Z1icj0TonoPRmpcm64pyaVVfiRC9c5TnGU,3692 +py/_process/killproc.py,sha256=0fj_w_A8Mi_ZBJd9Koy_NnmMNoNYttb713943WxTVxw,648 +py/_std.py,sha256=JnzTePDF0TNzPKjYHIRMKwuwzE6bvLOV8q9r7FlLZZ8,668 +py/_vendored_packages/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +py/_vendored_packages/__pycache__/__init__.cpython-310.pyc,, +py/_vendored_packages/apipkg-2.0.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +py/_vendored_packages/apipkg-2.0.0.dist-info/LICENSE,sha256=6J7tEHTTqUMZi6E5uAhE9bRFuGC7p0qK6twGEFZhZOo,1054 +py/_vendored_packages/apipkg-2.0.0.dist-info/METADATA,sha256=GqNwkxraK5UTxObLVXTLc2UqktOPwZnKqdk2ThzHX0A,4292 +py/_vendored_packages/apipkg-2.0.0.dist-info/RECORD,sha256=VqARwZMQSTLsSY4QcLChtdNYtH1_llKRb1sGiK7wRm4,801 +py/_vendored_packages/apipkg-2.0.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +py/_vendored_packages/apipkg-2.0.0.dist-info/WHEEL,sha256=WzZ8cwjh8l0jtULNjYq1Hpr-WCqCRgPr--TX4P5I1Wo,110 +py/_vendored_packages/apipkg-2.0.0.dist-info/top_level.txt,sha256=3TGS6nmN7kjxhUK4LpPCB3QkQI34QYGrT0ZQGWajoZ8,7 +py/_vendored_packages/apipkg/__init__.py,sha256=gpbD3O57S9f-LsO2e-XwI6IGISayicfnCq3B5y_8frg,6978 +py/_vendored_packages/apipkg/__pycache__/__init__.cpython-310.pyc,, +py/_vendored_packages/apipkg/__pycache__/version.cpython-310.pyc,, +py/_vendored_packages/apipkg/version.py,sha256=bgZFg-f3UKhgE-z2w8RoFrwqRBzJBZkM4_jKFiYB9eU,142 
+py/_vendored_packages/iniconfig-1.1.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +py/_vendored_packages/iniconfig-1.1.1.dist-info/LICENSE,sha256=KvaAw570k_uCgwNW0dPfGstaBgM8ui3sehniHKp3qGY,1061 +py/_vendored_packages/iniconfig-1.1.1.dist-info/METADATA,sha256=_4-oFKpRXuZv5rzepScpXRwhq6DzqsgbnA5ZpgMUMcs,2405 +py/_vendored_packages/iniconfig-1.1.1.dist-info/RECORD,sha256=13Pl7e4y-9Te0285E_6IMvnDQzT4NawZCXhkodtXlk4,863 +py/_vendored_packages/iniconfig-1.1.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +py/_vendored_packages/iniconfig-1.1.1.dist-info/WHEEL,sha256=ADKeyaGyKF5DwBNE0sRE5pvW-bSkFMJfBuhzZ3rceP4,110 +py/_vendored_packages/iniconfig-1.1.1.dist-info/top_level.txt,sha256=7KfM0fugdlToj9UW7enKXk2HYALQD8qHiyKtjhSzgN8,10 +py/_vendored_packages/iniconfig/__init__.py,sha256=-pBe5AF_6aAwo1CxJQ8i_zJq6ejc6IxHta7qk2tNJhY,5208 +py/_vendored_packages/iniconfig/__init__.pyi,sha256=-4KOctzq28ohRmTZsqlH6aylyFqsNKxYqtk1dteypi4,1205 +py/_vendored_packages/iniconfig/__pycache__/__init__.cpython-310.pyc,, +py/_vendored_packages/iniconfig/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +py/_version.py,sha256=Xs0eR54RO9PHie_bsnHE9MaEmKMBiyxDAkf-poAVEX0,144 +py/_xmlgen.py,sha256=y-PCg1hZpIozJi7GXSRZv6saT_0nnNZ2D-6ue_A2xww,8364 +py/error.pyi,sha256=fQOaF1TOx_pK1StqWC_6d6DAGzSuPJ6vR6Fd_5lRol0,3409 +py/iniconfig.pyi,sha256=-4KOctzq28ohRmTZsqlH6aylyFqsNKxYqtk1dteypi4,1205 +py/io.pyi,sha256=nuC3RIVMXOp-xsaXBbPYNZHxzcCEHgDdIpS9yRmJR-g,5277 +py/path.pyi,sha256=OmDEqkp756dcWHq10Gwaw8pXtIABAdbg9mSAUCQPPyk,7168 +py/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +py/test.py,sha256=1VLPdbBKEOai2WAKABJAbVdRfcJxtff2x2mXNmQgDL8,222 +py/xml.pyi,sha256=SBnALd6w7VwqrGYtEm4ESJ_u9iD7LVH7LWFZ3Y7xAoo,787 diff --git a/venv/lib/python3.10/site-packages/py-1.11.0.dist-info/WHEEL b/venv/lib/python3.10/site-packages/py-1.11.0.dist-info/WHEEL new file mode 100644 index 0000000..b733a60 --- /dev/null +++ b/venv/lib/python3.10/site-packages/py-1.11.0.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.37.0) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/venv/lib/python3.10/site-packages/py-1.11.0.dist-info/top_level.txt b/venv/lib/python3.10/site-packages/py-1.11.0.dist-info/top_level.txt new file mode 100644 index 0000000..edfce78 --- /dev/null +++ b/venv/lib/python3.10/site-packages/py-1.11.0.dist-info/top_level.txt @@ -0,0 +1 @@ +py diff --git a/venv/lib/python3.10/site-packages/py/__init__.py b/venv/lib/python3.10/site-packages/py/__init__.py new file mode 100644 index 0000000..b892ce1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/py/__init__.py @@ -0,0 +1,156 @@ +""" +pylib: rapid testing and development utils + +this module uses apipkg.py for lazy-loading sub modules +and classes. The initpkg-dictionary below specifies +name->value mappings where value can be another namespace +dictionary or an import path. + +(c) Holger Krekel and others, 2004-2014 +""" +from py._error import error + +try: + from py._vendored_packages import apipkg + lib_not_mangled_by_packagers = True + vendor_prefix = '._vendored_packages.' 
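+    # (Editor's note, not in the original source: if the vendored apipkg
+    # import succeeds, the exportdefs below resolve against py's bundled
+    # copy; a packager that unvendored it falls back to a plain apipkg.)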
+except ImportError: + import apipkg + lib_not_mangled_by_packagers = False + vendor_prefix = '' + +try: + from ._version import version as __version__ +except ImportError: + # broken installation, we don't even try + __version__ = "unknown" + + +apipkg.initpkg(__name__, attr={'_apipkg': apipkg, 'error': error}, exportdefs={ + # access to all standard lib modules + 'std': '._std:std', + + '_pydir' : '.__metainfo:pydir', + 'version': 'py:__version__', # backward compatibility + + # pytest-2.0 has a flat namespace, we use alias modules + # to keep old references compatible + 'test' : 'pytest', + + # hook into the top-level standard library + 'process' : { + '__doc__' : '._process:__doc__', + 'cmdexec' : '._process.cmdexec:cmdexec', + 'kill' : '._process.killproc:kill', + 'ForkedFunc' : '._process.forkedfunc:ForkedFunc', + }, + + 'apipkg' : { + 'initpkg' : vendor_prefix + 'apipkg:initpkg', + 'ApiModule' : vendor_prefix + 'apipkg:ApiModule', + }, + + 'iniconfig' : { + 'IniConfig' : vendor_prefix + 'iniconfig:IniConfig', + 'ParseError' : vendor_prefix + 'iniconfig:ParseError', + }, + + 'path' : { + '__doc__' : '._path:__doc__', + 'svnwc' : '._path.svnwc:SvnWCCommandPath', + 'svnurl' : '._path.svnurl:SvnCommandPath', + 'local' : '._path.local:LocalPath', + 'SvnAuth' : '._path.svnwc:SvnAuth', + }, + + # python inspection/code-generation API + 'code' : { + '__doc__' : '._code:__doc__', + 'compile' : '._code.source:compile_', + 'Source' : '._code.source:Source', + 'Code' : '._code.code:Code', + 'Frame' : '._code.code:Frame', + 'ExceptionInfo' : '._code.code:ExceptionInfo', + 'Traceback' : '._code.code:Traceback', + 'getfslineno' : '._code.source:getfslineno', + 'getrawcode' : '._code.code:getrawcode', + 'patch_builtins' : '._code.code:patch_builtins', + 'unpatch_builtins' : '._code.code:unpatch_builtins', + '_AssertionError' : '._code.assertion:AssertionError', + '_reinterpret_old' : '._code.assertion:reinterpret_old', + '_reinterpret' : '._code.assertion:reinterpret', + '_reprcompare' : '._code.assertion:_reprcompare', + '_format_explanation' : '._code.assertion:_format_explanation', + }, + + # backports and additions of builtins + 'builtin' : { + '__doc__' : '._builtin:__doc__', + 'enumerate' : '._builtin:enumerate', + 'reversed' : '._builtin:reversed', + 'sorted' : '._builtin:sorted', + 'any' : '._builtin:any', + 'all' : '._builtin:all', + 'set' : '._builtin:set', + 'frozenset' : '._builtin:frozenset', + 'BaseException' : '._builtin:BaseException', + 'GeneratorExit' : '._builtin:GeneratorExit', + '_sysex' : '._builtin:_sysex', + 'print_' : '._builtin:print_', + '_reraise' : '._builtin:_reraise', + '_tryimport' : '._builtin:_tryimport', + 'exec_' : '._builtin:exec_', + '_basestring' : '._builtin:_basestring', + '_totext' : '._builtin:_totext', + '_isbytes' : '._builtin:_isbytes', + '_istext' : '._builtin:_istext', + '_getimself' : '._builtin:_getimself', + '_getfuncdict' : '._builtin:_getfuncdict', + '_getcode' : '._builtin:_getcode', + 'builtins' : '._builtin:builtins', + 'execfile' : '._builtin:execfile', + 'callable' : '._builtin:callable', + 'bytes' : '._builtin:bytes', + 'text' : '._builtin:text', + }, + + # input-output helping + 'io' : { + '__doc__' : '._io:__doc__', + 'dupfile' : '._io.capture:dupfile', + 'TextIO' : '._io.capture:TextIO', + 'BytesIO' : '._io.capture:BytesIO', + 'FDCapture' : '._io.capture:FDCapture', + 'StdCapture' : '._io.capture:StdCapture', + 'StdCaptureFD' : '._io.capture:StdCaptureFD', + 'TerminalWriter' : '._io.terminalwriter:TerminalWriter', + 'ansi_print' : 
'._io.terminalwriter:ansi_print',
+        'get_terminal_width' : '._io.terminalwriter:get_terminal_width',
+        'saferepr'           : '._io.saferepr:saferepr',
+    },
+
+    # small and mean xml/html generation
+    'xml' : {
+        '__doc__'            : '._xmlgen:__doc__',
+        'html'               : '._xmlgen:html',
+        'Tag'                : '._xmlgen:Tag',
+        'raw'                : '._xmlgen:raw',
+        'Namespace'          : '._xmlgen:Namespace',
+        'escape'             : '._xmlgen:escape',
+    },
+
+    'log' : {
+        # logging API ('producers' and 'consumers' connected via keywords)
+        '__doc__'            : '._log:__doc__',
+        '_apiwarn'           : '._log.warning:_apiwarn',
+        'Producer'           : '._log.log:Producer',
+        'setconsumer'        : '._log.log:setconsumer',
+        '_setstate'          : '._log.log:setstate',
+        '_getstate'          : '._log.log:getstate',
+        'Path'               : '._log.log:Path',
+        'STDOUT'             : '._log.log:STDOUT',
+        'STDERR'             : '._log.log:STDERR',
+        'Syslog'             : '._log.log:Syslog',
+    },
+
+})
diff --git a/venv/lib/python3.10/site-packages/py/__init__.pyi b/venv/lib/python3.10/site-packages/py/__init__.pyi
new file mode 100644
index 0000000..96859e3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/py/__init__.pyi
@@ -0,0 +1,20 @@
+from typing import Any
+
+# py allows using e.g. py.path.local even without importing py.path.
+# So import implicitly.
+from . import error
+from . import iniconfig
+from . import path
+from . import io
+from . import xml
+
+__version__: str
+
+# Untyped modules below here.
+std: Any
+test: Any
+process: Any
+apipkg: Any
+code: Any
+builtin: Any
+log: Any
diff --git a/venv/lib/python3.10/site-packages/py/__metainfo.py b/venv/lib/python3.10/site-packages/py/__metainfo.py
new file mode 100644
index 0000000..12581eb
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/py/__metainfo.py
@@ -0,0 +1,2 @@
+import py
+pydir = py.path.local(py.__file__).dirpath()
diff --git a/venv/lib/python3.10/site-packages/py/_builtin.py b/venv/lib/python3.10/site-packages/py/_builtin.py
new file mode 100644
index 0000000..ddc89fc
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/py/_builtin.py
@@ -0,0 +1,149 @@
+import sys
+
+
+# Passthrough for builtins supported with py27.
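+# (Editor's note, not in the original source: these assignments re-export
+# the builtins so code can reference them uniformly as py.builtin.<name>
+# on both Python 2 and Python 3.)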
+BaseException = BaseException +GeneratorExit = GeneratorExit +_sysex = (KeyboardInterrupt, SystemExit, MemoryError, GeneratorExit) +all = all +any = any +callable = callable +enumerate = enumerate +reversed = reversed +set, frozenset = set, frozenset +sorted = sorted + + +if sys.version_info >= (3, 0): + exec("print_ = print ; exec_=exec") + import builtins + + # some backward compatibility helpers + _basestring = str + def _totext(obj, encoding=None, errors=None): + if isinstance(obj, bytes): + if errors is None: + obj = obj.decode(encoding) + else: + obj = obj.decode(encoding, errors) + elif not isinstance(obj, str): + obj = str(obj) + return obj + + def _isbytes(x): + return isinstance(x, bytes) + + def _istext(x): + return isinstance(x, str) + + text = str + bytes = bytes + + def _getimself(function): + return getattr(function, '__self__', None) + + def _getfuncdict(function): + return getattr(function, "__dict__", None) + + def _getcode(function): + return getattr(function, "__code__", None) + + def execfile(fn, globs=None, locs=None): + if globs is None: + back = sys._getframe(1) + globs = back.f_globals + locs = back.f_locals + del back + elif locs is None: + locs = globs + fp = open(fn, "r") + try: + source = fp.read() + finally: + fp.close() + co = compile(source, fn, "exec", dont_inherit=True) + exec_(co, globs, locs) + +else: + import __builtin__ as builtins + _totext = unicode + _basestring = basestring + text = unicode + bytes = str + execfile = execfile + callable = callable + def _isbytes(x): + return isinstance(x, str) + def _istext(x): + return isinstance(x, unicode) + + def _getimself(function): + return getattr(function, 'im_self', None) + + def _getfuncdict(function): + return getattr(function, "__dict__", None) + + def _getcode(function): + try: + return getattr(function, "__code__") + except AttributeError: + return getattr(function, "func_code", None) + + def print_(*args, **kwargs): + """ minimal backport of py3k print statement. """ + sep = ' ' + if 'sep' in kwargs: + sep = kwargs.pop('sep') + end = '\n' + if 'end' in kwargs: + end = kwargs.pop('end') + file = 'file' in kwargs and kwargs.pop('file') or sys.stdout + if kwargs: + args = ", ".join([str(x) for x in kwargs]) + raise TypeError("invalid keyword arguments: %s" % args) + at_start = True + for x in args: + if not at_start: + file.write(sep) + file.write(str(x)) + at_start = False + file.write(end) + + def exec_(obj, globals=None, locals=None): + """ minimal backport of py3k exec statement. """ + __tracebackhide__ = True + if globals is None: + frame = sys._getframe(1) + globals = frame.f_globals + if locals is None: + locals = frame.f_locals + elif locals is None: + locals = globals + exec2(obj, globals, locals) + +if sys.version_info >= (3, 0): + def _reraise(cls, val, tb): + __tracebackhide__ = True + assert hasattr(val, '__traceback__') + raise cls.with_traceback(val, tb) +else: + exec (""" +def _reraise(cls, val, tb): + __tracebackhide__ = True + raise cls, val, tb +def exec2(obj, globals, locals): + __tracebackhide__ = True + exec obj in globals, locals +""") + +def _tryimport(*names): + """ return the first successfully imported module. 
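+
+    Editor's example (the module names are just common stand-ins)::
+
+        json = _tryimport('simplejson', 'json')
+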
""" + assert names + for name in names: + try: + __import__(name) + except ImportError: + excinfo = sys.exc_info() + else: + return sys.modules[name] + _reraise(*excinfo) diff --git a/venv/lib/python3.10/site-packages/py/_code/__init__.py b/venv/lib/python3.10/site-packages/py/_code/__init__.py new file mode 100644 index 0000000..f15acf8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/py/_code/__init__.py @@ -0,0 +1 @@ +""" python inspection/code generation API """ diff --git a/venv/lib/python3.10/site-packages/py/_code/_assertionnew.py b/venv/lib/python3.10/site-packages/py/_code/_assertionnew.py new file mode 100644 index 0000000..d03f29d --- /dev/null +++ b/venv/lib/python3.10/site-packages/py/_code/_assertionnew.py @@ -0,0 +1,322 @@ +""" +Find intermediate evalutation results in assert statements through builtin AST. +This should replace _assertionold.py eventually. +""" + +import sys +import ast + +import py +from py._code.assertion import _format_explanation, BuiltinAssertionError + + +def _is_ast_expr(node): + return isinstance(node, ast.expr) +def _is_ast_stmt(node): + return isinstance(node, ast.stmt) + + +class Failure(Exception): + """Error found while interpreting AST.""" + + def __init__(self, explanation=""): + self.cause = sys.exc_info() + self.explanation = explanation + + +def interpret(source, frame, should_fail=False): + mod = ast.parse(source) + visitor = DebugInterpreter(frame) + try: + visitor.visit(mod) + except Failure: + failure = sys.exc_info()[1] + return getfailure(failure) + if should_fail: + return ("(assertion failed, but when it was re-run for " + "printing intermediate values, it did not fail. Suggestions: " + "compute assert expression before the assert or use --no-assert)") + +def run(offending_line, frame=None): + if frame is None: + frame = py.code.Frame(sys._getframe(1)) + return interpret(offending_line, frame) + +def getfailure(failure): + explanation = _format_explanation(failure.explanation) + value = failure.cause[1] + if str(value): + lines = explanation.splitlines() + if not lines: + lines.append("") + lines[0] += " << %s" % (value,) + explanation = "\n".join(lines) + text = "%s: %s" % (failure.cause[0].__name__, explanation) + if text.startswith("AssertionError: assert "): + text = text[16:] + return text + + +operator_map = { + ast.BitOr : "|", + ast.BitXor : "^", + ast.BitAnd : "&", + ast.LShift : "<<", + ast.RShift : ">>", + ast.Add : "+", + ast.Sub : "-", + ast.Mult : "*", + ast.Div : "/", + ast.FloorDiv : "//", + ast.Mod : "%", + ast.Eq : "==", + ast.NotEq : "!=", + ast.Lt : "<", + ast.LtE : "<=", + ast.Gt : ">", + ast.GtE : ">=", + ast.Pow : "**", + ast.Is : "is", + ast.IsNot : "is not", + ast.In : "in", + ast.NotIn : "not in" +} + +unary_map = { + ast.Not : "not %s", + ast.Invert : "~%s", + ast.USub : "-%s", + ast.UAdd : "+%s" +} + + +class DebugInterpreter(ast.NodeVisitor): + """Interpret AST nodes to gleam useful debugging information. """ + + def __init__(self, frame): + self.frame = frame + + def generic_visit(self, node): + # Fallback when we don't have a special implementation. 
+        if _is_ast_expr(node):
+            mod = ast.Expression(node)
+            co = self._compile(mod)
+            try:
+                result = self.frame.eval(co)
+            except Exception:
+                raise Failure()
+            explanation = self.frame.repr(result)
+            return explanation, result
+        elif _is_ast_stmt(node):
+            mod = ast.Module([node])
+            co = self._compile(mod, "exec")
+            try:
+                self.frame.exec_(co)
+            except Exception:
+                raise Failure()
+            return None, None
+        else:
+            raise AssertionError("can't handle %s" %(node,))
+
+    def _compile(self, source, mode="eval"):
+        return compile(source, "<assertion interpretation>", mode)
+
+    def visit_Expr(self, expr):
+        return self.visit(expr.value)
+
+    def visit_Module(self, mod):
+        for stmt in mod.body:
+            self.visit(stmt)
+
+    def visit_Name(self, name):
+        explanation, result = self.generic_visit(name)
+        # See if the name is local.
+        source = "%r in locals() is not globals()" % (name.id,)
+        co = self._compile(source)
+        try:
+            local = self.frame.eval(co)
+        except Exception:
+            # have to assume it isn't
+            local = False
+        if not local:
+            return name.id, result
+        return explanation, result
+
+    def visit_Compare(self, comp):
+        left = comp.left
+        left_explanation, left_result = self.visit(left)
+        for op, next_op in zip(comp.ops, comp.comparators):
+            next_explanation, next_result = self.visit(next_op)
+            op_symbol = operator_map[op.__class__]
+            explanation = "%s %s %s" % (left_explanation, op_symbol,
+                                        next_explanation)
+            source = "__exprinfo_left %s __exprinfo_right" % (op_symbol,)
+            co = self._compile(source)
+            try:
+                result = self.frame.eval(co, __exprinfo_left=left_result,
+                                         __exprinfo_right=next_result)
+            except Exception:
+                raise Failure(explanation)
+            try:
+                if not result:
+                    break
+            except KeyboardInterrupt:
+                raise
+            except:
+                break
+            left_explanation, left_result = next_explanation, next_result
+
+        rcomp = py.code._reprcompare
+        if rcomp:
+            res = rcomp(op_symbol, left_result, next_result)
+            if res:
+                explanation = res
+        return explanation, result
+
+    def visit_BoolOp(self, boolop):
+        is_or = isinstance(boolop.op, ast.Or)
+        explanations = []
+        for operand in boolop.values:
+            explanation, result = self.visit(operand)
+            explanations.append(explanation)
+            if result == is_or:
+                break
+        name = is_or and " or " or " and "
+        explanation = "(" + name.join(explanations) + ")"
+        return explanation, result
+
+    def visit_UnaryOp(self, unary):
+        pattern = unary_map[unary.op.__class__]
+        operand_explanation, operand_result = self.visit(unary.operand)
+        explanation = pattern % (operand_explanation,)
+        co = self._compile(pattern % ("__exprinfo_expr",))
+        try:
+            result = self.frame.eval(co, __exprinfo_expr=operand_result)
+        except Exception:
+            raise Failure(explanation)
+        return explanation, result
+
+    def visit_BinOp(self, binop):
+        left_explanation, left_result = self.visit(binop.left)
+        right_explanation, right_result = self.visit(binop.right)
+        symbol = operator_map[binop.op.__class__]
+        explanation = "(%s %s %s)" % (left_explanation, symbol,
+                                      right_explanation)
+        source = "__exprinfo_left %s __exprinfo_right" % (symbol,)
+        co = self._compile(source)
+        try:
+            result = self.frame.eval(co, __exprinfo_left=left_result,
+                                     __exprinfo_right=right_result)
+        except Exception:
+            raise Failure(explanation)
+        return explanation, result
+
+    def visit_Call(self, call):
+        func_explanation, func = self.visit(call.func)
+        arg_explanations = []
+        ns = {"__exprinfo_func" : func}
+        arguments = []
+        for arg in call.args:
+            arg_explanation, arg_result = self.visit(arg)
+            arg_name = "__exprinfo_%s" % (len(ns),)
+            ns[arg_name] = arg_result
+            arguments.append(arg_name)
+
arg_explanations.append(arg_explanation) + for keyword in call.keywords: + arg_explanation, arg_result = self.visit(keyword.value) + arg_name = "__exprinfo_%s" % (len(ns),) + ns[arg_name] = arg_result + keyword_source = "%s=%%s" % (keyword.arg) + arguments.append(keyword_source % (arg_name,)) + arg_explanations.append(keyword_source % (arg_explanation,)) + if call.starargs: + arg_explanation, arg_result = self.visit(call.starargs) + arg_name = "__exprinfo_star" + ns[arg_name] = arg_result + arguments.append("*%s" % (arg_name,)) + arg_explanations.append("*%s" % (arg_explanation,)) + if call.kwargs: + arg_explanation, arg_result = self.visit(call.kwargs) + arg_name = "__exprinfo_kwds" + ns[arg_name] = arg_result + arguments.append("**%s" % (arg_name,)) + arg_explanations.append("**%s" % (arg_explanation,)) + args_explained = ", ".join(arg_explanations) + explanation = "%s(%s)" % (func_explanation, args_explained) + args = ", ".join(arguments) + source = "__exprinfo_func(%s)" % (args,) + co = self._compile(source) + try: + result = self.frame.eval(co, **ns) + except Exception: + raise Failure(explanation) + pattern = "%s\n{%s = %s\n}" + rep = self.frame.repr(result) + explanation = pattern % (rep, rep, explanation) + return explanation, result + + def _is_builtin_name(self, name): + pattern = "%r not in globals() and %r not in locals()" + source = pattern % (name.id, name.id) + co = self._compile(source) + try: + return self.frame.eval(co) + except Exception: + return False + + def visit_Attribute(self, attr): + if not isinstance(attr.ctx, ast.Load): + return self.generic_visit(attr) + source_explanation, source_result = self.visit(attr.value) + explanation = "%s.%s" % (source_explanation, attr.attr) + source = "__exprinfo_expr.%s" % (attr.attr,) + co = self._compile(source) + try: + result = self.frame.eval(co, __exprinfo_expr=source_result) + except Exception: + raise Failure(explanation) + explanation = "%s\n{%s = %s.%s\n}" % (self.frame.repr(result), + self.frame.repr(result), + source_explanation, attr.attr) + # Check if the attr is from an instance. + source = "%r in getattr(__exprinfo_expr, '__dict__', {})" + source = source % (attr.attr,) + co = self._compile(source) + try: + from_instance = self.frame.eval(co, __exprinfo_expr=source_result) + except Exception: + from_instance = True + if from_instance: + rep = self.frame.repr(result) + pattern = "%s\n{%s = %s\n}" + explanation = pattern % (rep, rep, explanation) + return explanation, result + + def visit_Assert(self, assrt): + test_explanation, test_result = self.visit(assrt.test) + if test_explanation.startswith("False\n{False =") and \ + test_explanation.endswith("\n"): + test_explanation = test_explanation[15:-2] + explanation = "assert %s" % (test_explanation,) + if not test_result: + try: + raise BuiltinAssertionError + except Exception: + raise Failure(explanation) + return explanation, test_result + + def visit_Assign(self, assign): + value_explanation, value_result = self.visit(assign.value) + explanation = "... 
= %s" % (value_explanation,) + name = ast.Name("__exprinfo_expr", ast.Load(), + lineno=assign.value.lineno, + col_offset=assign.value.col_offset) + new_assign = ast.Assign(assign.targets, name, lineno=assign.lineno, + col_offset=assign.col_offset) + mod = ast.Module([new_assign]) + co = self._compile(mod, "exec") + try: + self.frame.exec_(co, __exprinfo_expr=value_result) + except Exception: + raise Failure(explanation) + return explanation, value_result diff --git a/venv/lib/python3.10/site-packages/py/_code/_assertionold.py b/venv/lib/python3.10/site-packages/py/_code/_assertionold.py new file mode 100644 index 0000000..1bb70a8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/py/_code/_assertionold.py @@ -0,0 +1,556 @@ +import py +import sys, inspect +from compiler import parse, ast, pycodegen +from py._code.assertion import BuiltinAssertionError, _format_explanation +import types + +passthroughex = py.builtin._sysex + +class Failure: + def __init__(self, node): + self.exc, self.value, self.tb = sys.exc_info() + self.node = node + +class View(object): + """View base class. + + If C is a subclass of View, then C(x) creates a proxy object around + the object x. The actual class of the proxy is not C in general, + but a *subclass* of C determined by the rules below. To avoid confusion + we call view class the class of the proxy (a subclass of C, so of View) + and object class the class of x. + + Attributes and methods not found in the proxy are automatically read on x. + Other operations like setting attributes are performed on the proxy, as + determined by its view class. The object x is available from the proxy + as its __obj__ attribute. + + The view class selection is determined by the __view__ tuples and the + optional __viewkey__ method. By default, the selected view class is the + most specific subclass of C whose __view__ mentions the class of x. + If no such subclass is found, the search proceeds with the parent + object classes. For example, C(True) will first look for a subclass + of C with __view__ = (..., bool, ...) and only if it doesn't find any + look for one with __view__ = (..., int, ...), and then ..., object,... + If everything fails the class C itself is considered to be the default. + + Alternatively, the view class selection can be driven by another aspect + of the object x, instead of the class of x, by overriding __viewkey__. + See last example at the end of this module. 
+ """ + + _viewcache = {} + __view__ = () + + def __new__(rootclass, obj, *args, **kwds): + self = object.__new__(rootclass) + self.__obj__ = obj + self.__rootclass__ = rootclass + key = self.__viewkey__() + try: + self.__class__ = self._viewcache[key] + except KeyError: + self.__class__ = self._selectsubclass(key) + return self + + def __getattr__(self, attr): + # attributes not found in the normal hierarchy rooted on View + # are looked up in the object's real class + return getattr(self.__obj__, attr) + + def __viewkey__(self): + return self.__obj__.__class__ + + def __matchkey__(self, key, subclasses): + if inspect.isclass(key): + keys = inspect.getmro(key) + else: + keys = [key] + for key in keys: + result = [C for C in subclasses if key in C.__view__] + if result: + return result + return [] + + def _selectsubclass(self, key): + subclasses = list(enumsubclasses(self.__rootclass__)) + for C in subclasses: + if not isinstance(C.__view__, tuple): + C.__view__ = (C.__view__,) + choices = self.__matchkey__(key, subclasses) + if not choices: + return self.__rootclass__ + elif len(choices) == 1: + return choices[0] + else: + # combine the multiple choices + return type('?', tuple(choices), {}) + + def __repr__(self): + return '%s(%r)' % (self.__rootclass__.__name__, self.__obj__) + + +def enumsubclasses(cls): + for subcls in cls.__subclasses__(): + for subsubclass in enumsubclasses(subcls): + yield subsubclass + yield cls + + +class Interpretable(View): + """A parse tree node with a few extra methods.""" + explanation = None + + def is_builtin(self, frame): + return False + + def eval(self, frame): + # fall-back for unknown expression nodes + try: + expr = ast.Expression(self.__obj__) + expr.filename = '' + self.__obj__.filename = '' + co = pycodegen.ExpressionCodeGenerator(expr).getCode() + result = frame.eval(co) + except passthroughex: + raise + except: + raise Failure(self) + self.result = result + self.explanation = self.explanation or frame.repr(self.result) + + def run(self, frame): + # fall-back for unknown statement nodes + try: + expr = ast.Module(None, ast.Stmt([self.__obj__])) + expr.filename = '' + co = pycodegen.ModuleCodeGenerator(expr).getCode() + frame.exec_(co) + except passthroughex: + raise + except: + raise Failure(self) + + def nice_explanation(self): + return _format_explanation(self.explanation) + + +class Name(Interpretable): + __view__ = ast.Name + + def is_local(self, frame): + source = '%r in locals() is not globals()' % self.name + try: + return frame.is_true(frame.eval(source)) + except passthroughex: + raise + except: + return False + + def is_global(self, frame): + source = '%r in globals()' % self.name + try: + return frame.is_true(frame.eval(source)) + except passthroughex: + raise + except: + return False + + def is_builtin(self, frame): + source = '%r not in locals() and %r not in globals()' % ( + self.name, self.name) + try: + return frame.is_true(frame.eval(source)) + except passthroughex: + raise + except: + return False + + def eval(self, frame): + super(Name, self).eval(frame) + if not self.is_local(frame): + self.explanation = self.name + +class Compare(Interpretable): + __view__ = ast.Compare + + def eval(self, frame): + expr = Interpretable(self.expr) + expr.eval(frame) + for operation, expr2 in self.ops: + if hasattr(self, 'result'): + # shortcutting in chained expressions + if not frame.is_true(self.result): + break + expr2 = Interpretable(expr2) + expr2.eval(frame) + self.explanation = "%s %s %s" % ( + expr.explanation, operation, 
expr2.explanation) + source = "__exprinfo_left %s __exprinfo_right" % operation + try: + self.result = frame.eval(source, + __exprinfo_left=expr.result, + __exprinfo_right=expr2.result) + except passthroughex: + raise + except: + raise Failure(self) + expr = expr2 + +class And(Interpretable): + __view__ = ast.And + + def eval(self, frame): + explanations = [] + for expr in self.nodes: + expr = Interpretable(expr) + expr.eval(frame) + explanations.append(expr.explanation) + self.result = expr.result + if not frame.is_true(expr.result): + break + self.explanation = '(' + ' and '.join(explanations) + ')' + +class Or(Interpretable): + __view__ = ast.Or + + def eval(self, frame): + explanations = [] + for expr in self.nodes: + expr = Interpretable(expr) + expr.eval(frame) + explanations.append(expr.explanation) + self.result = expr.result + if frame.is_true(expr.result): + break + self.explanation = '(' + ' or '.join(explanations) + ')' + + +# == Unary operations == +keepalive = [] +for astclass, astpattern in { + ast.Not : 'not __exprinfo_expr', + ast.Invert : '(~__exprinfo_expr)', + }.items(): + + class UnaryArith(Interpretable): + __view__ = astclass + + def eval(self, frame, astpattern=astpattern): + expr = Interpretable(self.expr) + expr.eval(frame) + self.explanation = astpattern.replace('__exprinfo_expr', + expr.explanation) + try: + self.result = frame.eval(astpattern, + __exprinfo_expr=expr.result) + except passthroughex: + raise + except: + raise Failure(self) + + keepalive.append(UnaryArith) + +# == Binary operations == +for astclass, astpattern in { + ast.Add : '(__exprinfo_left + __exprinfo_right)', + ast.Sub : '(__exprinfo_left - __exprinfo_right)', + ast.Mul : '(__exprinfo_left * __exprinfo_right)', + ast.Div : '(__exprinfo_left / __exprinfo_right)', + ast.Mod : '(__exprinfo_left % __exprinfo_right)', + ast.Power : '(__exprinfo_left ** __exprinfo_right)', + }.items(): + + class BinaryArith(Interpretable): + __view__ = astclass + + def eval(self, frame, astpattern=astpattern): + left = Interpretable(self.left) + left.eval(frame) + right = Interpretable(self.right) + right.eval(frame) + self.explanation = (astpattern + .replace('__exprinfo_left', left .explanation) + .replace('__exprinfo_right', right.explanation)) + try: + self.result = frame.eval(astpattern, + __exprinfo_left=left.result, + __exprinfo_right=right.result) + except passthroughex: + raise + except: + raise Failure(self) + + keepalive.append(BinaryArith) + + +class CallFunc(Interpretable): + __view__ = ast.CallFunc + + def is_bool(self, frame): + source = 'isinstance(__exprinfo_value, bool)' + try: + return frame.is_true(frame.eval(source, + __exprinfo_value=self.result)) + except passthroughex: + raise + except: + return False + + def eval(self, frame): + node = Interpretable(self.node) + node.eval(frame) + explanations = [] + vars = {'__exprinfo_fn': node.result} + source = '__exprinfo_fn(' + for a in self.args: + if isinstance(a, ast.Keyword): + keyword = a.name + a = a.expr + else: + keyword = None + a = Interpretable(a) + a.eval(frame) + argname = '__exprinfo_%d' % len(vars) + vars[argname] = a.result + if keyword is None: + source += argname + ',' + explanations.append(a.explanation) + else: + source += '%s=%s,' % (keyword, argname) + explanations.append('%s=%s' % (keyword, a.explanation)) + if self.star_args: + star_args = Interpretable(self.star_args) + star_args.eval(frame) + argname = '__exprinfo_star' + vars[argname] = star_args.result + source += '*' + argname + ',' + explanations.append('*' + 
star_args.explanation) + if self.dstar_args: + dstar_args = Interpretable(self.dstar_args) + dstar_args.eval(frame) + argname = '__exprinfo_kwds' + vars[argname] = dstar_args.result + source += '**' + argname + ',' + explanations.append('**' + dstar_args.explanation) + self.explanation = "%s(%s)" % ( + node.explanation, ', '.join(explanations)) + if source.endswith(','): + source = source[:-1] + source += ')' + try: + self.result = frame.eval(source, **vars) + except passthroughex: + raise + except: + raise Failure(self) + if not node.is_builtin(frame) or not self.is_bool(frame): + r = frame.repr(self.result) + self.explanation = '%s\n{%s = %s\n}' % (r, r, self.explanation) + +class Getattr(Interpretable): + __view__ = ast.Getattr + + def eval(self, frame): + expr = Interpretable(self.expr) + expr.eval(frame) + source = '__exprinfo_expr.%s' % self.attrname + try: + self.result = frame.eval(source, __exprinfo_expr=expr.result) + except passthroughex: + raise + except: + raise Failure(self) + self.explanation = '%s.%s' % (expr.explanation, self.attrname) + # if the attribute comes from the instance, its value is interesting + source = ('hasattr(__exprinfo_expr, "__dict__") and ' + '%r in __exprinfo_expr.__dict__' % self.attrname) + try: + from_instance = frame.is_true( + frame.eval(source, __exprinfo_expr=expr.result)) + except passthroughex: + raise + except: + from_instance = True + if from_instance: + r = frame.repr(self.result) + self.explanation = '%s\n{%s = %s\n}' % (r, r, self.explanation) + +# == Re-interpretation of full statements == + +class Assert(Interpretable): + __view__ = ast.Assert + + def run(self, frame): + test = Interpretable(self.test) + test.eval(frame) + # simplify 'assert False where False = ...' + if (test.explanation.startswith('False\n{False = ') and + test.explanation.endswith('\n}')): + test.explanation = test.explanation[15:-2] + # print the result as 'assert ' + self.result = test.result + self.explanation = 'assert ' + test.explanation + if not frame.is_true(test.result): + try: + raise BuiltinAssertionError + except passthroughex: + raise + except: + raise Failure(self) + +class Assign(Interpretable): + __view__ = ast.Assign + + def run(self, frame): + expr = Interpretable(self.expr) + expr.eval(frame) + self.result = expr.result + self.explanation = '... 
= ' + expr.explanation + # fall-back-run the rest of the assignment + ass = ast.Assign(self.nodes, ast.Name('__exprinfo_expr')) + mod = ast.Module(None, ast.Stmt([ass])) + mod.filename = '' + co = pycodegen.ModuleCodeGenerator(mod).getCode() + try: + frame.exec_(co, __exprinfo_expr=expr.result) + except passthroughex: + raise + except: + raise Failure(self) + +class Discard(Interpretable): + __view__ = ast.Discard + + def run(self, frame): + expr = Interpretable(self.expr) + expr.eval(frame) + self.result = expr.result + self.explanation = expr.explanation + +class Stmt(Interpretable): + __view__ = ast.Stmt + + def run(self, frame): + for stmt in self.nodes: + stmt = Interpretable(stmt) + stmt.run(frame) + + +def report_failure(e): + explanation = e.node.nice_explanation() + if explanation: + explanation = ", in: " + explanation + else: + explanation = "" + sys.stdout.write("%s: %s%s\n" % (e.exc.__name__, e.value, explanation)) + +def check(s, frame=None): + if frame is None: + frame = sys._getframe(1) + frame = py.code.Frame(frame) + expr = parse(s, 'eval') + assert isinstance(expr, ast.Expression) + node = Interpretable(expr.node) + try: + node.eval(frame) + except passthroughex: + raise + except Failure: + e = sys.exc_info()[1] + report_failure(e) + else: + if not frame.is_true(node.result): + sys.stderr.write("assertion failed: %s\n" % node.nice_explanation()) + + +########################################################### +# API / Entry points +# ######################################################### + +def interpret(source, frame, should_fail=False): + module = Interpretable(parse(source, 'exec').node) + #print "got module", module + if isinstance(frame, types.FrameType): + frame = py.code.Frame(frame) + try: + module.run(frame) + except Failure: + e = sys.exc_info()[1] + return getfailure(e) + except passthroughex: + raise + except: + import traceback + traceback.print_exc() + if should_fail: + return ("(assertion failed, but when it was re-run for " + "printing intermediate values, it did not fail. 
Suggestions: " + "compute assert expression before the assert or use --nomagic)") + else: + return None + +def getmsg(excinfo): + if isinstance(excinfo, tuple): + excinfo = py.code.ExceptionInfo(excinfo) + #frame, line = gettbline(tb) + #frame = py.code.Frame(frame) + #return interpret(line, frame) + + tb = excinfo.traceback[-1] + source = str(tb.statement).strip() + x = interpret(source, tb.frame, should_fail=True) + if not isinstance(x, str): + raise TypeError("interpret returned non-string %r" % (x,)) + return x + +def getfailure(e): + explanation = e.node.nice_explanation() + if str(e.value): + lines = explanation.split('\n') + lines[0] += " << %s" % (e.value,) + explanation = '\n'.join(lines) + text = "%s: %s" % (e.exc.__name__, explanation) + if text.startswith('AssertionError: assert '): + text = text[16:] + return text + +def run(s, frame=None): + if frame is None: + frame = sys._getframe(1) + frame = py.code.Frame(frame) + module = Interpretable(parse(s, 'exec').node) + try: + module.run(frame) + except Failure: + e = sys.exc_info()[1] + report_failure(e) + + +if __name__ == '__main__': + # example: + def f(): + return 5 + def g(): + return 3 + def h(x): + return 'never' + check("f() * g() == 5") + check("not f()") + check("not (f() and g() or 0)") + check("f() == g()") + i = 4 + check("i == f()") + check("len(f()) == 0") + check("isinstance(2+3+4, float)") + + run("x = i") + check("x == 5") + + run("assert not f(), 'oops'") + run("a, b, c = 1, 2") + run("a, b, c = f()") + + check("max([f(),g()]) == 4") + check("'hello'[g()] == 'h'") + run("'guk%d' % h(f())") diff --git a/venv/lib/python3.10/site-packages/py/_code/_py2traceback.py b/venv/lib/python3.10/site-packages/py/_code/_py2traceback.py new file mode 100644 index 0000000..d65e27c --- /dev/null +++ b/venv/lib/python3.10/site-packages/py/_code/_py2traceback.py @@ -0,0 +1,79 @@ +# copied from python-2.7.3's traceback.py +# CHANGES: +# - some_str is replaced, trying to create unicode strings +# +import types + +def format_exception_only(etype, value): + """Format the exception part of a traceback. + + The arguments are the exception type and value such as given by + sys.last_type and sys.last_value. The return value is a list of + strings, each ending in a newline. + + Normally, the list contains a single string; however, for + SyntaxError exceptions, it contains several lines that (when + printed) display detailed information about where the syntax + error occurred. + + The message indicating which exception occurred is always the last + string in the list. + + """ + + # An instance should not have a meaningful value parameter, but + # sometimes does, particularly for string exceptions, such as + # >>> raise string1, string2 # deprecated + # + # Clear these out first because issubtype(string1, SyntaxError) + # would throw another exception and mask the original problem. + if (isinstance(etype, BaseException) or + isinstance(etype, types.InstanceType) or + etype is None or type(etype) is str): + return [_format_final_exc_line(etype, value)] + + stype = etype.__name__ + + if not issubclass(etype, SyntaxError): + return [_format_final_exc_line(stype, value)] + + # It was a syntax error; show exactly where the problem was found. 
+    lines = []
+    try:
+        msg, (filename, lineno, offset, badline) = value.args
+    except Exception:
+        pass
+    else:
+        filename = filename or "<string>"
+        lines.append('  File "%s", line %d\n' % (filename, lineno))
+        if badline is not None:
+            lines.append('    %s\n' % badline.strip())
+            if offset is not None:
+                caretspace = badline.rstrip('\n')[:offset].lstrip()
+                # non-space whitespace (like tabs) must be kept for alignment
+                caretspace = ((c.isspace() and c or ' ') for c in caretspace)
+                # only three spaces to account for offset1 == pos 0
+                lines.append('   %s^\n' % ''.join(caretspace))
+        value = msg
+
+    lines.append(_format_final_exc_line(stype, value))
+    return lines
+
+def _format_final_exc_line(etype, value):
+    """Return a string -- the final line, for the normal case of
+    format_exception_only"""
+    valuestr = _some_str(value)
+    if value is None or not valuestr:
+        line = "%s\n" % etype
+    else:
+        line = "%s: %s\n" % (etype, valuestr)
+    return line
+
+def _some_str(value):
+    try:
+        return unicode(value)
+    except Exception:
+        try:
+            return str(value)
+        except Exception:
+            pass
+    return '<unprintable %s object>' % type(value).__name__
diff --git a/venv/lib/python3.10/site-packages/py/_code/assertion.py b/venv/lib/python3.10/site-packages/py/_code/assertion.py
new file mode 100644
index 0000000..ff16437
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/py/_code/assertion.py
@@ -0,0 +1,90 @@
+import sys
+import py
+
+BuiltinAssertionError = py.builtin.builtins.AssertionError
+
+_reprcompare = None # if set, will be called by assert reinterp for comparison ops
+
+def _format_explanation(explanation):
+    """This formats an explanation
+
+    Normally all embedded newlines are escaped, however there are
+    three exceptions: \n{, \n} and \n~.  The first two are intended to
+    cover nested explanations, see function and attribute explanations
+    for examples (.visit_Call(), visit_Attribute()).  The last one is
+    for when one explanation needs to span multiple lines, e.g. when
+    displaying diffs.
+    """
+    raw_lines = (explanation or '').split('\n')
+    # escape newlines not followed by {, } and ~
+    lines = [raw_lines[0]]
+    for l in raw_lines[1:]:
+        if l.startswith('{') or l.startswith('}') or l.startswith('~'):
+            lines.append(l)
+        else:
+            lines[-1] += '\\n' + l
+
+    result = lines[:1]
+    stack = [0]
+    stackcnt = [0]
+    for line in lines[1:]:
+        if line.startswith('{'):
+            if stackcnt[-1]:
+                s = 'and   '
+            else:
+                s = 'where '
+            stack.append(len(result))
+            stackcnt[-1] += 1
+            stackcnt.append(0)
+            result.append(' +' + '  '*(len(stack)-1) + s + line[1:])
+        elif line.startswith('}'):
+            assert line.startswith('}')
+            stack.pop()
+            stackcnt.pop()
+            result[stack[-1]] += line[1:]
+        else:
+            assert line.startswith('~')
+            result.append('  '*len(stack) + line[1:])
+    assert len(stack) == 1
+    return '\n'.join(result)
+
+
+class AssertionError(BuiltinAssertionError):
+    def __init__(self, *args):
+        BuiltinAssertionError.__init__(self, *args)
+        if args:
+            try:
+                self.msg = str(args[0])
+            except py.builtin._sysex:
+                raise
+            except:
+                self.msg = "<[broken __repr__] %s at %0xd>" %(
+                    args[0].__class__, id(args[0]))
+        else:
+            f = py.code.Frame(sys._getframe(1))
+            try:
+                source = f.code.fullsource
+                if source is not None:
+                    try:
+                        source = source.getstatement(f.lineno, assertion=True)
+                    except IndexError:
+                        source = None
+                    else:
+                        source = str(source.deindent()).strip()
+            except py.error.ENOENT:
+                source = None
+                # this can also occur during reinterpretation, when the
+                # co_filename is set to "<string>".
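+            # note (sketch): reinterpret() below re-runs the failing assert
+            # in the original frame to build a detailed message; with
+            # should_fail=True it instead returns a warning string if the
+            # re-run unexpectedly passes (see interpret() in _assertionold).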
+            if source:
+                self.msg = reinterpret(source, f, should_fail=True)
+            else:
+                self.msg = "<could not determine information>"
+            if not self.args:
+                self.args = (self.msg,)
+
+if sys.version_info > (3, 0):
+    AssertionError.__module__ = "builtins"
+    reinterpret_old = "old reinterpretation not available for py3"
+else:
+    from py._code._assertionold import interpret as reinterpret_old
+from py._code._assertionnew import interpret as reinterpret
diff --git a/venv/lib/python3.10/site-packages/py/_code/code.py b/venv/lib/python3.10/site-packages/py/_code/code.py
new file mode 100644
index 0000000..dad7962
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/py/_code/code.py
@@ -0,0 +1,796 @@
+import py
+import sys
+from inspect import CO_VARARGS, CO_VARKEYWORDS, isclass
+
+builtin_repr = repr
+
+reprlib = py.builtin._tryimport('repr', 'reprlib')
+
+if sys.version_info[0] >= 3:
+    from traceback import format_exception_only
+else:
+    from py._code._py2traceback import format_exception_only
+
+import traceback
+
+
+class Code(object):
+    """ wrapper around Python code objects """
+    def __init__(self, rawcode):
+        if not hasattr(rawcode, "co_filename"):
+            rawcode = py.code.getrawcode(rawcode)
+        try:
+            self.filename = rawcode.co_filename
+            self.firstlineno = rawcode.co_firstlineno - 1
+            self.name = rawcode.co_name
+        except AttributeError:
+            raise TypeError("not a code object: %r" % (rawcode,))
+        self.raw = rawcode
+
+    def __eq__(self, other):
+        return self.raw == other.raw
+
+    def __ne__(self, other):
+        return not self == other
+
+    @property
+    def path(self):
+        """ return a path object pointing to source code (note that it
+        might not point to an actually existing file). """
+        p = py.path.local(self.raw.co_filename)
+        # maybe don't try this checking
+        if not p.check():
+            # XXX maybe try harder like the weird logic
+            # in the standard lib [linecache.updatecache] does?
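+            # (sketch note) fall back to the raw co_filename string, not a
+            # py.path.local; e.g. code compiled from "<string>" has no file
+            # on disk, so callers must handle both return types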
+            p = self.raw.co_filename
+        return p
+
+    @property
+    def fullsource(self):
+        """ return a py.code.Source object for the full source file of the code
+        """
+        from py._code import source
+        full, _ = source.findsource(self.raw)
+        return full
+
+    def source(self):
+        """ return a py.code.Source object for the code object's source only
+        """
+        # return source only for that part of code
+        return py.code.Source(self.raw)
+
+    def getargs(self, var=False):
+        """ return a tuple with the argument names for the code object
+
+        if 'var' is set True also return the names of the variable and
+        keyword arguments when present
+        """
+        # handy shortcut for getting args
+        raw = self.raw
+        argcount = raw.co_argcount
+        if var:
+            argcount += raw.co_flags & CO_VARARGS
+            argcount += raw.co_flags & CO_VARKEYWORDS
+        return raw.co_varnames[:argcount]
+
+class Frame(object):
+    """Wrapper around a Python frame holding f_locals and f_globals
+    in which expressions can be evaluated."""
+
+    def __init__(self, frame):
+        self.lineno = frame.f_lineno - 1
+        self.f_globals = frame.f_globals
+        self.f_locals = frame.f_locals
+        self.raw = frame
+        self.code = py.code.Code(frame.f_code)
+
+    @property
+    def statement(self):
+        """ statement this frame is at """
+        if self.code.fullsource is None:
+            return py.code.Source("")
+        return self.code.fullsource.getstatement(self.lineno)
+
+    def eval(self, code, **vars):
+        """ evaluate 'code' in the frame
+
+        'vars' are optional additional local variables
+
+        returns the result of the evaluation
+        """
+        f_locals = self.f_locals.copy()
+        f_locals.update(vars)
+        return eval(code, self.f_globals, f_locals)
+
+    def exec_(self, code, **vars):
+        """ exec 'code' in the frame
+
+        'vars' are optional additional local variables
+        """
+        f_locals = self.f_locals.copy()
+        f_locals.update(vars)
+        py.builtin.exec_(code, self.f_globals, f_locals)
+
+    def repr(self, object):
+        """ return a 'safe' (non-recursive, one-line) string repr for 'object'
+        """
+        return py.io.saferepr(object)
+
+    def is_true(self, object):
+        return object
+
+    def getargs(self, var=False):
+        """ return a list of tuples (name, value) for all arguments
+
+        if 'var' is set True also include the variable and keyword
+        arguments when present
+        """
+        retval = []
+        for arg in self.code.getargs(var):
+            try:
+                retval.append((arg, self.f_locals[arg]))
+            except KeyError:
+                pass     # this can occur when using Psyco
+        return retval
+
+
+class TracebackEntry(object):
+    """ a single entry in a traceback """
+
+    _repr_style = None
+    exprinfo = None
+
+    def __init__(self, rawentry):
+        self._rawentry = rawentry
+        self.lineno = rawentry.tb_lineno - 1
+
+    def set_repr_style(self, mode):
+        assert mode in ("short", "long")
+        self._repr_style = mode
+
+    @property
+    def frame(self):
+        return py.code.Frame(self._rawentry.tb_frame)
+
+    @property
+    def relline(self):
+        return self.lineno - self.frame.code.firstlineno
+
+    def __repr__(self):
+        return "<TracebackEntry %s:%d>" % (self.frame.code.path, self.lineno+1)
+
+    @property
+    def statement(self):
+        """ py.code.Source object for the current statement """
+        source = self.frame.code.fullsource
+        return source.getstatement(self.lineno)
+
+    @property
+    def path(self):
+        """ path to the source code """
+        return self.frame.code.path
+
+    def getlocals(self):
+        return self.frame.f_locals
+    locals = property(getlocals, None, None, "locals of underlying frame")
+
+    def reinterpret(self):
+        """Reinterpret the failing statement and return detailed information
+        about what operations were performed."""
+        if self.exprinfo is None:
+            source =
str(self.statement).strip() + x = py.code._reinterpret(source, self.frame, should_fail=True) + if not isinstance(x, str): + raise TypeError("interpret returned non-string %r" % (x,)) + self.exprinfo = x + return self.exprinfo + + def getfirstlinesource(self): + # on Jython this firstlineno can be -1 apparently + return max(self.frame.code.firstlineno, 0) + + def getsource(self, astcache=None): + """ return failing source code. """ + # we use the passed in astcache to not reparse asttrees + # within exception info printing + from py._code.source import getstatementrange_ast + source = self.frame.code.fullsource + if source is None: + return None + key = astnode = None + if astcache is not None: + key = self.frame.code.path + if key is not None: + astnode = astcache.get(key, None) + start = self.getfirstlinesource() + try: + astnode, _, end = getstatementrange_ast(self.lineno, source, + astnode=astnode) + except SyntaxError: + end = self.lineno + 1 + else: + if key is not None: + astcache[key] = astnode + return source[start:end] + + source = property(getsource) + + def ishidden(self): + """ return True if the current frame has a var __tracebackhide__ + resolving to True + + mostly for internal use + """ + try: + return self.frame.f_locals['__tracebackhide__'] + except KeyError: + try: + return self.frame.f_globals['__tracebackhide__'] + except KeyError: + return False + + def __str__(self): + try: + fn = str(self.path) + except py.error.Error: + fn = '???' + name = self.frame.code.name + try: + line = str(self.statement).lstrip() + except KeyboardInterrupt: + raise + except: + line = "???" + return " File %r:%d in %s\n %s\n" % (fn, self.lineno+1, name, line) + + def name(self): + return self.frame.code.raw.co_name + name = property(name, None, None, "co_name of underlaying code") + + +class Traceback(list): + """ Traceback objects encapsulate and offer higher level + access to Traceback entries. + """ + Entry = TracebackEntry + + def __init__(self, tb): + """ initialize from given python traceback object. """ + if hasattr(tb, 'tb_next'): + def f(cur): + while cur is not None: + yield self.Entry(cur) + cur = cur.tb_next + list.__init__(self, f(tb)) + else: + list.__init__(self, tb) + + def cut(self, path=None, lineno=None, firstlineno=None, excludepath=None): + """ return a Traceback instance wrapping part of this Traceback + + by provding any combination of path, lineno and firstlineno, the + first frame to start the to-be-returned traceback is determined + + this allows cutting the first part of a Traceback instance e.g. 
+ for formatting reasons (removing some uninteresting bits that deal + with handling of the exception/traceback) + """ + for x in self: + code = x.frame.code + codepath = code.path + if ((path is None or codepath == path) and + (excludepath is None or not hasattr(codepath, 'relto') or + not codepath.relto(excludepath)) and + (lineno is None or x.lineno == lineno) and + (firstlineno is None or x.frame.code.firstlineno == firstlineno)): + return Traceback(x._rawentry) + return self + + def __getitem__(self, key): + val = super(Traceback, self).__getitem__(key) + if isinstance(key, type(slice(0))): + val = self.__class__(val) + return val + + def filter(self, fn=lambda x: not x.ishidden()): + """ return a Traceback instance with certain items removed + + fn is a function that gets a single argument, a TracebackItem + instance, and should return True when the item should be added + to the Traceback, False when not + + by default this removes all the TracebackItems which are hidden + (see ishidden() above) + """ + return Traceback(filter(fn, self)) + + def getcrashentry(self): + """ return last non-hidden traceback entry that lead + to the exception of a traceback. + """ + for i in range(-1, -len(self)-1, -1): + entry = self[i] + if not entry.ishidden(): + return entry + return self[-1] + + def recursionindex(self): + """ return the index of the frame/TracebackItem where recursion + originates if appropriate, None if no recursion occurred + """ + cache = {} + for i, entry in enumerate(self): + # id for the code.raw is needed to work around + # the strange metaprogramming in the decorator lib from pypi + # which generates code objects that have hash/value equality + #XXX needs a test + key = entry.frame.code.path, id(entry.frame.code.raw), entry.lineno + #print "checking for recursion at", key + l = cache.setdefault(key, []) + if l: + f = entry.frame + loc = f.f_locals + for otherloc in l: + if f.is_true(f.eval(co_equal, + __recursioncache_locals_1=loc, + __recursioncache_locals_2=otherloc)): + return i + l.append(entry.frame.f_locals) + return None + +co_equal = compile('__recursioncache_locals_1 == __recursioncache_locals_2', + '?', 'eval') + +class ExceptionInfo(object): + """ wraps sys.exc_info() objects and offers + help for navigating the traceback. 
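+
+    Example (a sketch, using the standard py.code API)::
+
+        try:
+            1 / 0
+        except ZeroDivisionError:
+            excinfo = py.code.ExceptionInfo()   # wraps sys.exc_info()
+            excinfo.exconly()       # e.g. 'ZeroDivisionError: ...'
+            excinfo.traceback[-1]   # last py.code.TracebackEntry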
+ """ + _striptext = '' + def __init__(self, tup=None, exprinfo=None): + if tup is None: + tup = sys.exc_info() + if exprinfo is None and isinstance(tup[1], AssertionError): + exprinfo = getattr(tup[1], 'msg', None) + if exprinfo is None: + exprinfo = str(tup[1]) + if exprinfo and exprinfo.startswith('assert '): + self._striptext = 'AssertionError: ' + self._excinfo = tup + #: the exception class + self.type = tup[0] + #: the exception instance + self.value = tup[1] + #: the exception raw traceback + self.tb = tup[2] + #: the exception type name + self.typename = self.type.__name__ + #: the exception traceback (py.code.Traceback instance) + self.traceback = py.code.Traceback(self.tb) + + def __repr__(self): + return "" % ( + self.typename, len(self.traceback)) + + def exconly(self, tryshort=False): + """ return the exception as a string + + when 'tryshort' resolves to True, and the exception is a + py.code._AssertionError, only the actual exception part of + the exception representation is returned (so 'AssertionError: ' is + removed from the beginning) + """ + lines = format_exception_only(self.type, self.value) + text = ''.join(lines) + text = text.rstrip() + if tryshort: + if text.startswith(self._striptext): + text = text[len(self._striptext):] + return text + + def errisinstance(self, exc): + """ return True if the exception is an instance of exc """ + return isinstance(self.value, exc) + + def _getreprcrash(self): + exconly = self.exconly(tryshort=True) + entry = self.traceback.getcrashentry() + path, lineno = entry.frame.code.raw.co_filename, entry.lineno + return ReprFileLocation(path, lineno+1, exconly) + + def getrepr(self, showlocals=False, style="long", + abspath=False, tbfilter=True, funcargs=False): + """ return str()able representation of this exception info. + showlocals: show locals per traceback entry + style: long|short|no|native traceback style + tbfilter: hide entries (where __tracebackhide__ is true) + + in case of style==native, tbfilter and showlocals is ignored. + """ + if style == 'native': + return ReprExceptionInfo(ReprTracebackNative( + traceback.format_exception( + self.type, + self.value, + self.traceback[0]._rawentry, + )), self._getreprcrash()) + + fmt = FormattedExcinfo( + showlocals=showlocals, style=style, + abspath=abspath, tbfilter=tbfilter, funcargs=funcargs) + return fmt.repr_excinfo(self) + + def __str__(self): + entry = self.traceback[-1] + loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly()) + return str(loc) + + def __unicode__(self): + entry = self.traceback[-1] + loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly()) + return loc.__unicode__() + + +class FormattedExcinfo(object): + """ presenting information about failing Functions and Generators. 
""" + # for traceback entries + flow_marker = ">" + fail_marker = "E" + + def __init__(self, showlocals=False, style="long", + abspath=True, tbfilter=True, funcargs=False): + self.showlocals = showlocals + self.style = style + self.tbfilter = tbfilter + self.funcargs = funcargs + self.abspath = abspath + self.astcache = {} + + def _getindent(self, source): + # figure out indent for given source + try: + s = str(source.getstatement(len(source)-1)) + except KeyboardInterrupt: + raise + except: + try: + s = str(source[-1]) + except KeyboardInterrupt: + raise + except: + return 0 + return 4 + (len(s) - len(s.lstrip())) + + def _getentrysource(self, entry): + source = entry.getsource(self.astcache) + if source is not None: + source = source.deindent() + return source + + def _saferepr(self, obj): + return py.io.saferepr(obj) + + def repr_args(self, entry): + if self.funcargs: + args = [] + for argname, argvalue in entry.frame.getargs(var=True): + args.append((argname, self._saferepr(argvalue))) + return ReprFuncArgs(args) + + def get_source(self, source, line_index=-1, excinfo=None, short=False): + """ return formatted and marked up source lines. """ + lines = [] + if source is None or line_index >= len(source.lines): + source = py.code.Source("???") + line_index = 0 + if line_index < 0: + line_index += len(source) + space_prefix = " " + if short: + lines.append(space_prefix + source.lines[line_index].strip()) + else: + for line in source.lines[:line_index]: + lines.append(space_prefix + line) + lines.append(self.flow_marker + " " + source.lines[line_index]) + for line in source.lines[line_index+1:]: + lines.append(space_prefix + line) + if excinfo is not None: + indent = 4 if short else self._getindent(source) + lines.extend(self.get_exconly(excinfo, indent=indent, markall=True)) + return lines + + def get_exconly(self, excinfo, indent=4, markall=False): + lines = [] + indent = " " * indent + # get the real exception information out + exlines = excinfo.exconly(tryshort=True).split('\n') + failindent = self.fail_marker + indent[1:] + for line in exlines: + lines.append(failindent + line) + if not markall: + failindent = indent + return lines + + def repr_locals(self, locals): + if self.showlocals: + lines = [] + keys = [loc for loc in locals if loc[0] != "@"] + keys.sort() + for name in keys: + value = locals[name] + if name == '__builtins__': + lines.append("__builtins__ = ") + else: + # This formatting could all be handled by the + # _repr() function, which is only reprlib.Repr in + # disguise, so is very configurable. 
+ str_repr = self._saferepr(value) + #if len(str_repr) < 70 or not isinstance(value, + # (list, tuple, dict)): + lines.append("%-10s = %s" %(name, str_repr)) + #else: + # self._line("%-10s =\\" % (name,)) + # # XXX + # pprint.pprint(value, stream=self.excinfowriter) + return ReprLocals(lines) + + def repr_traceback_entry(self, entry, excinfo=None): + source = self._getentrysource(entry) + if source is None: + source = py.code.Source("???") + line_index = 0 + else: + # entry.getfirstlinesource() can be -1, should be 0 on jython + line_index = entry.lineno - max(entry.getfirstlinesource(), 0) + + lines = [] + style = entry._repr_style + if style is None: + style = self.style + if style in ("short", "long"): + short = style == "short" + reprargs = self.repr_args(entry) if not short else None + s = self.get_source(source, line_index, excinfo, short=short) + lines.extend(s) + if short: + message = "in %s" %(entry.name) + else: + message = excinfo and excinfo.typename or "" + path = self._makepath(entry.path) + filelocrepr = ReprFileLocation(path, entry.lineno+1, message) + localsrepr = None + if not short: + localsrepr = self.repr_locals(entry.locals) + return ReprEntry(lines, reprargs, localsrepr, filelocrepr, style) + if excinfo: + lines.extend(self.get_exconly(excinfo, indent=4)) + return ReprEntry(lines, None, None, None, style) + + def _makepath(self, path): + if not self.abspath: + try: + np = py.path.local().bestrelpath(path) + except OSError: + return path + if len(np) < len(str(path)): + path = np + return path + + def repr_traceback(self, excinfo): + traceback = excinfo.traceback + if self.tbfilter: + traceback = traceback.filter() + recursionindex = None + if excinfo.errisinstance(RuntimeError): + if "maximum recursion depth exceeded" in str(excinfo.value): + recursionindex = traceback.recursionindex() + last = traceback[-1] + entries = [] + extraline = None + for index, entry in enumerate(traceback): + einfo = (last == entry) and excinfo or None + reprentry = self.repr_traceback_entry(entry, einfo) + entries.append(reprentry) + if index == recursionindex: + extraline = "!!! Recursion detected (same locals & position)" + break + return ReprTraceback(entries, extraline, style=self.style) + + def repr_excinfo(self, excinfo): + reprtraceback = self.repr_traceback(excinfo) + reprcrash = excinfo._getreprcrash() + return ReprExceptionInfo(reprtraceback, reprcrash) + +class TerminalRepr: + def __str__(self): + s = self.__unicode__() + if sys.version_info[0] < 3: + s = s.encode('utf-8') + return s + + def __unicode__(self): + # FYI this is called from pytest-xdist's serialization of exception + # information. 
+ io = py.io.TextIO() + tw = py.io.TerminalWriter(file=io) + self.toterminal(tw) + return io.getvalue().strip() + + def __repr__(self): + return "<%s instance at %0x>" %(self.__class__, id(self)) + + +class ReprExceptionInfo(TerminalRepr): + def __init__(self, reprtraceback, reprcrash): + self.reprtraceback = reprtraceback + self.reprcrash = reprcrash + self.sections = [] + + def addsection(self, name, content, sep="-"): + self.sections.append((name, content, sep)) + + def toterminal(self, tw): + self.reprtraceback.toterminal(tw) + for name, content, sep in self.sections: + tw.sep(sep, name) + tw.line(content) + +class ReprTraceback(TerminalRepr): + entrysep = "_ " + + def __init__(self, reprentries, extraline, style): + self.reprentries = reprentries + self.extraline = extraline + self.style = style + + def toterminal(self, tw): + # the entries might have different styles + last_style = None + for i, entry in enumerate(self.reprentries): + if entry.style == "long": + tw.line("") + entry.toterminal(tw) + if i < len(self.reprentries) - 1: + next_entry = self.reprentries[i+1] + if entry.style == "long" or \ + entry.style == "short" and next_entry.style == "long": + tw.sep(self.entrysep) + + if self.extraline: + tw.line(self.extraline) + +class ReprTracebackNative(ReprTraceback): + def __init__(self, tblines): + self.style = "native" + self.reprentries = [ReprEntryNative(tblines)] + self.extraline = None + +class ReprEntryNative(TerminalRepr): + style = "native" + + def __init__(self, tblines): + self.lines = tblines + + def toterminal(self, tw): + tw.write("".join(self.lines)) + +class ReprEntry(TerminalRepr): + localssep = "_ " + + def __init__(self, lines, reprfuncargs, reprlocals, filelocrepr, style): + self.lines = lines + self.reprfuncargs = reprfuncargs + self.reprlocals = reprlocals + self.reprfileloc = filelocrepr + self.style = style + + def toterminal(self, tw): + if self.style == "short": + self.reprfileloc.toterminal(tw) + for line in self.lines: + red = line.startswith("E ") + tw.line(line, bold=True, red=red) + #tw.line("") + return + if self.reprfuncargs: + self.reprfuncargs.toterminal(tw) + for line in self.lines: + red = line.startswith("E ") + tw.line(line, bold=True, red=red) + if self.reprlocals: + #tw.sep(self.localssep, "Locals") + tw.line("") + self.reprlocals.toterminal(tw) + if self.reprfileloc: + if self.lines: + tw.line("") + self.reprfileloc.toterminal(tw) + + def __str__(self): + return "%s\n%s\n%s" % ("\n".join(self.lines), + self.reprlocals, + self.reprfileloc) + +class ReprFileLocation(TerminalRepr): + def __init__(self, path, lineno, message): + self.path = str(path) + self.lineno = lineno + self.message = message + + def toterminal(self, tw): + # filename and lineno output for each entry, + # using an output format that most editors unterstand + msg = self.message + i = msg.find("\n") + if i != -1: + msg = msg[:i] + tw.line("%s:%s: %s" %(self.path, self.lineno, msg)) + +class ReprLocals(TerminalRepr): + def __init__(self, lines): + self.lines = lines + + def toterminal(self, tw): + for line in self.lines: + tw.line(line) + +class ReprFuncArgs(TerminalRepr): + def __init__(self, args): + self.args = args + + def toterminal(self, tw): + if self.args: + linesofar = "" + for name, value in self.args: + ns = "%s = %s" %(name, value) + if len(ns) + len(linesofar) + 2 > tw.fullwidth: + if linesofar: + tw.line(linesofar) + linesofar = ns + else: + if linesofar: + linesofar += ", " + ns + else: + linesofar = ns + if linesofar: + tw.line(linesofar) + tw.line("") + + + 
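+# Example usage of the patching helpers below (a sketch, assuming the
+# py.code.patch_builtins / py.code.unpatch_builtins exports):
+#
+#     py.code.patch_builtins(assertion=True, compile=False)
+#     try:
+#         assert 1 == 2     # raises the reinterpreting AssertionError above
+#     finally:
+#         py.code.unpatch_builtins(assertion=True, compile=False)
+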
+oldbuiltins = {} + +def patch_builtins(assertion=True, compile=True): + """ put compile and AssertionError builtins to Python's builtins. """ + if assertion: + from py._code import assertion + l = oldbuiltins.setdefault('AssertionError', []) + l.append(py.builtin.builtins.AssertionError) + py.builtin.builtins.AssertionError = assertion.AssertionError + if compile: + l = oldbuiltins.setdefault('compile', []) + l.append(py.builtin.builtins.compile) + py.builtin.builtins.compile = py.code.compile + +def unpatch_builtins(assertion=True, compile=True): + """ remove compile and AssertionError builtins from Python builtins. """ + if assertion: + py.builtin.builtins.AssertionError = oldbuiltins['AssertionError'].pop() + if compile: + py.builtin.builtins.compile = oldbuiltins['compile'].pop() + +def getrawcode(obj, trycall=True): + """ return code object for given function. """ + try: + return obj.__code__ + except AttributeError: + obj = getattr(obj, 'im_func', obj) + obj = getattr(obj, 'func_code', obj) + obj = getattr(obj, 'f_code', obj) + obj = getattr(obj, '__code__', obj) + if trycall and not hasattr(obj, 'co_firstlineno'): + if hasattr(obj, '__call__') and not isclass(obj): + x = getrawcode(obj.__call__, trycall=False) + if hasattr(x, 'co_firstlineno'): + return x + return obj + diff --git a/venv/lib/python3.10/site-packages/py/_code/source.py b/venv/lib/python3.10/site-packages/py/_code/source.py new file mode 100644 index 0000000..7fc7b23 --- /dev/null +++ b/venv/lib/python3.10/site-packages/py/_code/source.py @@ -0,0 +1,410 @@ +from __future__ import generators + +from bisect import bisect_right +import sys +import inspect, tokenize +import py +from types import ModuleType +cpy_compile = compile + +try: + import _ast + from _ast import PyCF_ONLY_AST as _AST_FLAG +except ImportError: + _AST_FLAG = 0 + _ast = None + + +class Source(object): + """ a immutable object holding a source code fragment, + possibly deindenting it. + """ + _compilecounter = 0 + def __init__(self, *parts, **kwargs): + self.lines = lines = [] + de = kwargs.get('deindent', True) + rstrip = kwargs.get('rstrip', True) + for part in parts: + if not part: + partlines = [] + if isinstance(part, Source): + partlines = part.lines + elif isinstance(part, (tuple, list)): + partlines = [x.rstrip("\n") for x in part] + elif isinstance(part, py.builtin._basestring): + partlines = part.split('\n') + if rstrip: + while partlines: + if partlines[-1].strip(): + break + partlines.pop() + else: + partlines = getsource(part, deindent=de).lines + if de: + partlines = deindent(partlines) + lines.extend(partlines) + + def __eq__(self, other): + try: + return self.lines == other.lines + except AttributeError: + if isinstance(other, str): + return str(self) == other + return False + + def __getitem__(self, key): + if isinstance(key, int): + return self.lines[key] + else: + if key.step not in (None, 1): + raise IndexError("cannot slice a Source with a step") + return self.__getslice__(key.start, key.stop) + + def __len__(self): + return len(self.lines) + + def __getslice__(self, start, end): + newsource = Source() + newsource.lines = self.lines[start:end] + return newsource + + def strip(self): + """ return new source object with trailing + and leading blank lines removed. 
+ """ + start, end = 0, len(self) + while start < end and not self.lines[start].strip(): + start += 1 + while end > start and not self.lines[end-1].strip(): + end -= 1 + source = Source() + source.lines[:] = self.lines[start:end] + return source + + def putaround(self, before='', after='', indent=' ' * 4): + """ return a copy of the source object with + 'before' and 'after' wrapped around it. + """ + before = Source(before) + after = Source(after) + newsource = Source() + lines = [ (indent + line) for line in self.lines] + newsource.lines = before.lines + lines + after.lines + return newsource + + def indent(self, indent=' ' * 4): + """ return a copy of the source object with + all lines indented by the given indent-string. + """ + newsource = Source() + newsource.lines = [(indent+line) for line in self.lines] + return newsource + + def getstatement(self, lineno, assertion=False): + """ return Source statement which contains the + given linenumber (counted from 0). + """ + start, end = self.getstatementrange(lineno, assertion) + return self[start:end] + + def getstatementrange(self, lineno, assertion=False): + """ return (start, end) tuple which spans the minimal + statement region which containing the given lineno. + """ + if not (0 <= lineno < len(self)): + raise IndexError("lineno out of range") + ast, start, end = getstatementrange_ast(lineno, self) + return start, end + + def deindent(self, offset=None): + """ return a new source object deindented by offset. + If offset is None then guess an indentation offset from + the first non-blank line. Subsequent lines which have a + lower indentation offset will be copied verbatim as + they are assumed to be part of multilines. + """ + # XXX maybe use the tokenizer to properly handle multiline + # strings etc.pp? + newsource = Source() + newsource.lines[:] = deindent(self.lines, offset) + return newsource + + def isparseable(self, deindent=True): + """ return True if source is parseable, heuristically + deindenting it by default. + """ + try: + import parser + except ImportError: + syntax_checker = lambda x: compile(x, 'asd', 'exec') + else: + syntax_checker = parser.suite + + if deindent: + source = str(self.deindent()) + else: + source = str(self) + try: + #compile(source+'\n', "x", "exec") + syntax_checker(source+'\n') + except KeyboardInterrupt: + raise + except Exception: + return False + else: + return True + + def __str__(self): + return "\n".join(self.lines) + + def compile(self, filename=None, mode='exec', + flag=generators.compiler_flag, + dont_inherit=0, _genframe=None): + """ return compiled code object. if filename is None + invent an artificial filename which displays + the source/line position of the caller frame. 
+ """ + if not filename or py.path.local(filename).check(file=0): + if _genframe is None: + _genframe = sys._getframe(1) # the caller + fn,lineno = _genframe.f_code.co_filename, _genframe.f_lineno + base = "<%d-codegen " % self._compilecounter + self.__class__._compilecounter += 1 + if not filename: + filename = base + '%s:%d>' % (fn, lineno) + else: + filename = base + '%r %s:%d>' % (filename, fn, lineno) + source = "\n".join(self.lines) + '\n' + try: + co = cpy_compile(source, filename, mode, flag) + except SyntaxError: + ex = sys.exc_info()[1] + # re-represent syntax errors from parsing python strings + msglines = self.lines[:ex.lineno] + if ex.offset: + msglines.append(" "*ex.offset + '^') + msglines.append("(code was compiled probably from here: %s)" % filename) + newex = SyntaxError('\n'.join(msglines)) + newex.offset = ex.offset + newex.lineno = ex.lineno + newex.text = ex.text + raise newex + else: + if flag & _AST_FLAG: + return co + lines = [(x + "\n") for x in self.lines] + import linecache + linecache.cache[filename] = (1, None, lines, filename) + return co + +# +# public API shortcut functions +# + +def compile_(source, filename=None, mode='exec', flags= + generators.compiler_flag, dont_inherit=0): + """ compile the given source to a raw code object, + and maintain an internal cache which allows later + retrieval of the source code for the code object + and any recursively created code objects. + """ + if _ast is not None and isinstance(source, _ast.AST): + # XXX should Source support having AST? + return cpy_compile(source, filename, mode, flags, dont_inherit) + _genframe = sys._getframe(1) # the caller + s = Source(source) + co = s.compile(filename, mode, flags, _genframe=_genframe) + return co + + +def getfslineno(obj): + """ Return source location (path, lineno) for the given object. 
+ If the source cannot be determined return ("", -1) + """ + try: + code = py.code.Code(obj) + except TypeError: + try: + fn = (inspect.getsourcefile(obj) or + inspect.getfile(obj)) + except TypeError: + return "", -1 + + fspath = fn and py.path.local(fn) or None + lineno = -1 + if fspath: + try: + _, lineno = findsource(obj) + except IOError: + pass + else: + fspath = code.path + lineno = code.firstlineno + assert isinstance(lineno, int) + return fspath, lineno + +# +# helper functions +# + +def findsource(obj): + try: + sourcelines, lineno = inspect.findsource(obj) + except py.builtin._sysex: + raise + except: + return None, -1 + source = Source() + source.lines = [line.rstrip() for line in sourcelines] + return source, lineno + +def getsource(obj, **kwargs): + obj = py.code.getrawcode(obj) + try: + strsrc = inspect.getsource(obj) + except IndentationError: + strsrc = "\"Buggy python version consider upgrading, cannot get source\"" + assert isinstance(strsrc, str) + return Source(strsrc, **kwargs) + +def deindent(lines, offset=None): + if offset is None: + for line in lines: + line = line.expandtabs() + s = line.lstrip() + if s: + offset = len(line)-len(s) + break + else: + offset = 0 + if offset == 0: + return list(lines) + newlines = [] + def readline_generator(lines): + for line in lines: + yield line + '\n' + while True: + yield '' + + it = readline_generator(lines) + + try: + for _, _, (sline, _), (eline, _), _ in tokenize.generate_tokens(lambda: next(it)): + if sline > len(lines): + break # End of input reached + if sline > len(newlines): + line = lines[sline - 1].expandtabs() + if line.lstrip() and line[:offset].isspace(): + line = line[offset:] # Deindent + newlines.append(line) + + for i in range(sline, eline): + # Don't deindent continuing lines of + # multiline tokens (i.e. multiline strings) + newlines.append(lines[i]) + except (IndentationError, tokenize.TokenError): + pass + # Add any lines we didn't see. E.g. if an exception was raised. 
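+    # e.g. deindent(["    a = 1", "    b = 2"]) -> ["a = 1", "b = 2"]
+    # (when offset is None it is guessed from the first non-blank line)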
+ newlines.extend(lines[len(newlines):]) + return newlines + + +def get_statement_startend2(lineno, node): + import ast + # flatten all statements and except handlers into one lineno-list + # AST's line numbers start indexing at 1 + l = [] + for x in ast.walk(node): + if isinstance(x, _ast.stmt) or isinstance(x, _ast.ExceptHandler): + l.append(x.lineno - 1) + for name in "finalbody", "orelse": + val = getattr(x, name, None) + if val: + # treat the finally/orelse part as its own statement + l.append(val[0].lineno - 1 - 1) + l.sort() + insert_index = bisect_right(l, lineno) + start = l[insert_index - 1] + if insert_index >= len(l): + end = None + else: + end = l[insert_index] + return start, end + + +def getstatementrange_ast(lineno, source, assertion=False, astnode=None): + if astnode is None: + content = str(source) + try: + astnode = compile(content, "source", "exec", 1024) # 1024 for AST + except ValueError: + start, end = getstatementrange_old(lineno, source, assertion) + return None, start, end + start, end = get_statement_startend2(lineno, astnode) + # we need to correct the end: + # - ast-parsing strips comments + # - there might be empty lines + # - we might have lesser indented code blocks at the end + if end is None: + end = len(source.lines) + + if end > start + 1: + # make sure we don't span differently indented code blocks + # by using the BlockFinder helper used which inspect.getsource() uses itself + block_finder = inspect.BlockFinder() + # if we start with an indented line, put blockfinder to "started" mode + block_finder.started = source.lines[start][0].isspace() + it = ((x + "\n") for x in source.lines[start:end]) + try: + for tok in tokenize.generate_tokens(lambda: next(it)): + block_finder.tokeneater(*tok) + except (inspect.EndOfBlock, IndentationError): + end = block_finder.last + start + except Exception: + pass + + # the end might still point to a comment or empty line, correct it + while end: + line = source.lines[end - 1].lstrip() + if line.startswith("#") or not line: + end -= 1 + else: + break + return astnode, start, end + + +def getstatementrange_old(lineno, source, assertion=False): + """ return (start, end) tuple which spans the minimal + statement region which containing the given lineno. + raise an IndexError if no such statementrange can be found. + """ + # XXX this logic is only used on python2.4 and below + # 1. find the start of the statement + from codeop import compile_command + for start in range(lineno, -1, -1): + if assertion: + line = source.lines[start] + # the following lines are not fully tested, change with care + if 'super' in line and 'self' in line and '__init__' in line: + raise IndexError("likely a subclass") + if "assert" not in line and "raise" not in line: + continue + trylines = source.lines[start:lineno+1] + # quick hack to prepare parsing an indented line with + # compile_command() (which errors on "return" outside defs) + trylines.insert(0, 'def xxx():') + trysource = '\n '.join(trylines) + # ^ space here + try: + compile_command(trysource) + except (SyntaxError, OverflowError, ValueError): + continue + + # 2. 
find the end of the statement + for end in range(lineno+1, len(source)+1): + trysource = source[start:end] + if trysource.isparseable(): + return start, end + raise SyntaxError("no valid source range around line %d " % (lineno,)) + + diff --git a/venv/lib/python3.10/site-packages/py/_error.py b/venv/lib/python3.10/site-packages/py/_error.py new file mode 100644 index 0000000..a6375de --- /dev/null +++ b/venv/lib/python3.10/site-packages/py/_error.py @@ -0,0 +1,91 @@ +""" +create errno-specific classes for IO or os calls. + +""" +from types import ModuleType +import sys, os, errno + +class Error(EnvironmentError): + def __repr__(self): + return "%s.%s %r: %s " %(self.__class__.__module__, + self.__class__.__name__, + self.__class__.__doc__, + " ".join(map(str, self.args)), + #repr(self.args) + ) + + def __str__(self): + s = "[%s]: %s" %(self.__class__.__doc__, + " ".join(map(str, self.args)), + ) + return s + +_winerrnomap = { + 2: errno.ENOENT, + 3: errno.ENOENT, + 17: errno.EEXIST, + 18: errno.EXDEV, + 13: errno.EBUSY, # empty cd drive, but ENOMEDIUM seems unavailiable + 22: errno.ENOTDIR, + 20: errno.ENOTDIR, + 267: errno.ENOTDIR, + 5: errno.EACCES, # anything better? +} + +class ErrorMaker(ModuleType): + """ lazily provides Exception classes for each possible POSIX errno + (as defined per the 'errno' module). All such instances + subclass EnvironmentError. + """ + Error = Error + _errno2class = {} + + def __getattr__(self, name): + if name[0] == "_": + raise AttributeError(name) + eno = getattr(errno, name) + cls = self._geterrnoclass(eno) + setattr(self, name, cls) + return cls + + def _geterrnoclass(self, eno): + try: + return self._errno2class[eno] + except KeyError: + clsname = errno.errorcode.get(eno, "UnknownErrno%d" %(eno,)) + errorcls = type(Error)(clsname, (Error,), + {'__module__':'py.error', + '__doc__': os.strerror(eno)}) + self._errno2class[eno] = errorcls + return errorcls + + def checked_call(self, func, *args, **kwargs): + """ call a function and raise an errno-exception if applicable. 
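+
+        Example (sketch): py.error.checked_call(os.stat, "no/such/file")
+        raises py.error.ENOENT instead of a plain OSError.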
""" + __tracebackhide__ = True + try: + return func(*args, **kwargs) + except self.Error: + raise + except (OSError, EnvironmentError): + cls, value, tb = sys.exc_info() + if not hasattr(value, 'errno'): + raise + __tracebackhide__ = False + errno = value.errno + try: + if not isinstance(value, WindowsError): + raise NameError + except NameError: + # we are not on Windows, or we got a proper OSError + cls = self._geterrnoclass(errno) + else: + try: + cls = self._geterrnoclass(_winerrnomap[errno]) + except KeyError: + raise value + raise cls("%s%r" % (func.__name__, args)) + __tracebackhide__ = True + + +error = ErrorMaker('py.error') +sys.modules[error.__name__] = error \ No newline at end of file diff --git a/venv/lib/python3.10/site-packages/py/_io/__init__.py b/venv/lib/python3.10/site-packages/py/_io/__init__.py new file mode 100644 index 0000000..835f01f --- /dev/null +++ b/venv/lib/python3.10/site-packages/py/_io/__init__.py @@ -0,0 +1 @@ +""" input/output helping """ diff --git a/venv/lib/python3.10/site-packages/py/_io/capture.py b/venv/lib/python3.10/site-packages/py/_io/capture.py new file mode 100644 index 0000000..cacf2fa --- /dev/null +++ b/venv/lib/python3.10/site-packages/py/_io/capture.py @@ -0,0 +1,371 @@ +import os +import sys +import py +import tempfile + +try: + from io import StringIO +except ImportError: + from StringIO import StringIO + +if sys.version_info < (3,0): + class TextIO(StringIO): + def write(self, data): + if not isinstance(data, unicode): + data = unicode(data, getattr(self, '_encoding', 'UTF-8'), 'replace') + return StringIO.write(self, data) +else: + TextIO = StringIO + +try: + from io import BytesIO +except ImportError: + class BytesIO(StringIO): + def write(self, data): + if isinstance(data, unicode): + raise TypeError("not a byte value: %r" %(data,)) + return StringIO.write(self, data) + +patchsysdict = {0: 'stdin', 1: 'stdout', 2: 'stderr'} + +class FDCapture: + """ Capture IO to/from a given os-level filedescriptor. """ + + def __init__(self, targetfd, tmpfile=None, now=True, patchsys=False): + """ save targetfd descriptor, and open a new + temporary file there. If no tmpfile is + specified a tempfile.Tempfile() will be opened + in text mode. 
+ """ + self.targetfd = targetfd + if tmpfile is None and targetfd != 0: + f = tempfile.TemporaryFile('wb+') + tmpfile = dupfile(f, encoding="UTF-8") + f.close() + self.tmpfile = tmpfile + self._savefd = os.dup(self.targetfd) + if patchsys: + self._oldsys = getattr(sys, patchsysdict[targetfd]) + if now: + self.start() + + def start(self): + try: + os.fstat(self._savefd) + except OSError: + raise ValueError("saved filedescriptor not valid, " + "did you call start() twice?") + if self.targetfd == 0 and not self.tmpfile: + fd = os.open(devnullpath, os.O_RDONLY) + os.dup2(fd, 0) + os.close(fd) + if hasattr(self, '_oldsys'): + setattr(sys, patchsysdict[self.targetfd], DontReadFromInput()) + else: + os.dup2(self.tmpfile.fileno(), self.targetfd) + if hasattr(self, '_oldsys'): + setattr(sys, patchsysdict[self.targetfd], self.tmpfile) + + def done(self): + """ unpatch and clean up, returns the self.tmpfile (file object) + """ + os.dup2(self._savefd, self.targetfd) + os.close(self._savefd) + if self.targetfd != 0: + self.tmpfile.seek(0) + if hasattr(self, '_oldsys'): + setattr(sys, patchsysdict[self.targetfd], self._oldsys) + return self.tmpfile + + def writeorg(self, data): + """ write a string to the original file descriptor + """ + tempfp = tempfile.TemporaryFile() + try: + os.dup2(self._savefd, tempfp.fileno()) + tempfp.write(data) + finally: + tempfp.close() + + +def dupfile(f, mode=None, buffering=0, raising=False, encoding=None): + """ return a new open file object that's a duplicate of f + + mode is duplicated if not given, 'buffering' controls + buffer size (defaulting to no buffering) and 'raising' + defines whether an exception is raised when an incompatible + file object is passed in (if raising is False, the file + object itself will be returned) + """ + try: + fd = f.fileno() + mode = mode or f.mode + except AttributeError: + if raising: + raise + return f + newfd = os.dup(fd) + if sys.version_info >= (3,0): + if encoding is not None: + mode = mode.replace("b", "") + buffering = True + return os.fdopen(newfd, mode, buffering, encoding, closefd=True) + else: + f = os.fdopen(newfd, mode, buffering) + if encoding is not None: + return EncodedFile(f, encoding) + return f + +class EncodedFile(object): + def __init__(self, _stream, encoding): + self._stream = _stream + self.encoding = encoding + + def write(self, obj): + if isinstance(obj, unicode): + obj = obj.encode(self.encoding) + elif isinstance(obj, str): + pass + else: + obj = str(obj) + self._stream.write(obj) + + def writelines(self, linelist): + data = ''.join(linelist) + self.write(data) + + def __getattr__(self, name): + return getattr(self._stream, name) + +class Capture(object): + def call(cls, func, *args, **kwargs): + """ return a (res, out, err) tuple where + out and err represent the output/error output + during function execution. + call the given function with args/kwargs + and capture output/error during its execution. + """ + so = cls() + try: + res = func(*args, **kwargs) + finally: + out, err = so.reset() + return res, out, err + call = classmethod(call) + + def reset(self): + """ reset sys.stdout/stderr and return captured output as strings. 
""" + if hasattr(self, '_reset'): + raise ValueError("was already reset") + self._reset = True + outfile, errfile = self.done(save=False) + out, err = "", "" + if outfile and not outfile.closed: + out = outfile.read() + outfile.close() + if errfile and errfile != outfile and not errfile.closed: + err = errfile.read() + errfile.close() + return out, err + + def suspend(self): + """ return current snapshot captures, memorize tempfiles. """ + outerr = self.readouterr() + outfile, errfile = self.done() + return outerr + + +class StdCaptureFD(Capture): + """ This class allows to capture writes to FD1 and FD2 + and may connect a NULL file to FD0 (and prevent + reads from sys.stdin). If any of the 0,1,2 file descriptors + is invalid it will not be captured. + """ + def __init__(self, out=True, err=True, mixed=False, + in_=True, patchsys=True, now=True): + self._options = { + "out": out, + "err": err, + "mixed": mixed, + "in_": in_, + "patchsys": patchsys, + "now": now, + } + self._save() + if now: + self.startall() + + def _save(self): + in_ = self._options['in_'] + out = self._options['out'] + err = self._options['err'] + mixed = self._options['mixed'] + patchsys = self._options['patchsys'] + if in_: + try: + self.in_ = FDCapture(0, tmpfile=None, now=False, + patchsys=patchsys) + except OSError: + pass + if out: + tmpfile = None + if hasattr(out, 'write'): + tmpfile = out + try: + self.out = FDCapture(1, tmpfile=tmpfile, + now=False, patchsys=patchsys) + self._options['out'] = self.out.tmpfile + except OSError: + pass + if err: + if out and mixed: + tmpfile = self.out.tmpfile + elif hasattr(err, 'write'): + tmpfile = err + else: + tmpfile = None + try: + self.err = FDCapture(2, tmpfile=tmpfile, + now=False, patchsys=patchsys) + self._options['err'] = self.err.tmpfile + except OSError: + pass + + def startall(self): + if hasattr(self, 'in_'): + self.in_.start() + if hasattr(self, 'out'): + self.out.start() + if hasattr(self, 'err'): + self.err.start() + + def resume(self): + """ resume capturing with original temp files. """ + self.startall() + + def done(self, save=True): + """ return (outfile, errfile) and stop capturing. """ + outfile = errfile = None + if hasattr(self, 'out') and not self.out.tmpfile.closed: + outfile = self.out.done() + if hasattr(self, 'err') and not self.err.tmpfile.closed: + errfile = self.err.done() + if hasattr(self, 'in_'): + tmpfile = self.in_.done() + if save: + self._save() + return outfile, errfile + + def readouterr(self): + """ return snapshot value of stdout/stderr capturings. """ + if hasattr(self, "out"): + out = self._readsnapshot(self.out.tmpfile) + else: + out = "" + if hasattr(self, "err"): + err = self._readsnapshot(self.err.tmpfile) + else: + err = "" + return out, err + + def _readsnapshot(self, f): + f.seek(0) + res = f.read() + enc = getattr(f, "encoding", None) + if enc: + res = py.builtin._totext(res, enc, "replace") + f.truncate(0) + f.seek(0) + return res + + +class StdCapture(Capture): + """ This class allows to capture writes to sys.stdout|stderr "in-memory" + and will raise errors on tries to read from sys.stdin. It only + modifies sys.stdout|stderr|stdin attributes and does not + touch underlying File Descriptors (use StdCaptureFD for that). 
+ """ + def __init__(self, out=True, err=True, in_=True, mixed=False, now=True): + self._oldout = sys.stdout + self._olderr = sys.stderr + self._oldin = sys.stdin + if out and not hasattr(out, 'file'): + out = TextIO() + self.out = out + if err: + if mixed: + err = out + elif not hasattr(err, 'write'): + err = TextIO() + self.err = err + self.in_ = in_ + if now: + self.startall() + + def startall(self): + if self.out: + sys.stdout = self.out + if self.err: + sys.stderr = self.err + if self.in_: + sys.stdin = self.in_ = DontReadFromInput() + + def done(self, save=True): + """ return (outfile, errfile) and stop capturing. """ + outfile = errfile = None + if self.out and not self.out.closed: + sys.stdout = self._oldout + outfile = self.out + outfile.seek(0) + if self.err and not self.err.closed: + sys.stderr = self._olderr + errfile = self.err + errfile.seek(0) + if self.in_: + sys.stdin = self._oldin + return outfile, errfile + + def resume(self): + """ resume capturing with original temp files. """ + self.startall() + + def readouterr(self): + """ return snapshot value of stdout/stderr capturings. """ + out = err = "" + if self.out: + out = self.out.getvalue() + self.out.truncate(0) + self.out.seek(0) + if self.err: + err = self.err.getvalue() + self.err.truncate(0) + self.err.seek(0) + return out, err + +class DontReadFromInput: + """Temporary stub class. Ideally when stdin is accessed, the + capturing should be turned off, with possibly all data captured + so far sent to the screen. This should be configurable, though, + because in automated test runs it is better to crash than + hang indefinitely. + """ + def read(self, *args): + raise IOError("reading from stdin while output is captured") + readline = read + readlines = read + __iter__ = read + + def fileno(self): + raise ValueError("redirected Stdin is pseudofile, has no fileno()") + def isatty(self): + return False + def close(self): + pass + +try: + devnullpath = os.devnull +except AttributeError: + if os.name == 'nt': + devnullpath = 'NUL' + else: + devnullpath = '/dev/null' diff --git a/venv/lib/python3.10/site-packages/py/_io/saferepr.py b/venv/lib/python3.10/site-packages/py/_io/saferepr.py new file mode 100644 index 0000000..8518290 --- /dev/null +++ b/venv/lib/python3.10/site-packages/py/_io/saferepr.py @@ -0,0 +1,71 @@ +import py +import sys + +builtin_repr = repr + +reprlib = py.builtin._tryimport('repr', 'reprlib') + +class SafeRepr(reprlib.Repr): + """ subclass of repr.Repr that limits the resulting size of repr() + and includes information on exceptions raised during the call. + """ + def repr(self, x): + return self._callhelper(reprlib.Repr.repr, self, x) + + def repr_unicode(self, x, level): + # Strictly speaking wrong on narrow builds + def repr(u): + if "'" not in u: + return py.builtin._totext("'%s'") % u + elif '"' not in u: + return py.builtin._totext('"%s"') % u + else: + return py.builtin._totext("'%s'") % u.replace("'", r"\'") + s = repr(x[:self.maxstring]) + if len(s) > self.maxstring: + i = max(0, (self.maxstring-3)//2) + j = max(0, self.maxstring-3-i) + s = repr(x[:i] + x[len(x)-j:]) + s = s[:i] + '...' 
+ s[len(s)-j:] + return s + + def repr_instance(self, x, level): + return self._callhelper(builtin_repr, x) + + def _callhelper(self, call, x, *args): + try: + # Try the vanilla repr and make sure that the result is a string + s = call(x, *args) + except py.builtin._sysex: + raise + except: + cls, e, tb = sys.exc_info() + exc_name = getattr(cls, '__name__', 'unknown') + try: + exc_info = str(e) + except py.builtin._sysex: + raise + except: + exc_info = 'unknown' + return '<[%s("%s") raised in repr()] %s object at 0x%x>' % ( + exc_name, exc_info, x.__class__.__name__, id(x)) + else: + if len(s) > self.maxsize: + i = max(0, (self.maxsize-3)//2) + j = max(0, self.maxsize-3-i) + s = s[:i] + '...' + s[len(s)-j:] + return s + +def saferepr(obj, maxsize=240): + """ return a size-limited safe repr-string for the given object. + Failing __repr__ functions of user instances will be represented + with a short exception info and 'saferepr' generally takes + care to never raise exceptions itself. This function is a wrapper + around the Repr/reprlib functionality of the standard 2.6 lib. + """ + # review exception handling + srepr = SafeRepr() + srepr.maxstring = maxsize + srepr.maxsize = maxsize + srepr.maxother = 160 + return srepr.repr(obj) diff --git a/venv/lib/python3.10/site-packages/py/_io/terminalwriter.py b/venv/lib/python3.10/site-packages/py/_io/terminalwriter.py new file mode 100644 index 0000000..442ca23 --- /dev/null +++ b/venv/lib/python3.10/site-packages/py/_io/terminalwriter.py @@ -0,0 +1,423 @@ +""" + +Helper functions for writing to terminals and files. + +""" + + +import sys, os, unicodedata +import py +py3k = sys.version_info[0] >= 3 +py33 = sys.version_info >= (3, 3) +from py.builtin import text, bytes + +win32_and_ctypes = False +colorama = None +if sys.platform == "win32": + try: + import colorama + except ImportError: + try: + import ctypes + win32_and_ctypes = True + except ImportError: + pass + + +def _getdimensions(): + if py33: + import shutil + size = shutil.get_terminal_size() + return size.lines, size.columns + else: + import termios, fcntl, struct + call = fcntl.ioctl(1, termios.TIOCGWINSZ, "\000" * 8) + height, width = struct.unpack("hhhh", call)[:2] + return height, width + + +def get_terminal_width(): + width = 0 + try: + _, width = _getdimensions() + except py.builtin._sysex: + raise + except: + # pass to fallback below + pass + + if width == 0: + # FALLBACK: + # * some exception happened + # * or this is emacs terminal which reports (0,0) + width = int(os.environ.get('COLUMNS', 80)) + + # XXX the windows getdimensions may be bogus, let's sanify a bit + if width < 40: + width = 80 + return width + +terminal_width = get_terminal_width() + +char_width = { + 'A': 1, # "Ambiguous" + 'F': 2, # Fullwidth + 'H': 1, # Halfwidth + 'N': 1, # Neutral + 'Na': 1, # Narrow + 'W': 2, # Wide +} + + +def get_line_width(text): + text = unicodedata.normalize('NFC', text) + return sum(char_width.get(unicodedata.east_asian_width(c), 1) for c in text) + + +# XXX unify with _escaped func below +def ansi_print(text, esc, file=None, newline=True, flush=False): + if file is None: + file = sys.stderr + text = text.rstrip() + if esc and not isinstance(esc, tuple): + esc = (esc,) + if esc and sys.platform != "win32" and file.isatty(): + text = (''.join(['\x1b[%sm' % cod for cod in esc]) + + text + + '\x1b[0m') # ANSI color code "reset" + if newline: + text += '\n' + + if esc and win32_and_ctypes and file.isatty(): + if 1 in esc: + bold = True + esc = tuple([x for x in esc if x != 1]) + else: + 
bold = False + esctable = {() : FOREGROUND_WHITE, # normal + (31,): FOREGROUND_RED, # red + (32,): FOREGROUND_GREEN, # green + (33,): FOREGROUND_GREEN|FOREGROUND_RED, # yellow + (34,): FOREGROUND_BLUE, # blue + (35,): FOREGROUND_BLUE|FOREGROUND_RED, # purple + (36,): FOREGROUND_BLUE|FOREGROUND_GREEN, # cyan + (37,): FOREGROUND_WHITE, # white + (39,): FOREGROUND_WHITE, # reset + } + attr = esctable.get(esc, FOREGROUND_WHITE) + if bold: + attr |= FOREGROUND_INTENSITY + STD_OUTPUT_HANDLE = -11 + STD_ERROR_HANDLE = -12 + if file is sys.stderr: + handle = GetStdHandle(STD_ERROR_HANDLE) + else: + handle = GetStdHandle(STD_OUTPUT_HANDLE) + oldcolors = GetConsoleInfo(handle).wAttributes + attr |= (oldcolors & 0x0f0) + SetConsoleTextAttribute(handle, attr) + while len(text) > 32768: + file.write(text[:32768]) + text = text[32768:] + if text: + file.write(text) + SetConsoleTextAttribute(handle, oldcolors) + else: + file.write(text) + + if flush: + file.flush() + +def should_do_markup(file): + if os.environ.get('PY_COLORS') == '1': + return True + if os.environ.get('PY_COLORS') == '0': + return False + if 'NO_COLOR' in os.environ: + return False + return hasattr(file, 'isatty') and file.isatty() \ + and os.environ.get('TERM') != 'dumb' \ + and not (sys.platform.startswith('java') and os._name == 'nt') + +class TerminalWriter(object): + _esctable = dict(black=30, red=31, green=32, yellow=33, + blue=34, purple=35, cyan=36, white=37, + Black=40, Red=41, Green=42, Yellow=43, + Blue=44, Purple=45, Cyan=46, White=47, + bold=1, light=2, blink=5, invert=7) + + # XXX deprecate stringio argument + def __init__(self, file=None, stringio=False, encoding=None): + if file is None: + if stringio: + self.stringio = file = py.io.TextIO() + else: + from sys import stdout as file + elif py.builtin.callable(file) and not ( + hasattr(file, "write") and hasattr(file, "flush")): + file = WriteFile(file, encoding=encoding) + if hasattr(file, "isatty") and file.isatty() and colorama: + file = colorama.AnsiToWin32(file).stream + self.encoding = encoding or getattr(file, 'encoding', "utf-8") + self._file = file + self.hasmarkup = should_do_markup(file) + self._lastlen = 0 + self._chars_on_current_line = 0 + self._width_of_current_line = 0 + + @property + def fullwidth(self): + if hasattr(self, '_terminal_width'): + return self._terminal_width + return get_terminal_width() + + @fullwidth.setter + def fullwidth(self, value): + self._terminal_width = value + + @property + def chars_on_current_line(self): + """Return the number of characters written so far in the current line. + + Please note that this count does not produce correct results after a reline() call, + see #164. + + .. versionadded:: 1.5.0 + + :rtype: int + """ + return self._chars_on_current_line + + @property + def width_of_current_line(self): + """Return an estimate of the width so far in the current line. + + .. 
versionadded:: 1.6.0 + + :rtype: int + """ + return self._width_of_current_line + + def _escaped(self, text, esc): + if esc and self.hasmarkup: + text = (''.join(['\x1b[%sm' % cod for cod in esc]) + + text +'\x1b[0m') + return text + + def markup(self, text, **kw): + esc = [] + for name in kw: + if name not in self._esctable: + raise ValueError("unknown markup: %r" %(name,)) + if kw[name]: + esc.append(self._esctable[name]) + return self._escaped(text, tuple(esc)) + + def sep(self, sepchar, title=None, fullwidth=None, **kw): + if fullwidth is None: + fullwidth = self.fullwidth + # the goal is to have the line be as long as possible + # under the condition that len(line) <= fullwidth + if sys.platform == "win32": + # if we print in the last column on windows we are on a + # new line but there is no way to verify/neutralize this + # (we may not know the exact line width) + # so let's be defensive to avoid empty lines in the output + fullwidth -= 1 + if title is not None: + # we want 2 + 2*len(fill) + len(title) <= fullwidth + # i.e. 2 + 2*len(sepchar)*N + len(title) <= fullwidth + # 2*len(sepchar)*N <= fullwidth - len(title) - 2 + # N <= (fullwidth - len(title) - 2) // (2*len(sepchar)) + N = max((fullwidth - len(title) - 2) // (2*len(sepchar)), 1) + fill = sepchar * N + line = "%s %s %s" % (fill, title, fill) + else: + # we want len(sepchar)*N <= fullwidth + # i.e. N <= fullwidth // len(sepchar) + line = sepchar * (fullwidth // len(sepchar)) + # in some situations there is room for an extra sepchar at the right, + # in particular if we consider that with a sepchar like "_ " the + # trailing space is not important at the end of the line + if len(line) + len(sepchar.rstrip()) <= fullwidth: + line += sepchar.rstrip() + + self.line(line, **kw) + + def write(self, msg, **kw): + if msg: + if not isinstance(msg, (bytes, text)): + msg = text(msg) + + self._update_chars_on_current_line(msg) + + if self.hasmarkup and kw: + markupmsg = self.markup(msg, **kw) + else: + markupmsg = msg + write_out(self._file, markupmsg) + + def _update_chars_on_current_line(self, text_or_bytes): + newline = b'\n' if isinstance(text_or_bytes, bytes) else '\n' + current_line = text_or_bytes.rsplit(newline, 1)[-1] + if isinstance(current_line, bytes): + current_line = current_line.decode('utf-8', errors='replace') + if newline in text_or_bytes: + self._chars_on_current_line = len(current_line) + self._width_of_current_line = get_line_width(current_line) + else: + self._chars_on_current_line += len(current_line) + self._width_of_current_line += get_line_width(current_line) + + def line(self, s='', **kw): + self.write(s, **kw) + self._checkfill(s) + self.write('\n') + + def reline(self, line, **kw): + if not self.hasmarkup: + raise ValueError("cannot use rewrite-line without terminal") + self.write(line, **kw) + self._checkfill(line) + self.write('\r') + self._lastlen = len(line) + + def _checkfill(self, line): + diff2last = self._lastlen - len(line) + if diff2last > 0: + self.write(" " * diff2last) + +class Win32ConsoleWriter(TerminalWriter): + def write(self, msg, **kw): + if msg: + if not isinstance(msg, (bytes, text)): + msg = text(msg) + + self._update_chars_on_current_line(msg) + + oldcolors = None + if self.hasmarkup and kw: + handle = GetStdHandle(STD_OUTPUT_HANDLE) + oldcolors = GetConsoleInfo(handle).wAttributes + default_bg = oldcolors & 0x00F0 + attr = default_bg + if kw.pop('bold', False): + attr |= FOREGROUND_INTENSITY + + if kw.pop('red', False): + attr |= FOREGROUND_RED + elif kw.pop('blue', False): + attr |= 
FOREGROUND_BLUE + elif kw.pop('green', False): + attr |= FOREGROUND_GREEN + elif kw.pop('yellow', False): + attr |= FOREGROUND_GREEN|FOREGROUND_RED + else: + attr |= oldcolors & 0x0007 + + SetConsoleTextAttribute(handle, attr) + write_out(self._file, msg) + if oldcolors: + SetConsoleTextAttribute(handle, oldcolors) + +class WriteFile(object): + def __init__(self, writemethod, encoding=None): + self.encoding = encoding + self._writemethod = writemethod + + def write(self, data): + if self.encoding: + data = data.encode(self.encoding, "replace") + self._writemethod(data) + + def flush(self): + return + + +if win32_and_ctypes: + TerminalWriter = Win32ConsoleWriter + import ctypes + from ctypes import wintypes + + # ctypes access to the Windows console + STD_OUTPUT_HANDLE = -11 + STD_ERROR_HANDLE = -12 + FOREGROUND_BLACK = 0x0000 # black text + FOREGROUND_BLUE = 0x0001 # text color contains blue. + FOREGROUND_GREEN = 0x0002 # text color contains green. + FOREGROUND_RED = 0x0004 # text color contains red. + FOREGROUND_WHITE = 0x0007 + FOREGROUND_INTENSITY = 0x0008 # text color is intensified. + BACKGROUND_BLACK = 0x0000 # background color black + BACKGROUND_BLUE = 0x0010 # background color contains blue. + BACKGROUND_GREEN = 0x0020 # background color contains green. + BACKGROUND_RED = 0x0040 # background color contains red. + BACKGROUND_WHITE = 0x0070 + BACKGROUND_INTENSITY = 0x0080 # background color is intensified. + + SHORT = ctypes.c_short + class COORD(ctypes.Structure): + _fields_ = [('X', SHORT), + ('Y', SHORT)] + class SMALL_RECT(ctypes.Structure): + _fields_ = [('Left', SHORT), + ('Top', SHORT), + ('Right', SHORT), + ('Bottom', SHORT)] + class CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure): + _fields_ = [('dwSize', COORD), + ('dwCursorPosition', COORD), + ('wAttributes', wintypes.WORD), + ('srWindow', SMALL_RECT), + ('dwMaximumWindowSize', COORD)] + + _GetStdHandle = ctypes.windll.kernel32.GetStdHandle + _GetStdHandle.argtypes = [wintypes.DWORD] + _GetStdHandle.restype = wintypes.HANDLE + def GetStdHandle(kind): + return _GetStdHandle(kind) + + SetConsoleTextAttribute = ctypes.windll.kernel32.SetConsoleTextAttribute + SetConsoleTextAttribute.argtypes = [wintypes.HANDLE, wintypes.WORD] + SetConsoleTextAttribute.restype = wintypes.BOOL + + _GetConsoleScreenBufferInfo = \ + ctypes.windll.kernel32.GetConsoleScreenBufferInfo + _GetConsoleScreenBufferInfo.argtypes = [wintypes.HANDLE, + ctypes.POINTER(CONSOLE_SCREEN_BUFFER_INFO)] + _GetConsoleScreenBufferInfo.restype = wintypes.BOOL + def GetConsoleInfo(handle): + info = CONSOLE_SCREEN_BUFFER_INFO() + _GetConsoleScreenBufferInfo(handle, ctypes.byref(info)) + return info + + def _getdimensions(): + handle = GetStdHandle(STD_OUTPUT_HANDLE) + info = GetConsoleInfo(handle) + # Subtract one from the width, otherwise the cursor wraps + # and the ending \n causes an empty line to display. + return info.dwSize.Y, info.dwSize.X - 1 + +def write_out(fil, msg): + # XXX sometimes "msg" is of type bytes, sometimes text which + # complicates the situation. Should we try to enforce unicode?
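+ # fallback chain below: plain write, then encode with the stream's own encoding, then unicode-escape, so writing never fails outright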
+ try: + # on py27 and above writing out to sys.stdout with an encoding + # should usually work for unicode messages (if the encoding is + # capable of it) + fil.write(msg) + except UnicodeEncodeError: + # on py26 it might not work because stdout expects bytes + if fil.encoding: + try: + fil.write(msg.encode(fil.encoding)) + except UnicodeEncodeError: + # it might still fail if the encoding is not capable + pass + else: + fil.flush() + return + # fallback: escape all unicode characters + msg = msg.encode("unicode-escape").decode("ascii") + fil.write(msg) + fil.flush() diff --git a/venv/lib/python3.10/site-packages/py/_log/__init__.py b/venv/lib/python3.10/site-packages/py/_log/__init__.py new file mode 100644 index 0000000..fad62e9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/py/_log/__init__.py @@ -0,0 +1,2 @@ +""" logging API ('producers' and 'consumers' connected via keywords) """ + diff --git a/venv/lib/python3.10/site-packages/py/_log/log.py b/venv/lib/python3.10/site-packages/py/_log/log.py new file mode 100644 index 0000000..56969bc --- /dev/null +++ b/venv/lib/python3.10/site-packages/py/_log/log.py @@ -0,0 +1,206 @@ +""" +basic logging functionality based on a producer/consumer scheme. + +XXX implement this API: (maybe put it into slogger.py?) + + log = Logger( + info=py.log.STDOUT, + debug=py.log.STDOUT, + command=None) + log.info("hello", "world") + log.command("hello", "world") + + log = Logger(info=Logger(something=...), + debug=py.log.STDOUT, + command=None) +""" +import py +import sys + + +class Message(object): + def __init__(self, keywords, args): + self.keywords = keywords + self.args = args + + def content(self): + return " ".join(map(str, self.args)) + + def prefix(self): + return "[%s] " % (":".join(self.keywords)) + + def __str__(self): + return self.prefix() + self.content() + + +class Producer(object): + """ (deprecated) Log producer API which sends messages to be logged + to a 'consumer' object, which then prints them to stdout, + stderr, files, etc. Used extensively by PyPy-1.1. + """ + + Message = Message # to allow later customization + keywords2consumer = {} + + def __init__(self, keywords, keywordmapper=None, **kw): + if hasattr(keywords, 'split'): + keywords = tuple(keywords.split()) + self._keywords = keywords + if keywordmapper is None: + keywordmapper = default_keywordmapper + self._keywordmapper = keywordmapper + + def __repr__(self): + return "<py.log.Producer %s>" % ":".join(self._keywords) + + def __getattr__(self, name): + if '_' in name: + raise AttributeError(name) + producer = self.__class__(self._keywords + (name,)) + setattr(self, name, producer) + return producer + + def __call__(self, *args): + """ write a message to the appropriate consumer(s) """ + func = self._keywordmapper.getconsumer(self._keywords) + if func is not None: + func(self.Message(self._keywords, args)) + +class KeywordMapper: + def __init__(self): + self.keywords2consumer = {} + + def getstate(self): + return self.keywords2consumer.copy() + + def setstate(self, state): + self.keywords2consumer.clear() + self.keywords2consumer.update(state) + + def getconsumer(self, keywords): + """ return a consumer matching the given keywords.
+ + tries to find the most suitable consumer by walking the list of + keywords from the back; the first consumer matching a + keyword is returned (falling back to py.log.default) + """ + for i in range(len(keywords), 0, -1): + try: + return self.keywords2consumer[keywords[:i]] + except KeyError: + continue + return self.keywords2consumer.get('default', default_consumer) + + def setconsumer(self, keywords, consumer): + """ set a consumer for a set of keywords. """ + # normalize to tuples + if isinstance(keywords, str): + keywords = tuple(filter(None, keywords.split())) + elif hasattr(keywords, '_keywords'): + keywords = keywords._keywords + elif not isinstance(keywords, tuple): + raise TypeError("key %r is not a string or tuple" % (keywords,)) + if consumer is not None and not py.builtin.callable(consumer): + if not hasattr(consumer, 'write'): + raise TypeError( + "%r should be None, callable or file-like" % (consumer,)) + consumer = File(consumer) + self.keywords2consumer[keywords] = consumer + + +def default_consumer(msg): + """ the default consumer: writes the message to sys.stderr """ + sys.stderr.write(str(msg)+"\n") + +default_keywordmapper = KeywordMapper() + + +def setconsumer(keywords, consumer): + default_keywordmapper.setconsumer(keywords, consumer) + + +def setstate(state): + default_keywordmapper.setstate(state) + + +def getstate(): + return default_keywordmapper.getstate() + +# +# Consumers +# + + +class File(object): + """ log consumer wrapping a file(-like) object """ + def __init__(self, f): + assert hasattr(f, 'write') + # assert isinstance(f, file) or not hasattr(f, 'open') + self._file = f + + def __call__(self, msg): + """ write a message to the log """ + self._file.write(str(msg) + "\n") + if hasattr(self._file, 'flush'): + self._file.flush() + + +class Path(object): + """ log consumer that opens and writes to a Path """ + def __init__(self, filename, append=False, + delayed_create=False, buffering=False): + self._append = append + self._filename = str(filename) + self._buffering = buffering + if not delayed_create: + self._openfile() + + def _openfile(self): + mode = self._append and 'a' or 'w' + f = open(self._filename, mode) + self._file = f + + def __call__(self, msg): + """ write a message to the log """ + if not hasattr(self, "_file"): + self._openfile() + self._file.write(str(msg) + "\n") + if not self._buffering: + self._file.flush() + + +def STDOUT(msg): + """ consumer that writes to sys.stdout """ + sys.stdout.write(str(msg)+"\n") + + +def STDERR(msg): + """ consumer that writes to sys.stderr """ + sys.stderr.write(str(msg)+"\n") + + +class Syslog: + """ consumer that writes to the syslog daemon """ + + def __init__(self, priority=None): + if priority is None: + priority = self.LOG_INFO + self.priority = priority + + def __call__(self, msg): + """ write a message to the log """ + import syslog + syslog.syslog(self.priority, str(msg)) + + +try: + import syslog +except ImportError: + pass +else: + for _prio in "EMERG ALERT CRIT ERR WARNING NOTICE INFO DEBUG".split(): + _prio = "LOG_" + _prio + try: + setattr(Syslog, _prio, getattr(syslog, _prio)) + except AttributeError: + pass diff --git a/venv/lib/python3.10/site-packages/py/_log/warning.py b/venv/lib/python3.10/site-packages/py/_log/warning.py new file mode 100644 index 0000000..6ef20d9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/py/_log/warning.py @@ -0,0 +1,79 @@ +import py, sys + +class DeprecationWarning(DeprecationWarning): + def __init__(self, msg, path, lineno): +
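+ # store the call site so __repr__ can render it as "path:lineno: msg"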
self.msg = msg + self.path = path + self.lineno = lineno + def __repr__(self): + return "%s:%d: %s" %(self.path, self.lineno+1, self.msg) + def __str__(self): + return self.msg + +def _apiwarn(startversion, msg, stacklevel=2, function=None): + # below is mostly COPIED from python2.4/warnings.py's def warn() + # Get context information + if isinstance(stacklevel, str): + frame = sys._getframe(1) + level = 1 + found = frame.f_code.co_filename.find(stacklevel) != -1 + while frame: + co = frame.f_code + if co.co_filename.find(stacklevel) == -1: + if found: + stacklevel = level + break + else: + found = True + level += 1 + frame = frame.f_back + else: + stacklevel = 1 + msg = "%s (since version %s)" %(msg, startversion) + warn(msg, stacklevel=stacklevel+1, function=function) + + +def warn(msg, stacklevel=1, function=None): + if function is not None: + import inspect + filename = inspect.getfile(function) + lineno = py.code.getrawcode(function).co_firstlineno + else: + try: + caller = sys._getframe(stacklevel) + except ValueError: + globals = sys.__dict__ + lineno = 1 + else: + globals = caller.f_globals + lineno = caller.f_lineno + if '__name__' in globals: + module = globals['__name__'] + else: + module = "<unknown>" + filename = globals.get('__file__') + if filename: + fnl = filename.lower() + if fnl.endswith(".pyc") or fnl.endswith(".pyo"): + filename = filename[:-1] + elif fnl.endswith("$py.class"): + filename = filename.replace('$py.class', '.py') + else: + if module == "__main__": + try: + filename = sys.argv[0] + except AttributeError: + # embedded interpreters don't have sys.argv, see bug #839151 + filename = '__main__' + if not filename: + filename = module + path = py.path.local(filename) + warning = DeprecationWarning(msg, path, lineno) + import warnings + warnings.warn_explicit(warning, category=Warning, + filename=str(warning.path), + lineno=warning.lineno, + registry=warnings.__dict__.setdefault( + "__warningsregistry__", {}) + ) + diff --git a/venv/lib/python3.10/site-packages/py/_path/__init__.py b/venv/lib/python3.10/site-packages/py/_path/__init__.py new file mode 100644 index 0000000..51f3246 --- /dev/null +++ b/venv/lib/python3.10/site-packages/py/_path/__init__.py @@ -0,0 +1 @@ +""" unified file system api """ diff --git a/venv/lib/python3.10/site-packages/py/_path/cacheutil.py b/venv/lib/python3.10/site-packages/py/_path/cacheutil.py new file mode 100644 index 0000000..9922504 --- /dev/null +++ b/venv/lib/python3.10/site-packages/py/_path/cacheutil.py @@ -0,0 +1,114 @@ +""" +This module contains multithread-safe cache implementations. + +All Caches have + + getorbuild(key, builder) + delentry(key) + +methods and allow configuration when instantiating the cache class. +""" +from time import time as gettime + +class BasicCache(object): + def __init__(self, maxentries=128): + self.maxentries = maxentries + self.prunenum = int(maxentries - maxentries/8) + self._dict = {} + + def clear(self): + self._dict.clear() + + def _getentry(self, key): + return self._dict[key] + + def _putentry(self, key, entry): + self._prunelowestweight() + self._dict[key] = entry + + def delentry(self, key, raising=False): + try: + del self._dict[key] + except KeyError: + if raising: + raise + + def getorbuild(self, key, builder): + try: + entry = self._getentry(key) + except KeyError: + entry = self._build(key, builder) + self._putentry(key, entry) + return entry.value + + def _prunelowestweight(self): + """ prune out entries with lowest weight.
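+ Eviction only happens once maxentries is reached and trims the cache back down to prunenum entries (dropping roughly an eighth of it by default), lowest-weighted first.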
""" + numentries = len(self._dict) + if numentries >= self.maxentries: + # evict according to entry's weight + items = [(entry.weight, key) + for key, entry in self._dict.items()] + items.sort() + index = numentries - self.prunenum + if index > 0: + for weight, key in items[:index]: + # in MT situations the element might be gone + self.delentry(key, raising=False) + +class BuildcostAccessCache(BasicCache): + """ A BuildTime/Access-counting cache implementation. + the weight of a value is computed as the product of + + num-accesses-of-a-value * time-to-build-the-value + + The values with the least such weights are evicted + if the cache maxentries threshold is superceded. + For implementation flexibility more than one object + might be evicted at a time. + """ + # time function to use for measuring build-times + + def _build(self, key, builder): + start = gettime() + val = builder() + end = gettime() + return WeightedCountingEntry(val, end-start) + + +class WeightedCountingEntry(object): + def __init__(self, value, oneweight): + self._value = value + self.weight = self._oneweight = oneweight + + def value(self): + self.weight += self._oneweight + return self._value + value = property(value) + +class AgingCache(BasicCache): + """ This cache prunes out cache entries that are too old. + """ + def __init__(self, maxentries=128, maxseconds=10.0): + super(AgingCache, self).__init__(maxentries) + self.maxseconds = maxseconds + + def _getentry(self, key): + entry = self._dict[key] + if entry.isexpired(): + self.delentry(key) + raise KeyError(key) + return entry + + def _build(self, key, builder): + val = builder() + entry = AgingEntry(val, gettime() + self.maxseconds) + return entry + +class AgingEntry(object): + def __init__(self, value, expirationtime): + self.value = value + self.weight = expirationtime + + def isexpired(self): + t = gettime() + return t >= self.weight diff --git a/venv/lib/python3.10/site-packages/py/_path/common.py b/venv/lib/python3.10/site-packages/py/_path/common.py new file mode 100644 index 0000000..2364e5f --- /dev/null +++ b/venv/lib/python3.10/site-packages/py/_path/common.py @@ -0,0 +1,459 @@ +""" +""" +import warnings +import os +import sys +import posixpath +import fnmatch +import py + +# Moved from local.py. +iswin32 = sys.platform == "win32" or (getattr(os, '_name', False) == 'nt') + +try: + # FileNotFoundError might happen in py34, and is not available with py27. + import_errors = (ImportError, FileNotFoundError) +except NameError: + import_errors = (ImportError,) + +try: + from os import fspath +except ImportError: + def fspath(path): + """ + Return the string representation of the path. + If str or bytes is passed in, it is returned unchanged. + This code comes from PEP 519, modified to support earlier versions of + python. + + This is required for python < 3.6. + """ + if isinstance(path, (py.builtin.text, py.builtin.bytes)): + return path + + # Work from the object's type to match method resolution of other magic + # methods. 
+ path_type = type(path) + try: + return path_type.__fspath__(path) + except AttributeError: + if hasattr(path_type, '__fspath__'): + raise + try: + import pathlib + except import_errors: + pass + else: + if isinstance(path, pathlib.PurePath): + return py.builtin.text(path) + + raise TypeError("expected str, bytes or os.PathLike object, not " + + path_type.__name__) + +class Checkers: + _depend_on_existence = 'exists', 'link', 'dir', 'file' + + def __init__(self, path): + self.path = path + + def dir(self): + raise NotImplementedError + + def file(self): + raise NotImplementedError + + def dotfile(self): + return self.path.basename.startswith('.') + + def ext(self, arg): + if not arg.startswith('.'): + arg = '.' + arg + return self.path.ext == arg + + def exists(self): + raise NotImplementedError + + def basename(self, arg): + return self.path.basename == arg + + def basestarts(self, arg): + return self.path.basename.startswith(arg) + + def relto(self, arg): + return self.path.relto(arg) + + def fnmatch(self, arg): + return self.path.fnmatch(arg) + + def endswith(self, arg): + return str(self.path).endswith(arg) + + def _evaluate(self, kw): + for name, value in kw.items(): + invert = False + meth = None + try: + meth = getattr(self, name) + except AttributeError: + if name[:3] == 'not': + invert = True + try: + meth = getattr(self, name[3:]) + except AttributeError: + pass + if meth is None: + raise TypeError( + "no %r checker available for %r" % (name, self.path)) + try: + if py.code.getrawcode(meth).co_argcount > 1: + if (not meth(value)) ^ invert: + return False + else: + if bool(value) ^ bool(meth()) ^ invert: + return False + except (py.error.ENOENT, py.error.ENOTDIR, py.error.EBUSY): + # EBUSY feels not entirely correct, + # but its kind of necessary since ENOMEDIUM + # is not accessible in python + for name in self._depend_on_existence: + if name in kw: + if kw.get(name): + return False + name = 'not' + name + if name in kw: + if not kw.get(name): + return False + return True + +class NeverRaised(Exception): + pass + +class PathBase(object): + """ shared implementation for filesystem path objects.""" + Checkers = Checkers + + def __div__(self, other): + return self.join(fspath(other)) + __truediv__ = __div__ # py3k + + def basename(self): + """ basename part of path. """ + return self._getbyspec('basename')[0] + basename = property(basename, None, None, basename.__doc__) + + def dirname(self): + """ dirname part of path. """ + return self._getbyspec('dirname')[0] + dirname = property(dirname, None, None, dirname.__doc__) + + def purebasename(self): + """ pure base name of the path.""" + return self._getbyspec('purebasename')[0] + purebasename = property(purebasename, None, None, purebasename.__doc__) + + def ext(self): + """ extension of the path (including the '.').""" + return self._getbyspec('ext')[0] + ext = property(ext, None, None, ext.__doc__) + + def dirpath(self, *args, **kwargs): + """ return the directory path joined with any given path arguments. """ + return self.new(basename='').join(*args, **kwargs) + + def read_binary(self): + """ read and return a bytestring from reading the path. """ + with self.open('rb') as f: + return f.read() + + def read_text(self, encoding): + """ read and return a Unicode string from reading the path. """ + with self.open("r", encoding=encoding) as f: + return f.read() + + + def read(self, mode='r'): + """ read and return a bytestring from reading the path. 
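+ (note: with the default mode 'r' this is really a text string on Python 3; pass mode='rb' to get bytes)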
""" + with self.open(mode) as f: + return f.read() + + def readlines(self, cr=1): + """ read and return a list of lines from the path. if cr is False, the +newline will be removed from the end of each line. """ + if sys.version_info < (3, ): + mode = 'rU' + else: # python 3 deprecates mode "U" in favor of "newline" option + mode = 'r' + + if not cr: + content = self.read(mode) + return content.split('\n') + else: + f = self.open(mode) + try: + return f.readlines() + finally: + f.close() + + def load(self): + """ (deprecated) return object unpickled from self.read() """ + f = self.open('rb') + try: + import pickle + return py.error.checked_call(pickle.load, f) + finally: + f.close() + + def move(self, target): + """ move this path to target. """ + if target.relto(self): + raise py.error.EINVAL( + target, + "cannot move path into a subdirectory of itself") + try: + self.rename(target) + except py.error.EXDEV: # invalid cross-device link + self.copy(target) + self.remove() + + def __repr__(self): + """ return a string representation of this path. """ + return repr(str(self)) + + def check(self, **kw): + """ check a path for existence and properties. + + Without arguments, return True if the path exists, otherwise False. + + valid checkers:: + + file=1 # is a file + file=0 # is not a file (may not even exist) + dir=1 # is a dir + link=1 # is a link + exists=1 # exists + + You can specify multiple checker definitions, for example:: + + path.check(file=1, link=1) # a link pointing to a file + """ + if not kw: + kw = {'exists': 1} + return self.Checkers(self)._evaluate(kw) + + def fnmatch(self, pattern): + """return true if the basename/fullname matches the glob-'pattern'. + + valid pattern characters:: + + * matches everything + ? matches any single character + [seq] matches any character in seq + [!seq] matches any char not in seq + + If the pattern contains a path-separator then the full path + is used for pattern matching and a '*' is prepended to the + pattern. + + if the pattern doesn't contain a path-separator the pattern + is only matched against the basename. + """ + return FNMatcher(pattern)(self) + + def relto(self, relpath): + """ return a string which is the relative part of the path + to the given 'relpath'. + """ + if not isinstance(relpath, (str, PathBase)): + raise TypeError("%r: not a string or path object" %(relpath,)) + strrelpath = str(relpath) + if strrelpath and strrelpath[-1] != self.sep: + strrelpath += self.sep + #assert strrelpath[-1] == self.sep + #assert strrelpath[-2] != self.sep + strself = self.strpath + if sys.platform == "win32" or getattr(os, '_name', None) == 'nt': + if os.path.normcase(strself).startswith( + os.path.normcase(strrelpath)): + return strself[len(strrelpath):] + elif strself.startswith(strrelpath): + return strself[len(strrelpath):] + return "" + + def ensure_dir(self, *args): + """ ensure the path joined with args is a directory. """ + return self.ensure(*args, **{"dir": True}) + + def bestrelpath(self, dest): + """ return a string which is a relative path from self + (assumed to be a directory) to dest such that + self.join(bestrelpath) == dest and if not such + path can be determined return dest. 
+ """ + try: + if self == dest: + return os.curdir + base = self.common(dest) + if not base: # can be the case on windows + return str(dest) + self2base = self.relto(base) + reldest = dest.relto(base) + if self2base: + n = self2base.count(self.sep) + 1 + else: + n = 0 + l = [os.pardir] * n + if reldest: + l.append(reldest) + target = dest.sep.join(l) + return target + except AttributeError: + return str(dest) + + def exists(self): + return self.check() + + def isdir(self): + return self.check(dir=1) + + def isfile(self): + return self.check(file=1) + + def parts(self, reverse=False): + """ return a root-first list of all ancestor directories + plus the path itself. + """ + current = self + l = [self] + while 1: + last = current + current = current.dirpath() + if last == current: + break + l.append(current) + if not reverse: + l.reverse() + return l + + def common(self, other): + """ return the common part shared with the other path + or None if there is no common part. + """ + last = None + for x, y in zip(self.parts(), other.parts()): + if x != y: + return last + last = x + return last + + def __add__(self, other): + """ return new path object with 'other' added to the basename""" + return self.new(basename=self.basename+str(other)) + + def __cmp__(self, other): + """ return sort value (-1, 0, +1). """ + try: + return cmp(self.strpath, other.strpath) + except AttributeError: + return cmp(str(self), str(other)) # self.path, other.path) + + def __lt__(self, other): + try: + return self.strpath < other.strpath + except AttributeError: + return str(self) < str(other) + + def visit(self, fil=None, rec=None, ignore=NeverRaised, bf=False, sort=False): + """ yields all paths below the current one + + fil is a filter (glob pattern or callable), if not matching the + path will not be yielded, defaulting to None (everything is + returned) + + rec is a filter (glob pattern or callable) that controls whether + a node is descended, defaulting to None + + ignore is an Exception class that is ignoredwhen calling dirlist() + on any of the paths (by default, all exceptions are reported) + + bf if True will cause a breadthfirst search instead of the + default depthfirst. Default: False + + sort if True will sort entries within each directory level. + """ + for x in Visitor(fil, rec, ignore, bf, sort).gen(self): + yield x + + def _sortlist(self, res, sort): + if sort: + if hasattr(sort, '__call__'): + warnings.warn(DeprecationWarning( + "listdir(sort=callable) is deprecated and breaks on python3" + ), stacklevel=3) + res.sort(sort) + else: + res.sort() + + def samefile(self, other): + """ return True if other refers to the same stat object as self. 
""" + return self.strpath == str(other) + + def __fspath__(self): + return self.strpath + +class Visitor: + def __init__(self, fil, rec, ignore, bf, sort): + if isinstance(fil, py.builtin._basestring): + fil = FNMatcher(fil) + if isinstance(rec, py.builtin._basestring): + self.rec = FNMatcher(rec) + elif not hasattr(rec, '__call__') and rec: + self.rec = lambda path: True + else: + self.rec = rec + self.fil = fil + self.ignore = ignore + self.breadthfirst = bf + self.optsort = sort and sorted or (lambda x: x) + + def gen(self, path): + try: + entries = path.listdir() + except self.ignore: + return + rec = self.rec + dirs = self.optsort([p for p in entries + if p.check(dir=1) and (rec is None or rec(p))]) + if not self.breadthfirst: + for subdir in dirs: + for p in self.gen(subdir): + yield p + for p in self.optsort(entries): + if self.fil is None or self.fil(p): + yield p + if self.breadthfirst: + for subdir in dirs: + for p in self.gen(subdir): + yield p + +class FNMatcher: + def __init__(self, pattern): + self.pattern = pattern + + def __call__(self, path): + pattern = self.pattern + + if (pattern.find(path.sep) == -1 and + iswin32 and + pattern.find(posixpath.sep) != -1): + # Running on Windows, the pattern has no Windows path separators, + # and the pattern has one or more Posix path separators. Replace + # the Posix path separators with the Windows path separator. + pattern = pattern.replace(posixpath.sep, path.sep) + + if pattern.find(path.sep) == -1: + name = path.basename + else: + name = str(path) # path.strpath # XXX svn? + if not os.path.isabs(pattern): + pattern = '*' + path.sep + pattern + return fnmatch.fnmatch(name, pattern) diff --git a/venv/lib/python3.10/site-packages/py/_path/local.py b/venv/lib/python3.10/site-packages/py/_path/local.py new file mode 100644 index 0000000..1385a03 --- /dev/null +++ b/venv/lib/python3.10/site-packages/py/_path/local.py @@ -0,0 +1,1030 @@ +""" +local path implementation. +""" +from __future__ import with_statement + +from contextlib import contextmanager +import sys, os, atexit, io, uuid +import py +from py._path import common +from py._path.common import iswin32, fspath +from stat import S_ISLNK, S_ISDIR, S_ISREG + +from os.path import abspath, normpath, isabs, exists, isdir, isfile, islink, dirname + +if sys.version_info > (3,0): + def map_as_list(func, iter): + return list(map(func, iter)) +else: + map_as_list = map + +ALLOW_IMPORTLIB_MODE = sys.version_info > (3,5) +if ALLOW_IMPORTLIB_MODE: + import importlib + + +class Stat(object): + def __getattr__(self, name): + return getattr(self._osstatresult, "st_" + name) + + def __init__(self, path, osstatresult): + self.path = path + self._osstatresult = osstatresult + + @property + def owner(self): + if iswin32: + raise NotImplementedError("XXX win32") + import pwd + entry = py.error.checked_call(pwd.getpwuid, self.uid) + return entry[0] + + @property + def group(self): + """ return group name of file. """ + if iswin32: + raise NotImplementedError("XXX win32") + import grp + entry = py.error.checked_call(grp.getgrgid, self.gid) + return entry[0] + + def isdir(self): + return S_ISDIR(self._osstatresult.st_mode) + + def isfile(self): + return S_ISREG(self._osstatresult.st_mode) + + def islink(self): + st = self.path.lstat() + return S_ISLNK(self._osstatresult.st_mode) + +class PosixPath(common.PathBase): + def chown(self, user, group, rec=0): + """ change ownership to the given user and group. + user and group may be specified by a number or + by a name. 
if rec is True change ownership + recursively. + """ + uid = getuserid(user) + gid = getgroupid(group) + if rec: + for x in self.visit(rec=lambda x: x.check(link=0)): + if x.check(link=0): + py.error.checked_call(os.chown, str(x), uid, gid) + py.error.checked_call(os.chown, str(self), uid, gid) + + def readlink(self): + """ return value of a symbolic link. """ + return py.error.checked_call(os.readlink, self.strpath) + + def mklinkto(self, oldname): + """ posix style hard link to another name. """ + py.error.checked_call(os.link, str(oldname), str(self)) + + def mksymlinkto(self, value, absolute=1): + """ create a symbolic link with the given value (pointing to another name). """ + if absolute: + py.error.checked_call(os.symlink, str(value), self.strpath) + else: + base = self.common(value) + # with posix local paths '/' is always a common base + relsource = self.__class__(value).relto(base) + reldest = self.relto(base) + n = reldest.count(self.sep) + target = self.sep.join(('..', )*n + (relsource, )) + py.error.checked_call(os.symlink, target, self.strpath) + +def getuserid(user): + import pwd + if not isinstance(user, int): + user = pwd.getpwnam(user)[2] + return user + +def getgroupid(group): + import grp + if not isinstance(group, int): + group = grp.getgrnam(group)[2] + return group + +FSBase = not iswin32 and PosixPath or common.PathBase + +class LocalPath(FSBase): + """ object oriented interface to os.path and other local filesystem + related information. + """ + class ImportMismatchError(ImportError): + """ raised on pyimport() if there is a mismatch of __file__'s""" + + sep = os.sep + class Checkers(common.Checkers): + def _stat(self): + try: + return self._statcache + except AttributeError: + try: + self._statcache = self.path.stat() + except py.error.ELOOP: + self._statcache = self.path.lstat() + return self._statcache + + def dir(self): + return S_ISDIR(self._stat().mode) + + def file(self): + return S_ISREG(self._stat().mode) + + def exists(self): + return self._stat() + + def link(self): + st = self.path.lstat() + return S_ISLNK(st.mode) + + def __init__(self, path=None, expanduser=False): + """ Initialize and return a local Path instance. + + Path can be relative to the current directory. + If path is None it defaults to the current working directory. + If expanduser is True, tilde-expansion is performed. + Note that Path instances always carry an absolute path. + Note also that passing in a local path object will simply return + the exact same path object. Use new() to get a new copy. + """ + if path is None: + self.strpath = py.error.checked_call(os.getcwd) + else: + try: + path = fspath(path) + except TypeError: + raise ValueError("can only pass None, Path instances " + "or non-empty strings to LocalPath") + if expanduser: + path = os.path.expanduser(path) + self.strpath = abspath(path) + + def __hash__(self): + s = self.strpath + if iswin32: + s = s.lower() + return hash(s) + + def __eq__(self, other): + s1 = fspath(self) + try: + s2 = fspath(other) + except TypeError: + return False + if iswin32: + s1 = s1.lower() + try: + s2 = s2.lower() + except AttributeError: + return False + return s1 == s2 + + def __ne__(self, other): + return not (self == other) + + def __lt__(self, other): + return fspath(self) < fspath(other) + + def __gt__(self, other): + return fspath(self) > fspath(other) + + def samefile(self, other): + """ return True if 'other' references the same file as 'self'. 
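+ 'other' is normalized to an absolute path first; if the normalized strings already match, no stat call is needed.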
+ """ + other = fspath(other) + if not isabs(other): + other = abspath(other) + if self == other: + return True + if not hasattr(os.path, "samefile"): + return False + return py.error.checked_call( + os.path.samefile, self.strpath, other) + + def remove(self, rec=1, ignore_errors=False): + """ remove a file or directory (or a directory tree if rec=1). + if ignore_errors is True, errors while removing directories will + be ignored. + """ + if self.check(dir=1, link=0): + if rec: + # force remove of readonly files on windows + if iswin32: + self.chmod(0o700, rec=1) + import shutil + py.error.checked_call( + shutil.rmtree, self.strpath, + ignore_errors=ignore_errors) + else: + py.error.checked_call(os.rmdir, self.strpath) + else: + if iswin32: + self.chmod(0o700) + py.error.checked_call(os.remove, self.strpath) + + def computehash(self, hashtype="md5", chunksize=524288): + """ return hexdigest of hashvalue for this file. """ + try: + try: + import hashlib as mod + except ImportError: + if hashtype == "sha1": + hashtype = "sha" + mod = __import__(hashtype) + hash = getattr(mod, hashtype)() + except (AttributeError, ImportError): + raise ValueError("Don't know how to compute %r hash" %(hashtype,)) + f = self.open('rb') + try: + while 1: + buf = f.read(chunksize) + if not buf: + return hash.hexdigest() + hash.update(buf) + finally: + f.close() + + def new(self, **kw): + """ create a modified version of this path. + the following keyword arguments modify various path parts:: + + a:/some/path/to/a/file.ext + xx drive + xxxxxxxxxxxxxxxxx dirname + xxxxxxxx basename + xxxx purebasename + xxx ext + """ + obj = object.__new__(self.__class__) + if not kw: + obj.strpath = self.strpath + return obj + drive, dirname, basename, purebasename,ext = self._getbyspec( + "drive,dirname,basename,purebasename,ext") + if 'basename' in kw: + if 'purebasename' in kw or 'ext' in kw: + raise ValueError("invalid specification %r" % kw) + else: + pb = kw.setdefault('purebasename', purebasename) + try: + ext = kw['ext'] + except KeyError: + pass + else: + if ext and not ext.startswith('.'): + ext = '.' + ext + kw['basename'] = pb + ext + + if ('dirname' in kw and not kw['dirname']): + kw['dirname'] = drive + else: + kw.setdefault('dirname', dirname) + kw.setdefault('sep', self.sep) + obj.strpath = normpath( + "%(dirname)s%(sep)s%(basename)s" % kw) + return obj + + def _getbyspec(self, spec): + """ see new for what 'spec' can be. """ + res = [] + parts = self.strpath.split(self.sep) + + args = filter(None, spec.split(',') ) + append = res.append + for name in args: + if name == 'drive': + append(parts[0]) + elif name == 'dirname': + append(self.sep.join(parts[:-1])) + else: + basename = parts[-1] + if name == 'basename': + append(basename) + else: + i = basename.rfind('.') + if i == -1: + purebasename, ext = basename, '' + else: + purebasename, ext = basename[:i], basename[i:] + if name == 'purebasename': + append(purebasename) + elif name == 'ext': + append(ext) + else: + raise ValueError("invalid part specification %r" % name) + return res + + def dirpath(self, *args, **kwargs): + """ return the directory path joined with any given path arguments. """ + if not kwargs: + path = object.__new__(self.__class__) + path.strpath = dirname(self.strpath) + if args: + path = path.join(*args) + return path + return super(LocalPath, self).dirpath(*args, **kwargs) + + def join(self, *args, **kwargs): + """ return a new path by appending all 'args' as path + components. 
if abs=1 is used restart from root if any + of the args is an absolute path. + """ + sep = self.sep + strargs = [fspath(arg) for arg in args] + strpath = self.strpath + if kwargs.get('abs'): + newargs = [] + for arg in reversed(strargs): + if isabs(arg): + strpath = arg + strargs = newargs + break + newargs.insert(0, arg) + # special case for when we have e.g. strpath == "/" + actual_sep = "" if strpath.endswith(sep) else sep + for arg in strargs: + arg = arg.strip(sep) + if iswin32: + # allow unix style paths even on windows. + arg = arg.strip('/') + arg = arg.replace('/', sep) + strpath = strpath + actual_sep + arg + actual_sep = sep + obj = object.__new__(self.__class__) + obj.strpath = normpath(strpath) + return obj + + def open(self, mode='r', ensure=False, encoding=None): + """ return an opened file with the given mode. + + If ensure is True, create parent directories if needed. + """ + if ensure: + self.dirpath().ensure(dir=1) + if encoding: + return py.error.checked_call(io.open, self.strpath, mode, encoding=encoding) + return py.error.checked_call(open, self.strpath, mode) + + def _fastjoin(self, name): + child = object.__new__(self.__class__) + child.strpath = self.strpath + self.sep + name + return child + + def islink(self): + return islink(self.strpath) + + def check(self, **kw): + if not kw: + return exists(self.strpath) + if len(kw) == 1: + if "dir" in kw: + return not kw["dir"] ^ isdir(self.strpath) + if "file" in kw: + return not kw["file"] ^ isfile(self.strpath) + return super(LocalPath, self).check(**kw) + + _patternchars = set("*?[" + os.path.sep) + def listdir(self, fil=None, sort=None): + """ list directory contents, possibly filter by the given fil func + and possibly sorted. + """ + if fil is None and sort is None: + names = py.error.checked_call(os.listdir, self.strpath) + return map_as_list(self._fastjoin, names) + if isinstance(fil, py.builtin._basestring): + if not self._patternchars.intersection(fil): + child = self._fastjoin(fil) + if exists(child.strpath): + return [child] + return [] + fil = common.FNMatcher(fil) + names = py.error.checked_call(os.listdir, self.strpath) + res = [] + for name in names: + child = self._fastjoin(name) + if fil is None or fil(child): + res.append(child) + self._sortlist(res, sort) + return res + + def size(self): + """ return size of the underlying file object """ + return self.stat().size + + def mtime(self): + """ return last modification time of the path. """ + return self.stat().mtime + + def copy(self, target, mode=False, stat=False): + """ copy path to target. + + If mode is True, will copy copy permission from path to target. + If stat is True, copy permission, last modification + time, last access time, and flags from path to target. + """ + if self.check(file=1): + if target.check(dir=1): + target = target.join(self.basename) + assert self!=target + copychunked(self, target) + if mode: + copymode(self.strpath, target.strpath) + if stat: + copystat(self, target) + else: + def rec(p): + return p.check(link=0) + for x in self.visit(rec=rec): + relpath = x.relto(self) + newx = target.join(relpath) + newx.dirpath().ensure(dir=1) + if x.check(link=1): + newx.mksymlinkto(x.readlink()) + continue + elif x.check(file=1): + copychunked(x, newx) + elif x.check(dir=1): + newx.ensure(dir=1) + if mode: + copymode(x.strpath, newx.strpath) + if stat: + copystat(x, newx) + + def rename(self, target): + """ rename this path to target. 
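+ The target may be a string or any path-like object; it is normalized via fspath().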
""" + target = fspath(target) + return py.error.checked_call(os.rename, self.strpath, target) + + def dump(self, obj, bin=1): + """ pickle object into path location""" + f = self.open('wb') + import pickle + try: + py.error.checked_call(pickle.dump, obj, f, bin) + finally: + f.close() + + def mkdir(self, *args): + """ create & return the directory joined with args. """ + p = self.join(*args) + py.error.checked_call(os.mkdir, fspath(p)) + return p + + def write_binary(self, data, ensure=False): + """ write binary data into path. If ensure is True create + missing parent directories. + """ + if ensure: + self.dirpath().ensure(dir=1) + with self.open('wb') as f: + f.write(data) + + def write_text(self, data, encoding, ensure=False): + """ write text data into path using the specified encoding. + If ensure is True create missing parent directories. + """ + if ensure: + self.dirpath().ensure(dir=1) + with self.open('w', encoding=encoding) as f: + f.write(data) + + def write(self, data, mode='w', ensure=False): + """ write data into path. If ensure is True create + missing parent directories. + """ + if ensure: + self.dirpath().ensure(dir=1) + if 'b' in mode: + if not py.builtin._isbytes(data): + raise ValueError("can only process bytes") + else: + if not py.builtin._istext(data): + if not py.builtin._isbytes(data): + data = str(data) + else: + data = py.builtin._totext(data, sys.getdefaultencoding()) + f = self.open(mode) + try: + f.write(data) + finally: + f.close() + + def _ensuredirs(self): + parent = self.dirpath() + if parent == self: + return self + if parent.check(dir=0): + parent._ensuredirs() + if self.check(dir=0): + try: + self.mkdir() + except py.error.EEXIST: + # race condition: file/dir created by another thread/process. + # complain if it is not a dir + if self.check(dir=0): + raise + return self + + def ensure(self, *args, **kwargs): + """ ensure that an args-joined path exists (by default as + a file). if you specify a keyword argument 'dir=True' + then the path is forced to be a directory path. + """ + p = self.join(*args) + if kwargs.get('dir', 0): + return p._ensuredirs() + else: + p.dirpath()._ensuredirs() + if not p.check(file=1): + p.open('w').close() + return p + + def stat(self, raising=True): + """ Return an os.stat() tuple. """ + if raising == True: + return Stat(self, py.error.checked_call(os.stat, self.strpath)) + try: + return Stat(self, os.stat(self.strpath)) + except KeyboardInterrupt: + raise + except Exception: + return None + + def lstat(self): + """ Return an os.lstat() tuple. """ + return Stat(self, py.error.checked_call(os.lstat, self.strpath)) + + def setmtime(self, mtime=None): + """ set modification time for the given path. if 'mtime' is None + (the default) then the file's mtime is set to current time. + + Note that the resolution for 'mtime' is platform dependent. + """ + if mtime is None: + return py.error.checked_call(os.utime, self.strpath, mtime) + try: + return py.error.checked_call(os.utime, self.strpath, (-1, mtime)) + except py.error.EINVAL: + return py.error.checked_call(os.utime, self.strpath, (self.atime(), mtime)) + + def chdir(self): + """ change directory to self and return old current directory """ + try: + old = self.__class__() + except py.error.ENOENT: + old = None + py.error.checked_call(os.chdir, self.strpath) + return old + + + @contextmanager + def as_cwd(self): + """ + Return a context manager, which changes to the path's dir during the + managed "with" context. + On __enter__ it returns the old dir, which might be ``None``. 
+ """ + old = self.chdir() + try: + yield old + finally: + if old is not None: + old.chdir() + + def realpath(self): + """ return a new path which contains no symbolic links.""" + return self.__class__(os.path.realpath(self.strpath)) + + def atime(self): + """ return last access time of the path. """ + return self.stat().atime + + def __repr__(self): + return 'local(%r)' % self.strpath + + def __str__(self): + """ return string representation of the Path. """ + return self.strpath + + def chmod(self, mode, rec=0): + """ change permissions to the given mode. If mode is an + integer it directly encodes the os-specific modes. + if rec is True perform recursively. + """ + if not isinstance(mode, int): + raise TypeError("mode %r must be an integer" % (mode,)) + if rec: + for x in self.visit(rec=rec): + py.error.checked_call(os.chmod, str(x), mode) + py.error.checked_call(os.chmod, self.strpath, mode) + + def pypkgpath(self): + """ return the Python package path by looking for the last + directory upwards which still contains an __init__.py. + Return None if a pkgpath can not be determined. + """ + pkgpath = None + for parent in self.parts(reverse=True): + if parent.isdir(): + if not parent.join('__init__.py').exists(): + break + if not isimportable(parent.basename): + break + pkgpath = parent + return pkgpath + + def _ensuresyspath(self, ensuremode, path): + if ensuremode: + s = str(path) + if ensuremode == "append": + if s not in sys.path: + sys.path.append(s) + else: + if s != sys.path[0]: + sys.path.insert(0, s) + + def pyimport(self, modname=None, ensuresyspath=True): + """ return path as an imported python module. + + If modname is None, look for the containing package + and construct an according module name. + The module will be put/looked up in sys.modules. + if ensuresyspath is True then the root dir for importing + the file (taking __init__.py files into account) will + be prepended to sys.path if it isn't there already. + If ensuresyspath=="append" the root dir will be appended + if it isn't already contained in sys.path. + if ensuresyspath is False no modification of syspath happens. + + Special value of ensuresyspath=="importlib" is intended + purely for using in pytest, it is capable only of importing + separate .py files outside packages, e.g. for test suite + without any __init__.py file. It effectively allows having + same-named test modules in different places and offers + mild opt-in via this option. Note that it works only in + recent versions of python. 
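+ ("recent versions" here means Python newer than 3.5, per the ALLOW_IMPORTLIB_MODE check above.)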
+ """ + if not self.check(): + raise py.error.ENOENT(self) + + if ensuresyspath == 'importlib': + if modname is None: + modname = self.purebasename + if not ALLOW_IMPORTLIB_MODE: + raise ImportError( + "Can't use importlib due to old version of Python") + spec = importlib.util.spec_from_file_location( + modname, str(self)) + if spec is None: + raise ImportError( + "Can't find module %s at location %s" % + (modname, str(self)) + ) + mod = importlib.util.module_from_spec(spec) + spec.loader.exec_module(mod) + return mod + + pkgpath = None + if modname is None: + pkgpath = self.pypkgpath() + if pkgpath is not None: + pkgroot = pkgpath.dirpath() + names = self.new(ext="").relto(pkgroot).split(self.sep) + if names[-1] == "__init__": + names.pop() + modname = ".".join(names) + else: + pkgroot = self.dirpath() + modname = self.purebasename + + self._ensuresyspath(ensuresyspath, pkgroot) + __import__(modname) + mod = sys.modules[modname] + if self.basename == "__init__.py": + return mod # we don't check anything as we might + # be in a namespace package ... too icky to check + modfile = mod.__file__ + if modfile[-4:] in ('.pyc', '.pyo'): + modfile = modfile[:-1] + elif modfile.endswith('$py.class'): + modfile = modfile[:-9] + '.py' + if modfile.endswith(os.path.sep + "__init__.py"): + if self.basename != "__init__.py": + modfile = modfile[:-12] + try: + issame = self.samefile(modfile) + except py.error.ENOENT: + issame = False + if not issame: + ignore = os.getenv('PY_IGNORE_IMPORTMISMATCH') + if ignore != '1': + raise self.ImportMismatchError(modname, modfile, self) + return mod + else: + try: + return sys.modules[modname] + except KeyError: + # we have a custom modname, do a pseudo-import + import types + mod = types.ModuleType(modname) + mod.__file__ = str(self) + sys.modules[modname] = mod + try: + py.builtin.execfile(str(self), mod.__dict__) + except: + del sys.modules[modname] + raise + return mod + + def sysexec(self, *argv, **popen_opts): + """ return stdout text from executing a system child process, + where the 'self' path points to executable. + The process is directly invoked and not through a system shell. + """ + from subprocess import Popen, PIPE + argv = map_as_list(str, argv) + popen_opts['stdout'] = popen_opts['stderr'] = PIPE + proc = Popen([str(self)] + argv, **popen_opts) + stdout, stderr = proc.communicate() + ret = proc.wait() + if py.builtin._isbytes(stdout): + stdout = py.builtin._totext(stdout, sys.getdefaultencoding()) + if ret != 0: + if py.builtin._isbytes(stderr): + stderr = py.builtin._totext(stderr, sys.getdefaultencoding()) + raise py.process.cmdexec.Error(ret, ret, str(self), + stdout, stderr,) + return stdout + + def sysfind(cls, name, checker=None, paths=None): + """ return a path object found by looking at the systems + underlying PATH specification. If the checker is not None + it will be invoked to filter matching paths. If a binary + cannot be found, None is returned + Note: This is probably not working on plain win32 systems + but may work on cygwin. + """ + if isabs(name): + p = py.path.local(name) + if p.check(file=1): + return p + else: + if paths is None: + if iswin32: + paths = os.environ['Path'].split(';') + if '' not in paths and '.' 
not in paths: + paths.append('.') + try: + systemroot = os.environ['SYSTEMROOT'] + except KeyError: + pass + else: + paths = [path.replace('%SystemRoot%', systemroot) + for path in paths] + else: + paths = os.environ['PATH'].split(':') + tryadd = [] + if iswin32: + tryadd += os.environ['PATHEXT'].split(os.pathsep) + tryadd.append("") + + for x in paths: + for addext in tryadd: + p = py.path.local(x).join(name, abs=True) + addext + try: + if p.check(file=1): + if checker: + if not checker(p): + continue + return p + except py.error.EACCES: + pass + return None + sysfind = classmethod(sysfind) + + def _gethomedir(cls): + try: + x = os.environ['HOME'] + except KeyError: + try: + x = os.environ["HOMEDRIVE"] + os.environ['HOMEPATH'] + except KeyError: + return None + return cls(x) + _gethomedir = classmethod(_gethomedir) + + # """ + # special class constructors for local filesystem paths + # """ + @classmethod + def get_temproot(cls): + """ return the system's temporary directory + (where tempfiles are usually created in) + """ + import tempfile + return py.path.local(tempfile.gettempdir()) + + @classmethod + def mkdtemp(cls, rootdir=None): + """ return a Path object pointing to a fresh new temporary directory + (which we created ourself). + """ + import tempfile + if rootdir is None: + rootdir = cls.get_temproot() + return cls(py.error.checked_call(tempfile.mkdtemp, dir=str(rootdir))) + + def make_numbered_dir(cls, prefix='session-', rootdir=None, keep=3, + lock_timeout=172800): # two days + """ return unique directory with a number greater than the current + maximum one. The number is assumed to start directly after prefix. + if keep is true directories with a number less than (maxnum-keep) + will be removed. If .lock files are used (lock_timeout non-zero), + algorithm is multi-process safe. + """ + if rootdir is None: + rootdir = cls.get_temproot() + + nprefix = prefix.lower() + def parse_num(path): + """ parse the number out of a path (if it matches the prefix) """ + nbasename = path.basename.lower() + if nbasename.startswith(nprefix): + try: + return int(nbasename[len(nprefix):]) + except ValueError: + pass + + def create_lockfile(path): + """ exclusively create lockfile. Throws when failed """ + mypid = os.getpid() + lockfile = path.join('.lock') + if hasattr(lockfile, 'mksymlinkto'): + lockfile.mksymlinkto(str(mypid)) + else: + fd = py.error.checked_call(os.open, str(lockfile), os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o644) + with os.fdopen(fd, 'w') as f: + f.write(str(mypid)) + return lockfile + + def atexit_remove_lockfile(lockfile): + """ ensure lockfile is removed at process exit """ + mypid = os.getpid() + def try_remove_lockfile(): + # in a fork() situation, only the last process should + # remove the .lock, otherwise the other processes run the + # risk of seeing their temporary dir disappear. For now + # we remove the .lock in the parent only (i.e. we assume + # that the children finish before the parent). 
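+ # a stale .lock left by a crashed process is tolerated: the pruning logic below only removes a directory once lock_timeout has expired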
+ if os.getpid() != mypid: + return + try: + lockfile.remove() + except py.error.Error: + pass + atexit.register(try_remove_lockfile) + + # compute the maximum number currently in use with the prefix + lastmax = None + while True: + maxnum = -1 + for path in rootdir.listdir(): + num = parse_num(path) + if num is not None: + maxnum = max(maxnum, num) + + # make the new directory + try: + udir = rootdir.mkdir(prefix + str(maxnum+1)) + if lock_timeout: + lockfile = create_lockfile(udir) + atexit_remove_lockfile(lockfile) + except (py.error.EEXIST, py.error.ENOENT, py.error.EBUSY): + # race condition (1): another thread/process created the dir + # in the meantime - try again + # race condition (2): another thread/process spuriously acquired + # lock treating empty directory as candidate + # for removal - try again + # race condition (3): another thread/process tried to create the lock at + # the same time (happened in Python 3.3 on Windows) + # https://ci.appveyor.com/project/pytestbot/py/build/1.0.21/job/ffi85j4c0lqwsfwa + if lastmax == maxnum: + raise + lastmax = maxnum + continue + break + + def get_mtime(path): + """ read file modification time """ + try: + return path.lstat().mtime + except py.error.Error: + pass + + garbage_prefix = prefix + 'garbage-' + + def is_garbage(path): + """ check if path denotes directory scheduled for removal """ + bn = path.basename + return bn.startswith(garbage_prefix) + + # prune old directories + udir_time = get_mtime(udir) + if keep and udir_time: + for path in rootdir.listdir(): + num = parse_num(path) + if num is not None and num <= (maxnum - keep): + try: + # try acquiring lock to remove directory as exclusive user + if lock_timeout: + create_lockfile(path) + except (py.error.EEXIST, py.error.ENOENT, py.error.EBUSY): + path_time = get_mtime(path) + if not path_time: + # assume directory doesn't exist now + continue + if abs(udir_time - path_time) < lock_timeout: + # assume directory with lockfile exists + # and lock timeout hasn't expired yet + continue + + # path dir locked for exclusive use + # and scheduled for removal to avoid another thread/process + # treating it as a new directory or removal candidate + garbage_path = rootdir.join(garbage_prefix + str(uuid.uuid4())) + try: + path.rename(garbage_path) + garbage_path.remove(rec=1) + except KeyboardInterrupt: + raise + except: # this might be py.error.Error, WindowsError ... + pass + if is_garbage(path): + try: + path.remove(rec=1) + except KeyboardInterrupt: + raise + except: # this might be py.error.Error, WindowsError ... + pass + + # make link... + try: + username = os.environ['USER'] #linux, et al + except KeyError: + try: + username = os.environ['USERNAME'] #windows + except KeyError: + username = 'current' + + src = str(udir) + dest = src[:src.rfind('-')] + '-' + username + try: + os.unlink(dest) + except OSError: + pass + try: + os.symlink(src, dest) + except (OSError, AttributeError, NotImplementedError): + pass + + return udir + make_numbered_dir = classmethod(make_numbered_dir) + + +def copymode(src, dest): + """ copy permission from src to dst. 
""" + import shutil + shutil.copymode(src, dest) + + +def copystat(src, dest): + """ copy permission, last modification time, + last access time, and flags from src to dst.""" + import shutil + shutil.copystat(str(src), str(dest)) + + +def copychunked(src, dest): + chunksize = 524288 # half a meg of bytes + fsrc = src.open('rb') + try: + fdest = dest.open('wb') + try: + while 1: + buf = fsrc.read(chunksize) + if not buf: + break + fdest.write(buf) + finally: + fdest.close() + finally: + fsrc.close() + + +def isimportable(name): + if name and (name[0].isalpha() or name[0] == '_'): + name = name.replace("_", '') + return not name or name.isalnum() diff --git a/venv/lib/python3.10/site-packages/py/_path/svnurl.py b/venv/lib/python3.10/site-packages/py/_path/svnurl.py new file mode 100644 index 0000000..6589a71 --- /dev/null +++ b/venv/lib/python3.10/site-packages/py/_path/svnurl.py @@ -0,0 +1,380 @@ +""" +module defining a subversion path object based on the external +command 'svn'. This modules aims to work with svn 1.3 and higher +but might also interact well with earlier versions. +""" + +import os, sys, time, re +import py +from py import path, process +from py._path import common +from py._path import svnwc as svncommon +from py._path.cacheutil import BuildcostAccessCache, AgingCache + +DEBUG=False + +class SvnCommandPath(svncommon.SvnPathBase): + """ path implementation that offers access to (possibly remote) subversion + repositories. """ + + _lsrevcache = BuildcostAccessCache(maxentries=128) + _lsnorevcache = AgingCache(maxentries=1000, maxseconds=60.0) + + def __new__(cls, path, rev=None, auth=None): + self = object.__new__(cls) + if isinstance(path, cls): + rev = path.rev + auth = path.auth + path = path.strpath + svncommon.checkbadchars(path) + path = path.rstrip('/') + self.strpath = path + self.rev = rev + self.auth = auth + return self + + def __repr__(self): + if self.rev == -1: + return 'svnurl(%r)' % self.strpath + else: + return 'svnurl(%r, %r)' % (self.strpath, self.rev) + + def _svnwithrev(self, cmd, *args): + """ execute an svn command, append our own url and revision """ + if self.rev is None: + return self._svnwrite(cmd, *args) + else: + args = ['-r', self.rev] + list(args) + return self._svnwrite(cmd, *args) + + def _svnwrite(self, cmd, *args): + """ execute an svn command, append our own url """ + l = ['svn %s' % cmd] + args = ['"%s"' % self._escape(item) for item in args] + l.extend(args) + l.append('"%s"' % self._encodedurl()) + # fixing the locale because we can't otherwise parse + string = " ".join(l) + if DEBUG: + print("execing %s" % string) + out = self._svncmdexecauth(string) + return out + + def _svncmdexecauth(self, cmd): + """ execute an svn command 'as is' """ + cmd = svncommon.fixlocale() + cmd + if self.auth is not None: + cmd += ' ' + self.auth.makecmdoptions() + return self._cmdexec(cmd) + + def _cmdexec(self, cmd): + try: + out = process.cmdexec(cmd) + except py.process.cmdexec.Error: + e = sys.exc_info()[1] + if (e.err.find('File Exists') != -1 or + e.err.find('File already exists') != -1): + raise py.error.EEXIST(self) + raise + return out + + def _svnpopenauth(self, cmd): + """ execute an svn command, return a pipe for reading stdin """ + cmd = svncommon.fixlocale() + cmd + if self.auth is not None: + cmd += ' ' + self.auth.makecmdoptions() + return self._popen(cmd) + + def _popen(self, cmd): + return os.popen(cmd) + + def _encodedurl(self): + return self._escape(self.strpath) + + def _norev_delentry(self, path): + auth = self.auth and 
self.auth.makecmdoptions() or None + self._lsnorevcache.delentry((str(path), auth)) + + def open(self, mode='r'): + """ return an opened file with the given mode. """ + if mode not in ("r", "rU",): + raise ValueError("mode %r not supported" % (mode,)) + assert self.check(file=1) # svn cat returns an empty file otherwise + if self.rev is None: + return self._svnpopenauth('svn cat "%s"' % ( + self._escape(self.strpath), )) + else: + return self._svnpopenauth('svn cat -r %s "%s"' % ( + self.rev, self._escape(self.strpath))) + + def dirpath(self, *args, **kwargs): + """ return the directory path of the current path joined + with any given path arguments. + """ + l = self.strpath.split(self.sep) + if len(l) < 4: + raise py.error.EINVAL(self, "base is not valid") + elif len(l) == 4: + return self.join(*args, **kwargs) + else: + return self.new(basename='').join(*args, **kwargs) + + # modifying methods (cache must be invalidated) + def mkdir(self, *args, **kwargs): + """ create & return the directory joined with args. + pass a 'msg' keyword argument to set the commit message. + """ + commit_msg = kwargs.get('msg', "mkdir by py lib invocation") + createpath = self.join(*args) + createpath._svnwrite('mkdir', '-m', commit_msg) + self._norev_delentry(createpath.dirpath()) + return createpath + + def copy(self, target, msg='copied by py lib invocation'): + """ copy path to target with checkin message msg.""" + if getattr(target, 'rev', None) is not None: + raise py.error.EINVAL(target, "revisions are immutable") + self._svncmdexecauth('svn copy -m "%s" "%s" "%s"' %(msg, + self._escape(self), self._escape(target))) + self._norev_delentry(target.dirpath()) + + def rename(self, target, msg="renamed by py lib invocation"): + """ rename this path to target with checkin message msg. """ + if getattr(self, 'rev', None) is not None: + raise py.error.EINVAL(self, "revisions are immutable") + self._svncmdexecauth('svn move -m "%s" --force "%s" "%s"' %( + msg, self._escape(self), self._escape(target))) + self._norev_delentry(self.dirpath()) + self._norev_delentry(self) + + def remove(self, rec=1, msg='removed by py lib invocation'): + """ remove a file or directory (or a directory tree if rec=1) with +checkin message msg.""" + if self.rev is not None: + raise py.error.EINVAL(self, "revisions are immutable") + self._svncmdexecauth('svn rm -m "%s" "%s"' %(msg, self._escape(self))) + self._norev_delentry(self.dirpath()) + + def export(self, topath): + """ export to a local path + + topath should not exist prior to calling this, returns a + py.path.local instance + """ + topath = py.path.local(topath) + args = ['"%s"' % (self._escape(self),), + '"%s"' % (self._escape(topath),)] + if self.rev is not None: + args = ['-r', str(self.rev)] + args + self._svncmdexecauth('svn export %s' % (' '.join(args),)) + return topath + + def ensure(self, *args, **kwargs): + """ ensure that an args-joined path exists (by default as + a file). If you specify a keyword argument 'dir=True' + then the path is forced to be a directory path. 
+ """ + if getattr(self, 'rev', None) is not None: + raise py.error.EINVAL(self, "revisions are immutable") + target = self.join(*args) + dir = kwargs.get('dir', 0) + for x in target.parts(reverse=True): + if x.check(): + break + else: + raise py.error.ENOENT(target, "has not any valid base!") + if x == target: + if not x.check(dir=dir): + raise dir and py.error.ENOTDIR(x) or py.error.EISDIR(x) + return x + tocreate = target.relto(x) + basename = tocreate.split(self.sep, 1)[0] + tempdir = py.path.local.mkdtemp() + try: + tempdir.ensure(tocreate, dir=dir) + cmd = 'svn import -m "%s" "%s" "%s"' % ( + "ensure %s" % self._escape(tocreate), + self._escape(tempdir.join(basename)), + x.join(basename)._encodedurl()) + self._svncmdexecauth(cmd) + self._norev_delentry(x) + finally: + tempdir.remove() + return target + + # end of modifying methods + def _propget(self, name): + res = self._svnwithrev('propget', name) + return res[:-1] # strip trailing newline + + def _proplist(self): + res = self._svnwithrev('proplist') + lines = res.split('\n') + lines = [x.strip() for x in lines[1:]] + return svncommon.PropListDict(self, lines) + + def info(self): + """ return an Info structure with svn-provided information. """ + parent = self.dirpath() + nameinfo_seq = parent._listdir_nameinfo() + bn = self.basename + for name, info in nameinfo_seq: + if name == bn: + return info + raise py.error.ENOENT(self) + + + def _listdir_nameinfo(self): + """ return sequence of name-info directory entries of self """ + def builder(): + try: + res = self._svnwithrev('ls', '-v') + except process.cmdexec.Error: + e = sys.exc_info()[1] + if e.err.find('non-existent in that revision') != -1: + raise py.error.ENOENT(self, e.err) + elif e.err.find("E200009:") != -1: + raise py.error.ENOENT(self, e.err) + elif e.err.find('File not found') != -1: + raise py.error.ENOENT(self, e.err) + elif e.err.find('not part of a repository')!=-1: + raise py.error.ENOENT(self, e.err) + elif e.err.find('Unable to open')!=-1: + raise py.error.ENOENT(self, e.err) + elif e.err.lower().find('method not allowed')!=-1: + raise py.error.EACCES(self, e.err) + raise py.error.Error(e.err) + lines = res.split('\n') + nameinfo_seq = [] + for lsline in lines: + if lsline: + info = InfoSvnCommand(lsline) + if info._name != '.': # svn 1.5 produces '.' dirs, + nameinfo_seq.append((info._name, info)) + nameinfo_seq.sort() + return nameinfo_seq + auth = self.auth and self.auth.makecmdoptions() or None + if self.rev is not None: + return self._lsrevcache.getorbuild((self.strpath, self.rev, auth), + builder) + else: + return self._lsnorevcache.getorbuild((self.strpath, auth), + builder) + + def listdir(self, fil=None, sort=None): + """ list directory contents, possibly filter by the given fil func + and possibly sorted. + """ + if isinstance(fil, str): + fil = common.FNMatcher(fil) + nameinfo_seq = self._listdir_nameinfo() + if len(nameinfo_seq) == 1: + name, info = nameinfo_seq[0] + if name == self.basename and info.kind == 'file': + #if not self.check(dir=1): + raise py.error.ENOTDIR(self) + paths = [self.join(name) for (name, info) in nameinfo_seq] + if fil: + paths = [x for x in paths if fil(x)] + self._sortlist(paths, sort) + return paths + + + def log(self, rev_start=None, rev_end=1, verbose=False): + """ return a list of LogEntry instances for this path. +rev_start is the starting revision (defaulting to the first one). +rev_end is the last revision (defaulting to HEAD). +if verbose is True, then the LogEntry instances also know which files changed. 
+""" + assert self.check() #make it simpler for the pipe + rev_start = rev_start is None and "HEAD" or rev_start + rev_end = rev_end is None and "HEAD" or rev_end + + if rev_start == "HEAD" and rev_end == 1: + rev_opt = "" + else: + rev_opt = "-r %s:%s" % (rev_start, rev_end) + verbose_opt = verbose and "-v" or "" + xmlpipe = self._svnpopenauth('svn log --xml %s %s "%s"' % + (rev_opt, verbose_opt, self.strpath)) + from xml.dom import minidom + tree = minidom.parse(xmlpipe) + result = [] + for logentry in filter(None, tree.firstChild.childNodes): + if logentry.nodeType == logentry.ELEMENT_NODE: + result.append(svncommon.LogEntry(logentry)) + return result + +#01234567890123456789012345678901234567890123467 +# 2256 hpk 165 Nov 24 17:55 __init__.py +# XXX spotted by Guido, SVN 1.3.0 has different aligning, breaks the code!!! +# 1312 johnny 1627 May 05 14:32 test_decorators.py +# +class InfoSvnCommand: + # the '0?' part in the middle is an indication of whether the resource is + # locked, see 'svn help ls' + lspattern = re.compile( + r'^ *(?P\d+) +(?P.+?) +(0? *(?P\d+))? ' + r'*(?P\w+ +\d{2} +[\d:]+) +(?P.*)$') + def __init__(self, line): + # this is a typical line from 'svn ls http://...' + #_ 1127 jum 0 Jul 13 15:28 branch/ + match = self.lspattern.match(line) + data = match.groupdict() + self._name = data['file'] + if self._name[-1] == '/': + self._name = self._name[:-1] + self.kind = 'dir' + else: + self.kind = 'file' + #self.has_props = l.pop(0) == 'P' + self.created_rev = int(data['rev']) + self.last_author = data['author'] + self.size = data['size'] and int(data['size']) or 0 + self.mtime = parse_time_with_missing_year(data['date']) + self.time = self.mtime * 1000000 + + def __eq__(self, other): + return self.__dict__ == other.__dict__ + + +#____________________________________________________ +# +# helper functions +#____________________________________________________ +def parse_time_with_missing_year(timestr): + """ analyze the time part from a single line of "svn ls -v" + the svn output doesn't show the year makes the 'timestr' + ambigous. + """ + import calendar + t_now = time.gmtime() + + tparts = timestr.split() + month = time.strptime(tparts.pop(0), '%b')[1] + day = time.strptime(tparts.pop(0), '%d')[2] + last = tparts.pop(0) # year or hour:minute + try: + if ":" in last: + raise ValueError() + year = time.strptime(last, '%Y')[0] + hour = minute = 0 + except ValueError: + hour, minute = time.strptime(last, '%H:%M')[3:5] + year = t_now[0] + + t_result = (year, month, day, hour, minute, 0,0,0,0) + if t_result > t_now: + year -= 1 + t_result = (year, month, day, hour, minute, 0,0,0,0) + return calendar.timegm(t_result) + +class PathEntry: + def __init__(self, ppart): + self.strpath = ppart.firstChild.nodeValue.encode('UTF-8') + self.action = ppart.getAttribute('action').encode('UTF-8') + if self.action == 'A': + self.copyfrom_path = ppart.getAttribute('copyfrom-path').encode('UTF-8') + if self.copyfrom_path: + self.copyfrom_rev = int(ppart.getAttribute('copyfrom-rev')) + diff --git a/venv/lib/python3.10/site-packages/py/_path/svnwc.py b/venv/lib/python3.10/site-packages/py/_path/svnwc.py new file mode 100644 index 0000000..b5b9d8d --- /dev/null +++ b/venv/lib/python3.10/site-packages/py/_path/svnwc.py @@ -0,0 +1,1240 @@ +""" +svn-Command based Implementation of a Subversion WorkingCopy Path. + + SvnWCCommandPath is the main class. 
+ +""" + +import os, sys, time, re, calendar +import py +import subprocess +from py._path import common + +#----------------------------------------------------------- +# Caching latest repository revision and repo-paths +# (getting them is slow with the current implementations) +# +# XXX make mt-safe +#----------------------------------------------------------- + +class cache: + proplist = {} + info = {} + entries = {} + prop = {} + +class RepoEntry: + def __init__(self, url, rev, timestamp): + self.url = url + self.rev = rev + self.timestamp = timestamp + + def __str__(self): + return "repo: %s;%s %s" %(self.url, self.rev, self.timestamp) + +class RepoCache: + """ The Repocache manages discovered repository paths + and their revisions. If inside a timeout the cache + will even return the revision of the root. + """ + timeout = 20 # seconds after which we forget that we know the last revision + + def __init__(self): + self.repos = [] + + def clear(self): + self.repos = [] + + def put(self, url, rev, timestamp=None): + if rev is None: + return + if timestamp is None: + timestamp = time.time() + + for entry in self.repos: + if url == entry.url: + entry.timestamp = timestamp + entry.rev = rev + #print "set repo", entry + break + else: + entry = RepoEntry(url, rev, timestamp) + self.repos.append(entry) + #print "appended repo", entry + + def get(self, url): + now = time.time() + for entry in self.repos: + if url.startswith(entry.url): + if now < entry.timestamp + self.timeout: + #print "returning immediate Etrny", entry + return entry.url, entry.rev + return entry.url, -1 + return url, -1 + +repositories = RepoCache() + + +# svn support code + +ALLOWED_CHARS = "_ -/\\=$.~+%" #add characters as necessary when tested +if sys.platform == "win32": + ALLOWED_CHARS += ":" +ALLOWED_CHARS_HOST = ALLOWED_CHARS + '@:' + +def _getsvnversion(ver=[]): + try: + return ver[0] + except IndexError: + v = py.process.cmdexec("svn -q --version") + v.strip() + v = '.'.join(v.split('.')[:2]) + ver.append(v) + return v + +def _escape_helper(text): + text = str(text) + if sys.platform != 'win32': + text = str(text).replace('$', '\\$') + return text + +def _check_for_bad_chars(text, allowed_chars=ALLOWED_CHARS): + for c in str(text): + if c.isalnum(): + continue + if c in allowed_chars: + continue + return True + return False + +def checkbadchars(url): + # (hpk) not quite sure about the exact purpose, guido w.? + proto, uri = url.split("://", 1) + if proto != "file": + host, uripath = uri.split('/', 1) + # only check for bad chars in the non-protocol parts + if (_check_for_bad_chars(host, ALLOWED_CHARS_HOST) \ + or _check_for_bad_chars(uripath, ALLOWED_CHARS)): + raise ValueError("bad char in %r" % (url, )) + + +#_______________________________________________________________ + +class SvnPathBase(common.PathBase): + """ Base implementation for SvnPath implementations. """ + sep = '/' + + def _geturl(self): + return self.strpath + url = property(_geturl, None, None, "url of this svn-path.") + + def __str__(self): + """ return a string representation (including rev-number) """ + return self.strpath + + def __hash__(self): + return hash(self.strpath) + + def new(self, **kw): + """ create a modified version of this path. A 'rev' argument + indicates a new revision. 
+ the following keyword arguments modify various path parts:: + + http://host.com/repo/path/file.ext + |-----------------------| dirname + |------| basename + |--| purebasename + |--| ext + """ + obj = object.__new__(self.__class__) + obj.rev = kw.get('rev', self.rev) + obj.auth = kw.get('auth', self.auth) + dirname, basename, purebasename, ext = self._getbyspec( + "dirname,basename,purebasename,ext") + if 'basename' in kw: + if 'purebasename' in kw or 'ext' in kw: + raise ValueError("invalid specification %r" % kw) + else: + pb = kw.setdefault('purebasename', purebasename) + ext = kw.setdefault('ext', ext) + if ext and not ext.startswith('.'): + ext = '.' + ext + kw['basename'] = pb + ext + + kw.setdefault('dirname', dirname) + kw.setdefault('sep', self.sep) + if kw['basename']: + obj.strpath = "%(dirname)s%(sep)s%(basename)s" % kw + else: + obj.strpath = "%(dirname)s" % kw + return obj + + def _getbyspec(self, spec): + """ get specified parts of the path. 'arg' is a string + with comma separated path parts. The parts are returned + in exactly the order of the specification. + + you may specify the following parts: + + http://host.com/repo/path/file.ext + |-----------------------| dirname + |------| basename + |--| purebasename + |--| ext + """ + res = [] + parts = self.strpath.split(self.sep) + for name in spec.split(','): + name = name.strip() + if name == 'dirname': + res.append(self.sep.join(parts[:-1])) + elif name == 'basename': + res.append(parts[-1]) + else: + basename = parts[-1] + i = basename.rfind('.') + if i == -1: + purebasename, ext = basename, '' + else: + purebasename, ext = basename[:i], basename[i:] + if name == 'purebasename': + res.append(purebasename) + elif name == 'ext': + res.append(ext) + else: + raise NameError("Don't know part %r" % name) + return res + + def __eq__(self, other): + """ return true if path and rev attributes each match """ + return (str(self) == str(other) and + (self.rev == other.rev or self.rev == other.rev)) + + def __ne__(self, other): + return not self == other + + def join(self, *args): + """ return a new Path (with the same revision) which is composed + of the self Path followed by 'args' path components. + """ + if not args: + return self + + args = tuple([arg.strip(self.sep) for arg in args]) + parts = (self.strpath, ) + args + newpath = self.__class__(self.sep.join(parts), self.rev, self.auth) + return newpath + + def propget(self, name): + """ return the content of the given property. """ + value = self._propget(name) + return value + + def proplist(self): + """ list all property names. """ + content = self._proplist() + return content + + def size(self): + """ Return the size of the file content of the Path. """ + return self.info().size + + def mtime(self): + """ Return the last modification time of the file. """ + return self.info().mtime + + # shared help methods + + def _escape(self, cmd): + return _escape_helper(cmd) + + + #def _childmaxrev(self): + # """ return maximum revision number of childs (or self.rev if no childs) """ + # rev = self.rev + # for name, info in self._listdir_nameinfo(): + # rev = max(rev, info.created_rev) + # return rev + + #def _getlatestrevision(self): + # """ return latest repo-revision for this path. 
""" + # url = self.strpath + # path = self.__class__(url, None) + # + # # we need a long walk to find the root-repo and revision + # while 1: + # try: + # rev = max(rev, path._childmaxrev()) + # previous = path + # path = path.dirpath() + # except (IOError, process.cmdexec.Error): + # break + # if rev is None: + # raise IOError, "could not determine newest repo revision for %s" % self + # return rev + + class Checkers(common.Checkers): + def dir(self): + try: + return self.path.info().kind == 'dir' + except py.error.Error: + return self._listdirworks() + + def _listdirworks(self): + try: + self.path.listdir() + except py.error.ENOENT: + return False + else: + return True + + def file(self): + try: + return self.path.info().kind == 'file' + except py.error.ENOENT: + return False + + def exists(self): + try: + return self.path.info() + except py.error.ENOENT: + return self._listdirworks() + +def parse_apr_time(timestr): + i = timestr.rfind('.') + if i == -1: + raise ValueError("could not parse %s" % timestr) + timestr = timestr[:i] + parsedtime = time.strptime(timestr, "%Y-%m-%dT%H:%M:%S") + return time.mktime(parsedtime) + +class PropListDict(dict): + """ a Dictionary which fetches values (InfoSvnCommand instances) lazily""" + def __init__(self, path, keynames): + dict.__init__(self, [(x, None) for x in keynames]) + self.path = path + + def __getitem__(self, key): + value = dict.__getitem__(self, key) + if value is None: + value = self.path.propget(key) + dict.__setitem__(self, key, value) + return value + +def fixlocale(): + if sys.platform != 'win32': + return 'LC_ALL=C ' + return '' + +# some nasty chunk of code to solve path and url conversion and quoting issues +ILLEGAL_CHARS = '* | \\ / : < > ? \t \n \x0b \x0c \r'.split(' ') +if os.sep in ILLEGAL_CHARS: + ILLEGAL_CHARS.remove(os.sep) +ISWINDOWS = sys.platform == 'win32' +_reg_allow_disk = re.compile(r'^([a-z]\:\\)?[^:]+$', re.I) +def _check_path(path): + illegal = ILLEGAL_CHARS[:] + sp = path.strpath + if ISWINDOWS: + illegal.remove(':') + if not _reg_allow_disk.match(sp): + raise ValueError('path may not contain a colon (:)') + for char in sp: + if char not in string.printable or char in illegal: + raise ValueError('illegal character %r in path' % (char,)) + +def path_to_fspath(path, addat=True): + _check_path(path) + sp = path.strpath + if addat and path.rev != -1: + sp = '%s@%s' % (sp, path.rev) + elif addat: + sp = '%s@HEAD' % (sp,) + return sp + +def url_from_path(path): + fspath = path_to_fspath(path, False) + from urllib import quote + if ISWINDOWS: + match = _reg_allow_disk.match(fspath) + fspath = fspath.replace('\\', '/') + if match.group(1): + fspath = '/%s%s' % (match.group(1).replace('\\', '/'), + quote(fspath[len(match.group(1)):])) + else: + fspath = quote(fspath) + else: + fspath = quote(fspath) + if path.rev != -1: + fspath = '%s@%s' % (fspath, path.rev) + else: + fspath = '%s@HEAD' % (fspath,) + return 'file://%s' % (fspath,) + +class SvnAuth(object): + """ container for auth information for Subversion """ + def __init__(self, username, password, cache_auth=True, interactive=True): + self.username = username + self.password = password + self.cache_auth = cache_auth + self.interactive = interactive + + def makecmdoptions(self): + uname = self.username.replace('"', '\\"') + passwd = self.password.replace('"', '\\"') + ret = [] + if uname: + ret.append('--username="%s"' % (uname,)) + if passwd: + ret.append('--password="%s"' % (passwd,)) + if not self.cache_auth: + ret.append('--no-auth-cache') + if not 
self.interactive: + ret.append('--non-interactive') + return ' '.join(ret) + + def __str__(self): + return "" %(self.username,) + +rex_blame = re.compile(r'\s*(\d+)\s+(\S+) (.*)') + +class SvnWCCommandPath(common.PathBase): + """ path implementation offering access/modification to svn working copies. + It has methods similar to the functions in os.path and similar to the + commands of the svn client. + """ + sep = os.sep + + def __new__(cls, wcpath=None, auth=None): + self = object.__new__(cls) + if isinstance(wcpath, cls): + if wcpath.__class__ == cls: + return wcpath + wcpath = wcpath.localpath + if _check_for_bad_chars(str(wcpath), + ALLOWED_CHARS): + raise ValueError("bad char in wcpath %s" % (wcpath, )) + self.localpath = py.path.local(wcpath) + self.auth = auth + return self + + strpath = property(lambda x: str(x.localpath), None, None, "string path") + rev = property(lambda x: x.info(usecache=0).rev, None, None, "revision") + + def __eq__(self, other): + return self.localpath == getattr(other, 'localpath', None) + + def _geturl(self): + if getattr(self, '_url', None) is None: + info = self.info() + self._url = info.url #SvnPath(info.url, info.rev) + assert isinstance(self._url, py.builtin._basestring) + return self._url + + url = property(_geturl, None, None, "url of this WC item") + + def _escape(self, cmd): + return _escape_helper(cmd) + + def dump(self, obj): + """ pickle object into path location""" + return self.localpath.dump(obj) + + def svnurl(self): + """ return current SvnPath for this WC-item. """ + info = self.info() + return py.path.svnurl(info.url) + + def __repr__(self): + return "svnwc(%r)" % (self.strpath) # , self._url) + + def __str__(self): + return str(self.localpath) + + def _makeauthoptions(self): + if self.auth is None: + return '' + return self.auth.makecmdoptions() + + def _authsvn(self, cmd, args=None): + args = args and list(args) or [] + args.append(self._makeauthoptions()) + return self._svn(cmd, *args) + + def _svn(self, cmd, *args): + l = ['svn %s' % cmd] + args = [self._escape(item) for item in args] + l.extend(args) + l.append('"%s"' % self._escape(self.strpath)) + # try fixing the locale because we can't otherwise parse + string = fixlocale() + " ".join(l) + try: + try: + key = 'LC_MESSAGES' + hold = os.environ.get(key) + os.environ[key] = 'C' + out = py.process.cmdexec(string) + finally: + if hold: + os.environ[key] = hold + else: + del os.environ[key] + except py.process.cmdexec.Error: + e = sys.exc_info()[1] + strerr = e.err.lower() + if strerr.find('not found') != -1: + raise py.error.ENOENT(self) + elif strerr.find("E200009:") != -1: + raise py.error.ENOENT(self) + if (strerr.find('file exists') != -1 or + strerr.find('file already exists') != -1 or + strerr.find('w150002:') != -1 or + strerr.find("can't create directory") != -1): + raise py.error.EEXIST(strerr) #self) + raise + return out + + def switch(self, url): + """ switch to given URL. """ + self._authsvn('switch', [url]) + + def checkout(self, url=None, rev=None): + """ checkout from url to local wcpath. """ + args = [] + if url is None: + url = self.url + if rev is None or rev == -1: + if (sys.platform != 'win32' and + _getsvnversion() == '1.3'): + url += "@HEAD" + else: + if _getsvnversion() == '1.3': + url += "@%d" % rev + else: + args.append('-r' + str(rev)) + args.append(url) + self._authsvn('co', args) + + def update(self, rev='HEAD', interactive=True): + """ update working copy item to given revision. (None -> HEAD). 
""" + opts = ['-r', rev] + if not interactive: + opts.append("--non-interactive") + self._authsvn('up', opts) + + def write(self, content, mode='w'): + """ write content into local filesystem wc. """ + self.localpath.write(content, mode) + + def dirpath(self, *args): + """ return the directory Path of the current Path. """ + return self.__class__(self.localpath.dirpath(*args), auth=self.auth) + + def _ensuredirs(self): + parent = self.dirpath() + if parent.check(dir=0): + parent._ensuredirs() + if self.check(dir=0): + self.mkdir() + return self + + def ensure(self, *args, **kwargs): + """ ensure that an args-joined path exists (by default as + a file). if you specify a keyword argument 'directory=True' + then the path is forced to be a directory path. + """ + p = self.join(*args) + if p.check(): + if p.check(versioned=False): + p.add() + return p + if kwargs.get('dir', 0): + return p._ensuredirs() + parent = p.dirpath() + parent._ensuredirs() + p.write("") + p.add() + return p + + def mkdir(self, *args): + """ create & return the directory joined with args. """ + if args: + return self.join(*args).mkdir() + else: + self._svn('mkdir') + return self + + def add(self): + """ add ourself to svn """ + self._svn('add') + + def remove(self, rec=1, force=1): + """ remove a file or a directory tree. 'rec'ursive is + ignored and considered always true (because of + underlying svn semantics. + """ + assert rec, "svn cannot remove non-recursively" + if not self.check(versioned=True): + # not added to svn (anymore?), just remove + py.path.local(self).remove() + return + flags = [] + if force: + flags.append('--force') + self._svn('remove', *flags) + + def copy(self, target): + """ copy path to target.""" + py.process.cmdexec("svn copy %s %s" %(str(self), str(target))) + + def rename(self, target): + """ rename this path to target. """ + py.process.cmdexec("svn move --force %s %s" %(str(self), str(target))) + + def lock(self): + """ set a lock (exclusive) on the resource """ + out = self._authsvn('lock').strip() + if not out: + # warning or error, raise exception + raise ValueError("unknown error in svn lock command") + + def unlock(self): + """ unset a previously set lock """ + out = self._authsvn('unlock').strip() + if out.startswith('svn:'): + # warning or error, raise exception + raise Exception(out[4:]) + + def cleanup(self): + """ remove any locks from the resource """ + # XXX should be fixed properly!!! + try: + self.unlock() + except: + pass + + def status(self, updates=0, rec=0, externals=0): + """ return (collective) Status object for this file. 
""" + # http://svnbook.red-bean.com/book.html#svn-ch-3-sect-4.3.1 + # 2201 2192 jum test + # XXX + if externals: + raise ValueError("XXX cannot perform status() " + "on external items yet") + else: + #1.2 supports: externals = '--ignore-externals' + externals = '' + if rec: + rec= '' + else: + rec = '--non-recursive' + + # XXX does not work on all subversion versions + #if not externals: + # externals = '--ignore-externals' + + if updates: + updates = '-u' + else: + updates = '' + + try: + cmd = 'status -v --xml --no-ignore %s %s %s' % ( + updates, rec, externals) + out = self._authsvn(cmd) + except py.process.cmdexec.Error: + cmd = 'status -v --no-ignore %s %s %s' % ( + updates, rec, externals) + out = self._authsvn(cmd) + rootstatus = WCStatus(self).fromstring(out, self) + else: + rootstatus = XMLWCStatus(self).fromstring(out, self) + return rootstatus + + def diff(self, rev=None): + """ return a diff of the current path against revision rev (defaulting + to the last one). + """ + args = [] + if rev is not None: + args.append("-r %d" % rev) + out = self._authsvn('diff', args) + return out + + def blame(self): + """ return a list of tuples of three elements: + (revision, commiter, line) + """ + out = self._svn('blame') + result = [] + blamelines = out.splitlines() + reallines = py.path.svnurl(self.url).readlines() + for i, (blameline, line) in enumerate( + zip(blamelines, reallines)): + m = rex_blame.match(blameline) + if not m: + raise ValueError("output line %r of svn blame does not match " + "expected format" % (line, )) + rev, name, _ = m.groups() + result.append((int(rev), name, line)) + return result + + _rex_commit = re.compile(r'.*Committed revision (\d+)\.$', re.DOTALL) + def commit(self, msg='', rec=1): + """ commit with support for non-recursive commits """ + # XXX i guess escaping should be done better here?!? + cmd = 'commit -m "%s" --force-log' % (msg.replace('"', '\\"'),) + if not rec: + cmd += ' -N' + out = self._authsvn(cmd) + try: + del cache.info[self] + except KeyError: + pass + if out: + m = self._rex_commit.match(out) + return int(m.group(1)) + + def propset(self, name, value, *args): + """ set property name to value on this path. """ + d = py.path.local.mkdtemp() + try: + p = d.join('value') + p.write(value) + self._svn('propset', name, '--file', str(p), *args) + finally: + d.remove() + + def propget(self, name): + """ get property name on this path. """ + res = self._svn('propget', name) + return res[:-1] # strip trailing newline + + def propdel(self, name): + """ delete property name on this path. """ + res = self._svn('propdel', name) + return res[:-1] # strip trailing newline + + def proplist(self, rec=0): + """ return a mapping of property names to property values. +If rec is True, then return a dictionary mapping sub-paths to such mappings. +""" + if rec: + res = self._svn('proplist -R') + return make_recursive_propdict(self, res) + else: + res = self._svn('proplist') + lines = res.split('\n') + lines = [x.strip() for x in lines[1:]] + return PropListDict(self, lines) + + def revert(self, rec=0): + """ revert the local changes of this path. if rec is True, do so +recursively. """ + if rec: + result = self._svn('revert -R') + else: + result = self._svn('revert') + return result + + def new(self, **kw): + """ create a modified version of this path. A 'rev' argument + indicates a new revision. 
+ the following keyword arguments modify various path parts: + + http://host.com/repo/path/file.ext + |-----------------------| dirname + |------| basename + |--| purebasename + |--| ext + """ + if kw: + localpath = self.localpath.new(**kw) + else: + localpath = self.localpath + return self.__class__(localpath, auth=self.auth) + + def join(self, *args, **kwargs): + """ return a new Path (with the same revision) which is composed + of the self Path followed by 'args' path components. + """ + if not args: + return self + localpath = self.localpath.join(*args, **kwargs) + return self.__class__(localpath, auth=self.auth) + + def info(self, usecache=1): + """ return an Info structure with svn-provided information. """ + info = usecache and cache.info.get(self) + if not info: + try: + output = self._svn('info') + except py.process.cmdexec.Error: + e = sys.exc_info()[1] + if e.err.find('Path is not a working copy directory') != -1: + raise py.error.ENOENT(self, e.err) + elif e.err.find("is not under version control") != -1: + raise py.error.ENOENT(self, e.err) + raise + # XXX SVN 1.3 has output on stderr instead of stdout (while it does + # return 0!), so a bit nasty, but we assume no output is output + # to stderr... + if (output.strip() == '' or + output.lower().find('not a versioned resource') != -1): + raise py.error.ENOENT(self, output) + info = InfoSvnWCCommand(output) + + # Can't reliably compare on Windows without access to win32api + if sys.platform != 'win32': + if info.path != self.localpath: + raise py.error.ENOENT(self, "not a versioned resource:" + + " %s != %s" % (info.path, self.localpath)) + cache.info[self] = info + return info + + def listdir(self, fil=None, sort=None): + """ return a sequence of Paths. + + listdir will return either a tuple or a list of paths + depending on implementation choices. + """ + if isinstance(fil, str): + fil = common.FNMatcher(fil) + # XXX unify argument naming with LocalPath.listdir + def notsvn(path): + return path.basename != '.svn' + + paths = [] + for localpath in self.localpath.listdir(notsvn): + p = self.__class__(localpath, auth=self.auth) + if notsvn(p) and (not fil or fil(p)): + paths.append(p) + self._sortlist(paths, sort) + return paths + + def open(self, mode='r'): + """ return an opened file with the given mode. """ + return open(self.strpath, mode) + + def _getbyspec(self, spec): + return self.localpath._getbyspec(spec) + + class Checkers(py.path.local.Checkers): + def __init__(self, path): + self.svnwcpath = path + self.path = path.localpath + def versioned(self): + try: + s = self.svnwcpath.info() + except (py.error.ENOENT, py.error.EEXIST): + return False + except py.process.cmdexec.Error: + e = sys.exc_info()[1] + if e.err.find('is not a working copy')!=-1: + return False + if e.err.lower().find('not a versioned resource') != -1: + return False + raise + else: + return True + + def log(self, rev_start=None, rev_end=1, verbose=False): + """ return a list of LogEntry instances for this path. +rev_start is the starting revision (defaulting to the first one). +rev_end is the last revision (defaulting to HEAD). +if verbose is True, then the LogEntry instances also know which files changed. 
+""" + assert self.check() # make it simpler for the pipe + rev_start = rev_start is None and "HEAD" or rev_start + rev_end = rev_end is None and "HEAD" or rev_end + if rev_start == "HEAD" and rev_end == 1: + rev_opt = "" + else: + rev_opt = "-r %s:%s" % (rev_start, rev_end) + verbose_opt = verbose and "-v" or "" + locale_env = fixlocale() + # some blather on stderr + auth_opt = self._makeauthoptions() + #stdin, stdout, stderr = os.popen3(locale_env + + # 'svn log --xml %s %s %s "%s"' % ( + # rev_opt, verbose_opt, auth_opt, + # self.strpath)) + cmd = locale_env + 'svn log --xml %s %s %s "%s"' % ( + rev_opt, verbose_opt, auth_opt, self.strpath) + + popen = subprocess.Popen(cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + shell=True, + ) + stdout, stderr = popen.communicate() + stdout = py.builtin._totext(stdout, sys.getdefaultencoding()) + minidom,ExpatError = importxml() + try: + tree = minidom.parseString(stdout) + except ExpatError: + raise ValueError('no such revision') + result = [] + for logentry in filter(None, tree.firstChild.childNodes): + if logentry.nodeType == logentry.ELEMENT_NODE: + result.append(LogEntry(logentry)) + return result + + def size(self): + """ Return the size of the file content of the Path. """ + return self.info().size + + def mtime(self): + """ Return the last modification time of the file. """ + return self.info().mtime + + def __hash__(self): + return hash((self.strpath, self.__class__, self.auth)) + + +class WCStatus: + attrnames = ('modified','added', 'conflict', 'unchanged', 'external', + 'deleted', 'prop_modified', 'unknown', 'update_available', + 'incomplete', 'kindmismatch', 'ignored', 'locked', 'replaced' + ) + + def __init__(self, wcpath, rev=None, modrev=None, author=None): + self.wcpath = wcpath + self.rev = rev + self.modrev = modrev + self.author = author + + for name in self.attrnames: + setattr(self, name, []) + + def allpath(self, sort=True, **kw): + d = {} + for name in self.attrnames: + if name not in kw or kw[name]: + for path in getattr(self, name): + d[path] = 1 + l = d.keys() + if sort: + l.sort() + return l + + # XXX a bit scary to assume there's always 2 spaces between username and + # path, however with win32 allowing spaces in user names there doesn't + # seem to be a more solid approach :( + _rex_status = re.compile(r'\s+(\d+|-)\s+(\S+)\s+(.+?)\s{2,}(.*)') + + def fromstring(data, rootwcpath, rev=None, modrev=None, author=None): + """ return a new WCStatus object from data 's' + """ + rootstatus = WCStatus(rootwcpath, rev, modrev, author) + update_rev = None + for line in data.split('\n'): + if not line.strip(): + continue + #print "processing %r" % line + flags, rest = line[:8], line[8:] + # first column + c0,c1,c2,c3,c4,c5,x6,c7 = flags + #if '*' in line: + # print "flags", repr(flags), "rest", repr(rest) + + if c0 in '?XI': + fn = line.split(None, 1)[1] + if c0 == '?': + wcpath = rootwcpath.join(fn, abs=1) + rootstatus.unknown.append(wcpath) + elif c0 == 'X': + wcpath = rootwcpath.__class__( + rootwcpath.localpath.join(fn, abs=1), + auth=rootwcpath.auth) + rootstatus.external.append(wcpath) + elif c0 == 'I': + wcpath = rootwcpath.join(fn, abs=1) + rootstatus.ignored.append(wcpath) + + continue + + #elif c0 in '~!' 
or c4 == 'S': + # raise NotImplementedError("received flag %r" % c0) + + m = WCStatus._rex_status.match(rest) + if not m: + if c7 == '*': + fn = rest.strip() + wcpath = rootwcpath.join(fn, abs=1) + rootstatus.update_available.append(wcpath) + continue + if line.lower().find('against revision:')!=-1: + update_rev = int(rest.split(':')[1].strip()) + continue + if line.lower().find('status on external') > -1: + # XXX not sure what to do here... perhaps we want to + # store some state instead of just continuing, as right + # now it makes the top-level external get added twice + # (once as external, once as 'normal' unchanged item) + # because of the way SVN presents external items + continue + # keep trying + raise ValueError("could not parse line %r" % line) + else: + rev, modrev, author, fn = m.groups() + wcpath = rootwcpath.join(fn, abs=1) + #assert wcpath.check() + if c0 == 'M': + assert wcpath.check(file=1), "didn't expect a directory with changed content here" + rootstatus.modified.append(wcpath) + elif c0 == 'A' or c3 == '+' : + rootstatus.added.append(wcpath) + elif c0 == 'D': + rootstatus.deleted.append(wcpath) + elif c0 == 'C': + rootstatus.conflict.append(wcpath) + elif c0 == '~': + rootstatus.kindmismatch.append(wcpath) + elif c0 == '!': + rootstatus.incomplete.append(wcpath) + elif c0 == 'R': + rootstatus.replaced.append(wcpath) + elif not c0.strip(): + rootstatus.unchanged.append(wcpath) + else: + raise NotImplementedError("received flag %r" % c0) + + if c1 == 'M': + rootstatus.prop_modified.append(wcpath) + # XXX do we cover all client versions here? + if c2 == 'L' or c5 == 'K': + rootstatus.locked.append(wcpath) + if c7 == '*': + rootstatus.update_available.append(wcpath) + + if wcpath == rootwcpath: + rootstatus.rev = rev + rootstatus.modrev = modrev + rootstatus.author = author + if update_rev: + rootstatus.update_rev = update_rev + continue + return rootstatus + fromstring = staticmethod(fromstring) + +class XMLWCStatus(WCStatus): + def fromstring(data, rootwcpath, rev=None, modrev=None, author=None): + """ parse 'data' (XML string as outputted by svn st) into a status obj + """ + # XXX for externals, the path is shown twice: once + # with external information, and once with full info as if + # the item was a normal non-external... 
the current way of + # dealing with this issue is by ignoring it - this does make + # externals appear as external items as well as 'normal', + # unchanged ones in the status object so this is far from ideal + rootstatus = WCStatus(rootwcpath, rev, modrev, author) + update_rev = None + minidom, ExpatError = importxml() + try: + doc = minidom.parseString(data) + except ExpatError: + e = sys.exc_info()[1] + raise ValueError(str(e)) + urevels = doc.getElementsByTagName('against') + if urevels: + rootstatus.update_rev = urevels[-1].getAttribute('revision') + for entryel in doc.getElementsByTagName('entry'): + path = entryel.getAttribute('path') + statusel = entryel.getElementsByTagName('wc-status')[0] + itemstatus = statusel.getAttribute('item') + + if itemstatus == 'unversioned': + wcpath = rootwcpath.join(path, abs=1) + rootstatus.unknown.append(wcpath) + continue + elif itemstatus == 'external': + wcpath = rootwcpath.__class__( + rootwcpath.localpath.join(path, abs=1), + auth=rootwcpath.auth) + rootstatus.external.append(wcpath) + continue + elif itemstatus == 'ignored': + wcpath = rootwcpath.join(path, abs=1) + rootstatus.ignored.append(wcpath) + continue + elif itemstatus == 'incomplete': + wcpath = rootwcpath.join(path, abs=1) + rootstatus.incomplete.append(wcpath) + continue + + rev = statusel.getAttribute('revision') + if itemstatus == 'added' or itemstatus == 'none': + rev = '0' + modrev = '?' + author = '?' + date = '' + elif itemstatus == "replaced": + pass + else: + #print entryel.toxml() + commitel = entryel.getElementsByTagName('commit')[0] + if commitel: + modrev = commitel.getAttribute('revision') + author = '' + author_els = commitel.getElementsByTagName('author') + if author_els: + for c in author_els[0].childNodes: + author += c.nodeValue + date = '' + for c in commitel.getElementsByTagName('date')[0]\ + .childNodes: + date += c.nodeValue + + wcpath = rootwcpath.join(path, abs=1) + + assert itemstatus != 'modified' or wcpath.check(file=1), ( + 'did\'t expect a directory with changed content here') + + itemattrname = { + 'normal': 'unchanged', + 'unversioned': 'unknown', + 'conflicted': 'conflict', + 'none': 'added', + }.get(itemstatus, itemstatus) + + attr = getattr(rootstatus, itemattrname) + attr.append(wcpath) + + propsstatus = statusel.getAttribute('props') + if propsstatus not in ('none', 'normal'): + rootstatus.prop_modified.append(wcpath) + + if wcpath == rootwcpath: + rootstatus.rev = rev + rootstatus.modrev = modrev + rootstatus.author = author + rootstatus.date = date + + # handle repos-status element (remote info) + rstatusels = entryel.getElementsByTagName('repos-status') + if rstatusels: + rstatusel = rstatusels[0] + ritemstatus = rstatusel.getAttribute('item') + if ritemstatus in ('added', 'modified'): + rootstatus.update_available.append(wcpath) + + lockels = entryel.getElementsByTagName('lock') + if len(lockels): + rootstatus.locked.append(wcpath) + + return rootstatus + fromstring = staticmethod(fromstring) + +class InfoSvnWCCommand: + def __init__(self, output): + # Path: test + # URL: http://codespeak.net/svn/std.path/trunk/dist/std.path/test + # Repository UUID: fd0d7bf2-dfb6-0310-8d31-b7ecfe96aada + # Revision: 2151 + # Node Kind: directory + # Schedule: normal + # Last Changed Author: hpk + # Last Changed Rev: 2100 + # Last Changed Date: 2003-10-27 20:43:14 +0100 (Mon, 27 Oct 2003) + # Properties Last Updated: 2003-11-03 14:47:48 +0100 (Mon, 03 Nov 2003) + + d = {} + for line in output.split('\n'): + if not line.strip(): + continue + key, value = 
line.split(':', 1) + key = key.lower().replace(' ', '') + value = value.strip() + d[key] = value + try: + self.url = d['url'] + except KeyError: + raise ValueError("Not a versioned resource") + #raise ValueError, "Not a versioned resource %r" % path + self.kind = d['nodekind'] == 'directory' and 'dir' or d['nodekind'] + try: + self.rev = int(d['revision']) + except KeyError: + self.rev = None + + self.path = py.path.local(d['path']) + self.size = self.path.size() + if 'lastchangedrev' in d: + self.created_rev = int(d['lastchangedrev']) + if 'lastchangedauthor' in d: + self.last_author = d['lastchangedauthor'] + if 'lastchangeddate' in d: + self.mtime = parse_wcinfotime(d['lastchangeddate']) + self.time = self.mtime * 1000000 + + def __eq__(self, other): + return self.__dict__ == other.__dict__ + +def parse_wcinfotime(timestr): + """ Returns seconds since epoch, UTC. """ + # example: 2003-10-27 20:43:14 +0100 (Mon, 27 Oct 2003) + m = re.match(r'(\d+-\d+-\d+ \d+:\d+:\d+) ([+-]\d+) .*', timestr) + if not m: + raise ValueError("timestring %r does not match" % timestr) + timestr, timezone = m.groups() + # do not handle timezone specially, return value should be UTC + parsedtime = time.strptime(timestr, "%Y-%m-%d %H:%M:%S") + return calendar.timegm(parsedtime) + +def make_recursive_propdict(wcroot, + output, + rex = re.compile("Properties on '(.*)':")): + """ Return a dictionary of path->PropListDict mappings. """ + lines = [x for x in output.split('\n') if x] + pdict = {} + while lines: + line = lines.pop(0) + m = rex.match(line) + if not m: + raise ValueError("could not parse propget-line: %r" % line) + path = m.groups()[0] + wcpath = wcroot.join(path, abs=1) + propnames = [] + while lines and lines[0].startswith(' '): + propname = lines.pop(0).strip() + propnames.append(propname) + assert propnames, "must have found properties!" 
+    pdict[wcpath] = PropListDict(wcpath, propnames)
+    return pdict
+
+
+def importxml(cache=[]):
+    if cache:
+        return cache
+    from xml.dom import minidom
+    from xml.parsers.expat import ExpatError
+    cache.extend([minidom, ExpatError])
+    return cache
+
+class LogEntry:
+    def __init__(self, logentry):
+        self.rev = int(logentry.getAttribute('revision'))
+        for lpart in filter(None, logentry.childNodes):
+            if lpart.nodeType == lpart.ELEMENT_NODE:
+                if lpart.nodeName == 'author':
+                    self.author = lpart.firstChild.nodeValue
+                elif lpart.nodeName == 'msg':
+                    if lpart.firstChild:
+                        self.msg = lpart.firstChild.nodeValue
+                    else:
+                        self.msg = ''
+                elif lpart.nodeName == 'date':
+                    #2003-07-29T20:05:11.598637Z
+                    timestr = lpart.firstChild.nodeValue
+                    self.date = parse_apr_time(timestr)
+                elif lpart.nodeName == 'paths':
+                    self.strpaths = []
+                    for ppart in filter(None, lpart.childNodes):
+                        if ppart.nodeType == ppart.ELEMENT_NODE:
+                            self.strpaths.append(PathEntry(ppart))
+    def __repr__(self):
+        return '<Logentry rev=%d author=%s date=%s>' % (
+            self.rev, self.author, self.date)
+
+
diff --git a/venv/lib/python3.10/site-packages/py/_process/__init__.py b/venv/lib/python3.10/site-packages/py/_process/__init__.py
new file mode 100644
index 0000000..86c714a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/py/_process/__init__.py
@@ -0,0 +1 @@
+""" high-level sub-process handling """
diff --git a/venv/lib/python3.10/site-packages/py/_process/cmdexec.py b/venv/lib/python3.10/site-packages/py/_process/cmdexec.py
new file mode 100644
index 0000000..f83a249
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/py/_process/cmdexec.py
@@ -0,0 +1,49 @@
+import sys
+import subprocess
+import py
+from subprocess import Popen, PIPE
+
+def cmdexec(cmd):
+    """ return unicode output of executing 'cmd' in a separate process.
+
+    raise cmdexec.Error exception if the command failed.
+    the exception will provide an 'err' attribute containing
+    the error-output from the command.
+    if the subprocess module does not provide proper encoding/unicode strings,
+    sys.getdefaultencoding() will be used; if that does not exist, 'UTF-8'.
+ """ + process = subprocess.Popen(cmd, shell=True, + universal_newlines=True, + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, err = process.communicate() + if sys.version_info[0] < 3: # on py3 we get unicode strings, on py2 not + try: + default_encoding = sys.getdefaultencoding() # jython may not have it + except AttributeError: + default_encoding = sys.stdout.encoding or 'UTF-8' + out = unicode(out, process.stdout.encoding or default_encoding) + err = unicode(err, process.stderr.encoding or default_encoding) + status = process.poll() + if status: + raise ExecutionFailed(status, status, cmd, out, err) + return out + +class ExecutionFailed(py.error.Error): + def __init__(self, status, systemstatus, cmd, out, err): + Exception.__init__(self) + self.status = status + self.systemstatus = systemstatus + self.cmd = cmd + self.err = err + self.out = out + + def __str__(self): + return "ExecutionFailed: %d %s\n%s" %(self.status, self.cmd, self.err) + +# export the exception under the name 'py.process.cmdexec.Error' +cmdexec.Error = ExecutionFailed +try: + ExecutionFailed.__module__ = 'py.process.cmdexec' + ExecutionFailed.__name__ = 'Error' +except (AttributeError, TypeError): + pass diff --git a/venv/lib/python3.10/site-packages/py/_process/forkedfunc.py b/venv/lib/python3.10/site-packages/py/_process/forkedfunc.py new file mode 100644 index 0000000..1c28530 --- /dev/null +++ b/venv/lib/python3.10/site-packages/py/_process/forkedfunc.py @@ -0,0 +1,120 @@ + +""" + ForkedFunc provides a way to run a function in a forked process + and get at its return value, stdout and stderr output as well + as signals and exitstatusus. +""" + +import py +import os +import sys +import marshal + + +def get_unbuffered_io(fd, filename): + f = open(str(filename), "w") + if fd != f.fileno(): + os.dup2(f.fileno(), fd) + class AutoFlush: + def write(self, data): + f.write(data) + f.flush() + def __getattr__(self, name): + return getattr(f, name) + return AutoFlush() + + +class ForkedFunc: + EXITSTATUS_EXCEPTION = 3 + + + def __init__(self, fun, args=None, kwargs=None, nice_level=0, + child_on_start=None, child_on_exit=None): + if args is None: + args = [] + if kwargs is None: + kwargs = {} + self.fun = fun + self.args = args + self.kwargs = kwargs + self.tempdir = tempdir = py.path.local.mkdtemp() + self.RETVAL = tempdir.ensure('retval') + self.STDOUT = tempdir.ensure('stdout') + self.STDERR = tempdir.ensure('stderr') + + pid = os.fork() + if pid: # in parent process + self.pid = pid + else: # in child process + self.pid = None + self._child(nice_level, child_on_start, child_on_exit) + + def _child(self, nice_level, child_on_start, child_on_exit): + # right now we need to call a function, but first we need to + # map all IO that might happen + sys.stdout = stdout = get_unbuffered_io(1, self.STDOUT) + sys.stderr = stderr = get_unbuffered_io(2, self.STDERR) + retvalf = self.RETVAL.open("wb") + EXITSTATUS = 0 + try: + if nice_level: + os.nice(nice_level) + try: + if child_on_start is not None: + child_on_start() + retval = self.fun(*self.args, **self.kwargs) + retvalf.write(marshal.dumps(retval)) + if child_on_exit is not None: + child_on_exit() + except: + excinfo = py.code.ExceptionInfo() + stderr.write(str(excinfo._getreprcrash())) + EXITSTATUS = self.EXITSTATUS_EXCEPTION + finally: + stdout.close() + stderr.close() + retvalf.close() + os.close(1) + os.close(2) + os._exit(EXITSTATUS) + + def waitfinish(self, waiter=os.waitpid): + pid, systemstatus = waiter(self.pid, 0) + if systemstatus: + if 
os.WIFSIGNALED(systemstatus): + exitstatus = os.WTERMSIG(systemstatus) + 128 + else: + exitstatus = os.WEXITSTATUS(systemstatus) + else: + exitstatus = 0 + signal = systemstatus & 0x7f + if not exitstatus and not signal: + retval = self.RETVAL.open('rb') + try: + retval_data = retval.read() + finally: + retval.close() + retval = marshal.loads(retval_data) + else: + retval = None + stdout = self.STDOUT.read() + stderr = self.STDERR.read() + self._removetemp() + return Result(exitstatus, signal, retval, stdout, stderr) + + def _removetemp(self): + if self.tempdir.check(): + self.tempdir.remove() + + def __del__(self): + if self.pid is not None: # only clean up in main process + self._removetemp() + + +class Result(object): + def __init__(self, exitstatus, signal, retval, stdout, stderr): + self.exitstatus = exitstatus + self.signal = signal + self.retval = retval + self.out = stdout + self.err = stderr diff --git a/venv/lib/python3.10/site-packages/py/_process/killproc.py b/venv/lib/python3.10/site-packages/py/_process/killproc.py new file mode 100644 index 0000000..18e8310 --- /dev/null +++ b/venv/lib/python3.10/site-packages/py/_process/killproc.py @@ -0,0 +1,23 @@ +import py +import os, sys + +if sys.platform == "win32" or getattr(os, '_name', '') == 'nt': + try: + import ctypes + except ImportError: + def dokill(pid): + py.process.cmdexec("taskkill /F /PID %d" %(pid,)) + else: + def dokill(pid): + PROCESS_TERMINATE = 1 + handle = ctypes.windll.kernel32.OpenProcess( + PROCESS_TERMINATE, False, pid) + ctypes.windll.kernel32.TerminateProcess(handle, -1) + ctypes.windll.kernel32.CloseHandle(handle) +else: + def dokill(pid): + os.kill(pid, 15) + +def kill(pid): + """ kill process by id. """ + dokill(pid) diff --git a/venv/lib/python3.10/site-packages/py/_std.py b/venv/lib/python3.10/site-packages/py/_std.py new file mode 100644 index 0000000..66adb7b --- /dev/null +++ b/venv/lib/python3.10/site-packages/py/_std.py @@ -0,0 +1,27 @@ +import sys +import warnings + + +class PyStdIsDeprecatedWarning(DeprecationWarning): + pass + + +class Std(object): + """ makes top-level python modules available as an attribute, + importing them on first access. 
+ """ + + def __init__(self): + self.__dict__ = sys.modules + + def __getattr__(self, name): + warnings.warn("py.std is deprecated, please import %s directly" % name, + category=PyStdIsDeprecatedWarning, + stacklevel=2) + try: + m = __import__(name) + except ImportError: + raise AttributeError("py.std: could not import %s" % name) + return m + +std = Std() diff --git a/venv/lib/python3.10/site-packages/py/_vendored_packages/__init__.py b/venv/lib/python3.10/site-packages/py/_vendored_packages/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/venv/lib/python3.10/site-packages/py/_vendored_packages/apipkg-2.0.0.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/py/_vendored_packages/apipkg-2.0.0.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/venv/lib/python3.10/site-packages/py/_vendored_packages/apipkg-2.0.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/lib/python3.10/site-packages/py/_vendored_packages/apipkg-2.0.0.dist-info/LICENSE b/venv/lib/python3.10/site-packages/py/_vendored_packages/apipkg-2.0.0.dist-info/LICENSE new file mode 100644 index 0000000..ff33b8f --- /dev/null +++ b/venv/lib/python3.10/site-packages/py/_vendored_packages/apipkg-2.0.0.dist-info/LICENSE @@ -0,0 +1,18 @@ + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. 
diff --git a/venv/lib/python3.10/site-packages/py/_vendored_packages/apipkg-2.0.0.dist-info/METADATA b/venv/lib/python3.10/site-packages/py/_vendored_packages/apipkg-2.0.0.dist-info/METADATA
new file mode 100644
index 0000000..7eea770
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/py/_vendored_packages/apipkg-2.0.0.dist-info/METADATA
@@ -0,0 +1,125 @@
+Metadata-Version: 2.1
+Name: apipkg
+Version: 2.0.0
+Summary: apipkg: namespace control and lazy-import mechanism
+Home-page: https://github.com/pytest-dev/apipkg
+Author: holger krekel
+Maintainer: Ronny Pfannschmidt
+Maintainer-email: opensource@ronnypfannschmidt.de
+License: MIT
+Platform: unix
+Platform: linux
+Platform: osx
+Platform: cygwin
+Platform: win32
+Classifier: Development Status :: 4 - Beta
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: MacOS :: MacOS X
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Operating System :: POSIX
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Topic :: Software Development :: Libraries
+Requires-Python: !=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7
+Description-Content-Type: text/x-rst
+License-File: LICENSE
+
+Welcome to apipkg !
+-------------------
+
+With apipkg you can control the exported namespace of a Python package and
+greatly reduce the number of imports for your users.
+It is a `small pure Python module`_ that works on CPython 2.7 and 3.4+,
+Jython and PyPy. It cooperates well with Python's ``help()`` system,
+custom importers (PEP302) and common command-line completion tools.
+
+Usage is very simple: you can require 'apipkg' as a dependency or you
+can copy paste the ~200 lines of code into your project.
+
+
+Tutorial example
+-------------------
+
+Here is a simple ``mypkg`` package that specifies one namespace
+and exports two objects imported from different modules::
+
+
+    # mypkg/__init__.py
+    import apipkg
+    apipkg.initpkg(__name__, {
+        'path': {
+            'Class1': "_mypkg.somemodule:Class1",
+            'clsattr': "_mypkg.othermodule:Class2.attr",
+        }
+    })
+
+The package is initialized with a dictionary as namespace.
+
+You need to create a ``_mypkg`` package with a ``somemodule.py``
+and ``othermodule.py`` containing the respective classes.
+The ``_mypkg`` is not special - it's a completely
+regular Python package.
+
+Namespace dictionaries contain ``name: value`` mappings
+where the value may be another namespace dictionary or
+a string specifying an import location. On accessing
+a namespace attribute an import will be performed::
+
+    >>> import mypkg
+    >>> mypkg.path
+    <ApiModule 'mypkg.path'>
+    >>> mypkg.path.Class1 # '_mypkg.somemodule' gets imported now
+    <class _mypkg.somemodule.Class1 at 0xb7d428fc>
+    >>> mypkg.path.clsattr # '_mypkg.othermodule' gets imported now
+    4 # the value of _mypkg.othermodule.Class2.attr
+
+The ``mypkg.path`` namespace and its two entries are
+loaded when they are accessed.
This means: + +* lazy loading - only what is actually needed is ever loaded + +* only the root "mypkg" ever needs to be imported to get + access to the complete functionality + +* the underlying modules are also accessible, for example:: + + from mypkg.sub import Class1 + + +Including apipkg in your package +-------------------------------------- + +If you don't want to add an ``apipkg`` dependency to your package you +can copy the `apipkg.py`_ file somewhere to your own package, +for example ``_mypkg/apipkg.py`` in the above example. You +then import the ``initpkg`` function from that new place and +are good to go. + +.. _`small pure Python module`: +.. _`apipkg.py`: https://github.com/pytest-dev/apipkg/blob/master/src/apipkg/__init__.py + +Feedback? +----------------------- + +If you have questions you are welcome to + +* join the **#pytest** channel on irc.libera.chat_ + (using an IRC client, via webchat_, or via Matrix_). +* create an issue on the bugtracker_ + +.. _irc.libera.chat: ircs://irc.libera.chat:6697/#pytest +.. _webchat: https://web.libera.chat/#pytest +.. _matrix: https://matrix.to/#/%23pytest:libera.chat +.. _bugtracker: https://github.com/pytest-dev/apipkg/issues + + diff --git a/venv/lib/python3.10/site-packages/py/_vendored_packages/apipkg-2.0.0.dist-info/RECORD b/venv/lib/python3.10/site-packages/py/_vendored_packages/apipkg-2.0.0.dist-info/RECORD new file mode 100644 index 0000000..62bf0c4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/py/_vendored_packages/apipkg-2.0.0.dist-info/RECORD @@ -0,0 +1,11 @@ +apipkg-2.0.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +apipkg-2.0.0.dist-info/LICENSE,sha256=6J7tEHTTqUMZi6E5uAhE9bRFuGC7p0qK6twGEFZhZOo,1054 +apipkg-2.0.0.dist-info/METADATA,sha256=GqNwkxraK5UTxObLVXTLc2UqktOPwZnKqdk2ThzHX0A,4292 +apipkg-2.0.0.dist-info/RECORD,, +apipkg-2.0.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +apipkg-2.0.0.dist-info/WHEEL,sha256=WzZ8cwjh8l0jtULNjYq1Hpr-WCqCRgPr--TX4P5I1Wo,110 +apipkg-2.0.0.dist-info/top_level.txt,sha256=3TGS6nmN7kjxhUK4LpPCB3QkQI34QYGrT0ZQGWajoZ8,7 +apipkg/__init__.py,sha256=gpbD3O57S9f-LsO2e-XwI6IGISayicfnCq3B5y_8frg,6978 +apipkg/__pycache__/__init__.cpython-39.pyc,, +apipkg/__pycache__/version.cpython-39.pyc,, +apipkg/version.py,sha256=bgZFg-f3UKhgE-z2w8RoFrwqRBzJBZkM4_jKFiYB9eU,142 diff --git a/venv/lib/python3.10/site-packages/py/_vendored_packages/apipkg-2.0.0.dist-info/REQUESTED b/venv/lib/python3.10/site-packages/py/_vendored_packages/apipkg-2.0.0.dist-info/REQUESTED new file mode 100644 index 0000000..e69de29 diff --git a/venv/lib/python3.10/site-packages/py/_vendored_packages/apipkg-2.0.0.dist-info/WHEEL b/venv/lib/python3.10/site-packages/py/_vendored_packages/apipkg-2.0.0.dist-info/WHEEL new file mode 100644 index 0000000..b733a60 --- /dev/null +++ b/venv/lib/python3.10/site-packages/py/_vendored_packages/apipkg-2.0.0.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.37.0) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/venv/lib/python3.10/site-packages/py/_vendored_packages/apipkg-2.0.0.dist-info/top_level.txt b/venv/lib/python3.10/site-packages/py/_vendored_packages/apipkg-2.0.0.dist-info/top_level.txt new file mode 100644 index 0000000..e2221c8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/py/_vendored_packages/apipkg-2.0.0.dist-info/top_level.txt @@ -0,0 +1 @@ +apipkg diff --git a/venv/lib/python3.10/site-packages/py/_vendored_packages/apipkg/__init__.py 
b/venv/lib/python3.10/site-packages/py/_vendored_packages/apipkg/__init__.py
new file mode 100644
index 0000000..350d8c4
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/py/_vendored_packages/apipkg/__init__.py
@@ -0,0 +1,217 @@
+"""
+apipkg: control the exported namespace of a Python package.
+
+see https://pypi.python.org/pypi/apipkg
+
+(c) holger krekel, 2009 - MIT license
+"""
+import os
+import sys
+from types import ModuleType
+
+from .version import version as __version__ # NOQA:F401
+
+
+def _py_abspath(path):
+    """
+    special version of abspath
+    that will leave paths from jython jars alone
+    """
+    if path.startswith("__pyclasspath__"):
+
+        return path
+    else:
+        return os.path.abspath(path)
+
+
+def distribution_version(name):
+    """try to get the version of the named distribution,
+    returns None on failure"""
+    from pkg_resources import get_distribution, DistributionNotFound
+
+    try:
+        dist = get_distribution(name)
+    except DistributionNotFound:
+        pass
+    else:
+        return dist.version
+
+
+def initpkg(pkgname, exportdefs, attr=None, eager=False):
+    """ initialize given package from the export definitions. """
+    attr = attr or {}
+    oldmod = sys.modules.get(pkgname)
+    d = {}
+    f = getattr(oldmod, "__file__", None)
+    if f:
+        f = _py_abspath(f)
+    d["__file__"] = f
+    if hasattr(oldmod, "__version__"):
+        d["__version__"] = oldmod.__version__
+    if hasattr(oldmod, "__loader__"):
+        d["__loader__"] = oldmod.__loader__
+    if hasattr(oldmod, "__path__"):
+        d["__path__"] = [_py_abspath(p) for p in oldmod.__path__]
+    if hasattr(oldmod, "__package__"):
+        d["__package__"] = oldmod.__package__
+    if "__doc__" not in exportdefs and getattr(oldmod, "__doc__", None):
+        d["__doc__"] = oldmod.__doc__
+    d["__spec__"] = getattr(oldmod, "__spec__", None)
+    d.update(attr)
+    if hasattr(oldmod, "__dict__"):
+        oldmod.__dict__.update(d)
+    mod = ApiModule(pkgname, exportdefs, implprefix=pkgname, attr=d)
+    sys.modules[pkgname] = mod
+    # eagerload in bpython to avoid their monkeypatching breaking packages
+    if "bpython" in sys.modules or eager:
+        for module in list(sys.modules.values()):
+            if isinstance(module, ApiModule):
+                module.__dict__
+    return mod
+
+
+def importobj(modpath, attrname):
+    """imports a module, then resolves the attrname on it"""
+    module = __import__(modpath, None, None, ["__doc__"])
+    if not attrname:
+        return module
+
+    retval = module
+    names = attrname.split(".")
+    for x in names:
+        retval = getattr(retval, x)
+    return retval
+
+
+class ApiModule(ModuleType):
+    """the magical lazy-loading module standing"""
+
+    def __docget(self):
+        try:
+            return self.__doc
+        except AttributeError:
+            if "__doc__" in self.__map__:
+                return self.__makeattr("__doc__")
+
+    def __docset(self, value):
+        self.__doc = value
+
+    __doc__ = property(__docget, __docset)
+
+    def __init__(self, name, importspec, implprefix=None, attr=None):
+        self.__name__ = name
+        self.__all__ = [x for x in importspec if x != "__onfirstaccess__"]
+        self.__map__ = {}
+        self.__implprefix__ = implprefix or name
+        if attr:
+            for name, val in attr.items():
+                # print "setting", self.__name__, name, val
+                setattr(self, name, val)
+        for name, importspec in importspec.items():
+            if isinstance(importspec, dict):
+                subname = "{}.{}".format(self.__name__, name)
+                apimod = ApiModule(subname, importspec, implprefix)
+                sys.modules[subname] = apimod
+                setattr(self, name, apimod)
+            else:
+                parts = importspec.split(":")
+                modpath = parts.pop(0)
+                attrname = parts and parts[0] or ""
+                if modpath[0] == ".":
+                    modpath = implprefix + modpath
+
+                if not attrname:
+                    subname = "{}.{}".format(self.__name__, name)
+                    apimod = AliasModule(subname, modpath)
+                    sys.modules[subname] = apimod
+                    if "." not in name:
+                        setattr(self, name, apimod)
+                else:
+                    self.__map__[name] = (modpath, attrname)
+
+    def __repr__(self):
+        repr_list = []
+        if hasattr(self, "__version__"):
+            repr_list.append("version=" + repr(self.__version__))
+        if hasattr(self, "__file__"):
+            repr_list.append("from " + repr(self.__file__))
+        if repr_list:
+            return "<ApiModule {!r} {}>".format(self.__name__, " ".join(repr_list))
+        return "<ApiModule {!r}>".format(self.__name__)
+
+    def __makeattr(self, name):
+        """lazily compute value for name or raise AttributeError if unknown."""
+        # print "makeattr", self.__name__, name
+        target = None
+        if "__onfirstaccess__" in self.__map__:
+            target = self.__map__.pop("__onfirstaccess__")
+            importobj(*target)()
+        try:
+            modpath, attrname = self.__map__[name]
+        except KeyError:
+            if target is not None and name != "__onfirstaccess__":
+                # retry, onfirstaccess might have set attrs
+                return getattr(self, name)
+            raise AttributeError(name)
+        else:
+            result = importobj(modpath, attrname)
+            setattr(self, name, result)
+            try:
+                del self.__map__[name]
+            except KeyError:
+                pass # in a recursive-import situation a double-del can happen
+            return result
+
+    __getattr__ = __makeattr
+
+    @property
+    def __dict__(self):
+        # force all the content of the module
+        # to be loaded when __dict__ is read
+        dictdescr = ModuleType.__dict__["__dict__"]
+        dict = dictdescr.__get__(self)
+        if dict is not None:
+            hasattr(self, "some")
+            for name in self.__all__:
+                try:
+                    self.__makeattr(name)
+                except AttributeError:
+                    pass
+        return dict
+
+
+def AliasModule(modname, modpath, attrname=None):
+    mod = []
+
+    def getmod():
+        if not mod:
+            x = importobj(modpath, None)
+            if attrname is not None:
+                x = getattr(x, attrname)
+            mod.append(x)
+        return mod[0]
+
+    x = modpath + ("." + attrname if attrname else "")
+    repr_result = "<AliasModule {!r} for {!r}>".format(modname, x)
+
+    class AliasModule(ModuleType):
+        def __repr__(self):
+            return repr_result
+
+        def __getattribute__(self, name):
+            try:
+                return getattr(getmod(), name)
+            except ImportError:
+                if modpath == "pytest" and attrname is None:
+                    # hack for pylibs py.test
+                    return None
+                else:
+                    raise
+
+        def __setattr__(self, name, value):
+            setattr(getmod(), name, value)
+
+        def __delattr__(self, name):
+            delattr(getmod(), name)
+
+    return AliasModule(str(modname))
diff --git a/venv/lib/python3.10/site-packages/py/_vendored_packages/apipkg/version.py b/venv/lib/python3.10/site-packages/py/_vendored_packages/apipkg/version.py
new file mode 100644
index 0000000..c5b4e0e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/py/_vendored_packages/apipkg/version.py
@@ -0,0 +1,5 @@
+# coding: utf-8
+# file generated by setuptools_scm
+# don't change, don't track in version control
+version = '2.0.0'
+version_tuple = (2, 0, 0)
diff --git a/venv/lib/python3.10/site-packages/py/_vendored_packages/iniconfig-1.1.1.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/py/_vendored_packages/iniconfig-1.1.1.dist-info/INSTALLER
new file mode 100644
index 0000000..a1b589e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/py/_vendored_packages/iniconfig-1.1.1.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/venv/lib/python3.10/site-packages/py/_vendored_packages/iniconfig-1.1.1.dist-info/LICENSE b/venv/lib/python3.10/site-packages/py/_vendored_packages/iniconfig-1.1.1.dist-info/LICENSE
new file mode 100644
index 0000000..31ecdfb
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/py/_vendored_packages/iniconfig-1.1.1.dist-info/LICENSE
@@ -0,0 +1,19 @@
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
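Tying the apipkg sources above together: initpkg simply swaps an ApiModule into
``sys.modules``, and attribute access on it triggers importobj. A toy sketch
against the stdlib (the ``fakepkg`` name is invented for illustration, and it
assumes the apipkg distribution itself is importable)::

    import apipkg

    apipkg.initpkg('fakepkg', {
        'abspath': 'os.path:abspath',   # "module:attribute" import location
    })

    import fakepkg                      # satisfied straight from sys.modules
    print(fakepkg.abspath('.'))         # first access imports os.path lazily
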
+ diff --git a/venv/lib/python3.10/site-packages/py/_vendored_packages/iniconfig-1.1.1.dist-info/METADATA b/venv/lib/python3.10/site-packages/py/_vendored_packages/iniconfig-1.1.1.dist-info/METADATA new file mode 100644 index 0000000..c078a75 --- /dev/null +++ b/venv/lib/python3.10/site-packages/py/_vendored_packages/iniconfig-1.1.1.dist-info/METADATA @@ -0,0 +1,78 @@ +Metadata-Version: 2.1 +Name: iniconfig +Version: 1.1.1 +Summary: iniconfig: brain-dead simple config-ini parsing +Home-page: http://github.com/RonnyPfannschmidt/iniconfig +Author: Ronny Pfannschmidt, Holger Krekel +Author-email: opensource@ronnypfannschmidt.de, holger.krekel@gmail.com +License: MIT License +Platform: unix +Platform: linux +Platform: osx +Platform: cygwin +Platform: win32 +Classifier: Development Status :: 4 - Beta +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: POSIX +Classifier: Operating System :: Microsoft :: Windows +Classifier: Operating System :: MacOS :: MacOS X +Classifier: Topic :: Software Development :: Libraries +Classifier: Topic :: Utilities +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 3 + +iniconfig: brain-dead simple parsing of ini files +======================================================= + +iniconfig is a small and simple INI-file parser module +having a unique set of features: + +* tested against Python2.4 across to Python3.2, Jython, PyPy +* maintains order of sections and entries +* supports multi-line values with or without line-continuations +* supports "#" comments everywhere +* raises errors with proper line-numbers +* no bells and whistles like automatic substitutions +* iniconfig raises an Error if two sections have the same name. 
+ +If you encounter issues or have feature wishes please report them to: + + http://github.com/RonnyPfannschmidt/iniconfig/issues + +Basic Example +=================================== + +If you have an ini file like this:: + + # content of example.ini + [section1] # comment + name1=value1 # comment + name1b=value1,value2 # comment + + [section2] + name2= + line1 + line2 + +then you can do:: + + >>> import iniconfig + >>> ini = iniconfig.IniConfig("example.ini") + >>> ini['section1']['name1'] # raises KeyError if not exists + 'value1' + >>> ini.get('section1', 'name1b', [], lambda x: x.split(",")) + ['value1', 'value2'] + >>> ini.get('section1', 'notexist', [], lambda x: x.split(",")) + [] + >>> [x.name for x in list(ini)] + ['section1', 'section2'] + >>> list(list(ini)[0].items()) + [('name1', 'value1'), ('name1b', 'value1,value2')] + >>> 'section1' in ini + True + >>> 'inexistendsection' in ini + False + + diff --git a/venv/lib/python3.10/site-packages/py/_vendored_packages/iniconfig-1.1.1.dist-info/RECORD b/venv/lib/python3.10/site-packages/py/_vendored_packages/iniconfig-1.1.1.dist-info/RECORD new file mode 100644 index 0000000..9b4c252 --- /dev/null +++ b/venv/lib/python3.10/site-packages/py/_vendored_packages/iniconfig-1.1.1.dist-info/RECORD @@ -0,0 +1,11 @@ +iniconfig-1.1.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +iniconfig-1.1.1.dist-info/LICENSE,sha256=KvaAw570k_uCgwNW0dPfGstaBgM8ui3sehniHKp3qGY,1061 +iniconfig-1.1.1.dist-info/METADATA,sha256=_4-oFKpRXuZv5rzepScpXRwhq6DzqsgbnA5ZpgMUMcs,2405 +iniconfig-1.1.1.dist-info/RECORD,, +iniconfig-1.1.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +iniconfig-1.1.1.dist-info/WHEEL,sha256=ADKeyaGyKF5DwBNE0sRE5pvW-bSkFMJfBuhzZ3rceP4,110 +iniconfig-1.1.1.dist-info/top_level.txt,sha256=7KfM0fugdlToj9UW7enKXk2HYALQD8qHiyKtjhSzgN8,10 +iniconfig/__init__.py,sha256=-pBe5AF_6aAwo1CxJQ8i_zJq6ejc6IxHta7qk2tNJhY,5208 +iniconfig/__init__.pyi,sha256=-4KOctzq28ohRmTZsqlH6aylyFqsNKxYqtk1dteypi4,1205 +iniconfig/__pycache__/__init__.cpython-39.pyc,, +iniconfig/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 diff --git a/venv/lib/python3.10/site-packages/py/_vendored_packages/iniconfig-1.1.1.dist-info/REQUESTED b/venv/lib/python3.10/site-packages/py/_vendored_packages/iniconfig-1.1.1.dist-info/REQUESTED new file mode 100644 index 0000000..e69de29 diff --git a/venv/lib/python3.10/site-packages/py/_vendored_packages/iniconfig-1.1.1.dist-info/WHEEL b/venv/lib/python3.10/site-packages/py/_vendored_packages/iniconfig-1.1.1.dist-info/WHEEL new file mode 100644 index 0000000..6d38aa0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/py/_vendored_packages/iniconfig-1.1.1.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.35.1) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/venv/lib/python3.10/site-packages/py/_vendored_packages/iniconfig-1.1.1.dist-info/top_level.txt b/venv/lib/python3.10/site-packages/py/_vendored_packages/iniconfig-1.1.1.dist-info/top_level.txt new file mode 100644 index 0000000..9dda536 --- /dev/null +++ b/venv/lib/python3.10/site-packages/py/_vendored_packages/iniconfig-1.1.1.dist-info/top_level.txt @@ -0,0 +1 @@ +iniconfig diff --git a/venv/lib/python3.10/site-packages/py/_vendored_packages/iniconfig/__init__.py b/venv/lib/python3.10/site-packages/py/_vendored_packages/iniconfig/__init__.py new file mode 100644 index 0000000..6ad9eaf --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/py/_vendored_packages/iniconfig/__init__.py @@ -0,0 +1,165 @@ +""" brain-dead simple parser for ini-style files. +(C) Ronny Pfannschmidt, Holger Krekel -- MIT licensed +""" +__all__ = ['IniConfig', 'ParseError'] + +COMMENTCHARS = "#;" + + +class ParseError(Exception): + def __init__(self, path, lineno, msg): + Exception.__init__(self, path, lineno, msg) + self.path = path + self.lineno = lineno + self.msg = msg + + def __str__(self): + return "%s:%s: %s" % (self.path, self.lineno+1, self.msg) + + +class SectionWrapper(object): + def __init__(self, config, name): + self.config = config + self.name = name + + def lineof(self, name): + return self.config.lineof(self.name, name) + + def get(self, key, default=None, convert=str): + return self.config.get(self.name, key, + convert=convert, default=default) + + def __getitem__(self, key): + return self.config.sections[self.name][key] + + def __iter__(self): + section = self.config.sections.get(self.name, []) + + def lineof(key): + return self.config.lineof(self.name, key) + for name in sorted(section, key=lineof): + yield name + + def items(self): + for name in self: + yield name, self[name] + + +class IniConfig(object): + def __init__(self, path, data=None): + self.path = str(path) # convenience + if data is None: + f = open(self.path) + try: + tokens = self._parse(iter(f)) + finally: + f.close() + else: + tokens = self._parse(data.splitlines(True)) + + self._sources = {} + self.sections = {} + + for lineno, section, name, value in tokens: + if section is None: + self._raise(lineno, 'no section header defined') + self._sources[section, name] = lineno + if name is None: + if section in self.sections: + self._raise(lineno, 'duplicate section %r' % (section, )) + self.sections[section] = {} + else: + if name in self.sections[section]: + self._raise(lineno, 'duplicate name %r' % (name, )) + self.sections[section][name] = value + + def _raise(self, lineno, msg): + raise ParseError(self.path, lineno, msg) + + def _parse(self, line_iter): + result = [] + section = None + for lineno, line in enumerate(line_iter): + name, data = self._parseline(line, lineno) + # new value + if name is not None and data is not None: + result.append((lineno, section, name, data)) + # new section + elif name is not None and data is None: + if not name: + self._raise(lineno, 'empty section name') + section = name + result.append((lineno, section, None, None)) + # continuation + elif name is None and data is not None: + if not result: + self._raise(lineno, 'unexpected value continuation') + last = result.pop() + last_name, last_data = last[-2:] + if last_name is None: + self._raise(lineno, 'unexpected value continuation') + + if last_data: + data = '%s\n%s' % (last_data, data) + result.append(last[:-1] + (data,)) + return result + + def _parseline(self, line, lineno): + # blank lines + if iscommentline(line): + line = "" + else: + line = line.rstrip() + if not line: + return None, None + # section + if line[0] == '[': + realline = line + for c in COMMENTCHARS: + line = line.split(c)[0].rstrip() + if line[-1] == "]": + return line[1:-1], None + return None, realline.strip() + # value + elif not line[0].isspace(): + try: + name, value = line.split('=', 1) + if ":" in name: + raise ValueError() + except ValueError: + try: + name, value = line.split(":", 1) + except ValueError: + self._raise(lineno, 'unexpected line: %r' % line) + return name.strip(), value.strip() + # continuation + else: + return None, line.strip() + + def lineof(self, 
section, name=None): + lineno = self._sources.get((section, name)) + if lineno is not None: + return lineno + 1 + + def get(self, section, name, default=None, convert=str): + try: + return convert(self.sections[section][name]) + except KeyError: + return default + + def __getitem__(self, name): + if name not in self.sections: + raise KeyError(name) + return SectionWrapper(self, name) + + def __iter__(self): + for name in sorted(self.sections, key=self.lineof): + yield SectionWrapper(self, name) + + def __contains__(self, arg): + return arg in self.sections + + +def iscommentline(line): + c = line.lstrip()[:1] + return c in COMMENTCHARS diff --git a/venv/lib/python3.10/site-packages/py/_vendored_packages/iniconfig/__init__.pyi b/venv/lib/python3.10/site-packages/py/_vendored_packages/iniconfig/__init__.pyi new file mode 100644 index 0000000..b6284be --- /dev/null +++ b/venv/lib/python3.10/site-packages/py/_vendored_packages/iniconfig/__init__.pyi @@ -0,0 +1,31 @@ +from typing import Callable, Iterator, Mapping, Optional, Tuple, TypeVar, Union +from typing_extensions import Final + +_D = TypeVar('_D') +_T = TypeVar('_T') + +class ParseError(Exception): + # Private __init__. + path: Final[str] + lineno: Final[int] + msg: Final[str] + +class SectionWrapper: + # Private __init__. + config: Final[IniConfig] + name: Final[str] + def __getitem__(self, key: str) -> str: ... + def __iter__(self) -> Iterator[str]: ... + def get(self, key: str, default: _D = ..., convert: Callable[[str], _T] = ...) -> Union[_T, _D]: ... + def items(self) -> Iterator[Tuple[str, str]]: ... + def lineof(self, name: str) -> Optional[int]: ... + +class IniConfig: + path: Final[str] + sections: Final[Mapping[str, Mapping[str, str]]] + def __init__(self, path: str, data: Optional[str] = None): ... + def __contains__(self, arg: str) -> bool: ... + def __getitem__(self, name: str) -> SectionWrapper: ... + def __iter__(self) -> Iterator[SectionWrapper]: ... + def get(self, section: str, name: str, default: _D = ..., convert: Callable[[str], _T] = ...) -> Union[_T, _D]: ... + def lineof(self, section: str, name: Optional[str] = ...) -> Optional[int]: ... diff --git a/venv/lib/python3.10/site-packages/py/_vendored_packages/iniconfig/py.typed b/venv/lib/python3.10/site-packages/py/_vendored_packages/iniconfig/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/venv/lib/python3.10/site-packages/py/_version.py b/venv/lib/python3.10/site-packages/py/_version.py new file mode 100644 index 0000000..3d30fbe --- /dev/null +++ b/venv/lib/python3.10/site-packages/py/_version.py @@ -0,0 +1,5 @@ +# coding: utf-8 +# file generated by setuptools_scm +# don't change, don't track in version control +version = '1.11.0' +version_tuple = (1, 11, 0) diff --git a/venv/lib/python3.10/site-packages/py/_xmlgen.py b/venv/lib/python3.10/site-packages/py/_xmlgen.py new file mode 100644 index 0000000..1c83545 --- /dev/null +++ b/venv/lib/python3.10/site-packages/py/_xmlgen.py @@ -0,0 +1,255 @@ +""" +module for generating and serializing xml and html structures +by using simple python objects. + +(c) holger krekel, holger at merlinux eu. 
2009 +""" +import sys, re + +if sys.version_info >= (3,0): + def u(s): + return s + def unicode(x, errors=None): + if hasattr(x, '__unicode__'): + return x.__unicode__() + return str(x) +else: + def u(s): + return unicode(s) + unicode = unicode + + +class NamespaceMetaclass(type): + def __getattr__(self, name): + if name[:1] == '_': + raise AttributeError(name) + if self == Namespace: + raise ValueError("Namespace class is abstract") + tagspec = self.__tagspec__ + if tagspec is not None and name not in tagspec: + raise AttributeError(name) + classattr = {} + if self.__stickyname__: + classattr['xmlname'] = name + cls = type(name, (self.__tagclass__,), classattr) + setattr(self, name, cls) + return cls + +class Tag(list): + class Attr(object): + def __init__(self, **kwargs): + self.__dict__.update(kwargs) + + def __init__(self, *args, **kwargs): + super(Tag, self).__init__(args) + self.attr = self.Attr(**kwargs) + + def __unicode__(self): + return self.unicode(indent=0) + __str__ = __unicode__ + + def unicode(self, indent=2): + l = [] + SimpleUnicodeVisitor(l.append, indent).visit(self) + return u("").join(l) + + def __repr__(self): + name = self.__class__.__name__ + return "<%r tag object %d>" % (name, id(self)) + +Namespace = NamespaceMetaclass('Namespace', (object, ), { + '__tagspec__': None, + '__tagclass__': Tag, + '__stickyname__': False, +}) + +class HtmlTag(Tag): + def unicode(self, indent=2): + l = [] + HtmlVisitor(l.append, indent, shortempty=False).visit(self) + return u("").join(l) + +# exported plain html namespace +class html(Namespace): + __tagclass__ = HtmlTag + __stickyname__ = True + __tagspec__ = dict([(x,1) for x in ( + 'a,abbr,acronym,address,applet,area,article,aside,audio,b,' + 'base,basefont,bdi,bdo,big,blink,blockquote,body,br,button,' + 'canvas,caption,center,cite,code,col,colgroup,command,comment,' + 'datalist,dd,del,details,dfn,dir,div,dl,dt,em,embed,' + 'fieldset,figcaption,figure,footer,font,form,frame,frameset,h1,' + 'h2,h3,h4,h5,h6,head,header,hgroup,hr,html,i,iframe,img,input,' + 'ins,isindex,kbd,keygen,label,legend,li,link,listing,map,mark,' + 'marquee,menu,meta,meter,multicol,nav,nobr,noembed,noframes,' + 'noscript,object,ol,optgroup,option,output,p,param,pre,progress,' + 'q,rp,rt,ruby,s,samp,script,section,select,small,source,span,' + 'strike,strong,style,sub,summary,sup,table,tbody,td,textarea,' + 'tfoot,th,thead,time,title,tr,track,tt,u,ul,xmp,var,video,wbr' + ).split(',') if x]) + + class Style(object): + def __init__(self, **kw): + for x, y in kw.items(): + x = x.replace('_', '-') + setattr(self, x, y) + + +class raw(object): + """just a box that can contain a unicode string that will be + included directly in the output""" + def __init__(self, uniobj): + self.uniobj = uniobj + +class SimpleUnicodeVisitor(object): + """ recursive visitor to write unicode. """ + def __init__(self, write, indent=0, curindent=0, shortempty=True): + self.write = write + self.cache = {} + self.visited = {} # for detection of recursion + self.indent = indent + self.curindent = curindent + self.parents = [] + self.shortempty = shortempty # short empty tags or not + + def visit(self, node): + """ dispatcher on node's class/bases name. 
""" + cls = node.__class__ + try: + visitmethod = self.cache[cls] + except KeyError: + for subclass in cls.__mro__: + visitmethod = getattr(self, subclass.__name__, None) + if visitmethod is not None: + break + else: + visitmethod = self.__object + self.cache[cls] = visitmethod + visitmethod(node) + + # the default fallback handler is marked private + # to avoid clashes with the tag name object + def __object(self, obj): + #self.write(obj) + self.write(escape(unicode(obj))) + + def raw(self, obj): + self.write(obj.uniobj) + + def list(self, obj): + assert id(obj) not in self.visited + self.visited[id(obj)] = 1 + for elem in obj: + self.visit(elem) + + def Tag(self, tag): + assert id(tag) not in self.visited + try: + tag.parent = self.parents[-1] + except IndexError: + tag.parent = None + self.visited[id(tag)] = 1 + tagname = getattr(tag, 'xmlname', tag.__class__.__name__) + if self.curindent and not self._isinline(tagname): + self.write("\n" + u(' ') * self.curindent) + if tag: + self.curindent += self.indent + self.write(u('<%s%s>') % (tagname, self.attributes(tag))) + self.parents.append(tag) + for x in tag: + self.visit(x) + self.parents.pop() + self.write(u('') % tagname) + self.curindent -= self.indent + else: + nameattr = tagname+self.attributes(tag) + if self._issingleton(tagname): + self.write(u('<%s/>') % (nameattr,)) + else: + self.write(u('<%s>') % (nameattr, tagname)) + + def attributes(self, tag): + # serialize attributes + attrlist = dir(tag.attr) + attrlist.sort() + l = [] + for name in attrlist: + res = self.repr_attribute(tag.attr, name) + if res is not None: + l.append(res) + l.extend(self.getstyle(tag)) + return u("").join(l) + + def repr_attribute(self, attrs, name): + if name[:2] != '__': + value = getattr(attrs, name) + if name.endswith('_'): + name = name[:-1] + if isinstance(value, raw): + insert = value.uniobj + else: + insert = escape(unicode(value)) + return ' %s="%s"' % (name, insert) + + def getstyle(self, tag): + """ return attribute list suitable for styling. """ + try: + styledict = tag.style.__dict__ + except AttributeError: + return [] + else: + stylelist = [x+': ' + y for x,y in styledict.items()] + return [u(' style="%s"') % u('; ').join(stylelist)] + + def _issingleton(self, tagname): + """can (and will) be overridden in subclasses""" + return self.shortempty + + def _isinline(self, tagname): + """can (and will) be overridden in subclasses""" + return False + +class HtmlVisitor(SimpleUnicodeVisitor): + + single = dict([(x, 1) for x in + ('br,img,area,param,col,hr,meta,link,base,' + 'input,frame').split(',')]) + inline = dict([(x, 1) for x in + ('a abbr acronym b basefont bdo big br cite code dfn em font ' + 'i img input kbd label q s samp select small span strike ' + 'strong sub sup textarea tt u var'.split(' '))]) + + def repr_attribute(self, attrs, name): + if name == 'class_': + value = getattr(attrs, name) + if value is None: + return + return super(HtmlVisitor, self).repr_attribute(attrs, name) + + def _issingleton(self, tagname): + return tagname in self.single + + def _isinline(self, tagname): + return tagname in self.inline + + +class _escape: + def __init__(self): + self.escape = { + u('"') : u('"'), u('<') : u('<'), u('>') : u('>'), + u('&') : u('&'), u("'") : u('''), + } + self.charef_rex = re.compile(u("|").join(self.escape.keys())) + + def _replacer(self, match): + return self.escape[match.group(0)] + + def __call__(self, ustring): + """ xml-escape the given unicode string. 
""" + try: + ustring = unicode(ustring) + except UnicodeDecodeError: + ustring = unicode(ustring, 'utf-8', errors='replace') + return self.charef_rex.sub(self._replacer, ustring) + +escape = _escape() diff --git a/venv/lib/python3.10/site-packages/py/error.pyi b/venv/lib/python3.10/site-packages/py/error.pyi new file mode 100644 index 0000000..034eba6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/py/error.pyi @@ -0,0 +1,129 @@ +from typing import Any, Callable, TypeVar + +_T = TypeVar('_T') + +def checked_call(func: Callable[..., _T], *args: Any, **kwargs: Any) -> _T: ... +class Error(EnvironmentError): ... +class EPERM(Error): ... +class ENOENT(Error): ... +class ESRCH(Error): ... +class EINTR(Error): ... +class EIO(Error): ... +class ENXIO(Error): ... +class E2BIG(Error): ... +class ENOEXEC(Error): ... +class EBADF(Error): ... +class ECHILD(Error): ... +class EAGAIN(Error): ... +class ENOMEM(Error): ... +class EACCES(Error): ... +class EFAULT(Error): ... +class ENOTBLK(Error): ... +class EBUSY(Error): ... +class EEXIST(Error): ... +class EXDEV(Error): ... +class ENODEV(Error): ... +class ENOTDIR(Error): ... +class EISDIR(Error): ... +class EINVAL(Error): ... +class ENFILE(Error): ... +class EMFILE(Error): ... +class ENOTTY(Error): ... +class ETXTBSY(Error): ... +class EFBIG(Error): ... +class ENOSPC(Error): ... +class ESPIPE(Error): ... +class EROFS(Error): ... +class EMLINK(Error): ... +class EPIPE(Error): ... +class EDOM(Error): ... +class ERANGE(Error): ... +class EDEADLCK(Error): ... +class ENAMETOOLONG(Error): ... +class ENOLCK(Error): ... +class ENOSYS(Error): ... +class ENOTEMPTY(Error): ... +class ELOOP(Error): ... +class EWOULDBLOCK(Error): ... +class ENOMSG(Error): ... +class EIDRM(Error): ... +class ECHRNG(Error): ... +class EL2NSYNC(Error): ... +class EL3HLT(Error): ... +class EL3RST(Error): ... +class ELNRNG(Error): ... +class EUNATCH(Error): ... +class ENOCSI(Error): ... +class EL2HLT(Error): ... +class EBADE(Error): ... +class EBADR(Error): ... +class EXFULL(Error): ... +class ENOANO(Error): ... +class EBADRQC(Error): ... +class EBADSLT(Error): ... +class EDEADLOCK(Error): ... +class EBFONT(Error): ... +class ENOSTR(Error): ... +class ENODATA(Error): ... +class ETIME(Error): ... +class ENOSR(Error): ... +class ENONET(Error): ... +class ENOPKG(Error): ... +class EREMOTE(Error): ... +class ENOLINK(Error): ... +class EADV(Error): ... +class ESRMNT(Error): ... +class ECOMM(Error): ... +class EPROTO(Error): ... +class EMULTIHOP(Error): ... +class EDOTDOT(Error): ... +class EBADMSG(Error): ... +class EOVERFLOW(Error): ... +class ENOTUNIQ(Error): ... +class EBADFD(Error): ... +class EREMCHG(Error): ... +class ELIBACC(Error): ... +class ELIBBAD(Error): ... +class ELIBSCN(Error): ... +class ELIBMAX(Error): ... +class ELIBEXEC(Error): ... +class EILSEQ(Error): ... +class ERESTART(Error): ... +class ESTRPIPE(Error): ... +class EUSERS(Error): ... +class ENOTSOCK(Error): ... +class EDESTADDRREQ(Error): ... +class EMSGSIZE(Error): ... +class EPROTOTYPE(Error): ... +class ENOPROTOOPT(Error): ... +class EPROTONOSUPPORT(Error): ... +class ESOCKTNOSUPPORT(Error): ... +class ENOTSUP(Error): ... +class EOPNOTSUPP(Error): ... +class EPFNOSUPPORT(Error): ... +class EAFNOSUPPORT(Error): ... +class EADDRINUSE(Error): ... +class EADDRNOTAVAIL(Error): ... +class ENETDOWN(Error): ... +class ENETUNREACH(Error): ... +class ENETRESET(Error): ... +class ECONNABORTED(Error): ... +class ECONNRESET(Error): ... +class ENOBUFS(Error): ... +class EISCONN(Error): ... +class ENOTCONN(Error): ... 
+class ESHUTDOWN(Error): ... +class ETOOMANYREFS(Error): ... +class ETIMEDOUT(Error): ... +class ECONNREFUSED(Error): ... +class EHOSTDOWN(Error): ... +class EHOSTUNREACH(Error): ... +class EALREADY(Error): ... +class EINPROGRESS(Error): ... +class ESTALE(Error): ... +class EUCLEAN(Error): ... +class ENOTNAM(Error): ... +class ENAVAIL(Error): ... +class EISNAM(Error): ... +class EREMOTEIO(Error): ... +class EDQUOT(Error): ... diff --git a/venv/lib/python3.10/site-packages/py/iniconfig.pyi b/venv/lib/python3.10/site-packages/py/iniconfig.pyi new file mode 100644 index 0000000..b6284be --- /dev/null +++ b/venv/lib/python3.10/site-packages/py/iniconfig.pyi @@ -0,0 +1,31 @@ +from typing import Callable, Iterator, Mapping, Optional, Tuple, TypeVar, Union +from typing_extensions import Final + +_D = TypeVar('_D') +_T = TypeVar('_T') + +class ParseError(Exception): + # Private __init__. + path: Final[str] + lineno: Final[int] + msg: Final[str] + +class SectionWrapper: + # Private __init__. + config: Final[IniConfig] + name: Final[str] + def __getitem__(self, key: str) -> str: ... + def __iter__(self) -> Iterator[str]: ... + def get(self, key: str, default: _D = ..., convert: Callable[[str], _T] = ...) -> Union[_T, _D]: ... + def items(self) -> Iterator[Tuple[str, str]]: ... + def lineof(self, name: str) -> Optional[int]: ... + +class IniConfig: + path: Final[str] + sections: Final[Mapping[str, Mapping[str, str]]] + def __init__(self, path: str, data: Optional[str] = None): ... + def __contains__(self, arg: str) -> bool: ... + def __getitem__(self, name: str) -> SectionWrapper: ... + def __iter__(self) -> Iterator[SectionWrapper]: ... + def get(self, section: str, name: str, default: _D = ..., convert: Callable[[str], _T] = ...) -> Union[_T, _D]: ... + def lineof(self, section: str, name: Optional[str] = ...) -> Optional[int]: ... diff --git a/venv/lib/python3.10/site-packages/py/io.pyi b/venv/lib/python3.10/site-packages/py/io.pyi new file mode 100644 index 0000000..d377e24 --- /dev/null +++ b/venv/lib/python3.10/site-packages/py/io.pyi @@ -0,0 +1,130 @@ +from io import StringIO as TextIO +from io import BytesIO as BytesIO +from typing import Any, AnyStr, Callable, Generic, IO, List, Optional, Text, Tuple, TypeVar, Union, overload +from typing_extensions import Final +import sys + +_T = TypeVar("_T") + +class FDCapture(Generic[AnyStr]): + def __init__(self, targetfd: int, tmpfile: Optional[IO[AnyStr]] = ..., now: bool = ..., patchsys: bool = ...) -> None: ... + def start(self) -> None: ... + def done(self) -> IO[AnyStr]: ... + def writeorg(self, data: AnyStr) -> None: ... + +class StdCaptureFD: + def __init__( + self, + out: Union[bool, IO[str]] = ..., + err: Union[bool, IO[str]] = ..., + mixed: bool = ..., + in_: bool = ..., + patchsys: bool = ..., + now: bool = ..., + ) -> None: ... + @classmethod + def call(cls, func: Callable[..., _T], *args: Any, **kwargs: Any) -> Tuple[_T, str, str]: ... + def reset(self) -> Tuple[str, str]: ... + def suspend(self) -> Tuple[str, str]: ... + def startall(self) -> None: ... + def resume(self) -> None: ... + def done(self, save: bool = ...) -> Tuple[IO[str], IO[str]]: ... + def readouterr(self) -> Tuple[str, str]: ... + +class StdCapture: + def __init__( + self, + out: Union[bool, IO[str]] = ..., + err: Union[bool, IO[str]] = ..., + in_: bool = ..., + mixed: bool = ..., + now: bool = ..., + ) -> None: ... + @classmethod + def call(cls, func: Callable[..., _T], *args: Any, **kwargs: Any) -> Tuple[_T, str, str]: ... + def reset(self) -> Tuple[str, str]: ... 
+ def suspend(self) -> Tuple[str, str]: ... + def startall(self) -> None: ... + def resume(self) -> None: ... + def done(self, save: bool = ...) -> Tuple[IO[str], IO[str]]: ... + def readouterr(self) -> Tuple[IO[str], IO[str]]: ... + +# XXX: The type here is not exactly right. If f is IO[bytes] and +# encoding is not None, returns some weird hybrid, not exactly IO[bytes]. +def dupfile( + f: IO[AnyStr], + mode: Optional[str] = ..., + buffering: int = ..., + raising: bool = ..., + encoding: Optional[str] = ..., +) -> IO[AnyStr]: ... +def get_terminal_width() -> int: ... +def ansi_print( + text: Union[str, Text], + esc: Union[Union[str, Text], Tuple[Union[str, Text], ...]], + file: Optional[IO[Any]] = ..., + newline: bool = ..., + flush: bool = ..., +) -> None: ... +def saferepr(obj, maxsize: int = ...) -> str: ... + +class TerminalWriter: + stringio: TextIO + encoding: Final[str] + hasmarkup: bool + def __init__(self, file: Optional[IO[str]] = ..., stringio: bool = ..., encoding: Optional[str] = ...) -> None: ... + @property + def fullwidth(self) -> int: ... + @fullwidth.setter + def fullwidth(self, value: int) -> None: ... + @property + def chars_on_current_line(self) -> int: ... + @property + def width_of_current_line(self) -> int: ... + def markup( + self, + text: str, + *, + black: int = ..., red: int = ..., green: int = ..., yellow: int = ..., blue: int = ..., purple: int = ..., + cyan: int = ..., white: int = ..., Black: int = ..., Red: int = ..., Green: int = ..., Yellow: int = ..., + Blue: int = ..., Purple: int = ..., Cyan: int = ..., White: int = ..., bold: int = ..., light: int = ..., + blink: int = ..., invert: int = ..., + ) -> str: ... + def sep( + self, + sepchar: str, + title: Optional[str] = ..., + fullwidth: Optional[int] = ..., + *, + black: int = ..., red: int = ..., green: int = ..., yellow: int = ..., blue: int = ..., purple: int = ..., + cyan: int = ..., white: int = ..., Black: int = ..., Red: int = ..., Green: int = ..., Yellow: int = ..., + Blue: int = ..., Purple: int = ..., Cyan: int = ..., White: int = ..., bold: int = ..., light: int = ..., + blink: int = ..., invert: int = ..., + ) -> None: ... + def write( + self, + msg: str, + *, + black: int = ..., red: int = ..., green: int = ..., yellow: int = ..., blue: int = ..., purple: int = ..., + cyan: int = ..., white: int = ..., Black: int = ..., Red: int = ..., Green: int = ..., Yellow: int = ..., + Blue: int = ..., Purple: int = ..., Cyan: int = ..., White: int = ..., bold: int = ..., light: int = ..., + blink: int = ..., invert: int = ..., + ) -> None: ... + def line( + self, + s: str = ..., + *, + black: int = ..., red: int = ..., green: int = ..., yellow: int = ..., blue: int = ..., purple: int = ..., + cyan: int = ..., white: int = ..., Black: int = ..., Red: int = ..., Green: int = ..., Yellow: int = ..., + Blue: int = ..., Purple: int = ..., Cyan: int = ..., White: int = ..., bold: int = ..., light: int = ..., + blink: int = ..., invert: int = ..., + ) -> None: ... + def reline( + self, + line: str, + *, + black: int = ..., red: int = ..., green: int = ..., yellow: int = ..., blue: int = ..., purple: int = ..., + cyan: int = ..., white: int = ..., Black: int = ..., Red: int = ..., Green: int = ..., Yellow: int = ..., + Blue: int = ..., Purple: int = ..., Cyan: int = ..., White: int = ..., bold: int = ..., light: int = ..., + blink: int = ..., invert: int = ..., + ) -> None: ... 
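The io.pyi stub above declares py's terminal helpers; a short sketch of the
TerminalWriter API as typed there (markup keywords such as ``bold`` and
``green`` become ANSI escapes when the terminal supports them)::

    import py

    tw = py.io.TerminalWriter()
    tw.sep("=", "session starts", bold=True)   # full-width separator with title
    tw.line("collected 3 items", green=True)   # one colored line
    tw.line(py.io.saferepr(list(range(100)), maxsize=40))  # truncated repr
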
diff --git a/venv/lib/python3.10/site-packages/py/path.pyi b/venv/lib/python3.10/site-packages/py/path.pyi new file mode 100644 index 0000000..1ddab96 --- /dev/null +++ b/venv/lib/python3.10/site-packages/py/path.pyi @@ -0,0 +1,197 @@ +from typing import Any, AnyStr, Callable, ContextManager, Generic, IO, Iterable, Iterator, List, Optional, Text, Type, Union +from typing_extensions import Final, Literal +import os +import sys + +class _FNMatcher(Generic[AnyStr]): + pattern: AnyStr = ... + def __init__(self, pattern: AnyStr) -> None: ... + def __call__(self, path: local) -> bool: ... + +class _Stat: + path: Final[local] = ... + mode: Final[int] + ino: Final[int] + dev: Final[int] + nlink: Final[int] + uid: Final[int] + gid: Final[int] + size: Final[int] + atime: Final[float] + mtime: Final[float] + ctime: Final[float] + atime_ns: Final[int] + mtime_ns: Final[int] + ctime_ns: Final[int] + if sys.version_info >= (3, 8) and sys.platform == "win32": + reparse_tag: Final[int] + blocks: Final[int] + blksize: Final[int] + rdev: Final[int] + flags: Final[int] + gen: Final[int] + birthtime: Final[int] + rsize: Final[int] + creator: Final[int] + type: Final[int] + if sys.platform != 'win32': + @property + def owner(self) -> str: ... + @property + def group(self) -> str: ... + def isdir(self) -> bool: ... + def isfile(self) -> bool: ... + def islink(self) -> bool: ... + + +if sys.version_info >= (3, 6): + _PathLike = os.PathLike +else: + class _PathLike(Generic[AnyStr]): + def __fspath__(self) -> AnyStr: ... +_PathType = Union[bytes, Text, _PathLike[str], _PathLike[bytes], local] + +class local(_PathLike[str]): + class ImportMismatchError(ImportError): ... + + sep: Final[str] + strpath: Final[str] + + def __init__(self, path: _PathType = ..., expanduser: bool = ...) -> None: ... + def __hash__(self) -> int: ... + def __eq__(self, other: object) -> bool: ... + def __ne__(self, other: object) -> bool: ... + def __lt__(self, other: object) -> bool: ... + def __gt__(self, other: object) -> bool: ... + def __add__(self, other: object) -> local: ... + def __cmp__(self, other: object) -> int: ... + def __div__(self, other: _PathType) -> local: ... + def __truediv__(self, other: _PathType) -> local: ... + def __fspath__(self) -> str: ... + + @classmethod + def get_temproot(cls) -> local: ... + @classmethod + def make_numbered_dir( + cls, + prefix: str = ..., + rootdir: Optional[local] = ..., + keep: Optional[int] = ..., + lock_timeout: int = ..., + ) -> local: ... + @classmethod + def mkdtemp(cls, rootdir: Optional[local] = ...) -> local: ... + @classmethod + def sysfind( + cls, + name: _PathType, + checker: Optional[Callable[[local], bool]] = ..., + paths: Optional[Iterable[_PathType]] = ..., + ) -> Optional[local]: ... + + @property + def basename(self) -> str: ... + @property + def dirname(self) -> str: ... + @property + def purebasename(self) -> str: ... + @property + def ext(self) -> str: ... + + def as_cwd(self) -> ContextManager[Optional[local]]: ... + def atime(self) -> float: ... + def bestrelpath(self, dest: local) -> str: ... + def chdir(self) -> local: ... 
+ def check( + self, + *, + basename: int = ..., notbasename: int = ..., + basestarts: int = ..., notbasestarts: int = ..., + dir: int = ..., notdir: int = ..., + dotfile: int = ..., notdotfile: int = ..., + endswith: int = ..., notendswith: int = ..., + exists: int = ..., notexists: int = ..., + ext: int = ..., notext: int = ..., + file: int = ..., notfile: int = ..., + fnmatch: int = ..., notfnmatch: int = ..., + link: int = ..., notlink: int = ..., + relto: int = ..., notrelto: int = ..., + ) -> bool: ... + def chmod(self, mode: int, rec: Union[int, str, Text, Callable[[local], bool]] = ...) -> None: ... + if sys.platform != 'win32': + def chown(self, user: Union[int, str], group: Union[int, str], rec: int = ...) -> None: ... + def common(self, other: local) -> Optional[local]: ... + def computehash(self, hashtype: str = ..., chunksize: int = ...) -> str: ... + def copy(self, target: local, mode: bool = ..., stat: bool = ...) -> None: ... + def dirpath(self, *args: _PathType, abs: int = ...) -> local: ... + def dump(self, obj: Any, bin: Optional[int] = ...) -> None: ... + def ensure(self, *args: _PathType, dir: int = ...) -> local: ... + def ensure_dir(self, *args: _PathType) -> local: ... + def exists(self) -> bool: ... + def fnmatch(self, pattern: str): _FNMatcher + def isdir(self) -> bool: ... + def isfile(self) -> bool: ... + def islink(self) -> bool: ... + def join(self, *args: _PathType, abs: int = ...) -> local: ... + def listdir( + self, + fil: Optional[Union[str, Text, Callable[[local], bool]]] = ..., + sort: Optional[bool] = ..., + ) -> List[local]: ... + def load(self) -> Any: ... + def lstat(self) -> _Stat: ... + def mkdir(self, *args: _PathType) -> local: ... + if sys.platform != 'win32': + def mklinkto(self, oldname: Union[str, local]) -> None: ... + def mksymlinkto(self, value: local, absolute: int = ...) -> None: ... + def move(self, target: local) -> None: ... + def mtime(self) -> float: ... + def new( + self, + *, + drive: str = ..., + dirname: str = ..., + basename: str = ..., + purebasename: str = ..., + ext: str = ..., + ) -> local: ... + def open(self, mode: str = ..., ensure: bool = ..., encoding: Optional[str] = ...) -> IO[Any]: ... + def parts(self, reverse: bool = ...) -> List[local]: ... + def pyimport( + self, + modname: Optional[str] = ..., + ensuresyspath: Union[bool, Literal["append", "importlib"]] = ..., + ) -> Any: ... + def pypkgpath(self) -> Optional[local]: ... + def read(self, mode: str = ...) -> Union[Text, bytes]: ... + def read_binary(self) -> bytes: ... + def read_text(self, encoding: str) -> Text: ... + def readlines(self, cr: int = ...) -> List[str]: ... + if sys.platform != 'win32': + def readlink(self) -> str: ... + def realpath(self) -> local: ... + def relto(self, relpath: Union[str, local]) -> str: ... + def remove(self, rec: int = ..., ignore_errors: bool = ...) -> None: ... + def rename(self, target: _PathType) -> None: ... + def samefile(self, other: _PathType) -> bool: ... + def setmtime(self, mtime: Optional[float] = ...) -> None: ... + def size(self) -> int: ... + def stat(self, raising: bool = ...) -> _Stat: ... + def sysexec(self, *argv: Any, **popen_opts: Any) -> Text: ... + def visit( + self, + fil: Optional[Union[str, Text, Callable[[local], bool]]] = ..., + rec: Optional[Union[Literal[1, True], str, Text, Callable[[local], bool]]] = ..., + ignore: Type[Exception] = ..., + bf: bool = ..., + sort: bool = ..., + ) -> Iterator[local]: ... + def write(self, data: Any, mode: str = ..., ensure: bool = ...) -> None: ... 
+ def write_binary(self, data: bytes, ensure: bool = ...) -> None: ... + def write_text(self, data: Union[str, Text], encoding: str, ensure: bool = ...) -> None: ... + + +# Untyped types below here. +svnwc: Any +svnurl: Any +SvnAuth: Any diff --git a/venv/lib/python3.10/site-packages/py/py.typed b/venv/lib/python3.10/site-packages/py/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/venv/lib/python3.10/site-packages/py/test.py b/venv/lib/python3.10/site-packages/py/test.py new file mode 100644 index 0000000..aa5beb1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/py/test.py @@ -0,0 +1,10 @@ +import sys +if __name__ == '__main__': + import pytest + sys.exit(pytest.main()) +else: + import sys, pytest + sys.modules['py.test'] = pytest + +# for more API entry points see the 'tests' definition +# in __init__.py diff --git a/venv/lib/python3.10/site-packages/py/xml.pyi b/venv/lib/python3.10/site-packages/py/xml.pyi new file mode 100644 index 0000000..9c44480 --- /dev/null +++ b/venv/lib/python3.10/site-packages/py/xml.pyi @@ -0,0 +1,25 @@ +from typing import ClassVar, Generic, Iterable, Text, Type, Union +from typing_extensions import Final + +class raw: + uniobj: Final[Text] + def __init__(self, uniobj: Text) -> None: ... + +class _NamespaceMetaclass(type): + def __getattr__(self, name: str) -> Type[Tag]: ... + +class Namespace(metaclass=_NamespaceMetaclass): ... + +class Tag(list): + class Attr: + def __getattr__(self, attr: str) -> Text: ... + attr: Final[Attr] + def __init__(self, *args: Union[Text, raw, Tag, Iterable[Tag]], **kwargs: Union[Text, raw]) -> None: ... + def unicode(self, indent: int = ...) -> Text: ... + +class html(Namespace): + class Style: + def __init__(self, **kw: Union[str, Text]) -> None: ... + style: ClassVar[Style] + +def escape(ustring: Union[str, Text]) -> Text: ... diff --git a/venv/lib/python3.10/site-packages/pytest-7.1.2.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/pytest-7.1.2.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/venv/lib/python3.10/site-packages/pytest-7.1.2.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/lib/python3.10/site-packages/pytest-7.1.2.dist-info/LICENSE b/venv/lib/python3.10/site-packages/pytest-7.1.2.dist-info/LICENSE new file mode 100644 index 0000000..c3f1657 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pytest-7.1.2.dist-info/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2004 Holger Krekel and others + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
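The xml.pyi stub just above types the _xmlgen module shown earlier: tag classes
are conjured on demand by the Namespace metaclass and serialized by the visitor
classes. A small sketch (``class_`` is the escaped spelling of the reserved
``class`` attribute name)::

    import py

    html = py.xml.html
    page = html.html(
        html.body(
            html.h1("Demo", class_="title"),
            html.p("rendered by HtmlVisitor"),
        )
    )
    print(page.unicode(indent=2))
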
diff --git a/venv/lib/python3.10/site-packages/pytest-7.1.2.dist-info/METADATA b/venv/lib/python3.10/site-packages/pytest-7.1.2.dist-info/METADATA new file mode 100644 index 0000000..85dffa1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pytest-7.1.2.dist-info/METADATA @@ -0,0 +1,222 @@ +Metadata-Version: 2.1 +Name: pytest +Version: 7.1.2 +Summary: pytest: simple powerful testing with Python +Home-page: https://docs.pytest.org/en/latest/ +Author: Holger Krekel, Bruno Oliveira, Ronny Pfannschmidt, Floris Bruynooghe, Brianna Laugher, Florian Bruhin and others +License: MIT +Project-URL: Changelog, https://docs.pytest.org/en/stable/changelog.html +Project-URL: Twitter, https://twitter.com/pytestdotorg +Project-URL: Source, https://github.com/pytest-dev/pytest +Project-URL: Tracker, https://github.com/pytest-dev/pytest/issues +Keywords: test,unittest +Platform: unix +Platform: linux +Platform: osx +Platform: cygwin +Platform: win32 +Classifier: Development Status :: 6 - Mature +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: MacOS :: MacOS X +Classifier: Operating System :: Microsoft :: Windows +Classifier: Operating System :: POSIX +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Topic :: Software Development :: Libraries +Classifier: Topic :: Software Development :: Testing +Classifier: Topic :: Utilities +Requires-Python: >=3.7 +Description-Content-Type: text/x-rst +License-File: LICENSE +Requires-Dist: attrs (>=19.2.0) +Requires-Dist: iniconfig +Requires-Dist: packaging +Requires-Dist: pluggy (<2.0,>=0.12) +Requires-Dist: py (>=1.8.2) +Requires-Dist: tomli (>=1.0.0) +Requires-Dist: importlib-metadata (>=0.12) ; python_version < "3.8" +Requires-Dist: atomicwrites (>=1.0) ; sys_platform == "win32" +Requires-Dist: colorama ; sys_platform == "win32" +Provides-Extra: testing +Requires-Dist: argcomplete ; extra == 'testing' +Requires-Dist: hypothesis (>=3.56) ; extra == 'testing' +Requires-Dist: mock ; extra == 'testing' +Requires-Dist: nose ; extra == 'testing' +Requires-Dist: pygments (>=2.7.2) ; extra == 'testing' +Requires-Dist: requests ; extra == 'testing' +Requires-Dist: xmlschema ; extra == 'testing' + +.. image:: https://github.com/pytest-dev/pytest/raw/main/doc/en/img/pytest_logo_curves.svg + :target: https://docs.pytest.org/en/stable/ + :align: center + :height: 200 + :alt: pytest + + +------ + +.. image:: https://img.shields.io/pypi/v/pytest.svg + :target: https://pypi.org/project/pytest/ + +.. image:: https://img.shields.io/conda/vn/conda-forge/pytest.svg + :target: https://anaconda.org/conda-forge/pytest + +.. image:: https://img.shields.io/pypi/pyversions/pytest.svg + :target: https://pypi.org/project/pytest/ + +.. image:: https://codecov.io/gh/pytest-dev/pytest/branch/main/graph/badge.svg + :target: https://codecov.io/gh/pytest-dev/pytest + :alt: Code coverage Status + +.. image:: https://github.com/pytest-dev/pytest/workflows/test/badge.svg + :target: https://github.com/pytest-dev/pytest/actions?query=workflow%3Atest + +.. image:: https://results.pre-commit.ci/badge/github/pytest-dev/pytest/main.svg + :target: https://results.pre-commit.ci/latest/github/pytest-dev/pytest/main + :alt: pre-commit.ci status + +.. 
image:: https://img.shields.io/badge/code%20style-black-000000.svg + :target: https://github.com/psf/black + +.. image:: https://www.codetriage.com/pytest-dev/pytest/badges/users.svg + :target: https://www.codetriage.com/pytest-dev/pytest + +.. image:: https://readthedocs.org/projects/pytest/badge/?version=latest + :target: https://pytest.readthedocs.io/en/latest/?badge=latest + :alt: Documentation Status + +.. image:: https://img.shields.io/badge/Discord-pytest--dev-blue + :target: https://discord.com/invite/pytest-dev + :alt: Discord + +.. image:: https://img.shields.io/badge/Libera%20chat-%23pytest-orange + :target: https://web.libera.chat/#pytest + :alt: Libera chat + + +The ``pytest`` framework makes it easy to write small tests, yet +scales to support complex functional testing for applications and libraries. + +An example of a simple test: + +.. code-block:: python + + # content of test_sample.py + def inc(x): + return x + 1 + + + def test_answer(): + assert inc(3) == 5 + + +To execute it:: + + $ pytest + ============================= test session starts ============================= + collected 1 items + + test_sample.py F + + ================================== FAILURES =================================== + _________________________________ test_answer _________________________________ + + def test_answer(): + > assert inc(3) == 5 + E assert 4 == 5 + E + where 4 = inc(3) + + test_sample.py:5: AssertionError + ========================== 1 failed in 0.04 seconds =========================== + + +Due to ``pytest``'s detailed assertion introspection, only plain ``assert`` statements are used. See `getting-started `_ for more examples. + + +Features +-------- + +- Detailed info on failing `assert statements `_ (no need to remember ``self.assert*`` names) + +- `Auto-discovery + `_ + of test modules and functions + +- `Modular fixtures `_ for + managing small or parametrized long-lived test resources + +- Can run `unittest `_ (or trial), + `nose `_ test suites out of the box + +- Python 3.7+ or PyPy3 + +- Rich plugin architecture, with over 850+ `external plugins `_ and thriving community + + +Documentation +------------- + +For full documentation, including installation, tutorials and PDF documents, please see https://docs.pytest.org/en/stable/. + + +Bugs/Requests +------------- + +Please use the `GitHub issue tracker `_ to submit bugs or request features. + + +Changelog +--------- + +Consult the `Changelog `__ page for fixes and enhancements of each version. + + +Support pytest +-------------- + +`Open Collective`_ is an online funding platform for open and transparent communities. +It provides tools to raise money and share your finances in full transparency. + +It is the platform of choice for individuals and companies that want to make one-time or +monthly donations directly to the project. + +See more details in the `pytest collective`_. + +.. _Open Collective: https://opencollective.com +.. _pytest collective: https://opencollective.com/pytest + + +pytest for enterprise +--------------------- + +Available as part of the Tidelift Subscription. + +The maintainers of pytest and thousands of other packages are working with Tidelift to deliver commercial support and +maintenance for the open source dependencies you use to build your applications. +Save time, reduce risk, and improve code health, while paying the maintainers of the exact dependencies you use. + +`Learn more. 
`_ + +Security +^^^^^^^^ + +pytest has never been associated with a security vulnerability, but in any case, to report a +security vulnerability please use the `Tidelift security contact `_. +Tidelift will coordinate the fix and disclosure. + + +License +------- + +Copyright Holger Krekel and others, 2004. + +Distributed under the terms of the `MIT`_ license, pytest is free and open source software. + +.. _`MIT`: https://github.com/pytest-dev/pytest/blob/main/LICENSE + + diff --git a/venv/lib/python3.10/site-packages/pytest-7.1.2.dist-info/RECORD b/venv/lib/python3.10/site-packages/pytest-7.1.2.dist-info/RECORD new file mode 100644 index 0000000..60fcd3f --- /dev/null +++ b/venv/lib/python3.10/site-packages/pytest-7.1.2.dist-info/RECORD @@ -0,0 +1,146 @@ +../../../bin/py.test,sha256=mM9_gxEbdAhRVMRveFQYfJSJrg73yXkxyP56Wbh2fpk,262 +../../../bin/pytest,sha256=mM9_gxEbdAhRVMRveFQYfJSJrg73yXkxyP56Wbh2fpk,262 +_pytest/__init__.py,sha256=4K-_CZFPuvNtJXNwxyTtnbmpjVkSb-dC75bs29Sg0d4,356 +_pytest/__pycache__/__init__.cpython-310.pyc,, +_pytest/__pycache__/_argcomplete.cpython-310.pyc,, +_pytest/__pycache__/_version.cpython-310.pyc,, +_pytest/__pycache__/cacheprovider.cpython-310.pyc,, +_pytest/__pycache__/capture.cpython-310.pyc,, +_pytest/__pycache__/compat.cpython-310.pyc,, +_pytest/__pycache__/debugging.cpython-310.pyc,, +_pytest/__pycache__/deprecated.cpython-310.pyc,, +_pytest/__pycache__/doctest.cpython-310.pyc,, +_pytest/__pycache__/faulthandler.cpython-310.pyc,, +_pytest/__pycache__/fixtures.cpython-310.pyc,, +_pytest/__pycache__/freeze_support.cpython-310.pyc,, +_pytest/__pycache__/helpconfig.cpython-310.pyc,, +_pytest/__pycache__/hookspec.cpython-310.pyc,, +_pytest/__pycache__/junitxml.cpython-310.pyc,, +_pytest/__pycache__/legacypath.cpython-310.pyc,, +_pytest/__pycache__/logging.cpython-310.pyc,, +_pytest/__pycache__/main.cpython-310.pyc,, +_pytest/__pycache__/monkeypatch.cpython-310.pyc,, +_pytest/__pycache__/nodes.cpython-310.pyc,, +_pytest/__pycache__/nose.cpython-310.pyc,, +_pytest/__pycache__/outcomes.cpython-310.pyc,, +_pytest/__pycache__/pastebin.cpython-310.pyc,, +_pytest/__pycache__/pathlib.cpython-310.pyc,, +_pytest/__pycache__/pytester.cpython-310.pyc,, +_pytest/__pycache__/pytester_assertions.cpython-310.pyc,, +_pytest/__pycache__/python.cpython-310.pyc,, +_pytest/__pycache__/python_api.cpython-310.pyc,, +_pytest/__pycache__/python_path.cpython-310.pyc,, +_pytest/__pycache__/recwarn.cpython-310.pyc,, +_pytest/__pycache__/reports.cpython-310.pyc,, +_pytest/__pycache__/runner.cpython-310.pyc,, +_pytest/__pycache__/scope.cpython-310.pyc,, +_pytest/__pycache__/setuponly.cpython-310.pyc,, +_pytest/__pycache__/setupplan.cpython-310.pyc,, +_pytest/__pycache__/skipping.cpython-310.pyc,, +_pytest/__pycache__/stash.cpython-310.pyc,, +_pytest/__pycache__/stepwise.cpython-310.pyc,, +_pytest/__pycache__/terminal.cpython-310.pyc,, +_pytest/__pycache__/threadexception.cpython-310.pyc,, +_pytest/__pycache__/timing.cpython-310.pyc,, +_pytest/__pycache__/tmpdir.cpython-310.pyc,, +_pytest/__pycache__/unittest.cpython-310.pyc,, +_pytest/__pycache__/unraisableexception.cpython-310.pyc,, +_pytest/__pycache__/warning_types.cpython-310.pyc,, +_pytest/__pycache__/warnings.cpython-310.pyc,, +_pytest/_argcomplete.py,sha256=XDhxlI388A7hTHHcvwRD38fOMD2W1BtMS0WqEVDlrwY,3809 +_pytest/_code/__init__.py,sha256=S_sBUyBt-DdDWGJKJviYTWFHhhDFBM7pIMaENaocwaM,483 +_pytest/_code/__pycache__/__init__.cpython-310.pyc,, +_pytest/_code/__pycache__/code.cpython-310.pyc,, 
+_pytest/_code/__pycache__/source.cpython-310.pyc,, +_pytest/_code/code.py,sha256=KyEhL0b9SSztbjZ1ybdfxm5p-Mz5PEBHxo9GMahNxG8,43982 +_pytest/_code/source.py,sha256=URY36RBYU0mtBZF4HQoNC0OqVRjmHLetIrjNnvzjh9g,7436 +_pytest/_io/__init__.py,sha256=NWs125Ln6IqP5BZNw-V2iN_yYPwGM7vfrAP5ta6MhPA,154 +_pytest/_io/__pycache__/__init__.cpython-310.pyc,, +_pytest/_io/__pycache__/saferepr.cpython-310.pyc,, +_pytest/_io/__pycache__/terminalwriter.cpython-310.pyc,, +_pytest/_io/__pycache__/wcwidth.cpython-310.pyc,, +_pytest/_io/saferepr.py,sha256=MwhFXkaieoNvWe5e_Ll3cft8jBbHLTmbB6qMcBr5zQw,4592 +_pytest/_io/terminalwriter.py,sha256=aLbaFJ3KO-B8ZgeWonQ4-dZEcAt1ReX7xAW5BRoaODE,8152 +_pytest/_io/wcwidth.py,sha256=YhE3To-vBI7udLtV4B-g-04S3l8VoRD5ki935QipmJA,1253 +_pytest/_version.py,sha256=6RpKyGaGTfX_4aFv6Rxd1LhdZGA-tjh1liMhK2laRbQ,142 +_pytest/assertion/__init__.py,sha256=emr8u_aMvFDlwpT6jfIuye8vIIv1leVZSx7csqLqilg,6475 +_pytest/assertion/__pycache__/__init__.cpython-310.pyc,, +_pytest/assertion/__pycache__/rewrite.cpython-310.pyc,, +_pytest/assertion/__pycache__/truncate.cpython-310.pyc,, +_pytest/assertion/__pycache__/util.cpython-310.pyc,, +_pytest/assertion/rewrite.py,sha256=yym2k1lV6HoTNybwzp13f3GCfv6VD_DHd1Yt4bs2Yzo,43855 +_pytest/assertion/truncate.py,sha256=gK7qCG03AeL7P-eYApiTq0Wsu08x6veJ0cdGUcxWNNE,3286 +_pytest/assertion/util.py,sha256=05ORlGu_-3hD2tavwfe2t4ynCc1hB8fkiPunU26liHA,17546 +_pytest/cacheprovider.py,sha256=MKxsH_CwlQDjKN7w4n7gduwDyK11KolzmNq4DBuJf9E,20765 +_pytest/capture.py,sha256=6vxnJNJ_7mRG8E28QuUS0XoMAsw7vr0ZeJ-n2HbzMwc,31070 +_pytest/compat.py,sha256=3YDuLqLgwFjBA77fWKMF8NEec4xX8ZIG3gx_JcOYZ8Y,12467 +_pytest/config/__init__.py,sha256=zSbrg_A7tefQz_5jqrhKgKq4KWv10FIE7qMsGjKF__8,59443 +_pytest/config/__pycache__/__init__.cpython-310.pyc,, +_pytest/config/__pycache__/argparsing.cpython-310.pyc,, +_pytest/config/__pycache__/compat.cpython-310.pyc,, +_pytest/config/__pycache__/exceptions.cpython-310.pyc,, +_pytest/config/__pycache__/findpaths.cpython-310.pyc,, +_pytest/config/argparsing.py,sha256=0E5VYJkSBUZ3L8CBvxKLLZMf7ah_QsObADP6Lws7zOw,20740 +_pytest/config/compat.py,sha256=iK0e9nF-JwG13uoGwcSSsTP7hWEKMhNbJVTVG2G3FEk,2394 +_pytest/config/exceptions.py,sha256=21I5MARt26OLRmgvaAPu0BblFgYZXp2cxNZBpRRciAE,260 +_pytest/config/findpaths.py,sha256=YkawtHi-XAnYSXTl3MMgM8GP3Uc0YalBuy14pgNJQz8,7584 +_pytest/debugging.py,sha256=BAXCnnEe21bYnt9Jjp8p1NmZL1tXQ4VvrW5PBENdcVg,13413 +_pytest/deprecated.py,sha256=phdLdc5-Rr9ykvblWEs7LmQvtdE0PzLFgw9b_exEI1Y,4385 +_pytest/doctest.py,sha256=ScpdJYEO1-0vc7UPEji7lb1wv5IqlC6bk8qXLp-zCxM,25474 +_pytest/faulthandler.py,sha256=C8ZwJ2Be7hFutdDS_0POqIMCyli8e7N5GFsF21zePOo,3187 +_pytest/fixtures.py,sha256=WlfZcf_4K3eldjqC-S6rQYDFYL0r2qY1pNXOA2sVEq8,64602 +_pytest/freeze_support.py,sha256=Wmx-CJvzCbOAK3brpNJO6X_WHXcCA6Tr6-Tb_tjIyVQ,1339 +_pytest/helpconfig.py,sha256=we8RkZhfDzbn3YpCxPjuWhrduhpmyiH5Ha96jqLJXJU,8492 +_pytest/hookspec.py,sha256=XtcaAmSn4t_ZMNMI2t13TdQ2gwDEwrUVhGq-1_N7ib4,31051 +_pytest/junitxml.py,sha256=ylC6FyXYChA_ljWyEzeETOu7_4FwEs6D2nC4sondY2E,25596 +_pytest/legacypath.py,sha256=HoxVuQPAGbyYfBOQwSreNTMF31y6XhbBi0KJwB8i1K0,16587 +_pytest/logging.py,sha256=ktDGkq15rsCfW4PwTlx4rzXMS3DAQPjo35Kg1W72beY,30039 +_pytest/main.py,sha256=dJeVrT1b3wReBnI2Z9WPcLxsneQnXa16LC1q9-4oL9g,32226 +_pytest/mark/__init__.py,sha256=hxnVpFBtO_6wla5DSAWoiNH788cTThChl2lGhOkwpXg,8437 +_pytest/mark/__pycache__/__init__.cpython-310.pyc,, +_pytest/mark/__pycache__/expression.cpython-310.pyc,, +_pytest/mark/__pycache__/structures.cpython-310.pyc,, 
+_pytest/mark/expression.py,sha256=pUKdSaaO8qmAJWbPGx3oA7nbzp_XGq6-SsNNQyFZKcE,6412 +_pytest/mark/structures.py,sha256=XbJN8Covecg3b46odN0TjlSwrdgFArloujzZrKFbzJU,20416 +_pytest/monkeypatch.py,sha256=GHg1iMguP2yQbi7vbmiOxMDhSeX24YoBtlmLZ-MQx4Y,12905 +_pytest/nodes.py,sha256=mtRNiBv4IwYHAp8mEeeJ1h6pkWhrZwdFlOMkIYwwf9k,26035 +_pytest/nose.py,sha256=tSutHUA69g_0O4rlqKH60QqoEr8-uvkz5MX9I95fDx0,1370 +_pytest/outcomes.py,sha256=PV6-JwEIeDh-5kBmdAFVMnYgAlfC4NyOVAIU9ZstWMw,10034 +_pytest/pastebin.py,sha256=9rtINY7cdisacP7e7KI0YcplWRzqIjd_Bh6WzHTFWR8,3950 +_pytest/pathlib.py,sha256=R-QGxU8kPzCsrcAibn2gm_scb-QM6-yn5KIXAKsjkOI,24738 +_pytest/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +_pytest/pytester.py,sha256=cF2FnfLgJFhMxLdQCSpJxxPmzLX815jjZUkg2RDFj9o,61139 +_pytest/pytester_assertions.py,sha256=1BW3jDRSiHqGqdzmGSc7LfQt7nwc0w1lVRHMHHcFEQs,2327 +_pytest/python.py,sha256=wqtsYJ6FOW7WkMtghbOxN5hYozyOKoJ31rAC3_a0-Vc,69999 +_pytest/python_api.py,sha256=AmjemSTh_ubCE_9hboaRc7cjSXrRATurBUTNXiUmzgs,37367 +_pytest/python_path.py,sha256=TD7qJJ0S91XctgtpIjaq21DWh3rlxxVwXMvrjsjevaU,709 +_pytest/recwarn.py,sha256=ncC7xXfsNJ8LyDHOfWBk4_2m8VO8J0lr2U14M8VjO50,10358 +_pytest/reports.py,sha256=V0lldZ2voX55LKiRdChE_V9Mpvr01SdoUGGbAqjrS7Q,19944 +_pytest/runner.py,sha256=Jc-JK0eMef5tJEAZxXDVxNVKBxxMc4i3t1b7bq-MzcU,18046 +_pytest/scope.py,sha256=dNx6zm8ZWPrwsz8v7sAoemp537tEsdl1-_EOegPrwYE,2882 +_pytest/setuponly.py,sha256=oLc7IysTgFBppyKFnzdnP5bvxk26dDKeUDvar3-NJ-M,3263 +_pytest/setupplan.py,sha256=gVOO35Ge9Mp-FZX8M4Czyva2CMogFh3f4iswcl8xHws,1215 +_pytest/skipping.py,sha256=b1UwjPUjotMdX7rKComGWJGRErAjxDwkzl3GSPsDgzI,10171 +_pytest/stash.py,sha256=x_ywAeTfX84tI0vUyXmKmCDxwcXIETqnCrVkOUAtqQ8,3055 +_pytest/stepwise.py,sha256=cjORqu9eCYHmprPEXETGi0q4d4aq4cl7QuRoYOWn2Hs,4340 +_pytest/terminal.py,sha256=WJztyFTybcFa9uXus3Ep2zMGXJYfdvQcoTpA55jHxj8,50439 +_pytest/threadexception.py,sha256=TEohIXnQcof6D7cg10Ly4oMSRgHLCNsXPF6Du9FV4K8,2915 +_pytest/timing.py,sha256=vufB2Wrk_Bf4uol6U16WfpikCBttEmmtGKBNBshPN_k,375 +_pytest/tmpdir.py,sha256=vVdS0ulC0QDmJ-oUVM9qM60MkwCiPntwgJy_U3kHHaQ,7885 +_pytest/unittest.py,sha256=ammV_yWEFp2xTu-GuTVDF54ruzorv4pfbVn2wCOtyyo,14456 +_pytest/unraisableexception.py,sha256=FJmftKtjMHmUnlYyg1o9B_oQjvA_U0p1ABSNlKx1K2I,3191 +_pytest/warning_types.py,sha256=wMQjUXIZP0TnAjZOZZaqXlCfdZhi6rQQMy7l1Oja-Pw,3294 +_pytest/warnings.py,sha256=pBY3hIrOZobaWk9vHgW_ac44jXYhlyUuferDOhwaMGI,5070 +pytest-7.1.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +pytest-7.1.2.dist-info/LICENSE,sha256=yoNqX57Mo7LzUCMPqiCkj7ixRWU7VWjXhIYt-GRwa5s,1091 +pytest-7.1.2.dist-info/METADATA,sha256=9w3_jXICyHPjvJQnWx-YCXsGkyf2BeozG4onP4ZbnPE,7787 +pytest-7.1.2.dist-info/RECORD,, +pytest-7.1.2.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pytest-7.1.2.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92 +pytest-7.1.2.dist-info/entry_points.txt,sha256=8IPrHPH3LNZQ7v5tNEOcNTZYk_SheNg64jsTM9erqL4,77 +pytest-7.1.2.dist-info/top_level.txt,sha256=ENE0IeZV1I1R61DOt8gs5KmSXwitaq2zstF0az5f9PA,15 +pytest/__init__.py,sha256=O7JA8He1sJRC8jmApH8bSWTr8cWlXAvP1zgxXsp40gU,5009 +pytest/__main__.py,sha256=PJoBBgRxbsenpjfDenJmkO0-UGzTad7Htcxgstu4g30,116 +pytest/__pycache__/__init__.cpython-310.pyc,, +pytest/__pycache__/__main__.cpython-310.pyc,, +pytest/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 diff --git a/venv/lib/python3.10/site-packages/pytest-7.1.2.dist-info/REQUESTED b/venv/lib/python3.10/site-packages/pytest-7.1.2.dist-info/REQUESTED new file 
mode 100644 index 0000000..e69de29 diff --git a/venv/lib/python3.10/site-packages/pip-22.3.1.dist-info/WHEEL b/venv/lib/python3.10/site-packages/pytest-7.1.2.dist-info/WHEEL similarity index 100% rename from venv/lib/python3.10/site-packages/pip-22.3.1.dist-info/WHEEL rename to venv/lib/python3.10/site-packages/pytest-7.1.2.dist-info/WHEEL diff --git a/venv/lib/python3.10/site-packages/pytest-7.1.2.dist-info/entry_points.txt b/venv/lib/python3.10/site-packages/pytest-7.1.2.dist-info/entry_points.txt new file mode 100644 index 0000000..192205d --- /dev/null +++ b/venv/lib/python3.10/site-packages/pytest-7.1.2.dist-info/entry_points.txt @@ -0,0 +1,3 @@ +[console_scripts] +py.test = pytest:console_main +pytest = pytest:console_main diff --git a/venv/lib/python3.10/site-packages/pytest-7.1.2.dist-info/top_level.txt b/venv/lib/python3.10/site-packages/pytest-7.1.2.dist-info/top_level.txt new file mode 100644 index 0000000..e94857a --- /dev/null +++ b/venv/lib/python3.10/site-packages/pytest-7.1.2.dist-info/top_level.txt @@ -0,0 +1,2 @@ +_pytest +pytest diff --git a/venv/lib/python3.10/site-packages/pytest-cov.pth b/venv/lib/python3.10/site-packages/pytest-cov.pth new file mode 100644 index 0000000..91f2b7c --- /dev/null +++ b/venv/lib/python3.10/site-packages/pytest-cov.pth @@ -0,0 +1 @@ +import os, sys;exec('if \'COV_CORE_SOURCE\' in os.environ:\n try:\n from pytest_cov.embed import init\n init()\n except Exception as exc:\n sys.stderr.write(\n "pytest-cov: Failed to setup subprocess coverage. "\n "Environ: {0!r} "\n "Exception: {1!r}\\n".format(\n dict((k, v) for k, v in os.environ.items() if k.startswith(\'COV_CORE\')),\n exc\n )\n )\n') \ No newline at end of file diff --git a/venv/lib/python3.10/site-packages/pytest/__init__.py b/venv/lib/python3.10/site-packages/pytest/__init__.py new file mode 100644 index 0000000..777d377 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pytest/__init__.py @@ -0,0 +1,165 @@ +# PYTHON_ARGCOMPLETE_OK +"""pytest: unit and functional testing with Python.""" +from _pytest import __version__ +from _pytest import version_tuple +from _pytest._code import ExceptionInfo +from _pytest.assertion import register_assert_rewrite +from _pytest.cacheprovider import Cache +from _pytest.capture import CaptureFixture +from _pytest.config import cmdline +from _pytest.config import Config +from _pytest.config import console_main +from _pytest.config import ExitCode +from _pytest.config import hookimpl +from _pytest.config import hookspec +from _pytest.config import main +from _pytest.config import PytestPluginManager +from _pytest.config import UsageError +from _pytest.config.argparsing import OptionGroup +from _pytest.config.argparsing import Parser +from _pytest.debugging import pytestPDB as __pytestPDB +from _pytest.fixtures import fixture +from _pytest.fixtures import FixtureLookupError +from _pytest.fixtures import FixtureRequest +from _pytest.fixtures import yield_fixture +from _pytest.freeze_support import freeze_includes +from _pytest.legacypath import TempdirFactory +from _pytest.legacypath import Testdir +from _pytest.logging import LogCaptureFixture +from _pytest.main import Session +from _pytest.mark import Mark +from _pytest.mark import MARK_GEN as mark +from _pytest.mark import MarkDecorator +from _pytest.mark import MarkGenerator +from _pytest.mark import param +from _pytest.monkeypatch import MonkeyPatch +from _pytest.nodes import Collector +from _pytest.nodes import File +from _pytest.nodes import Item +from _pytest.outcomes import exit +from 
_pytest.outcomes import fail +from _pytest.outcomes import importorskip +from _pytest.outcomes import skip +from _pytest.outcomes import xfail +from _pytest.pytester import HookRecorder +from _pytest.pytester import LineMatcher +from _pytest.pytester import Pytester +from _pytest.pytester import RecordedHookCall +from _pytest.pytester import RunResult +from _pytest.python import Class +from _pytest.python import Function +from _pytest.python import Metafunc +from _pytest.python import Module +from _pytest.python import Package +from _pytest.python_api import approx +from _pytest.python_api import raises +from _pytest.recwarn import deprecated_call +from _pytest.recwarn import WarningsRecorder +from _pytest.recwarn import warns +from _pytest.reports import CollectReport +from _pytest.reports import TestReport +from _pytest.runner import CallInfo +from _pytest.stash import Stash +from _pytest.stash import StashKey +from _pytest.tmpdir import TempPathFactory +from _pytest.warning_types import PytestAssertRewriteWarning +from _pytest.warning_types import PytestCacheWarning +from _pytest.warning_types import PytestCollectionWarning +from _pytest.warning_types import PytestConfigWarning +from _pytest.warning_types import PytestDeprecationWarning +from _pytest.warning_types import PytestExperimentalApiWarning +from _pytest.warning_types import PytestRemovedIn8Warning +from _pytest.warning_types import PytestUnhandledCoroutineWarning +from _pytest.warning_types import PytestUnhandledThreadExceptionWarning +from _pytest.warning_types import PytestUnknownMarkWarning +from _pytest.warning_types import PytestUnraisableExceptionWarning +from _pytest.warning_types import PytestWarning + +set_trace = __pytestPDB.set_trace + + +__all__ = [ + "__version__", + "approx", + "Cache", + "CallInfo", + "CaptureFixture", + "Class", + "cmdline", + "Collector", + "CollectReport", + "Config", + "console_main", + "deprecated_call", + "exit", + "ExceptionInfo", + "ExitCode", + "fail", + "File", + "fixture", + "FixtureLookupError", + "FixtureRequest", + "freeze_includes", + "Function", + "hookimpl", + "HookRecorder", + "hookspec", + "importorskip", + "Item", + "LineMatcher", + "LogCaptureFixture", + "main", + "mark", + "Mark", + "MarkDecorator", + "MarkGenerator", + "Metafunc", + "Module", + "MonkeyPatch", + "OptionGroup", + "Package", + "param", + "Parser", + "PytestAssertRewriteWarning", + "PytestCacheWarning", + "PytestCollectionWarning", + "PytestConfigWarning", + "PytestDeprecationWarning", + "PytestExperimentalApiWarning", + "PytestRemovedIn8Warning", + "Pytester", + "PytestPluginManager", + "PytestUnhandledCoroutineWarning", + "PytestUnhandledThreadExceptionWarning", + "PytestUnknownMarkWarning", + "PytestUnraisableExceptionWarning", + "PytestWarning", + "raises", + "RecordedHookCall", + "register_assert_rewrite", + "RunResult", + "Session", + "set_trace", + "skip", + "Stash", + "StashKey", + "version_tuple", + "TempdirFactory", + "TempPathFactory", + "Testdir", + "TestReport", + "UsageError", + "WarningsRecorder", + "warns", + "xfail", + "yield_fixture", +] + + +def __getattr__(name: str) -> object: + if name == "Instance": + # The import emits a deprecation warning. 
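+ # (A module-level __getattr__, per PEP 562, keeps this deprecated name out of eager imports: the warning is only emitted when pytest.Instance is actually accessed.)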
+ from _pytest.python import Instance + + return Instance + raise AttributeError(f"module {__name__} has no attribute {name}") diff --git a/venv/lib/python3.10/site-packages/pytest/__main__.py b/venv/lib/python3.10/site-packages/pytest/__main__.py new file mode 100644 index 0000000..b170152 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pytest/__main__.py @@ -0,0 +1,5 @@ +"""The pytest entry point.""" +import pytest + +if __name__ == "__main__": + raise SystemExit(pytest.console_main()) diff --git a/venv/lib/python3.10/site-packages/pytest/py.typed b/venv/lib/python3.10/site-packages/pytest/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/venv/lib/python3.10/site-packages/pytest_cov-3.0.0.dist-info/AUTHORS.rst b/venv/lib/python3.10/site-packages/pytest_cov-3.0.0.dist-info/AUTHORS.rst new file mode 100644 index 0000000..6aa916a --- /dev/null +++ b/venv/lib/python3.10/site-packages/pytest_cov-3.0.0.dist-info/AUTHORS.rst @@ -0,0 +1,52 @@ +Authors +======= + +* Marc Schlaich - http://www.schlamar.org +* Rick van Hattem - http://wol.ph +* Buck Evan - https://github.com/bukzor +* Eric Larson - http://larsoner.com +* Marc Abramowitz - http://marc-abramowitz.com +* Thomas Kluyver - https://github.com/takluyver +* Guillaume Ayoub - http://www.yabz.fr +* Federico Ceratto - http://firelet.net +* Josh Kalderimis - http://blog.cookiestack.com +* Ionel Cristian Mărieș - https://blog.ionelmc.ro +* Christian Ledermann - https://github.com/cleder +* Alec Nikolas Reiter - https://github.com/justanr +* Patrick Lannigan - https://github.com/plannigan +* David Szotten - https://github.com/davidszotten +* Michael Elovskikh - https://github.com/wronglink +* Saurabh Kumar - https://github.com/theskumar +* Michael Elovskikh - https://github.com/wronglink +* Daniel Hahler - https://daniel.hahler.de +* Florian Bruhin - http://www.the-compiler.org +* Zoltan Kozma - https://github.com/kozmaz87 +* Francis Niu - https://flniu.github.io +* Jannis Leidel - https://github.com/jezdez +* Ryan Hiebert - http://ryanhiebert.com/ +* Terence Honles - https://github.com/terencehonles +* Jeremy Bowman - https://github.com/jmbowman +* Samuel Giffard - https://github.com/Mulugruntz +* Семён Марьясин - https://github.com/MarSoft +* Alexander Shadchin - https://github.com/shadchin +* Thomas Grainger - https://graingert.co.uk +* Juanjo Bazán - https://github.com/xuanxu +* Andrew Murray - https://github.com/radarhere +* Ned Batchelder - https://nedbatchelder.com/ +* Albert Tugushev - https://github.com/atugushev +* Martín Gaitán - https://github.com/mgaitan +* Hugo van Kemenade - https://github.com/hugovk +* Michael Manganiello - https://github.com/adamantike +* Anders Hovmöller - https://github.com/boxed +* Zac Hatfield-Dodds - https://zhd.dev +* Mateus Berardo de Souza Terra - https://github.com/MatTerra +* Ganden Schaffner - https://github.com/gschaffner +* Michał Górny - https://github.com/mgorny +* Bernát Gábor - https://github.com/gaborbernat +* Pamela McA'Nulty - https://github.com/PamelaM +* Christian Riedel - https://github.com/Cielquan +* Chris Sreesangkom - https://github.com/csreesan +* Sorin Sbarnea - https://github.com/ssbarnea +* Brian Rutledge - https://github.com/bhrutledge +* Danilo Šegan - https://github.com/dsegan +* Michał Bielawski - https://github.com/D3X diff --git a/venv/lib/python3.10/site-packages/pytest_cov-3.0.0.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/pytest_cov-3.0.0.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/pytest_cov-3.0.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/lib/python3.10/site-packages/setuptools-65.5.1.dist-info/LICENSE b/venv/lib/python3.10/site-packages/pytest_cov-3.0.0.dist-info/LICENSE similarity index 64% rename from venv/lib/python3.10/site-packages/setuptools-65.5.1.dist-info/LICENSE rename to venv/lib/python3.10/site-packages/pytest_cov-3.0.0.dist-info/LICENSE index 353924b..5b3634b 100644 --- a/venv/lib/python3.10/site-packages/setuptools-65.5.1.dist-info/LICENSE +++ b/venv/lib/python3.10/site-packages/pytest_cov-3.0.0.dist-info/LICENSE @@ -1,10 +1,12 @@ -Copyright Jason R. Coombs +The MIT License + +Copyright (c) 2010 Meme Dough Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to -deal in the Software without restriction, including without limitation the -rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -sell copies of the Software, and to permit persons to whom the Software is +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in @@ -14,6 +16,6 @@ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -IN THE SOFTWARE. +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/venv/lib/python3.10/site-packages/pytest_cov-3.0.0.dist-info/METADATA b/venv/lib/python3.10/site-packages/pytest_cov-3.0.0.dist-info/METADATA new file mode 100644 index 0000000..c9c2ccd --- /dev/null +++ b/venv/lib/python3.10/site-packages/pytest_cov-3.0.0.dist-info/METADATA @@ -0,0 +1,538 @@ +Metadata-Version: 2.1 +Name: pytest-cov +Version: 3.0.0 +Summary: Pytest plugin for measuring coverage. 
+Home-page: https://github.com/pytest-dev/pytest-cov +Author: Marc Schlaich +Author-email: marc.schlaich@gmail.com +License: MIT +Keywords: cover,coverage,pytest,py.test,distributed,parallel +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Framework :: Pytest +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: Microsoft :: Windows +Classifier: Operating System :: POSIX +Classifier: Operating System :: Unix +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Software Development :: Testing +Classifier: Topic :: Utilities +Requires-Python: >=3.6 +Requires-Dist: pytest (>=4.6) +Requires-Dist: coverage[toml] (>=5.2.1) +Provides-Extra: testing +Requires-Dist: fields ; extra == 'testing' +Requires-Dist: hunter ; extra == 'testing' +Requires-Dist: process-tests ; extra == 'testing' +Requires-Dist: six ; extra == 'testing' +Requires-Dist: pytest-xdist ; extra == 'testing' +Requires-Dist: virtualenv ; extra == 'testing' + +======== +Overview +======== + +.. start-badges + +.. list-table:: + :stub-columns: 1 + + * - docs + - |docs| + * - tests + - | |github-actions| |appveyor| |requires| + * - package + - | |version| |conda-forge| |wheel| |supported-versions| |supported-implementations| + | |commits-since| + +.. |docs| image:: https://readthedocs.org/projects/pytest-cov/badge/?style=flat + :target: https://readthedocs.org/projects/pytest-cov + :alt: Documentation Status + +.. |github-actions| image:: https://github.com/pytest-dev/pytest-cov/actions/workflows/test.yml/badge.svg + :alt: GitHub Actions Status + :target: https://github.com/pytest-dev/pytest-cov/actions + +.. |appveyor| image:: https://ci.appveyor.com/api/projects/status/github/pytest-dev/pytest-cov?branch=master&svg=true + :alt: AppVeyor Build Status + :target: https://ci.appveyor.com/project/pytestbot/pytest-cov + +.. |requires| image:: https://requires.io/github/pytest-dev/pytest-cov/requirements.svg?branch=master + :alt: Requirements Status + :target: https://requires.io/github/pytest-dev/pytest-cov/requirements/?branch=master + +.. |version| image:: https://img.shields.io/pypi/v/pytest-cov.svg + :alt: PyPI Package latest release + :target: https://pypi.org/project/pytest-cov + +.. |conda-forge| image:: https://img.shields.io/conda/vn/conda-forge/pytest-cov.svg + :target: https://anaconda.org/conda-forge/pytest-cov + +.. |commits-since| image:: https://img.shields.io/github/commits-since/pytest-dev/pytest-cov/v3.0.0.svg + :alt: Commits since latest release + :target: https://github.com/pytest-dev/pytest-cov/compare/v3.0.0...master + +.. |wheel| image:: https://img.shields.io/pypi/wheel/pytest-cov.svg + :alt: PyPI Wheel + :target: https://pypi.org/project/pytest-cov + +.. |supported-versions| image:: https://img.shields.io/pypi/pyversions/pytest-cov.svg + :alt: Supported versions + :target: https://pypi.org/project/pytest-cov + +.. 
|supported-implementations| image:: https://img.shields.io/pypi/implementation/pytest-cov.svg + :alt: Supported implementations + :target: https://pypi.org/project/pytest-cov + +.. end-badges + +This plugin produces coverage reports. Compared to just using ``coverage run`` this plugin does some extras: + +* Subprocess support: you can fork or run stuff in a subprocess and will get covered without any fuss. +* Xdist support: you can use all of pytest-xdist's features and still get coverage. +* Consistent pytest behavior. If you run ``coverage run -m pytest`` you will have slightly different ``sys.path`` (CWD will be + in it, unlike when running ``pytest``). + +All features offered by the coverage package should work, either through pytest-cov's command line options or +through coverage's config file. + +* Free software: MIT license + +Installation +============ + +Install with pip:: + + pip install pytest-cov + +For distributed testing support install pytest-xdist:: + + pip install pytest-xdist + +Upgrading from ancient pytest-cov +--------------------------------- + +`pytest-cov 2.0` is using a new ``.pth`` file (``pytest-cov.pth``). You may want to manually remove the older +``init_cov_core.pth`` from site-packages as it's not automatically removed. + +Uninstalling +------------ + +Uninstall with pip:: + + pip uninstall pytest-cov + +Under certain scenarios a stray ``.pth`` file may be left around in site-packages. + +* `pytest-cov 2.0` may leave a ``pytest-cov.pth`` if you installed without wheels + (``easy_install``, ``setup.py install`` etc). +* `pytest-cov 1.8 or older` will leave a ``init_cov_core.pth``. + +Usage +===== + +:: + + pytest --cov=myproj tests/ + +Would produce a report like:: + + -------------------- coverage: ... --------------------- + Name Stmts Miss Cover + ---------------------------------------- + myproj/__init__ 2 0 100% + myproj/myproj 257 13 94% + myproj/feature4286 94 7 92% + ---------------------------------------- + TOTAL 353 20 94% + +Documentation +============= + + http://pytest-cov.rtfd.org/ + + + + + + +Coverage Data File +================== + +The data file is erased at the beginning of testing to ensure clean data for each test run. If you +need to combine the coverage of several test runs you can use the ``--cov-append`` option to append +this coverage data to coverage data from previous test runs. + +The data file is left at the end of testing so that it is possible to use normal coverage tools to +examine it. + +Limitations +=========== + +For distributed testing the workers must have the pytest-cov package installed. This is needed since +the plugin must be registered through setuptools for pytest to start the plugin on the +worker. + +For subprocess measurement environment variables must make it from the main process to the +subprocess. The python used by the subprocess must have pytest-cov installed. The subprocess must +do normal site initialisation so that the environment variables can be detected and coverage +started. + + +Acknowledgements +================ + +Whilst this plugin has been built fresh from the ground up it has been influenced by the work done +on pytest-coverage (Ross Lawley, James Mills, Holger Krekel) and nose-cover (Jason Pellerin) which are +other coverage plugins. + +Ned Batchelder for coverage and its ability to combine the coverage results of parallel runs. + +Holger Krekel for pytest with its distributed testing support. + +Jason Pellerin for nose. + +Michael Foord for unittest2. 
+ +No doubt others have contributed to these tools as well. + +Changelog +========= + + +3.0.0 (2021-10-04) +------------------- + +**Note that this release drops support for Python 2.7 and Python 3.5.** + +* Added support for Python 3.10 and updated various test dependencies. + Contributed by Hugo van Kemenade in + `#500 `_. +* Switched from Travis CI to GitHub Actions. Contributed by Hugo van Kemenade in + `#494 `_ and + `#495 `_. +* Add a ``--cov-reset`` CLI option. + Contributed by Danilo Šegan in + `#459 `_. +* Improved validation of ``--cov-fail-under`` CLI option. + Contributed by ... Ronny Pfannschmidt's desire for skark in + `#480 `_. +* Dropped Python 2.7 support. + Contributed by Thomas Grainger in + `#488 `_. +* Updated trove classifiers. Contributed by Michał Bielawski in + `#481 `_. + + +2.13.0 (2021-06-01) +------------------- + +* Changed the `toml` requirement to be always be directly required (instead of being required through a coverage extra). + This fixes issues with pip-compile (`pip-tools#1300 `_). + Contributed by Sorin Sbarnea in `#472 `_. +* Documented ``show_contexts``. + Contributed by Brian Rutledge in `#473 `_. + + +2.12.1 (2021-06-01) +------------------- + +* Changed the `toml` requirement to be always be directly required (instead of being required through a coverage extra). + This fixes issues with pip-compile (`pip-tools#1300 `_). + Contributed by Sorin Sbarnea in `#472 `_. +* Documented ``show_contexts``. + Contributed by Brian Rutledge in `#473 `_. + +2.12.0 (2021-05-14) +------------------- + +* Added coverage's `toml` extra to install requirements in setup.py. + Contributed by Christian Riedel in `#410 `_. +* Fixed ``pytest_cov.__version__`` to have the right value (string with version instead of a string + including ``__version__ =``). +* Fixed license classifier in ``setup.py``. + Contributed by Chris Sreesangkom in `#467 `_. +* Fixed *commits since* badge. + Contributed by Terence Honles in `#470 `_. + +2.11.1 (2021-01-20) +------------------- + +* Fixed support for newer setuptools (v42+). + Contributed by Michał Górny in `#451 `_. + +2.11.0 (2021-01-18) +------------------- + +* Bumped minimum coverage requirement to 5.2.1. This prevents reporting issues. + Contributed by Mateus Berardo de Souza Terra in `#433 `_. +* Improved sample projects (from the `examples `_ + directory) to support running `tox -e pyXY`. Now the example configures a suffixed coverage data file, + and that makes the cleanup environment unnecessary. + Contributed by Ganden Schaffner in `#435 `_. +* Removed the empty `console_scripts` entrypoint that confused some Gentoo build script. + I didn't ask why it was so broken cause I didn't want to ruin my day. + Contributed by Michał Górny in `#434 `_. +* Fixed the missing `coverage context `_ + when using subprocesses. + Contributed by Bernát Gábor in `#443 `_. +* Updated the config section in the docs. + Contributed by Pamela McA'Nulty in `#429 `_. +* Migrated CI to travis-ci.com (from .org). + +2.10.1 (2020-08-14) +------------------- + +* Support for ``pytest-xdist`` 2.0, which breaks compatibility with ``pytest-xdist`` before 1.22.3 (from 2017). + Contributed by Zac Hatfield-Dodds in `#412 `_. +* Fixed the ``LocalPath has no attribute startswith`` failure that occurred when using the ``pytester`` plugin + in inline mode. + +2.10.0 (2020-06-12) +------------------- + +* Improved the ``--no-cov`` warning. Now it's only shown if ``--no-cov`` is present before ``--cov``. +* Removed legacy pytest support. 
Changed ``setup.py`` so that ``pytest>=4.6`` is required. + +2.9.0 (2020-05-22) +------------------ + +* Fixed ``RemovedInPytest4Warning`` when using Pytest 3.10. + Contributed by Michael Manganiello in `#354 `_. +* Made pytest startup faster when plugin not active by lazy-importing. + Contributed by Anders Hovmöller in `#339 `_. +* Various CI improvements. + Contributed by Daniel Hahler in `#363 `_ and + `#364 `_. +* Various Python support updates (drop EOL 3.4, test against 3.8 final). + Contributed by Hugo van Kemenade in + `#336 `_ and + `#367 `_. +* Changed ``--cov-append`` to always enable ``data_suffix`` (a coverage setting). + Contributed by Harm Geerts in + `#387 `_. +* Changed ``--cov-append`` to handle loading previous data better + (fixes various path aliasing issues). +* Various other testing improvements, github issue templates, example updates. +* Fixed internal failures that are caused by tests that change the current working directory by + ensuring a consistent working directory when coverage is called. + See `#306 `_ and + `coveragepy#881 `_ + +2.8.1 (2019-10-05) +------------------ + +* Fixed `#348 `_ - + regression when only certain reports (html or xml) are used then ``--cov-fail-under`` always fails. + +2.8.0 (2019-10-04) +------------------ + +* Fixed ``RecursionError`` that can occur when using + `cleanup_on_signal `__ or + `cleanup_on_sigterm `__. + See: `#294 `_. + The 2.7.x releases of pytest-cov should be considered broken regarding aforementioned cleanup API. +* Added compatibility with future xdist release that deprecates some internals + (match pytest-xdist master/worker terminology). + Contributed by Thomas Grainger in `#321 `_ +* Fixed breakage that occurs when multiple reporting options are used. + Contributed by Thomas Grainger in `#338 `_. +* Changed internals to use a stub instead of ``os.devnull``. + Contributed by Thomas Grainger in `#332 `_. +* Added support for Coverage 5.0. + Contributed by Ned Batchelder in `#319 `_. +* Added support for float values in ``--cov-fail-under``. + Contributed by Martín Gaitán in `#311 `_. +* Various documentation fixes. Contributed by + Juanjo Bazán, + Andrew Murray and + Albert Tugushev in + `#298 `_, + `#299 `_ and + `#307 `_. +* Various testing improvements. Contributed by + Ned Batchelder, + Daniel Hahler, + Ionel Cristian Mărieș and + Hugo van Kemenade in + `#313 `_, + `#314 `_, + `#315 `_, + `#316 `_, + `#325 `_, + `#326 `_, + `#334 `_ and + `#335 `_. +* Added the ``--cov-context`` CLI options that enables coverage contexts. Only works with coverage 5.0+. + Contributed by Ned Batchelder in `#345 `_. + +2.7.1 (2019-05-03) +------------------ + +* Fixed source distribution manifest so that garbage ain't included in the tarball. + +2.7.0 (2019-05-03) +------------------ + +* Fixed ``AttributeError: 'NoneType' object has no attribute 'configure_node'`` error when ``--no-cov`` is used. + Contributed by Alexander Shadchin in `#263 `_. +* Various testing and CI improvements. Contributed by Daniel Hahler in + `#255 `_, + `#266 `_, + `#272 `_, + `#271 `_ and + `#269 `_. +* Improved documentation regarding subprocess and multiprocessing. + Contributed in `#265 `_. +* Improved ``pytest_cov.embed.cleanup_on_sigterm`` to be reentrant (signal deliveries while signal handling is + running won't break stuff). +* Added ``pytest_cov.embed.cleanup_on_signal`` for customized cleanup. +* Improved cleanup code and fixed various issues with leftover data files. All contributed in + `#265 `_ or + `#262 `_. +* Improved examples. 
Now there are two examples for the common project layouts, complete with working coverage + configuration. The examples have CI testing. Contributed in + `#267 `_. +* Improved help text for CLI options. + +2.6.1 (2019-01-07) +------------------ + +* Added support for Pytest 4.1. Contributed by Daniel Hahler and Семён Марьясин in + `#253 `_ and + `#230 `_. +* Various test and docs fixes. Contributed by Daniel Hahler in + `#224 `_ and + `#223 `_. +* Fixed the "Module already imported" issue (`#211 `_). + Contributed by Daniel Hahler in `#228 `_. + +2.6.0 (2018-09-03) +------------------ + +* Dropped support for Python 3 < 3.4, Pytest < 3.5 and Coverage < 4.4. +* Fixed some documentation formatting. Contributed by Jean Jordaan and Julian. +* Added an example with ``addopts`` in documentation. Contributed by Samuel Giffard in + `#195 `_. +* Fixed ``TypeError: 'NoneType' object is not iterable`` in certain xdist configurations. Contributed by Jeremy Bowman in + `#213 `_. +* Added a ``no_cover`` marker and fixture. Fixes + `#78 `_. +* Fixed broken ``no_cover`` check when running doctests. Contributed by Terence Honles in + `#200 `_. +* Fixed various issues with path normalization in reports (when combining coverage data from parallel mode). Fixes + `#130 `_. + Contributed by Ryan Hiebert & Ionel Cristian Mărieș in + `#178 `_. +* Report generation failures don't raise exceptions anymore. A warning will be logged instead. Fixes + `#161 `_. +* Fixed multiprocessing issue on Windows (empty env vars are not passed). Fixes + `#165 `_. + +2.5.1 (2017-05-11) +------------------ + +* Fixed xdist breakage (regression in ``2.5.0``). + Fixes `#157 `_. +* Allow setting custom ``data_file`` name in ``.coveragerc``. + Fixes `#145 `_. + Contributed by Jannis Leidel & Ionel Cristian Mărieș in + `#156 `_. + +2.5.0 (2017-05-09) +------------------ + +* Always show a summary when ``--cov-fail-under`` is used. Contributed by Francis Niu in `PR#141 + `_. +* Added ``--cov-branch`` option. Fixes `#85 `_. +* Improve exception handling in subprocess setup. Fixes `#144 `_. +* Fixed handling when ``--cov`` is used multiple times. Fixes `#151 `_. + +2.4.0 (2016-10-10) +------------------ + +* Added a "disarm" option: ``--no-cov``. It will disable coverage measurements. Contributed by Zoltan Kozma in + `PR#135 `_. + + **WARNING: Do not put this in your configuration files, it's meant to be an one-off for situations where you want to + disable coverage from command line.** +* Fixed broken exception handling on ``.pth`` file. See `#136 `_. + +2.3.1 (2016-08-07) +------------------ + +* Fixed regression causing spurious errors when xdist was used. See `#124 + `_. +* Fixed DeprecationWarning about incorrect `addoption` use. Contributed by Florian Bruhin in `PR#127 + `_. +* Fixed deprecated use of funcarg fixture API. Contributed by Daniel Hahler in `PR#125 + `_. + +2.3.0 (2016-07-05) +------------------ + +* Add support for specifying output location for html, xml, and annotate report. + Contributed by Patrick Lannigan in `PR#113 `_. +* Fix bug hiding test failure when cov-fail-under failed. +* For coverage >= 4.0, match the default behaviour of `coverage report` and + error if coverage fails to find the source instead of just printing a warning. + Contributed by David Szotten in `PR#116 `_. +* Fixed bug occurred when bare ``--cov`` parameter was used with xdist. + Contributed by Michael Elovskikh in `PR#120 `_. +* Add support for ``skip_covered`` and added ``--cov-report=term-skip-covered`` command + line options. 
Contributed by Saurabh Kumar in `PR#115 `_. + +2.2.1 (2016-01-30) +------------------ + +* Fixed incorrect merging of coverage data when xdist was used and coverage was ``>= 4.0``. + +2.2.0 (2015-10-04) +------------------ + +* Added support for changing working directory in tests. Previously changing working + directory would disable coverage measurements in suprocesses. +* Fixed broken handling for ``--cov-report=annotate``. + +2.1.0 (2015-08-23) +------------------ + +* Added support for `coverage 4.0b2`. +* Added the ``--cov-append`` command line options. Contributed by Christian Ledermann + in `PR#80 `_. + +2.0.0 (2015-07-28) +------------------ + +* Added ``--cov-fail-under``, akin to the new ``fail_under`` option in `coverage-4.0` + (automatically activated if there's a ``[report] fail_under = ...`` in ``.coveragerc``). +* Changed ``--cov-report=term`` to automatically upgrade to ``--cov-report=term-missing`` + if there's ``[run] show_missing = True`` in ``.coveragerc``. +* Changed ``--cov`` so it can be used with no path argument (in which case the source + settings from ``.coveragerc`` will be used instead). +* Fixed `.pth` installation to work in all cases (install, easy_install, wheels, develop etc). +* Fixed `.pth` uninstallation to work for wheel installs. +* Support for coverage 4.0. +* Data file suffixing changed to use coverage's ``data_suffix=True`` option (instead of the + custom suffixing). +* Avoid warning about missing coverage data (just like ``coverage.control.process_startup``). +* Fixed a race condition when running with xdist (all the workers tried to combine the files). + It's possible that this issue is not present in `pytest-cov 1.8.X`. + +1.8.2 (2014-11-06) +------------------ + +* N/A + + diff --git a/venv/lib/python3.10/site-packages/pytest_cov-3.0.0.dist-info/RECORD b/venv/lib/python3.10/site-packages/pytest_cov-3.0.0.dist-info/RECORD new file mode 100644 index 0000000..661d5d9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pytest_cov-3.0.0.dist-info/RECORD @@ -0,0 +1,20 @@ +pytest-cov.pth,sha256=uOG4KAkL_sSyVlbQTNCUjT5wIh6oKBrA4SVp7vSK4qY,376 +pytest_cov-3.0.0.dist-info/AUTHORS.rst,sha256=IMOfAE2tRfU28InBoWBCH2_em0Gz6P0febvmeTsAYWA,2314 +pytest_cov-3.0.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +pytest_cov-3.0.0.dist-info/LICENSE,sha256=g1WGrhVnZqJOPBA_vFXZr2saFt9XypMsl0gqJzf9g9U,1071 +pytest_cov-3.0.0.dist-info/METADATA,sha256=wzTCs7skvyI5bYTYu7hvzzgiTv2Dud6Xy6tD-g7lvgA,24340 +pytest_cov-3.0.0.dist-info/RECORD,, +pytest_cov-3.0.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pytest_cov-3.0.0.dist-info/WHEEL,sha256=g4nMs7d-Xl9-xC9XovUrsDHGXt-FT0E17Yqo92DEfvY,92 +pytest_cov-3.0.0.dist-info/entry_points.txt,sha256=DoKvgP5Onz1w_poMszck_wUxA4kw3T7fUXqqerHy8bk,43 +pytest_cov-3.0.0.dist-info/top_level.txt,sha256=HvYHsAFV4MeTUNUwhawY_DKvrpE2lYratTHX_U45oBU,11 +pytest_cov/__init__.py,sha256=o1tGXJwfXWY90mwgdUBT8Bn_EsYkiSfJyLBQscpW00g,93 +pytest_cov/__pycache__/__init__.cpython-310.pyc,, +pytest_cov/__pycache__/compat.cpython-310.pyc,, +pytest_cov/__pycache__/embed.cpython-310.pyc,, +pytest_cov/__pycache__/engine.cpython-310.pyc,, +pytest_cov/__pycache__/plugin.cpython-310.pyc,, +pytest_cov/compat.py,sha256=XGbzGKditj91zTLpka98-9-zQ1aebSAmIQGZ-Arz7b4,708 +pytest_cov/embed.py,sha256=55PFkt8oCkRYsjCqSrA03cvBvbvvuYWZKAKMbccUkGE,3983 +pytest_cov/engine.py,sha256=7TDKYPcySDBPg79lv0Cnq6rZ1afEygFjBGO_trvzk9o,14761 +pytest_cov/plugin.py,sha256=mzCCjo1ZQ7F17XNp4SNJoet1uU-MPU8rhoB5UTpvr0A,14791 diff --git 
a/venv/lib/python3.10/site-packages/pytest_cov-3.0.0.dist-info/REQUESTED b/venv/lib/python3.10/site-packages/pytest_cov-3.0.0.dist-info/REQUESTED new file mode 100644 index 0000000..e69de29 diff --git a/venv/lib/python3.10/site-packages/pytest_cov-3.0.0.dist-info/WHEEL b/venv/lib/python3.10/site-packages/pytest_cov-3.0.0.dist-info/WHEEL new file mode 100644 index 0000000..b552003 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pytest_cov-3.0.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.34.2) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/venv/lib/python3.10/site-packages/pytest_cov-3.0.0.dist-info/entry_points.txt b/venv/lib/python3.10/site-packages/pytest_cov-3.0.0.dist-info/entry_points.txt new file mode 100644 index 0000000..9008b36 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pytest_cov-3.0.0.dist-info/entry_points.txt @@ -0,0 +1,3 @@ +[pytest11] +pytest_cov = pytest_cov.plugin + diff --git a/venv/lib/python3.10/site-packages/pytest_cov-3.0.0.dist-info/top_level.txt b/venv/lib/python3.10/site-packages/pytest_cov-3.0.0.dist-info/top_level.txt new file mode 100644 index 0000000..a2fe281 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pytest_cov-3.0.0.dist-info/top_level.txt @@ -0,0 +1 @@ +pytest_cov diff --git a/venv/lib/python3.10/site-packages/pytest_cov/__init__.py b/venv/lib/python3.10/site-packages/pytest_cov/__init__.py new file mode 100644 index 0000000..fba1d21 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pytest_cov/__init__.py @@ -0,0 +1,2 @@ +"""pytest-cov: avoid already-imported warning: PYTEST_DONT_REWRITE.""" +__version__ = '3.0.0' diff --git a/venv/lib/python3.10/site-packages/pytest_cov/compat.py b/venv/lib/python3.10/site-packages/pytest_cov/compat.py new file mode 100644 index 0000000..f422f25 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pytest_cov/compat.py @@ -0,0 +1,31 @@ +try: + from StringIO import StringIO +except ImportError: + from io import StringIO + +import pytest + +StringIO # pyflakes, this is for re-export + + +if hasattr(pytest, 'hookimpl'): + hookwrapper = pytest.hookimpl(hookwrapper=True) +else: + hookwrapper = pytest.mark.hookwrapper + + +class SessionWrapper: + def __init__(self, session): + self._session = session + if hasattr(session, 'testsfailed'): + self._attr = 'testsfailed' + else: + self._attr = '_testsfailed' + + @property + def testsfailed(self): + return getattr(self._session, self._attr) + + @testsfailed.setter + def testsfailed(self, value): + setattr(self._session, self._attr, value) diff --git a/venv/lib/python3.10/site-packages/pytest_cov/embed.py b/venv/lib/python3.10/site-packages/pytest_cov/embed.py new file mode 100644 index 0000000..3adecdb --- /dev/null +++ b/venv/lib/python3.10/site-packages/pytest_cov/embed.py @@ -0,0 +1,140 @@ +"""Activate coverage at python startup if appropriate. + +The python site initialisation will ensure that anything we import +will be removed and not visible at the end of python startup. However +we minimise all work by putting these init actions in this separate +module and only importing what is needed when needed. + +For normal python startup when coverage should not be activated the pth +file checks a single env var and does not import or call the init fn +here. + +For python startup when an ancestor process has set the env indicating +that code coverage is being collected we activate coverage based on +info passed via env vars. 
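+
+For illustration only (the values below are hypothetical; the variable
+names are the ones read by init() in this module), such an ancestor
+process might export:
+
+    COV_CORE_SOURCE=myproj
+    COV_CORE_CONFIG=/tmp/.coveragerc
+    COV_CORE_DATAFILE=/tmp/.coverage
+    COV_CORE_BRANCH=enabled
+
+before spawning the subprocess; init() then starts an auto-saving,
+suffix-stamped Coverage object configured from exactly these variables.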
+""" +import atexit +import os +import signal + +_active_cov = None + + +def multiprocessing_start(_): + global _active_cov + cov = init() + if cov: + _active_cov = cov + multiprocessing.util.Finalize(None, cleanup, exitpriority=1000) + + +try: + import multiprocessing.util +except ImportError: + pass +else: + multiprocessing.util.register_after_fork(multiprocessing_start, multiprocessing_start) + + +def init(): + # Only continue if ancestor process has set everything needed in + # the env. + global _active_cov + + cov_source = os.environ.get('COV_CORE_SOURCE') + cov_config = os.environ.get('COV_CORE_CONFIG') + cov_datafile = os.environ.get('COV_CORE_DATAFILE') + cov_branch = True if os.environ.get('COV_CORE_BRANCH') == 'enabled' else None + cov_context = os.environ.get('COV_CORE_CONTEXT') + + if cov_datafile: + if _active_cov: + cleanup() + # Import what we need to activate coverage. + import coverage + + # Determine all source roots. + if cov_source in os.pathsep: + cov_source = None + else: + cov_source = cov_source.split(os.pathsep) + if cov_config == os.pathsep: + cov_config = True + + # Activate coverage for this process. + cov = _active_cov = coverage.Coverage( + source=cov_source, + branch=cov_branch, + data_suffix=True, + config_file=cov_config, + auto_data=True, + data_file=cov_datafile + ) + cov.load() + cov.start() + if cov_context: + cov.switch_context(cov_context) + cov._warn_no_data = False + cov._warn_unimported_source = False + return cov + + +def _cleanup(cov): + if cov is not None: + cov.stop() + cov.save() + cov._auto_save = False # prevent autosaving from cov._atexit in case the interpreter lacks atexit.unregister + try: + atexit.unregister(cov._atexit) + except Exception: + pass + + +def cleanup(): + global _active_cov + global _cleanup_in_progress + global _pending_signal + + _cleanup_in_progress = True + _cleanup(_active_cov) + _active_cov = None + _cleanup_in_progress = False + if _pending_signal: + pending_singal = _pending_signal + _pending_signal = None + _signal_cleanup_handler(*pending_singal) + + +multiprocessing_finish = cleanup # in case someone dared to use this internal + +_previous_handlers = {} +_pending_signal = None +_cleanup_in_progress = False + + +def _signal_cleanup_handler(signum, frame): + global _pending_signal + if _cleanup_in_progress: + _pending_signal = signum, frame + return + cleanup() + _previous_handler = _previous_handlers.get(signum) + if _previous_handler == signal.SIG_IGN: + return + elif _previous_handler and _previous_handler is not _signal_cleanup_handler: + _previous_handler(signum, frame) + elif signum == signal.SIGTERM: + os._exit(128 + signum) + elif signum == signal.SIGINT: + raise KeyboardInterrupt() + + +def cleanup_on_signal(signum): + previous = signal.getsignal(signum) + if previous is not _signal_cleanup_handler: + _previous_handlers[signum] = previous + signal.signal(signum, _signal_cleanup_handler) + + +def cleanup_on_sigterm(): + cleanup_on_signal(signal.SIGTERM) diff --git a/venv/lib/python3.10/site-packages/pytest_cov/engine.py b/venv/lib/python3.10/site-packages/pytest_cov/engine.py new file mode 100644 index 0000000..0303c2f --- /dev/null +++ b/venv/lib/python3.10/site-packages/pytest_cov/engine.py @@ -0,0 +1,402 @@ +"""Coverage controllers for use by pytest-cov and nose-cov.""" +import contextlib +import copy +import functools +import os +import random +import socket +import sys + +import coverage +from coverage.data import CoverageData + +from .compat import StringIO +from .embed import cleanup + + +class 
_NullFile: + @staticmethod + def write(v): + pass + + +@contextlib.contextmanager +def _backup(obj, attr): + backup = getattr(obj, attr) + try: + setattr(obj, attr, copy.copy(backup)) + yield + finally: + setattr(obj, attr, backup) + + +def _ensure_topdir(meth): + @functools.wraps(meth) + def ensure_topdir_wrapper(self, *args, **kwargs): + try: + original_cwd = os.getcwd() + except OSError: + # Looks like it's gone, this is non-ideal because a side-effect will + # be introduced in the tests here but we can't do anything about it. + original_cwd = None + os.chdir(self.topdir) + try: + return meth(self, *args, **kwargs) + finally: + if original_cwd is not None: + os.chdir(original_cwd) + + return ensure_topdir_wrapper + + +class CovController: + """Base class for different plugin implementations.""" + + def __init__(self, cov_source, cov_report, cov_config, cov_append, cov_branch, config=None, nodeid=None): + """Get some common config used by multiple derived classes.""" + self.cov_source = cov_source + self.cov_report = cov_report + self.cov_config = cov_config + self.cov_append = cov_append + self.cov_branch = cov_branch + self.config = config + self.nodeid = nodeid + + self.cov = None + self.combining_cov = None + self.data_file = None + self.node_descs = set() + self.failed_workers = [] + self.topdir = os.getcwd() + self.is_collocated = None + + @contextlib.contextmanager + def ensure_topdir(self): + original_cwd = os.getcwd() + os.chdir(self.topdir) + yield + os.chdir(original_cwd) + + @_ensure_topdir + def pause(self): + self.cov.stop() + self.unset_env() + + @_ensure_topdir + def resume(self): + self.cov.start() + self.set_env() + + @_ensure_topdir + def set_env(self): + """Put info about coverage into the env so that subprocesses can activate coverage.""" + if self.cov_source is None: + os.environ['COV_CORE_SOURCE'] = os.pathsep + else: + os.environ['COV_CORE_SOURCE'] = os.pathsep.join(self.cov_source) + config_file = os.path.abspath(self.cov_config) + if os.path.exists(config_file): + os.environ['COV_CORE_CONFIG'] = config_file + else: + os.environ['COV_CORE_CONFIG'] = os.pathsep + os.environ['COV_CORE_DATAFILE'] = os.path.abspath(self.cov.config.data_file) + if self.cov_branch: + os.environ['COV_CORE_BRANCH'] = 'enabled' + + @staticmethod + def unset_env(): + """Remove coverage info from env.""" + os.environ.pop('COV_CORE_SOURCE', None) + os.environ.pop('COV_CORE_CONFIG', None) + os.environ.pop('COV_CORE_DATAFILE', None) + os.environ.pop('COV_CORE_BRANCH', None) + os.environ.pop('COV_CORE_CONTEXT', None) + + @staticmethod + def get_node_desc(platform, version_info): + """Return a description of this node.""" + + return 'platform {}, python {}'.format(platform, '%s.%s.%s-%s-%s' % version_info[:5]) + + @staticmethod + def sep(stream, s, txt): + if hasattr(stream, 'sep'): + stream.sep(s, txt) + else: + sep_total = max((70 - 2 - len(txt)), 2) + sep_len = sep_total // 2 + sep_extra = sep_total % 2 + out = f'{s * sep_len} {txt} {s * (sep_len + sep_extra)}\n' + stream.write(out) + + @_ensure_topdir + def summary(self, stream): + """Produce coverage reports.""" + total = None + + if not self.cov_report: + with _backup(self.cov, "config"): + return self.cov.report(show_missing=True, ignore_errors=True, file=_NullFile) + + # Output coverage section header. 
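# ----------------------------------------------------------------------
# Editorial aside, not part of the upstream file: the _NullFile report in
# summary() above exists because Coverage.report() is the call that hands
# back the total percentage, which --cov-fail-under needs even when no
# report output was requested. A standalone sketch of the same trick,
# assuming a .coverage data file already exists (the 90.0 threshold is
# hypothetical):
#
#     import io
#
#     import coverage
#
#     cov = coverage.Coverage()
#     cov.load()
#     total = cov.report(ignore_errors=True, file=io.StringIO())
#     if total < 90.0:
#         raise SystemExit('total coverage below the required 90%')
# ----------------------------------------------------------------------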
+ if len(self.node_descs) == 1: + self.sep(stream, '-', 'coverage: %s' % ''.join(self.node_descs)) + else: + self.sep(stream, '-', 'coverage') + for node_desc in sorted(self.node_descs): + self.sep(stream, ' ', '%s' % node_desc) + + # Report on any failed workers. + if self.failed_workers: + self.sep(stream, '-', 'coverage: failed workers') + stream.write('The following workers failed to return coverage data, ' + 'ensure that pytest-cov is installed on these workers.\n') + for node in self.failed_workers: + stream.write('%s\n' % node.gateway.id) + + # Produce terminal report if wanted. + if any(x in self.cov_report for x in ['term', 'term-missing']): + options = { + 'show_missing': ('term-missing' in self.cov_report) or None, + 'ignore_errors': True, + 'file': stream, + } + skip_covered = isinstance(self.cov_report, dict) and 'skip-covered' in self.cov_report.values() + options.update({'skip_covered': skip_covered or None}) + with _backup(self.cov, "config"): + total = self.cov.report(**options) + + # Produce annotated source code report if wanted. + if 'annotate' in self.cov_report: + annotate_dir = self.cov_report['annotate'] + + with _backup(self.cov, "config"): + self.cov.annotate(ignore_errors=True, directory=annotate_dir) + # We need to call Coverage.report here, just to get the total + # Coverage.annotate doesn't return any total and we need it for --cov-fail-under. + + with _backup(self.cov, "config"): + total = self.cov.report(ignore_errors=True, file=_NullFile) + if annotate_dir: + stream.write('Coverage annotated source written to dir %s\n' % annotate_dir) + else: + stream.write('Coverage annotated source written next to source\n') + + # Produce html report if wanted. + if 'html' in self.cov_report: + output = self.cov_report['html'] + with _backup(self.cov, "config"): + total = self.cov.html_report(ignore_errors=True, directory=output) + stream.write('Coverage HTML written to dir %s\n' % (self.cov.config.html_dir if output is None else output)) + + # Produce xml report if wanted. + if 'xml' in self.cov_report: + output = self.cov_report['xml'] + with _backup(self.cov, "config"): + total = self.cov.xml_report(ignore_errors=True, outfile=output) + stream.write('Coverage XML written to file %s\n' % (self.cov.config.xml_output if output is None else output)) + + return total + + +class Central(CovController): + """Implementation for centralised operation.""" + + @_ensure_topdir + def start(self): + cleanup() + + self.cov = coverage.Coverage(source=self.cov_source, + branch=self.cov_branch, + data_suffix=True, + config_file=self.cov_config) + self.combining_cov = coverage.Coverage(source=self.cov_source, + branch=self.cov_branch, + data_suffix=True, + data_file=os.path.abspath(self.cov.config.data_file), + config_file=self.cov_config) + + # Erase or load any previous coverage data and start coverage. + if not self.cov_append: + self.cov.erase() + self.cov.start() + self.set_env() + + @_ensure_topdir + def finish(self): + """Stop coverage, save data to file and set the list of coverage objects to report on.""" + + self.unset_env() + self.cov.stop() + self.cov.save() + + self.cov = self.combining_cov + self.cov.load() + self.cov.combine() + self.cov.save() + + node_desc = self.get_node_desc(sys.platform, sys.version_info) + self.node_descs.add(node_desc) + + +class DistMaster(CovController): + """Implementation for distributed master.""" + + @_ensure_topdir + def start(self): + cleanup() + + # Ensure coverage rc file rsynced if appropriate. 
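# ----------------------------------------------------------------------
# Editorial aside, not part of the upstream file: Central.start()/finish()
# above follow coverage.py's standard parallel-measurement pattern --
# data_suffix=True makes each process write its own
# .coverage.<host>.<pid>.<random> file, and combine() later folds them
# into a single data file. The bare-API equivalent (the measured import
# is a stand-in for real code under test):
#
#     import coverage
#
#     cov = coverage.Coverage(data_suffix=True)
#     cov.start()
#     import json  # code under measurement
#     cov.stop()
#     cov.save()              # writes .coverage.<suffix>
#
#     merged = coverage.Coverage()
#     merged.combine()        # merges every .coverage.* in the directory
#     merged.save()
# ----------------------------------------------------------------------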
+ if self.cov_config and os.path.exists(self.cov_config): + self.config.option.rsyncdir.append(self.cov_config) + + self.cov = coverage.Coverage(source=self.cov_source, + branch=self.cov_branch, + data_suffix=True, + config_file=self.cov_config) + self.cov._warn_no_data = False + self.cov._warn_unimported_source = False + self.cov._warn_preimported_source = False + self.combining_cov = coverage.Coverage(source=self.cov_source, + branch=self.cov_branch, + data_suffix=True, + data_file=os.path.abspath(self.cov.config.data_file), + config_file=self.cov_config) + if not self.cov_append: + self.cov.erase() + self.cov.start() + self.cov.config.paths['source'] = [self.topdir] + + def configure_node(self, node): + """Workers need to know if they are collocated and what files have moved.""" + + node.workerinput.update({ + 'cov_master_host': socket.gethostname(), + 'cov_master_topdir': self.topdir, + 'cov_master_rsync_roots': [str(root) for root in node.nodemanager.roots], + }) + + def testnodedown(self, node, error): + """Collect data file name from worker.""" + + # If worker doesn't return any data then it is likely that this + # plugin didn't get activated on the worker side. + output = getattr(node, 'workeroutput', {}) + if 'cov_worker_node_id' not in output: + self.failed_workers.append(node) + return + + # If worker is not collocated then we must save the data file + # that it returns to us. + if 'cov_worker_data' in output: + data_suffix = '%s.%s.%06d.%s' % ( + socket.gethostname(), os.getpid(), + random.randint(0, 999999), + output['cov_worker_node_id'] + ) + + cov = coverage.Coverage(source=self.cov_source, + branch=self.cov_branch, + data_suffix=data_suffix, + config_file=self.cov_config) + cov.start() + if coverage.version_info < (5, 0): + data = CoverageData() + data.read_fileobj(StringIO(output['cov_worker_data'])) + cov.data.update(data) + else: + data = CoverageData(no_disk=True) + data.loads(output['cov_worker_data']) + cov.get_data().update(data) + cov.stop() + cov.save() + path = output['cov_worker_path'] + self.cov.config.paths['source'].append(path) + + # Record the worker types that contribute to the data file. + rinfo = node.gateway._rinfo() + node_desc = self.get_node_desc(rinfo.platform, rinfo.version_info) + self.node_descs.add(node_desc) + + @_ensure_topdir + def finish(self): + """Combines coverage data and sets the list of coverage objects to report on.""" + + # Combine all the suffix files into the data file. + self.cov.stop() + self.cov.save() + self.cov = self.combining_cov + self.cov.load() + self.cov.combine() + self.cov.save() + + +class DistWorker(CovController): + """Implementation for distributed workers.""" + + @_ensure_topdir + def start(self): + + cleanup() + + # Determine whether we are collocated with master. + self.is_collocated = (socket.gethostname() == self.config.workerinput['cov_master_host'] and + self.topdir == self.config.workerinput['cov_master_topdir']) + + # If we are not collocated then rewrite master paths to worker paths. + if not self.is_collocated: + master_topdir = self.config.workerinput['cov_master_topdir'] + worker_topdir = self.topdir + if self.cov_source is not None: + self.cov_source = [source.replace(master_topdir, worker_topdir) + for source in self.cov_source] + self.cov_config = self.cov_config.replace(master_topdir, worker_topdir) + + # Erase any previous data and start coverage. 
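# ----------------------------------------------------------------------
# Editorial aside, not part of the upstream file: the master/worker
# exchange in testnodedown() above leans on the CoverageData
# serialization available in coverage.py >= 5. A minimal round-trip,
# with hypothetical file and line numbers:
#
#     from coverage.data import CoverageData
#
#     src = CoverageData(no_disk=True)
#     src.add_lines({'pkg/mod.py': [1, 2, 7]})
#     blob = src.dumps()      # bytes, safe to ship over the xdist channel
#
#     dst = CoverageData(no_disk=True)
#     dst.loads(blob)
#     assert sorted(dst.lines('pkg/mod.py')) == [1, 2, 7]
# ----------------------------------------------------------------------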
+ self.cov = coverage.Coverage(source=self.cov_source, + branch=self.cov_branch, + data_suffix=True, + config_file=self.cov_config) + self.cov.start() + self.set_env() + + @_ensure_topdir + def finish(self): + """Stop coverage and send relevant info back to the master.""" + self.unset_env() + self.cov.stop() + + if self.is_collocated: + # We don't combine data if we're collocated - we can get + # race conditions in the .combine() call (it's not atomic) + # The data is going to be combined in the master. + self.cov.save() + + # If we are collocated then just inform the master of our + # data file to indicate that we have finished. + self.config.workeroutput['cov_worker_node_id'] = self.nodeid + else: + self.cov.combine() + self.cov.save() + # If we are not collocated then add the current path + # and coverage data to the output so we can combine + # it on the master node. + + # Send all the data to the master over the channel. + if coverage.version_info < (5, 0): + buff = StringIO() + self.cov.data.write_fileobj(buff) + data = buff.getvalue() + else: + data = self.cov.get_data().dumps() + + self.config.workeroutput.update({ + 'cov_worker_path': self.topdir, + 'cov_worker_node_id': self.nodeid, + 'cov_worker_data': data, + }) + + def summary(self, stream): + """Only the master reports so do nothing.""" + + pass diff --git a/venv/lib/python3.10/site-packages/pytest_cov/plugin.py b/venv/lib/python3.10/site-packages/pytest_cov/plugin.py new file mode 100644 index 0000000..94a1e49 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pytest_cov/plugin.py @@ -0,0 +1,404 @@ +"""Coverage plugin for pytest.""" +import argparse +import os +import warnings + +import coverage +import pytest + +from . import compat +from . import embed + + +class CoverageError(Exception): + """Indicates that our coverage is too low""" + + +class PytestCovWarning(pytest.PytestWarning): + """ + The base for all pytest-cov warnings, never raised directly + """ + + +class CovDisabledWarning(PytestCovWarning): + """Indicates that Coverage was manually disabled""" + + +class CovReportWarning(PytestCovWarning): + """Indicates that we failed to generate a report""" + + +def validate_report(arg): + file_choices = ['annotate', 'html', 'xml'] + term_choices = ['term', 'term-missing'] + term_modifier_choices = ['skip-covered'] + all_choices = term_choices + file_choices + values = arg.split(":", 1) + report_type = values[0] + if report_type not in all_choices + ['']: + msg = f'invalid choice: "{arg}" (choose from "{all_choices}")' + raise argparse.ArgumentTypeError(msg) + + if len(values) == 1: + return report_type, None + + report_modifier = values[1] + if report_type in term_choices and report_modifier in term_modifier_choices: + return report_type, report_modifier + + if report_type not in file_choices: + msg = 'output specifier not supported for: "{}" (choose from "{}")'.format(arg, + file_choices) + raise argparse.ArgumentTypeError(msg) + + return values + + +def validate_fail_under(num_str): + try: + value = int(num_str) + except ValueError: + try: + value = float(num_str) + except ValueError: + raise argparse.ArgumentTypeError('An integer or float value is required.') + if value > 100: + raise argparse.ArgumentTypeError('Your desire for over-achievement is admirable but misplaced. ' + 'The maximum value is 100. 
Perhaps write more integration tests?') + return value + + +def validate_context(arg): + if coverage.version_info <= (5, 0): + raise argparse.ArgumentTypeError('Contexts are only supported with coverage.py >= 5.x') + if arg != "test": + raise argparse.ArgumentTypeError('The only supported value is "test".') + return arg + + +class StoreReport(argparse.Action): + def __call__(self, parser, namespace, values, option_string=None): + report_type, file = values + namespace.cov_report[report_type] = file + + +def pytest_addoption(parser): + """Add options to control coverage.""" + + group = parser.getgroup( + 'cov', 'coverage reporting with distributed testing support') + group.addoption('--cov', action='append', default=[], metavar='SOURCE', + nargs='?', const=True, dest='cov_source', + help='Path or package name to measure during execution (multi-allowed). ' + 'Use --cov= to not do any source filtering and record everything.') + group.addoption('--cov-reset', action='store_const', const=[], dest='cov_source', + help='Reset cov sources accumulated in options so far. ') + group.addoption('--cov-report', action=StoreReport, default={}, + metavar='TYPE', type=validate_report, + help='Type of report to generate: term, term-missing, ' + 'annotate, html, xml (multi-allowed). ' + 'term, term-missing may be followed by ":skip-covered". ' + 'annotate, html and xml may be followed by ":DEST" ' + 'where DEST specifies the output location. ' + 'Use --cov-report= to not generate any output.') + group.addoption('--cov-config', action='store', default='.coveragerc', + metavar='PATH', + help='Config file for coverage. Default: .coveragerc') + group.addoption('--no-cov-on-fail', action='store_true', default=False, + help='Do not report coverage if test run fails. ' + 'Default: False') + group.addoption('--no-cov', action='store_true', default=False, + help='Disable coverage report completely (useful for debuggers). ' + 'Default: False') + group.addoption('--cov-fail-under', action='store', metavar='MIN', + type=validate_fail_under, + help='Fail if the total coverage is less than MIN.') + group.addoption('--cov-append', action='store_true', default=False, + help='Do not delete coverage but append to current. ' + 'Default: False') + group.addoption('--cov-branch', action='store_true', default=None, + help='Enable branch coverage.') + group.addoption('--cov-context', action='store', metavar='CONTEXT', + type=validate_context, + help='Dynamic contexts to use. "test" for now.') + + +def _prepare_cov_source(cov_source): + """ + Prepare cov_source so that: + + --cov --cov=foobar is equivalent to --cov (cov_source=None) + --cov=foo --cov=bar is equivalent to cov_source=['foo', 'bar'] + """ + return None if True in cov_source else [path for path in cov_source if path is not True] + + +@pytest.mark.tryfirst +def pytest_load_initial_conftests(early_config, parser, args): + options = early_config.known_args_namespace + no_cov = options.no_cov_should_warn = False + for arg in args: + arg = str(arg) + if arg == '--no-cov': + no_cov = True + elif arg.startswith('--cov') and no_cov: + options.no_cov_should_warn = True + break + + if early_config.known_args_namespace.cov_source: + plugin = CovPlugin(options, early_config.pluginmanager) + early_config.pluginmanager.register(plugin, '_cov') + + +class CovPlugin: + """Use coverage package to produce code coverage reports. + + Delegates all work to a particular implementation based on whether + this test process is centralised, a distributed master or a + distributed worker. 
+ """ + + def __init__(self, options, pluginmanager, start=True, no_cov_should_warn=False): + """Creates a coverage pytest plugin. + + We read the rc file that coverage uses to get the data file + name. This is needed since we give coverage through it's API + the data file name. + """ + + # Our implementation is unknown at this time. + self.pid = None + self.cov_controller = None + self.cov_report = compat.StringIO() + self.cov_total = None + self.failed = False + self._started = False + self._start_path = None + self._disabled = False + self.options = options + + is_dist = (getattr(options, 'numprocesses', False) or + getattr(options, 'distload', False) or + getattr(options, 'dist', 'no') != 'no') + if getattr(options, 'no_cov', False): + self._disabled = True + return + + if not self.options.cov_report: + self.options.cov_report = ['term'] + elif len(self.options.cov_report) == 1 and '' in self.options.cov_report: + self.options.cov_report = {} + self.options.cov_source = _prepare_cov_source(self.options.cov_source) + + # import engine lazily here to avoid importing + # it for unit tests that don't need it + from . import engine + + if is_dist and start: + self.start(engine.DistMaster) + elif start: + self.start(engine.Central) + + # worker is started in pytest hook + + def start(self, controller_cls, config=None, nodeid=None): + + if config is None: + # fake config option for engine + class Config: + option = self.options + + config = Config() + + self.cov_controller = controller_cls( + self.options.cov_source, + self.options.cov_report, + self.options.cov_config, + self.options.cov_append, + self.options.cov_branch, + config, + nodeid + ) + self.cov_controller.start() + self._started = True + self._start_path = os.getcwd() + cov_config = self.cov_controller.cov.config + if self.options.cov_fail_under is None and hasattr(cov_config, 'fail_under'): + self.options.cov_fail_under = cov_config.fail_under + + def _is_worker(self, session): + return getattr(session.config, 'workerinput', None) is not None + + def pytest_sessionstart(self, session): + """At session start determine our implementation and delegate to it.""" + + if self.options.no_cov: + # Coverage can be disabled because it does not cooperate with debuggers well. + self._disabled = True + return + + # import engine lazily here to avoid importing + # it for unit tests that don't need it + from . import engine + + self.pid = os.getpid() + if self._is_worker(session): + nodeid = ( + session.config.workerinput.get('workerid', getattr(session, 'nodeid')) + ) + self.start(engine.DistWorker, session.config, nodeid) + elif not self._started: + self.start(engine.Central) + + if self.options.cov_context == 'test': + session.config.pluginmanager.register(TestContextPlugin(self.cov_controller.cov), '_cov_contexts') + + def pytest_configure_node(self, node): + """Delegate to our implementation. + + Mark this hook as optional in case xdist is not installed. + """ + if not self._disabled: + self.cov_controller.configure_node(node) + pytest_configure_node.optionalhook = True + + def pytest_testnodedown(self, node, error): + """Delegate to our implementation. + + Mark this hook as optional in case xdist is not installed. 
+ """ + if not self._disabled: + self.cov_controller.testnodedown(node, error) + pytest_testnodedown.optionalhook = True + + def _should_report(self): + return not (self.failed and self.options.no_cov_on_fail) + + def _failed_cov_total(self): + cov_fail_under = self.options.cov_fail_under + return cov_fail_under is not None and self.cov_total < cov_fail_under + + # we need to wrap pytest_runtestloop. by the time pytest_sessionfinish + # runs, it's too late to set testsfailed + @compat.hookwrapper + def pytest_runtestloop(self, session): + yield + + if self._disabled: + return + + compat_session = compat.SessionWrapper(session) + + self.failed = bool(compat_session.testsfailed) + if self.cov_controller is not None: + self.cov_controller.finish() + + if not self._is_worker(session) and self._should_report(): + + # import coverage lazily here to avoid importing + # it for unit tests that don't need it + from coverage.misc import CoverageException + + try: + self.cov_total = self.cov_controller.summary(self.cov_report) + except CoverageException as exc: + message = 'Failed to generate report: %s\n' % exc + session.config.pluginmanager.getplugin("terminalreporter").write( + 'WARNING: %s\n' % message, red=True, bold=True) + warnings.warn(CovReportWarning(message)) + self.cov_total = 0 + assert self.cov_total is not None, 'Test coverage should never be `None`' + if self._failed_cov_total(): + # make sure we get the EXIT_TESTSFAILED exit code + compat_session.testsfailed += 1 + + def pytest_terminal_summary(self, terminalreporter): + if self._disabled: + if self.options.no_cov_should_warn: + message = 'Coverage disabled via --no-cov switch!' + terminalreporter.write('WARNING: %s\n' % message, red=True, bold=True) + warnings.warn(CovDisabledWarning(message)) + return + if self.cov_controller is None: + return + + if self.cov_total is None: + # we shouldn't report, or report generation failed (error raised above) + return + + terminalreporter.write('\n' + self.cov_report.getvalue() + '\n') + + if self.options.cov_fail_under is not None and self.options.cov_fail_under > 0: + failed = self.cov_total < self.options.cov_fail_under + markup = {'red': True, 'bold': True} if failed else {'green': True} + message = ( + '{fail}Required test coverage of {required}% {reached}. 
' + 'Total coverage: {actual:.2f}%\n' + .format( + required=self.options.cov_fail_under, + actual=self.cov_total, + fail="FAIL " if failed else "", + reached="not reached" if failed else "reached" + ) + ) + terminalreporter.write(message, **markup) + + def pytest_runtest_setup(self, item): + if os.getpid() != self.pid: + # test is run in another process than session, run + # coverage manually + embed.init() + + def pytest_runtest_teardown(self, item): + embed.cleanup() + + @compat.hookwrapper + def pytest_runtest_call(self, item): + if (item.get_closest_marker('no_cover') + or 'no_cover' in getattr(item, 'fixturenames', ())): + self.cov_controller.pause() + yield + self.cov_controller.resume() + else: + yield + + +class TestContextPlugin: + def __init__(self, cov): + self.cov = cov + + def pytest_runtest_setup(self, item): + self.switch_context(item, 'setup') + + def pytest_runtest_teardown(self, item): + self.switch_context(item, 'teardown') + + def pytest_runtest_call(self, item): + self.switch_context(item, 'run') + + def switch_context(self, item, when): + context = f"{item.nodeid}|{when}" + self.cov.switch_context(context) + os.environ['COV_CORE_CONTEXT'] = context + + +@pytest.fixture +def no_cover(): + """A pytest fixture to disable coverage.""" + pass + + +@pytest.fixture +def cov(request): + """A pytest fixture to provide access to the underlying coverage object.""" + + # Check with hasplugin to avoid getplugin exception in older pytest. + if request.config.pluginmanager.hasplugin('_cov'): + plugin = request.config.pluginmanager.getplugin('_cov') + if plugin.cov_controller: + return plugin.cov_controller.cov + return None + + +def pytest_configure(config): + config.addinivalue_line("markers", "no_cover: disable coverage for this test.") diff --git a/venv/lib/python3.10/site-packages/pytest_forked-1.6.0.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/pytest_forked-1.6.0.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/venv/lib/python3.10/site-packages/pytest_forked-1.6.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/lib/python3.10/site-packages/pytest_forked-1.6.0.dist-info/LICENSE b/venv/lib/python3.10/site-packages/pytest_forked-1.6.0.dist-info/LICENSE new file mode 100644 index 0000000..ff33b8f --- /dev/null +++ b/venv/lib/python3.10/site-packages/pytest_forked-1.6.0.dist-info/LICENSE @@ -0,0 +1,18 @@ + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. 
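Stepping past the license boilerplate for a moment: the pytest_cov/plugin.py hunk above is the surface that test suites actually touch. It registers a ``no_cover`` marker/fixture that pauses measurement around a test, and a ``cov`` fixture that exposes the live Coverage object (or ``None`` when the plugin is inactive). A minimal consumer, with hypothetical test names::

    import pytest

    def test_can_inspect_coverage(cov):
        # `cov` is the underlying coverage.Coverage object when pytest
        # was started with --cov, otherwise None.
        if cov is not None:
            assert cov.config.data_file

    @pytest.mark.no_cover
    def test_hot_loop_without_measurement():
        # pytest_runtest_call() above pauses the controller for tests
        # carrying the no_cover marker or fixture.
        assert sum(range(100)) == 4950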
diff --git a/venv/lib/python3.10/site-packages/pytest_forked-1.6.0.dist-info/METADATA b/venv/lib/python3.10/site-packages/pytest_forked-1.6.0.dist-info/METADATA new file mode 100644 index 0000000..aac572f --- /dev/null +++ b/venv/lib/python3.10/site-packages/pytest_forked-1.6.0.dist-info/METADATA @@ -0,0 +1,110 @@ +Metadata-Version: 2.1 +Name: pytest-forked +Version: 1.6.0 +Summary: run tests in isolated forked subprocesses +Home-page: https://github.com/pytest-dev/pytest-forked +Author: pytest-dev +Author-email: pytest-dev@python.org +License: MIT +Platform: linux +Platform: osx +Classifier: Development Status :: 7 - Inactive +Classifier: Framework :: Pytest +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: POSIX +Classifier: Operating System :: MacOS :: MacOS X +Classifier: Topic :: Software Development :: Testing +Classifier: Topic :: Software Development :: Quality Assurance +Classifier: Topic :: Utilities +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3 :: Only +Requires-Python: >=3.7 +Description-Content-Type: text/x-rst +License-File: LICENSE +Requires-Dist: py +Requires-Dist: pytest (>=3.10) + +pytest-forked: run each test in a forked subprocess +==================================================== + + +.. warning:: + + this is an extraction of the xdist --forked module, + future maintenance beyond the bare minimum is not planned until a new maintainer is found. + + +This plugin **does not work on Windows** because there's no ``fork`` support. + + +* ``--forked``: run each test in a forked + subprocess to survive ``SEGFAULTS`` or otherwise dying processes. + +|python| |version| |ci| |pre-commit| |black| + +.. |version| image:: http://img.shields.io/pypi/v/pytest-forked.svg + :target: https://pypi.python.org/pypi/pytest-forked + +.. |ci| image:: https://github.com/pytest-dev/pytest-forked/workflows/build/badge.svg + :target: https://github.com/pytest-dev/pytest-forked/actions + +.. |python| image:: https://img.shields.io/pypi/pyversions/pytest-forked.svg + :target: https://pypi.python.org/pypi/pytest-forked/ + +.. |black| image:: https://img.shields.io/badge/code%20style-black-000000.svg + :target: https://github.com/ambv/black + +.. |pre-commit| image:: https://results.pre-commit.ci/badge/github/pytest-dev/pytest-forked/master.svg + :target: https://results.pre-commit.ci/latest/github/pytest-dev/pytest-forked/master + +Installation +----------------------- + +Install the plugin with:: + + pip install pytest-forked + +or use the package in develop/in-place mode with +a checkout of the `pytest-forked repository`_ :: + + pip install -e . + + +Usage examples +--------------------- + +If you have tests involving C or C++ libraries you might have to deal +with tests crashing the process. For this case you may use the boxing +options:: + + pytest --forked + +which will run each test in a subprocess and will report if a test +crashed the process. 
You can also combine this option with running multiple processes via pytest-xdist to speed up the test run and use your CPU cores:: + + pytest -n3 --forked + +this would run 3 testing subprocesses in parallel which each +create new forked subprocesses for each test. + + +You can also fork for individual tests:: + + @pytest.mark.forked + def test_with_leaky_state(): + run_some_monkey_patches() + + +This test will be unconditionally boxed, regardless of CLI flag. + + +.. _`pytest-forked repository`: https://github.com/pytest-dev/pytest-forked diff --git a/venv/lib/python3.10/site-packages/pytest_forked-1.6.0.dist-info/RECORD b/venv/lib/python3.10/site-packages/pytest_forked-1.6.0.dist-info/RECORD new file mode 100644 index 0000000..f4d212d --- /dev/null +++ b/venv/lib/python3.10/site-packages/pytest_forked-1.6.0.dist-info/RECORD @@ -0,0 +1,9 @@ +pytest_forked-1.6.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +pytest_forked-1.6.0.dist-info/LICENSE,sha256=6J7tEHTTqUMZi6E5uAhE9bRFuGC7p0qK6twGEFZhZOo,1054 +pytest_forked-1.6.0.dist-info/METADATA,sha256=GsdogjTJnvV0AdYTxOvUKvhCxvtr9G5_DEwOuRGBTvE,3485 +pytest_forked-1.6.0.dist-info/RECORD,, +pytest_forked-1.6.0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92 +pytest_forked-1.6.0.dist-info/entry_points.txt,sha256=ZDOW6OO75F5ejdfcA3-DX0fHF8hXrw0SoZC5SnEdUcI,41 +pytest_forked-1.6.0.dist-info/top_level.txt,sha256=OSv0m7AwBLvJfHoAFz_WXLXzqSGSixscE8yLRHqgIls,14 +pytest_forked/__init__.py,sha256=lN9sxfHoPRr7NNiF47WwSbHomJ0nQmGYSRCn7SYVlaU,3665 +pytest_forked/__pycache__/__init__.cpython-310.pyc,, diff --git a/venv/lib/python3.10/site-packages/wheel-0.38.4.dist-info/WHEEL b/venv/lib/python3.10/site-packages/pytest_forked-1.6.0.dist-info/WHEEL similarity index 100% rename from venv/lib/python3.10/site-packages/wheel-0.38.4.dist-info/WHEEL rename to venv/lib/python3.10/site-packages/pytest_forked-1.6.0.dist-info/WHEEL diff --git a/venv/lib/python3.10/site-packages/pytest_forked-1.6.0.dist-info/entry_points.txt b/venv/lib/python3.10/site-packages/pytest_forked-1.6.0.dist-info/entry_points.txt new file mode 100644 index 0000000..46a4c9e --- /dev/null +++ b/venv/lib/python3.10/site-packages/pytest_forked-1.6.0.dist-info/entry_points.txt @@ -0,0 +1,2 @@ +[pytest11] +pytest_forked = pytest_forked diff --git a/venv/lib/python3.10/site-packages/pytest_forked-1.6.0.dist-info/top_level.txt b/venv/lib/python3.10/site-packages/pytest_forked-1.6.0.dist-info/top_level.txt new file mode 100644 index 0000000..ea481dd --- /dev/null +++ b/venv/lib/python3.10/site-packages/pytest_forked-1.6.0.dist-info/top_level.txt @@ -0,0 +1 @@ +pytest_forked diff --git a/venv/lib/python3.10/site-packages/pytest_forked/__init__.py b/venv/lib/python3.10/site-packages/pytest_forked/__init__.py new file mode 100644 index 0000000..a4e5d94 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pytest_forked/__init__.py @@ -0,0 +1,126 @@ +import os +import warnings + +import py +import pytest +from _pytest import runner + +# we know this bit is bad, but we can't help it with the current pytest setup + + +# copied from xdist remote +def serialize_report(rep): + import py + + d = rep.__dict__.copy() + if hasattr(rep.longrepr, "toterminal"): + d["longrepr"] = str(rep.longrepr) + else: + d["longrepr"] = rep.longrepr + for name in d: + if isinstance(d[name], py.path.local): + d[name] = str(d[name]) + elif name == "result": + d[name] = None # for now + return d + + +def pytest_addoption(parser): + group = parser.getgroup("forked", "forked subprocess 
test execution") + group.addoption( + "--forked", + action="store_true", + dest="forked", + default=False, + help="box each test run in a separate process (unix)", + ) + + +def pytest_load_initial_conftests(early_config, parser, args): + early_config.addinivalue_line( + "markers", + "forked: Always fork for this test.", + ) + + +@pytest.hookimpl(tryfirst=True) +def pytest_runtest_protocol(item): + if item.config.getvalue("forked") or item.get_closest_marker("forked"): + ihook = item.ihook + ihook.pytest_runtest_logstart(nodeid=item.nodeid, location=item.location) + reports = forked_run_report(item) + for rep in reports: + ihook.pytest_runtest_logreport(report=rep) + ihook.pytest_runtest_logfinish(nodeid=item.nodeid, location=item.location) + return True + + +def forked_run_report(item): + # for now, we run setup/teardown in the subprocess + # XXX optionally allow sharing of setup/teardown + from _pytest.runner import runtestprotocol + + EXITSTATUS_TESTEXIT = 4 + import marshal + + def runforked(): + try: + reports = runtestprotocol(item, log=False) + except KeyboardInterrupt: + os._exit(EXITSTATUS_TESTEXIT) + return marshal.dumps([serialize_report(x) for x in reports]) + + ff = py.process.ForkedFunc(runforked) + result = ff.waitfinish() + if result.retval is not None: + report_dumps = marshal.loads(result.retval) + return [runner.TestReport(**x) for x in report_dumps] + else: + if result.exitstatus == EXITSTATUS_TESTEXIT: + pytest.exit(f"forked test item {item} raised Exit") + return [report_process_crash(item, result)] + + +def report_process_crash(item, result): + from _pytest._code import getfslineno + + path, lineno = getfslineno(item) + info = "%s:%s: running the test CRASHED with signal %d" % ( + path, + lineno, + result.signal, + ) + from _pytest import runner + + # pytest >= 4.1 + has_from_call = getattr(runner.CallInfo, "from_call", None) is not None + if has_from_call: + call = runner.CallInfo.from_call(lambda: 0 / 0, "???") + else: + call = runner.CallInfo(lambda: 0 / 0, "???") + call.excinfo = info + rep = runner.pytest_runtest_makereport(item, call) + if result.out: + rep.sections.append(("captured stdout", result.out)) + if result.err: + rep.sections.append(("captured stderr", result.err)) + + xfail_marker = item.get_closest_marker("xfail") + if not xfail_marker: + return rep + + rep.outcome = "skipped" + rep.wasxfail = ( + "reason: {xfail_reason}; " + "pytest-forked reason: {crash_info}".format( + xfail_reason=xfail_marker.kwargs["reason"], + crash_info=info, + ) + ) + warnings.warn( + "pytest-forked xfail support is incomplete at the moment and may " + "output a misleading reason message", + RuntimeWarning, + ) + + return rep diff --git a/venv/lib/python3.10/site-packages/pytest_rerunfailures-10.2.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/pytest_rerunfailures-10.2.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/venv/lib/python3.10/site-packages/pytest_rerunfailures-10.2.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/lib/python3.10/site-packages/pytest_rerunfailures-10.2.dist-info/LICENSE b/venv/lib/python3.10/site-packages/pytest_rerunfailures-10.2.dist-info/LICENSE new file mode 100644 index 0000000..e34e4d9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pytest_rerunfailures-10.2.dist-info/LICENSE @@ -0,0 +1,3 @@ +This Source Code Form is subject to the terms of the Mozilla Public +License, v. 2.0. 
If a copy of the MPL was not distributed with this +file, You can obtain one at https://www.mozilla.org/MPL/2.0/. diff --git a/venv/lib/python3.10/site-packages/pytest_rerunfailures-10.2.dist-info/METADATA b/venv/lib/python3.10/site-packages/pytest_rerunfailures-10.2.dist-info/METADATA new file mode 100644 index 0000000..2204f59 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pytest_rerunfailures-10.2.dist-info/METADATA @@ -0,0 +1,542 @@ +Metadata-Version: 2.1 +Name: pytest-rerunfailures +Version: 10.2 +Summary: pytest plugin to re-run tests to eliminate flaky failures +Home-page: https://github.com/pytest-dev/pytest-rerunfailures +Author: Leah Klearman +Author-email: lklrmn@gmail.com +License: Mozilla Public License 2.0 (MPL 2.0) +Keywords: py.test pytest rerun failures flaky +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Framework :: Pytest +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0) +Classifier: Operating System :: POSIX +Classifier: Operating System :: Microsoft :: Windows +Classifier: Operating System :: MacOS :: MacOS X +Classifier: Topic :: Software Development :: Quality Assurance +Classifier: Topic :: Software Development :: Testing +Classifier: Topic :: Utilities +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Requires-Python: >= 3.6 +Requires-Dist: setuptools (>=40.0) +Requires-Dist: pytest (>=5.3) + +.. contents:: + +pytest-rerunfailures +==================== + +pytest-rerunfailures is a plugin for `pytest `_ that +re-runs tests to eliminate intermittent failures. + +.. image:: https://img.shields.io/badge/license-MPL%202.0-blue.svg + :target: https://github.com/pytest-dev/pytest-rerunfailures/blob/master/LICENSE + :alt: License +.. image:: https://img.shields.io/pypi/v/pytest-rerunfailures.svg + :target: https://pypi.python.org/pypi/pytest-rerunfailures/ + :alt: PyPI +.. image:: https://github.com/pytest-dev/pytest-rerunfailures/workflows/Test/badge.svg + :target: https://github.com/pytest-dev/pytest-rerunfailures/actions + :alt: GitHub Actions + +Requirements +------------ + +You will need the following prerequisites in order to use pytest-rerunfailures: + +- Python 3.6, up to 3.10, or PyPy3 +- pytest 5.3 or newer + +This plugin can recover from a hard crash with the following optional +prerequisites: + +- pytest-xdist 2.3.0 or newer + +This package is currently tested against the last 5 minor pytest releases. In +case you work with an older version of pytest you should consider updating or +use one of the earlier versions of this package. + +Installation +------------ + +To install pytest-rerunfailures: + +.. code-block:: bash + + $ pip install pytest-rerunfailures + +Recover from hard crashes +------------------------- + +If one or more tests trigger a hard crash (for example: segfault), this plugin +will ordinarily be unable to rerun the test. 
However, if a compatible version of +pytest-xdist is installed, and the tests are run within pytest-xdist using the `-n` +flag, this plugin will be able to rerun crashed tests, assuming the workers and +controller are on the same LAN (this assumption is valid for almost all cases +because most of the time the workers and controller are on the same computer). +If this assumption is not the case, then this functionality may not operate. + +Re-run all failures +------------------- + +To re-run all test failures, use the ``--reruns`` command line option with the +maximum number of times you'd like the tests to run: + +.. code-block:: bash + + $ pytest --reruns 5 + +Failed fixture or setup_class will also be re-executed. + +To add a delay time between re-runs use the ``--reruns-delay`` command line +option with the amount of seconds that you would like wait before the next +test re-run is launched: + +.. code-block:: bash + + $ pytest --reruns 5 --reruns-delay 1 + +Re-run all failures matching certain expressions +------------------------------------------------ + +To re-run only those failures that match a certain list of expressions, use the +``--only-rerun`` flag and pass it a regular expression. For example, +the following would only rerun those errors that match ``AssertionError``: + +.. code-block:: bash + + $ pytest --reruns 5 --only-rerun AssertionError + +Passing the flag multiple times accumulates the arguments, so the following +would only rerun those errors that match ``AssertionError`` or ``ValueError``: + +.. code-block:: bash + + $ pytest --reruns 5 --only-rerun AssertionError --only-rerun ValueError + +Re-run individual failures +-------------------------- + +To mark individual tests as flaky, and have them automatically re-run when they +fail, add the ``flaky`` mark with the maximum number of times you'd like the +test to run: + +.. code-block:: python + + @pytest.mark.flaky(reruns=5) + def test_example(): + import random + assert random.choice([True, False]) + +Note that when teardown fails, two reports are generated for the case, one for +the test case and the other for the teardown error. + +You can also specify the re-run delay time in the marker: + +.. code-block:: python + + @pytest.mark.flaky(reruns=5, reruns_delay=2) + def test_example(): + import random + assert random.choice([True, False]) + +You can also specify an optional ``condition`` in the re-run marker: + +.. code-block:: python + + @pytest.mark.flaky(reruns=5, condition=sys.platform.startswith("win32")) + def test_example(): + import random + assert random.choice([True, False]) + +You can use ``@pytest.mark.flaky(condition)`` similarly as ``@pytest.mark.skipif(condition)``, see `pytest-mark-skipif `_ + +.. code-block:: python + + @pytest.mark.flaky(reruns=2,condition="sys.platform.startswith('win32')") + def test_example(): + import random + assert random.choice([True, False]) + # totally same as the above + @pytest.mark.flaky(reruns=2,condition=sys.platform.startswith("win32")) + def test_example(): + import random + assert random.choice([True, False]) + +Note that the test will re-run for any ``condition`` that is truthy. 
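A usage note grounded in the plugin source included later in this diff: ``get_reruns_count()`` and ``get_reruns_delay()`` there also read the marker arguments positionally (``args[0]`` is the rerun count, ``args[1]`` the delay), so the keyword examples above have a more compact equivalent:

.. code-block:: python

    import random

    import pytest

    @pytest.mark.flaky(2, 1.5)  # reruns=2, reruns_delay=1.5
    def test_example_positional():
        assert random.choice([True, False])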
+ +Output +------ + +Here's an example of the output provided by the plugin when run with +``--reruns 2`` and ``-r aR``:: + + test_report.py RRF + + ================================== FAILURES ================================== + __________________________________ test_fail _________________________________ + + def test_fail(): + > assert False + E assert False + + test_report.py:9: AssertionError + ============================ rerun test summary info ========================= + RERUN test_report.py::test_fail + RERUN test_report.py::test_fail + ============================ short test summary info ========================= + FAIL test_report.py::test_fail + ======================= 1 failed, 2 rerun in 0.02 seconds ==================== + +Note that output will show all re-runs. Tests that fail on all the re-runs will +be marked as failed. + +Compatibility +------------- + +* This plugin may *not* be used with class, module, and package level fixtures. +* This plugin is *not* compatible with pytest-xdist's --looponfail flag. +* This plugin is *not* compatible with the core --pdb flag. + +Resources +--------- + +- `Issue Tracker `_ +- `Code `_ + +Development +----------- + +* Test execution count can be retrieved from the ``execution_count`` attribute + in test ``item``'s object. Example: + + .. code-block:: python + + @hookimpl(tryfirst=True) + def pytest_runtest_makereport(item, call): + print(item.execution_count) + +Changelog +========= + +10.2 (2021-09-17) +----------------- + +Features +++++++++ + +- Allow recovery from crashed tests with pytest-xdist. +- Add support for Python 3.10 (as of Python 3.10.rc2). + (Thanks to `@hugovk `_ for the PR.) + + +10.1 (2021-07-02) +----------------- + +Features +++++++++ + +- Allows using a ``str`` as condition for + ``@pytest.mark.flaky(condition)`` + which gets evaluated dynamically similarly to + ``@pytest.mark.skipif(condition)``. + (`#162 `_ + provided by `@15klli `_) + +10.0 (2021-05-26) +----------------- + +Backwards incompatible changes +++++++++++++++++++++++++++++++ + +- Drop support for Python 3.5. + +- Drop support for pytest < 5.3. + +Features +++++++++ + +- Add ``condition`` keyword argument to the re-run marker. + (Thanks to `@BeyondEvil`_ for the PR.) + +- Add support for Python 3.9. + (Thanks to `@digitronik`_ for the PR.) + +- Add support for pytest 6.3. + (Thanks to `@bluetech`_ for the PR.) + +- Add compatibility with ``pytest-xdist >= 2.0``. + (Thanks to `@bluetech`_ for the PR.) + +Other changes ++++++++++++++ + +- Check for the resultlog by feature and not by version as pytest master does + not provide a consistent version. + +.. _@BeyondEvil: https://github.com/BeyondEvil +.. _@digitronik: https://github.com/digitronik +.. _@bluetech: https://github.com/bluetech + +9.1.1 (2020-09-29) +------------------ + +Compatibility fix. +++++++++++++++++++ + +- Ignore ``--result-log`` command line option when used together with ``pytest + >= 6.1.0``, as it was removed there. This is a quick fix, use an older + version of pytest, if you want to keep this feature for now. + (Thanks to `@ntessore`_ for the PR) + +- Support up to pytest 6.1.0. + +.. _@ntessore: https://github.com/ntessore + + +9.1 (2020-08-26) +---------------- + +Features +++++++++ + +- Add a new flag ``--only-rerun`` to allow for users to rerun only certain + errors. + +Other changes ++++++++++++++ + +- Drop dependency on ``mock``. + +- Add support for pre-commit and add a linting tox target. + (`#117 `_) + (PR from `@gnikonorov`_) + +.. 
_@gnikonorov: https://github.com/gnikonorov + + +9.0 (2020-03-18) +---------------- + +Backwards incompatible changes +++++++++++++++++++++++++++++++ + +- Drop support for pytest version 4.4, 4.5 and 4.6. + +- Drop support for Python 2.7. + + +Features +++++++++ + +- Add support for pytest 5.4. + +- Add support for Python 3.8. + + +8.0 (2019-11-18) +---------------- + +Backwards incompatible changes +++++++++++++++++++++++++++++++ + +- Drop support for pytest version 3.10, 4.0, 4.1, 4.2 and 4.3 + +- Drop support for Python 3.4. + +Features +++++++++ + +- Add support for pytest version 4.4, 4.5, 4.6, 5.0, 5.1 and 5.2. + +Bug fixes ++++++++++ + +- Explicitly depend on setuptools to ensure installation when working in + environments without it. + (`#98 `_) + (PR from `@Eric-Arellano`_) + +.. _@Eric-Arellano: https://github.com/Eric-Arellano + + +7.0 (2019-03-28) +---------------- + +Backwards incompatible changes +++++++++++++++++++++++++++++++ + +- Drop support for pytest version 3.8 and 3.9. + +Features +++++++++ + +- Add support for pytest version 4.2 and 4.3. + +Bug fixes ++++++++++ + +- Fixed #83 issue about ignored ``pytest_runtest_logfinish`` hooks. + (`#83 `_) + (PR from `@KillAChicken`_) + +.. _@KillAChicken: https://github.com/KillAChicken + + +6.0 (2019-01-08) +---------------- + +Backwards incompatible changes +++++++++++++++++++++++++++++++ + +- Drop support for pytest version 3.6 and 3.7. + +Features +++++++++ + +- Add support for pytest version 4.0 and 4.1. + +Bug fixes ++++++++++ + +- Fixed #77 regression issue introduced in 4.2 related to the ``rerun`` + attribute on the test report. + (`#77 `_) + (Thanks to `@RibeiroAna`_ for the PR). + +.. _@RibeiroAna: https://github.com/RibeiroAna + + +5.0 (2018-11-06) +---------------- + +- Drop support for pytest versions < 3.6 to reduce the maintenance burden. + +- Add support up to pytest version 3.10. Thus supporting the newest 5 pytest + releases. + +- Add support for Python 3.7. + +- Fix issue can occur when used together with `pytest-flake8` + (`#73 `_) + + +4.2 (2018-10-04) +---------------- + +- Fixed #64 issue related to ``setup_class`` and ``fixture`` executions on + rerun (Thanks to `@OlegKuzovkov`_ for the PR). + +- Added new ``execution_count`` attribute to reflect the number of test case + executions according to #67 issue. (Thanks to `@OlegKuzovkov`_ for the PR). + +.. _@OlegKuzovkov: https://github.com/OlegKuzovkov + + +4.1 (2018-05-23) +---------------- + +- Add support for pytest 3.6 by using ``Node.get_closest_marker()`` (Thanks to + `@The-Compiler`_ for the PR). + +.. _@The-Compiler: https://github.com/The-Compiler + +4.0 (2017-12-23) +---------------- + +- Added option to add a delay time between test re-runs (Thanks to `@Kanguros`_ + for the PR). + +- Added support for pytest >= 3.3. + +- Drop support for pytest < 2.8.7. + +.. _@Kanguros: https://github.com/Kanguros + + +3.1 (2017-08-29) +---------------- + +- Restored compatibility with pytest-xdist. (Thanks to `@davehunt`_ for the PR) + +.. _@davehunt: https://github.com/davehunt + + +3.0 (2017-08-17) +---------------- + +- Add support for Python 3.6. + +- Add support for pytest 2.9 up to 3.2 + +- Drop support for Python 2.6 and 3.3. + +- Drop support for pytest < 2.7. + + +2.2 (2017-06-23) +---------------- + +- Ensure that other plugins can run after this one, in case of a global setting + ``--rerun=0``. (Thanks to `@sublee`_ for the PR) + +.. 
_@sublee: https://github.com/sublee + +2.1.0 (2016-11-01) +------------------ + +- Add default value of ``reruns=1`` if ``pytest.mark.flaky()`` is called + without arguments. + +- Also offer a distribution as universal wheel. (Thanks to `@tltx`_ for the PR) + +.. _@tltx: https://github.com/tltx + + +2.0.1 (2016-08-10) +----------------------------- + +- Prepare CLI options to pytest 3.0, to avoid a deprecation warning. + +- Fix error due to missing CHANGES.rst when creating the source distribution + by adding a MANIFEST.in. + + +2.0.0 (2016-04-06) +------------------ + +- Drop support for Python 3.2, since supporting it became too much of a hassle. + (Reason: Virtualenv 14+ / PIP 8+ do not support Python 3.2 anymore.) + + +1.0.2 (2016-03-29) +------------------ + +- Add support for `--resultlog` option by parsing reruns accordingly. (#28) + + +1.0.1 (2016-02-02) +------------------ + +- Improve package description and include CHANGELOG into description. + + +1.0.0 (2016-02-02) +------------------ + +- Rewrite to use newer API of pytest >= 2.3.0 + +- Improve support for pytest-xdist by only logging the final result. + (Logging intermediate results will finish the test rather rerunning it.) + + diff --git a/venv/lib/python3.10/site-packages/pytest_rerunfailures-10.2.dist-info/RECORD b/venv/lib/python3.10/site-packages/pytest_rerunfailures-10.2.dist-info/RECORD new file mode 100644 index 0000000..b4e7245 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pytest_rerunfailures-10.2.dist-info/RECORD @@ -0,0 +1,10 @@ +__pycache__/pytest_rerunfailures.cpython-310.pyc,, +pytest_rerunfailures-10.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +pytest_rerunfailures-10.2.dist-info/LICENSE,sha256=u0Mc5IJvhYBUE4vgLtbT811lzljhA2npJgz-ZNlTc40,198 +pytest_rerunfailures-10.2.dist-info/METADATA,sha256=P7iG6gMpVEKInXq_MjeDSJXqo4XHjAmzfVYw6YV1VcY,14531 +pytest_rerunfailures-10.2.dist-info/RECORD,, +pytest_rerunfailures-10.2.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pytest_rerunfailures-10.2.dist-info/WHEEL,sha256=S8S5VL-stOTSZDYxHyf0KP7eds0J72qrK0Evu3TfyAY,92 +pytest_rerunfailures-10.2.dist-info/entry_points.txt,sha256=BCKcdbHWVFQYi4zDouIXHHvJfsJIHt1Aump2Frcvlv4,49 +pytest_rerunfailures-10.2.dist-info/top_level.txt,sha256=WmlugCFhTek8mIj4SEVrpatzLsM_fed_LHgjYxthcQw,21 +pytest_rerunfailures.py,sha256=NX9jrLFB7i_k28KSDpo5FT3Vj2dXSG-OeX7BLhVAqQg,17649 diff --git a/venv/lib/python3.10/site-packages/pytest_rerunfailures-10.2.dist-info/REQUESTED b/venv/lib/python3.10/site-packages/pytest_rerunfailures-10.2.dist-info/REQUESTED new file mode 100644 index 0000000..e69de29 diff --git a/venv/lib/python3.10/site-packages/pytest_rerunfailures-10.2.dist-info/WHEEL b/venv/lib/python3.10/site-packages/pytest_rerunfailures-10.2.dist-info/WHEEL new file mode 100644 index 0000000..c57a597 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pytest_rerunfailures-10.2.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.33.4) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/venv/lib/python3.10/site-packages/pytest_rerunfailures-10.2.dist-info/entry_points.txt b/venv/lib/python3.10/site-packages/pytest_rerunfailures-10.2.dist-info/entry_points.txt new file mode 100644 index 0000000..aab02af --- /dev/null +++ b/venv/lib/python3.10/site-packages/pytest_rerunfailures-10.2.dist-info/entry_points.txt @@ -0,0 +1,3 @@ +[pytest11] +rerunfailures = pytest_rerunfailures + diff --git 
a/venv/lib/python3.10/site-packages/pytest_rerunfailures-10.2.dist-info/top_level.txt b/venv/lib/python3.10/site-packages/pytest_rerunfailures-10.2.dist-info/top_level.txt new file mode 100644 index 0000000..eb0dca9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pytest_rerunfailures-10.2.dist-info/top_level.txt @@ -0,0 +1 @@ +pytest_rerunfailures diff --git a/venv/lib/python3.10/site-packages/pytest_rerunfailures.py b/venv/lib/python3.10/site-packages/pytest_rerunfailures.py new file mode 100644 index 0000000..dd5c980 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pytest_rerunfailures.py @@ -0,0 +1,584 @@ +import hashlib +import os +import platform +import re +import socket +import sys +import threading +import time +import traceback +import warnings +from contextlib import suppress + +import pytest +from _pytest.outcomes import fail +from _pytest.runner import runtestprotocol +from pkg_resources import DistributionNotFound +from pkg_resources import get_distribution +from pkg_resources import parse_version + +HAS_RESULTLOG = False + +try: + from _pytest.resultlog import ResultLog + + HAS_RESULTLOG = True +except ImportError: + # We have a pytest >= 6.1 + pass + +try: + from xdist.newhooks import pytest_handlecrashitem + + HAS_PYTEST_HANDLECRASHITEM = True + del pytest_handlecrashitem +except ImportError: + HAS_PYTEST_HANDLECRASHITEM = False + + +PYTEST_GTE_54 = parse_version(pytest.__version__) >= parse_version("5.4") +PYTEST_GTE_63 = parse_version(pytest.__version__) >= parse_version("6.3.0.dev") + + +def works_with_current_xdist(): + """Return compatibility with installed pytest-xdist version. + + When running tests in parallel using pytest-xdist < 1.20.0, the first + report that is logged will finish and terminate the current node rather + rerunning the test. Thus we must skip logging of intermediate results under + these circumstances, otherwise no test is rerun. + + """ + try: + d = get_distribution("pytest-xdist") + return d.parsed_version >= parse_version("1.20") + except DistributionNotFound: + return None + + +# command line options +def pytest_addoption(parser): + group = parser.getgroup( + "rerunfailures", "re-run failing tests to eliminate flaky failures" + ) + group._addoption( + "--only-rerun", + action="append", + dest="only_rerun", + type=str, + default=None, + help="If passed, only rerun errors matching the regex provided. " + "Pass this flag multiple times to accumulate a list of regexes " + "to match", + ) + group._addoption( + "--reruns", + action="store", + dest="reruns", + type=int, + default=0, + help="number of times to re-run failed tests. 
defaults to 0.", + ) + group._addoption( + "--reruns-delay", + action="store", + dest="reruns_delay", + type=float, + default=0, + help="add time (seconds) delay between reruns.", + ) + + +def _get_resultlog(config): + if not HAS_RESULTLOG: + return None + elif PYTEST_GTE_54: + # hack + from _pytest.resultlog import resultlog_key + + return config._store.get(resultlog_key, default=None) + else: + return getattr(config, "_resultlog", None) + + +def _set_resultlog(config, resultlog): + if not HAS_RESULTLOG: + pass + elif PYTEST_GTE_54: + # hack + from _pytest.resultlog import resultlog_key + + config._store[resultlog_key] = resultlog + else: + config._resultlog = resultlog + + +# making sure the options make sense +# should run before / at the beginning of pytest_cmdline_main +def check_options(config): + val = config.getvalue + if not val("collectonly"): + if config.option.reruns != 0: + if config.option.usepdb: # a core option + raise pytest.UsageError("--reruns incompatible with --pdb") + + resultlog = _get_resultlog(config) + if resultlog: + logfile = resultlog.logfile + config.pluginmanager.unregister(resultlog) + new_resultlog = RerunResultLog(config, logfile) + _set_resultlog(config, new_resultlog) + config.pluginmanager.register(new_resultlog) + + +def _get_marker(item): + try: + return item.get_closest_marker("flaky") + except AttributeError: + # pytest < 3.6 + return item.get_marker("flaky") + + +def get_reruns_count(item): + rerun_marker = _get_marker(item) + reruns = None + + # use the marker as a priority over the global setting. + if rerun_marker is not None: + if "reruns" in rerun_marker.kwargs: + # check for keyword arguments + reruns = rerun_marker.kwargs["reruns"] + elif len(rerun_marker.args) > 0: + # check for arguments + reruns = rerun_marker.args[0] + else: + reruns = 1 + elif item.session.config.option.reruns: + # default to the global setting + reruns = item.session.config.option.reruns + + return reruns + + +def get_reruns_delay(item): + rerun_marker = _get_marker(item) + + if rerun_marker is not None: + if "reruns_delay" in rerun_marker.kwargs: + delay = rerun_marker.kwargs["reruns_delay"] + elif len(rerun_marker.args) > 1: + # check for arguments + delay = rerun_marker.args[1] + else: + delay = 0 + else: + delay = item.session.config.option.reruns_delay + + if delay < 0: + delay = 0 + warnings.warn( + "Delay time between re-runs cannot be < 0. Using default value: 0" + ) + + return delay + + +def get_reruns_condition(item): + rerun_marker = _get_marker(item) + + condition = True + if rerun_marker is not None and "condition" in rerun_marker.kwargs: + condition = evaluate_condition( + item, rerun_marker, rerun_marker.kwargs["condition"] + ) + + return condition + + +def evaluate_condition(item, mark, condition: object) -> bool: + # copy from python3.8 _pytest.skipping.py + + result = False + # String condition. 
+ if isinstance(condition, str): + globals_ = { + "os": os, + "sys": sys, + "platform": platform, + "config": item.config, + } + if hasattr(item, "obj"): + globals_.update(item.obj.__globals__) # type: ignore[attr-defined] + try: + filename = f"<{mark.name} condition>" + condition_code = compile(condition, filename, "eval") + result = eval(condition_code, globals_) + except SyntaxError as exc: + msglines = [ + "Error evaluating %r condition" % mark.name, + " " + condition, + " " + " " * (exc.offset or 0) + "^", + "SyntaxError: invalid syntax", + ] + fail("\n".join(msglines), pytrace=False) + except Exception as exc: + msglines = [ + "Error evaluating %r condition" % mark.name, + " " + condition, + *traceback.format_exception_only(type(exc), exc), + ] + fail("\n".join(msglines), pytrace=False) + + # Boolean condition. + else: + try: + result = bool(condition) + except Exception as exc: + msglines = [ + "Error evaluating %r condition as a boolean" % mark.name, + *traceback.format_exception_only(type(exc), exc), + ] + fail("\n".join(msglines), pytrace=False) + return result + + +def _remove_cached_results_from_failed_fixtures(item): + """Note: remove all cached_result attribute from every fixture.""" + cached_result = "cached_result" + fixture_info = getattr(item, "_fixtureinfo", None) + for fixture_def_str in getattr(fixture_info, "name2fixturedefs", ()): + fixture_defs = fixture_info.name2fixturedefs[fixture_def_str] + for fixture_def in fixture_defs: + if getattr(fixture_def, cached_result, None) is not None: + result, _, err = getattr(fixture_def, cached_result) + if err: # Deleting cached results for only failed fixtures + if PYTEST_GTE_54: + setattr(fixture_def, cached_result, None) + else: + delattr(fixture_def, cached_result) + + +def _remove_failed_setup_state_from_session(item): + """ + Clean up setup state. + + Note: remove all failures from every node in _setupstate stack + and clean the stack itself + """ + setup_state = item.session._setupstate + if PYTEST_GTE_63: + setup_state.stack = {} + else: + for node in setup_state.stack: + if hasattr(node, "_prepare_exc"): + del node._prepare_exc + setup_state.stack = [] + + +def _should_hard_fail_on_error(session_config, report): + if report.outcome != "failed": + return False + + rerun_errors = session_config.option.only_rerun + if not rerun_errors: + return False + + for rerun_regex in rerun_errors: + if re.search(rerun_regex, report.longrepr.reprcrash.message): + return False + + return True + + +def _should_not_rerun(item, report, reruns): + xfail = hasattr(report, "wasxfail") + is_terminal_error = _should_hard_fail_on_error(item.session.config, report) + condition = get_reruns_condition(item) + return ( + item.execution_count > reruns + or not report.failed + or xfail + or is_terminal_error + or not condition + ) + + +def is_master(config): + return not (hasattr(config, "workerinput") or hasattr(config, "slaveinput")) + + +def pytest_configure(config): + # add flaky marker + config.addinivalue_line( + "markers", + "flaky(reruns=1, reruns_delay=0): mark test to re-run up " + "to 'reruns' times. 
Add a delay of 'reruns_delay' seconds " + "between re-runs.", + ) + + if HAS_PYTEST_HANDLECRASHITEM: + if is_master(config): + config.failures_db = ServerStatusDB() + else: + config.failures_db = ClientStatusDB(config.workerinput["sock_port"]) + else: + config.failures_db = StatusDB() # no-op db + + +if HAS_PYTEST_HANDLECRASHITEM: + + def pytest_configure_node(node): + """xdist hook""" + node.workerinput["sock_port"] = node.config.failures_db.sock_port + + def pytest_handlecrashitem(crashitem, report, sched): + """ + Return the crashitem from pending and collection. + """ + db = sched.config.failures_db + reruns = db.get_test_reruns(crashitem) + if db.get_test_failures(crashitem) < reruns: + sched.mark_test_pending(crashitem) + report.outcome = "rerun" + + db.add_test_failure(crashitem) + + +# An in-memory db residing in the master that records +# the number of reruns (set before test setup) +# and failures (set after each failure or crash) +# accessible from both the master and worker +class StatusDB: + def __init__(self): + self.delim = b"\n" + self.hmap = {} + + def _hash(self, crashitem: str) -> str: + if crashitem not in self.hmap: + self.hmap[crashitem] = hashlib.sha1( + crashitem.encode(), + ).hexdigest()[:10] + + return self.hmap[crashitem] + + def add_test_failure(self, crashitem): + hash = self._hash(crashitem) + failures = self._get(hash, "f") + failures += 1 + self._set(hash, "f", failures) + + def get_test_failures(self, crashitem): + hash = self._hash(crashitem) + return self._get(hash, "f") + + def set_test_reruns(self, crashitem, reruns): + hash = self._hash(crashitem) + self._set(hash, "r", reruns) + + def get_test_reruns(self, crashitem): + hash = self._hash(crashitem) + return self._get(hash, "r") + + # i is a hash of the test name, t_f.py::test_t + # k is f for failures or r for reruns + # v is the number of failures or reruns (an int) + def _set(self, i: str, k: str, v: int): + pass + + def _get(self, i: str, k: str) -> int: + return 0 + + +class SocketDB(StatusDB): + def __init__(self): + super().__init__() + self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + self.sock.setblocking(1) + + def _sock_recv(self, conn) -> str: + buf = b"" + while True: + b = conn.recv(1) + if b == self.delim: + break + buf += b + + return buf.decode() + + def _sock_send(self, conn, msg: str): + conn.send(msg.encode() + self.delim) + + +class ServerStatusDB(SocketDB): + def __init__(self): + super().__init__() + self.sock.bind(("", 0)) + self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + + self.rerunfailures_db = {} + t = threading.Thread(target=self.run_server, daemon=True) + t.start() + + @property + def sock_port(self): + return self.sock.getsockname()[1] + + def run_server(self): + self.sock.listen() + while True: + conn, _ = self.sock.accept() + t = threading.Thread(target=self.run_connection, args=(conn,), daemon=True) + t.start() + + def run_connection(self, conn): + with suppress(ConnectionError): + while True: + op, i, k, v = self._sock_recv(conn).split("|") + if op == "set": + self._set(i, k, int(v)) + elif op == "get": + self._sock_send(conn, str(self._get(i, k))) + + def _set(self, i: str, k: str, v: int): + if i not in self.rerunfailures_db: + self.rerunfailures_db[i] = {} + self.rerunfailures_db[i][k] = v + + def _get(self, i: str, k: str) -> int: + try: + return self.rerunfailures_db[i][k] + except KeyError: + return 0 + + +class ClientStatusDB(SocketDB): + def __init__(self, sock_port): + super().__init__() + self.sock.connect(("localhost", 
sock_port))
+
+    def _set(self, i: str, k: str, v: int):
+        self._sock_send(self.sock, "|".join(("set", i, k, str(v))))
+
+    def _get(self, i: str, k: str) -> int:
+        self._sock_send(self.sock, "|".join(("get", i, k, "")))
+        return int(self._sock_recv(self.sock))
+
+
+def pytest_runtest_protocol(item, nextitem):
+    """
+    Run the test protocol.
+
+    Note: when teardown fails, two reports are generated for the case, one for
+    the test case and the other for the teardown error.
+    """
+    reruns = get_reruns_count(item)
+    if reruns is None:
+        # global setting is not specified, and this test is not marked with
+        # flaky
+        return
+
+    # while this doesn't need to be run with every item, it will fail on the
+    # first item if necessary
+    check_options(item.session.config)
+    delay = get_reruns_delay(item)
+    parallel = not is_master(item.config)
+    item_location = (item.location[0] + "::" + item.location[2]).replace("\\", "/")
+    db = item.session.config.failures_db
+    item.execution_count = db.get_test_failures(item_location)
+    db.set_test_reruns(item_location, reruns)
+
+    if item.execution_count > reruns:
+        return True
+
+    need_to_run = True
+    while need_to_run:
+        item.execution_count += 1
+        item.ihook.pytest_runtest_logstart(nodeid=item.nodeid, location=item.location)
+        reports = runtestprotocol(item, nextitem=nextitem, log=False)
+
+        for report in reports:  # 3 reports: setup, call, teardown
+            report.rerun = item.execution_count - 1
+            if _should_not_rerun(item, report, reruns):
+                # last run or no failure detected, log normally
+                item.ihook.pytest_runtest_logreport(report=report)
+            else:
+                # failure detected and reruns not exhausted
+                # (execution_count <= reruns)
+                report.outcome = "rerun"
+                time.sleep(delay)
+
+                if not parallel or works_with_current_xdist():
+                    # will rerun test, log intermediate result
+                    item.ihook.pytest_runtest_logreport(report=report)
+
+                # cleaning item's cached results from any level of setups
+                _remove_cached_results_from_failed_fixtures(item)
+                _remove_failed_setup_state_from_session(item)
+
+                break  # trigger rerun
+        else:
+            need_to_run = False
+
+    item.ihook.pytest_runtest_logfinish(nodeid=item.nodeid, location=item.location)
+
+    return True
+
+
+def pytest_report_teststatus(report):
+    # Adapted from https://pytest.org/latest/_modules/_pytest/skipping.html
+    if report.outcome == "rerun":
+        return "rerun", "R", ("RERUN", {"yellow": True})
+
+
+def pytest_terminal_summary(terminalreporter):
+    # Adapted from https://pytest.org/latest/_modules/_pytest/skipping.html
+    tr = terminalreporter
+    if not tr.reportchars:
+        return
+
+    lines = []
+    for char in tr.reportchars:
+        if char in "rR":
+            show_rerun(terminalreporter, lines)
+
+    if lines:
+        tr._tw.sep("=", "rerun test summary info")
+        for line in lines:
+            tr._tw.line(line)
+
+
+def show_rerun(terminalreporter, lines):
+    rerun = terminalreporter.stats.get("rerun")
+    if rerun:
+        for rep in rerun:
+            pos = rep.nodeid
+            lines.append(f"RERUN {pos}")
+
+
+if HAS_RESULTLOG:
+
+    class RerunResultLog(ResultLog):
+        def __init__(self, config, logfile):
+            ResultLog.__init__(self, config, logfile)
+
+        def pytest_runtest_logreport(self, report):
+            """Add support for rerun report."""
+            if report.when != "call" and report.passed:
+                return
+            res = self.config.hook.pytest_report_teststatus(report=report)
+            code = res[1]
+            if code == "x":
+                longrepr = str(report.longrepr)
+            elif code == "X":
+                longrepr = ""
+            elif report.passed:
+                longrepr = ""
+            elif report.failed:
+                longrepr = str(report.longrepr)
+            elif report.skipped:
+                longrepr = str(report.longrepr[2])
+            elif
report.outcome == "rerun": + longrepr = str(report.longrepr) + else: + longrepr = str(report.longrepr) + + self.log_outcome(report, code, longrepr) diff --git a/venv/lib/python3.10/site-packages/pytest_xdist-2.5.0.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/pytest_xdist-2.5.0.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/venv/lib/python3.10/site-packages/pytest_xdist-2.5.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/lib/python3.10/site-packages/pytest_xdist-2.5.0.dist-info/LICENSE b/venv/lib/python3.10/site-packages/pytest_xdist-2.5.0.dist-info/LICENSE new file mode 100644 index 0000000..ff33b8f --- /dev/null +++ b/venv/lib/python3.10/site-packages/pytest_xdist-2.5.0.dist-info/LICENSE @@ -0,0 +1,18 @@ + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. 
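The ``pytest_rerunfailures.py`` source above wires the ``--reruns``, ``--reruns-delay`` and ``--only-rerun`` options together with the ``flaky`` marker it consumes. A minimal usage sketch follows; the file and test names are illustrative, not files from this repository:

.. code-block:: python

    # test_flaky_example.py - illustrative only; assumes the vendored
    # pytest_rerunfailures plugin above is installed and active.
    import random

    import pytest


    @pytest.mark.flaky(reruns=3, reruns_delay=2)
    def test_sometimes_passes():
        # get_reruns_count() reads reruns=3 from the marker kwargs and
        # get_reruns_delay() sleeps 2 seconds between attempts.
        assert random.random() < 0.5


    @pytest.mark.flaky(reruns=2, condition='sys.platform.startswith("linux")')
    def test_rerun_only_on_linux():
        # evaluate_condition() evaluates string conditions with os, sys,
        # platform and config in scope, so this test is only rerun on Linux.
        assert False

The same policy can be set globally with ``pytest --reruns 3 --reruns-delay 2``; ``--only-rerun <regex>`` additionally restricts rerunning to failures whose crash message matches one of the given patterns, as implemented by ``_should_hard_fail_on_error`` above.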
diff --git a/venv/lib/python3.10/site-packages/pytest_xdist-2.5.0.dist-info/METADATA b/venv/lib/python3.10/site-packages/pytest_xdist-2.5.0.dist-info/METADATA
new file mode 100644
index 0000000..591a814
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pytest_xdist-2.5.0.dist-info/METADATA
@@ -0,0 +1,586 @@
+Metadata-Version: 2.1
+Name: pytest-xdist
+Version: 2.5.0
+Summary: pytest xdist plugin for distributed testing and loop-on-failing modes
+Home-page: https://github.com/pytest-dev/pytest-xdist
+Author: holger krekel and contributors
+Author-email: pytest-dev@python.org,holger@merlinux.eu
+License: MIT
+Platform: linux
+Platform: osx
+Platform: win32
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Framework :: Pytest
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: POSIX
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Operating System :: MacOS :: MacOS X
+Classifier: Topic :: Software Development :: Testing
+Classifier: Topic :: Software Development :: Quality Assurance
+Classifier: Topic :: Utilities
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Requires-Python: >=3.6
+Requires-Dist: execnet (>=1.1)
+Requires-Dist: pytest (>=6.2.0)
+Requires-Dist: pytest-forked
+Provides-Extra: psutil
+Requires-Dist: psutil (>=3.0) ; extra == 'psutil'
+Provides-Extra: setproctitle
+Requires-Dist: setproctitle ; extra == 'setproctitle'
+Provides-Extra: testing
+Requires-Dist: filelock ; extra == 'testing'
+
+============
+pytest-xdist
+============
+
+.. image:: http://img.shields.io/pypi/v/pytest-xdist.svg
+    :alt: PyPI version
+    :target: https://pypi.python.org/pypi/pytest-xdist
+
+.. image:: https://img.shields.io/conda/vn/conda-forge/pytest-xdist.svg
+    :target: https://anaconda.org/conda-forge/pytest-xdist
+
+.. image:: https://img.shields.io/pypi/pyversions/pytest-xdist.svg
+    :alt: Python versions
+    :target: https://pypi.python.org/pypi/pytest-xdist
+
+.. image:: https://github.com/pytest-dev/pytest-xdist/workflows/build/badge.svg
+    :target: https://github.com/pytest-dev/pytest-xdist/actions
+
+.. image:: https://img.shields.io/badge/code%20style-black-000000.svg
+    :target: https://github.com/ambv/black
+
+The `pytest-xdist`_ plugin extends pytest with new test execution modes, the most used being distributing
+tests across multiple CPUs to speed up test execution::
+
+    pytest -n auto
+
+With this call, pytest will spawn a number of worker processes equal to the number of available CPUs, and distribute
+the tests randomly across them. There are also a number of `distribution modes`_ to choose from.
+
+**NOTE**: due to how pytest-xdist is implemented, the ``-s/--capture=no`` option does not work.
+
+.. contents:: **Table of Contents**
+
+Installation
+------------
+
+Install the plugin with::
+
+    pip install pytest-xdist
+
+
+To use ``psutil`` for detection of the number of CPUs available, install the ``psutil`` extra::
+
+    pip install pytest-xdist[psutil]
+
+
+Features
+--------
+
+* Test run parallelization_: tests can be executed across multiple CPUs or hosts.
+  This allows you to speed up development or to use the special resources of `remote machines`_.
+
+* ``--looponfail``: run your tests repeatedly in a subprocess. After each run
+  pytest waits until a file in your project changes and then re-runs
+  the previously failing tests. This is repeated until all tests pass
+  after which again a full run is performed.
+
+* `Multi-Platform`_ coverage: you can specify different Python interpreters
+  or different platforms and run tests in parallel on all of them.
+
+  Before running tests remotely, ``pytest`` efficiently "rsyncs" your
+  program source code to the remote place.
+  You may specify different Python versions and interpreters. It does not
+  install or synchronize dependencies, however.
+
+  **Note**: this mode exists mostly for backward compatibility, as modern development
+  relies on continuous integration for multi-platform testing.
+
+.. _parallelization:
+
+Running tests across multiple CPUs
+----------------------------------
+
+To send tests to multiple CPUs, use the ``-n`` (or ``--numprocesses``) option::
+
+    pytest -n 8
+
+Pass ``-n auto`` to use as many processes as your computer has CPU cores. This
+can lead to considerable speed ups, especially if your test suite takes a
+noticeable amount of time.
+
+The test distribution algorithm is configured with the ``--dist`` command-line option:
+
+.. _distribution modes:
+
+* ``--dist load`` **(default)**: Sends pending tests to any worker that is
+  available, without any guaranteed order.
+
+* ``--dist loadscope``: Tests are grouped by **module** for *test functions*
+  and by **class** for *test methods*. Groups are distributed to available
+  workers as whole units. This guarantees that all tests in a group run in the
+  same process. This can be useful if you have expensive module-level or
+  class-level fixtures. Grouping by class takes priority over grouping by
+  module.
+
+* ``--dist loadfile``: Tests are grouped by their containing file. Groups are
+  distributed to available workers as whole units. This guarantees that all
+  tests in a file run in the same worker.
+
+* ``--dist loadgroup``: Tests are grouped by the ``xdist_group`` mark. Groups are
+  distributed to available workers as whole units. This guarantees that all
+  tests with the same ``xdist_group`` name run in the same worker.
+
+  .. code-block:: python
+
+      @pytest.mark.xdist_group(name="group1")
+      def test1():
+          pass
+
+      class TestA:
+          @pytest.mark.xdist_group("group1")
+          def test2():
+              pass
+
+  This will make sure ``test1`` and ``TestA::test2`` will run in the same worker.
+  Tests without the ``xdist_group`` mark are distributed normally as in the ``--dist=load`` mode.
+
+* ``--dist no``: The normal pytest execution mode, runs one test at a time (no distribution at all).
+
+
+Running tests in a Python subprocess
+------------------------------------
+
+To instantiate a ``python3.9`` subprocess and send tests to it, you may type::
+
+    pytest -d --tx popen//python=python3.9
+
+This will start a subprocess which is run with the ``python3.9``
+Python interpreter, found in your system binary lookup path.
+
+If you prefix the --tx option value like this::
+
+    --tx 3*popen//python=python3.9
+
+then three subprocesses would be created and tests
+will be load-balanced across these three processes.
+
+.. _boxed:
+
+Running tests in a boxed subprocess
+-----------------------------------
+
+This functionality has been moved to the
+`pytest-forked <https://github.com/pytest-dev/pytest-forked>`_ plugin, but the ``--boxed`` option
+is still kept for backward compatibility.
+
+.. _`remote machines`:
+
+Sending tests to remote SSH accounts
+------------------------------------
+
+Suppose you have a package ``mypkg`` which contains some
+tests that you can successfully run locally. And you
+have an ssh-reachable machine ``myhost``. Then
+you can ad-hoc distribute your tests by typing::
+
+    pytest -d --tx ssh=myhostpopen --rsyncdir mypkg mypkg
+
+This will synchronize your :code:`mypkg` package directory
+to a remote ssh account and then locally collect tests
+and send them to remote places for execution.
+
+You can specify multiple :code:`--rsyncdir` directories
+to be sent to the remote side.
+
+.. note::
+
+   For pytest to collect and send tests correctly
+   you not only need to make sure all code and tests
+   directories are rsynced, but that any test (sub) directory
+   also has an :code:`__init__.py` file because internally
+   pytest references tests as a fully qualified python
+   module path. **You will otherwise get strange errors**
+   during setup of the remote side.
+
+
+You can specify multiple :code:`--rsyncignore` glob patterns
+to be ignored when files are sent to the remote side.
+There are also internal ignores: :code:`.*, *.pyc, *.pyo, *~`
+Those you cannot override using rsyncignore command-line or
+ini-file option(s).
+
+
+Sending tests to remote Socket Servers
+--------------------------------------
+
+Download the single-module `socketserver.py`_ Python program
+and run it like this::
+
+    python socketserver.py
+
+It will tell you that it starts listening on the default
+port. You can now on your home machine specify this
+new socket host with something like this::
+
+    pytest -d --tx socket=192.168.1.102:8888 --rsyncdir mypkg mypkg
+
+
+.. _`atonce`:
+.. _`Multi-Platform`:
+
+
+Running tests on many platforms at once
+---------------------------------------
+
+The basic command to run tests on multiple platforms is::
+
+    pytest --dist=each --tx=spec1 --tx=spec2
+
+If you specify a windows host, an OSX host and a Linux
+environment this command will send each test to all
+platforms - and report back failures from all platforms
+at once. The specification strings use the `xspec syntax`_.
+
+.. _`xspec syntax`: https://codespeak.net/execnet/basics.html#xspec
+
+.. _`socketserver.py`: https://raw.githubusercontent.com/pytest-dev/execnet/master/execnet/script/socketserver.py
+
+.. _`execnet`: https://codespeak.net/execnet
+
+
+When tests crash
+----------------
+
+If a test crashes a worker, pytest-xdist will automatically restart that worker
+and report the test’s failure. You can use the ``--max-worker-restart`` option
+to limit the number of worker restarts that are allowed, or disable restarting
+altogether using ``--max-worker-restart 0``.
+
+
+How-tos
+-------
+
+Identifying the worker process during a test
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+*New in version 1.15.*
+
+If you need to determine the identity of a worker process in
+a test or fixture, you may use the ``worker_id`` fixture to do so:
+
+.. code-block:: python
+
+    @pytest.fixture()
+    def user_account(worker_id):
+        """ use a different account in each xdist worker """
+        return "account_%s" % worker_id
+
+When ``xdist`` is disabled (running with ``-n0`` for example), then
+``worker_id`` will return ``"master"``.
+
+Worker processes also have the following environment variables
+defined:
+
+* ``PYTEST_XDIST_WORKER``: the name of the worker, e.g., ``"gw2"``.
+* ``PYTEST_XDIST_WORKER_COUNT``: the total number of workers in this session,
+  e.g., ``"4"`` when ``-n 4`` is given in the command-line.
+
+The information about the worker_id in a test is stored in the ``TestReport`` as
+well, under the ``worker_id`` attribute.
+
+Since version 2.0, the following functions are also available in the ``xdist`` module:
+
+.. code-block:: python
+
+    def is_xdist_worker(request_or_session) -> bool:
+        """Return `True` if this is an xdist worker, `False` otherwise
+
+        :param request_or_session: the `pytest` `request` or `session` object
+        """
+
+    def is_xdist_controller(request_or_session) -> bool:
+        """Return `True` if this is the xdist controller, `False` otherwise
+
+        Note: this method also returns `False` when distribution has not been
+        activated at all.
+
+        :param request_or_session: the `pytest` `request` or `session` object
+        """
+
+    def is_xdist_master(request_or_session) -> bool:
+        """Deprecated alias for is_xdist_controller."""
+
+    def get_xdist_worker_id(request_or_session) -> str:
+        """Return the id of the current worker ('gw0', 'gw1', etc) or 'master'
+        if running on the controller node.
+
+        If not distributing tests (for example passing `-n0` or not passing `-n` at all)
+        also return 'master'.
+
+        :param request_or_session: the `pytest` `request` or `session` object
+        """
+
+
+Identifying workers from the system environment
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+*New in version 2.4*
+
+If the `setproctitle`_ package is installed, ``pytest-xdist`` will use it to
+update the process title (command line) on its workers to show their current
+state. The titles used are ``[pytest-xdist running] file.py/node::id`` and
+``[pytest-xdist idle]``, visible in standard tools like ``ps`` and ``top`` on
+Linux, Mac OS X and BSD systems. For Windows, please follow `setproctitle`_'s
+pointer regarding the Process Explorer tool.
+
+This is intended purely as a UX enhancement, e.g. to track down issues with
+long-running or CPU-intensive tests. Errors in changing the title are ignored
+silently. Please try not to rely on the title format or title changes in
+external scripts.
+
+.. _`setproctitle`: https://pypi.org/project/setproctitle/
+
+
+Uniquely identifying the current test run
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+*New in version 1.32.*
+
+If you need to globally distinguish one test run from others in your
+workers, you can use the ``testrun_uid`` fixture. For instance, let's say you
+wanted to create a separate database for each test run:
+
+.. code-block:: python
+
+    import pytest
+    from posix_ipc import Semaphore, O_CREAT
+
+    @pytest.fixture(scope="session", autouse=True)
+    def create_unique_database(testrun_uid):
+        """ create a unique database for this particular test run """
+        database_url = f"psql://myapp-{testrun_uid}"
+
+        with Semaphore(f"/{testrun_uid}-lock", flags=O_CREAT, initial_value=1):
+            if not database_exists(database_url):
+                create_database(database_url)
+
+    @pytest.fixture()
+    def db(testrun_uid):
+        """ retrieve unique database """
+        database_url = f"psql://myapp-{testrun_uid}"
+        return database_get_instance(database_url)
+
+
+Additionally, during a test run, the following environment variable is defined:
+
+* ``PYTEST_XDIST_TESTRUNUID``: the unique id of the test run.
+
+Accessing ``sys.argv`` from the controller node in workers
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+To access the ``sys.argv`` passed to the command-line of the controller node, use
+``request.config.workerinput["mainargv"]``.
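A short conftest sketch tying the pieces above together (``worker_id`` and ``workerinput["mainargv"]``); the fixture names are illustrative, not part of pytest-xdist:

.. code-block:: python

    # conftest.py - a sketch, assuming pytest-xdist is installed.
    import sys

    import pytest


    @pytest.fixture()
    def controller_argv(request):
        # Workers receive the controller's sys.argv as workerinput["mainargv"];
        # without xdist there is no workerinput, so fall back to this
        # process's own argv.
        workerinput = getattr(request.config, "workerinput", None)
        return workerinput["mainargv"] if workerinput is not None else sys.argv


    @pytest.fixture()
    def per_worker_resource(worker_id):
        # worker_id is "gw0", "gw1", ... under xdist and "master" with -n0,
        # giving each worker process a distinct resource name.
        return f"resource_{worker_id}"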
+
+
+Specifying test exec environments in an ini file
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+You can use pytest's ini file configuration to avoid typing common options.
+You can for example make running with three subprocesses your default like this:
+
+.. code-block:: ini
+
+    [pytest]
+    addopts = -n3
+
+You can also add default environments like this:
+
+.. code-block:: ini
+
+    [pytest]
+    addopts = --tx ssh=myhost//python=python3.9 --tx ssh=myhost//python=python3.6
+
+and then just type::
+
+    pytest --dist=each
+
+to run tests in each of the environments.
+
+
+Specifying "rsync" dirs in an ini-file
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In a ``tox.ini`` or ``setup.cfg`` file in your root project directory
+you may specify directories to include or to exclude in synchronisation:
+
+.. code-block:: ini
+
+    [pytest]
+    rsyncdirs = . mypkg helperpkg
+    rsyncignore = .hg
+
+These directory specifications are relative to the directory
+where the configuration file was found.
+
+.. _`pytest-xdist`: http://pypi.python.org/pypi/pytest-xdist
+.. _`pytest-xdist repository`: https://github.com/pytest-dev/pytest-xdist
+.. _`pytest`: http://pytest.org
+
+
+Making session-scoped fixtures execute only once
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+``pytest-xdist`` is designed so that each worker process will perform its own collection and execute
+a subset of all tests. This means that tests in different processes requesting a high-level
+scoped fixture (for example ``session``) will execute the fixture code more than once, which
+breaks expectations and might be undesired in certain situations.
+
+While ``pytest-xdist`` does not have built-in support for ensuring a session-scoped fixture is
+executed exactly once, this can be achieved by using a lock file for inter-process communication.
+
+The example below needs to execute the fixture ``session_data`` only once (because it is
+resource intensive, or needs to execute only once to define configuration options, etc), so it makes
+use of a `FileLock <https://py-filelock.readthedocs.io/en/latest/index.html>`_ to produce the fixture data only once
+when the first process requests the fixture, while the other processes will then read
+the data from a file.
+
+Here is the code:
+
+.. code-block:: python
+
+    import json
+
+    import pytest
+    from filelock import FileLock
+
+
+    @pytest.fixture(scope="session")
+    def session_data(tmp_path_factory, worker_id):
+        if worker_id == "master":
+            # not executing with multiple workers, just produce the data and let
+            # pytest's fixture caching do its job
+            return produce_expensive_data()
+
+        # get the temp directory shared by all workers
+        root_tmp_dir = tmp_path_factory.getbasetemp().parent
+
+        fn = root_tmp_dir / "data.json"
+        with FileLock(str(fn) + ".lock"):
+            if fn.is_file():
+                data = json.loads(fn.read_text())
+            else:
+                data = produce_expensive_data()
+                fn.write_text(json.dumps(data))
+        return data
+
+
+The example above can also be used in cases where a fixture needs to execute exactly once per test session, like
+initializing a database service and populating initial tables.
+
+This technique might not work for every case, but should be a starting point for many situations
+where executing a high-scope fixture exactly once is important.
+
+
+How does xdist work?
+--------------------
+
+``xdist`` works by spawning one or more **workers**, which are
+controlled by the **controller**. Each **worker** is responsible for
+performing a full test collection and afterwards running tests as
+dictated by the **controller**.
+
+The execution flow is:
+
+1. **controller** spawns one or more **workers** at the beginning of the
+   test session. The communication between **controller** and **worker**
+   nodes makes use of `execnet <https://codespeak.net/execnet>`__ and
+   its `gateways <https://codespeak.net/execnet/basics.html>`__.
+   The actual interpreters executing the code for the **workers** might
+   be remote or local.
+
+2. Each **worker** itself is a mini pytest runner. **workers** at this
+   point perform a full test collection, sending the collected
+   test-ids back to the **controller** which does not perform any
+   collection itself.
+
+3. The **controller** receives the result of the collection from all
+   nodes. At this point the **controller** performs some sanity checks to
+   ensure that all **workers** collected the same tests (including
+   order), bailing out otherwise. If all is well, it converts the list
+   of test-ids into a list of simple indexes, where each index
+   corresponds to the position of that test in the original collection
+   list. This works because all nodes have the same collection list, and
+   saves bandwidth because the **controller** can now tell one of the
+   workers to just *execute test index 3* instead of passing the full
+   test id.
+
+4. If **dist-mode** is **each**: the **controller** just sends the full
+   list of test indexes to each node at this moment.
+
+5. If **dist-mode** is **load**: the **controller** takes around 25% of
+   the tests and sends them one by one to each **worker** in a round
+   robin fashion. The rest of the tests will be distributed later as
+   **workers** finish tests (see below).
+
+6. Note that the ``pytest_xdist_make_scheduler`` hook can be used to
+   implement custom test distribution logic.
+
+7. **workers** re-implement ``pytest_runtestloop``: pytest’s default
+   implementation basically loops over all collected items in the
+   ``session`` object and executes the ``pytest_runtest_protocol`` for
+   each test item, but in xdist **workers** sit idly waiting for the
+   **controller** to send tests for execution. As tests are received by
+   **workers**, ``pytest_runtest_protocol`` is executed for each test.
+   Here it is worth noting an implementation detail: **workers** always
+   must keep at least one test item on their queue due to how the
+   ``pytest_runtest_protocol(item, nextitem)`` hook is defined: in order
+   to pass the ``nextitem`` to the hook, the worker must wait for more
+   instructions from the controller before executing that remaining
+   test. If it receives more tests, then it can safely call
+   ``pytest_runtest_protocol`` because it knows what the ``nextitem``
+   parameter will be. If it receives a “shutdown” signal, then it can
+   execute the hook passing ``nextitem`` as ``None``.
+
+8. As tests are started and completed at the **workers**, the results
+   are sent back to the **controller**, which then just forwards the
+   results to the appropriate pytest hooks: ``pytest_runtest_logstart``
+   and ``pytest_runtest_logreport``. This way other plugins (for example
+   ``junitxml``) can work normally. The **controller** (when in
+   dist-mode **load**) decides to send more tests to a node when a test
+   completes, using some heuristics such as test durations and how many
+   tests each **worker** still has to run.
+
+9. When the **controller** has no more pending tests it will send a
+   “shutdown” signal to all **workers**, which will then run their
+   remaining tests to completion and shut down. At this point the
+   **controller** will sit waiting for **workers** to shut down, still
+   processing events such as ``pytest_runtest_logreport``.
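Step 6 mentions the ``pytest_xdist_make_scheduler`` hook; a minimal conftest sketch of that hook follows (this version simply returns the stock ``--dist load`` scheduler, so behaviour is unchanged; a custom class exposing the same interface could be returned instead):

.. code-block:: python

    # conftest.py - a sketch, assuming pytest-xdist 2.5.0 as vendored here.
    from xdist.scheduler import LoadScheduling


    def pytest_xdist_make_scheduler(config, log):
        # Returning a scheduler object here overrides the --dist choice;
        # returning None lets pytest-xdist fall back to its default logic.
        return LoadScheduling(config, log)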
+
+FAQ
+---
+
+**Question**: Why does each worker do its own collection, as opposed to having the
+controller collect once and distribute from that collection to the
+workers?
+
+If collection was performed by the controller then it would have to
+serialize collected items to send them through the wire, as workers live
+in another process. The problem is that test items are not easy
+(perhaps impossible) to serialize, as they contain references to the test
+functions, fixture managers, config objects, etc. Even if one manages to
+serialize them, it seems it would be very hard to get right and easy to
+break with any small change in pytest.
+
+
diff --git a/venv/lib/python3.10/site-packages/pytest_xdist-2.5.0.dist-info/RECORD b/venv/lib/python3.10/site-packages/pytest_xdist-2.5.0.dist-info/RECORD
new file mode 100644
index 0000000..7bef997
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pytest_xdist-2.5.0.dist-info/RECORD
@@ -0,0 +1,38 @@
+pytest_xdist-2.5.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+pytest_xdist-2.5.0.dist-info/LICENSE,sha256=6J7tEHTTqUMZi6E5uAhE9bRFuGC7p0qK6twGEFZhZOo,1054
+pytest_xdist-2.5.0.dist-info/METADATA,sha256=6_5nBWLL55i8JAeTqp910o2h8J89mFsMwjZVSDWcAck,21674
+pytest_xdist-2.5.0.dist-info/RECORD,,
+pytest_xdist-2.5.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pytest_xdist-2.5.0.dist-info/WHEEL,sha256=ewwEueio1C2XeHTvT17n8dZUJgOvyCWCt0WVNLClP9o,92
+pytest_xdist-2.5.0.dist-info/entry_points.txt,sha256=sIdKnpIqpWuRLa-xvlRlWiWEEumZ-w_MUu7cKrN5tUM,69
+pytest_xdist-2.5.0.dist-info/top_level.txt,sha256=B9Iy6rAx6Uvy-JTIu9N7Yv_ug1-EzD3nX_CB-odTarY,6
+xdist/__init__.py,sha256=U4Rb5msAzlQQCkj7WcX3En_y3BwGTqx7PC4FQ9MArlg,305
+xdist/__pycache__/__init__.cpython-310.pyc,,
+xdist/__pycache__/_version.cpython-310.pyc,,
+xdist/__pycache__/dsession.cpython-310.pyc,,
+xdist/__pycache__/looponfail.cpython-310.pyc,,
+xdist/__pycache__/newhooks.cpython-310.pyc,,
+xdist/__pycache__/plugin.cpython-310.pyc,,
+xdist/__pycache__/remote.cpython-310.pyc,,
+xdist/__pycache__/report.cpython-310.pyc,,
+xdist/__pycache__/workermanage.cpython-310.pyc,,
+xdist/_version.py,sha256=3rSZt6aqlTHHOApxAAl5MkUeNod44YwmIjxqU1pn5A4,142
+xdist/dsession.py,sha256=NYMyH62F24-n6AB72WGg3nU6AMWZpEmpe8V1Ikh6xZc,16910
+xdist/looponfail.py,sha256=AZ5iUvc-eknWInTxeC-fOlvs6zDwyxoJubpv84SbQ-E,8907
+xdist/newhooks.py,sha256=d8R101dLWCBT6GC0r0gDQaKC4WpjMobKOzxffJUxmeY,2423
+xdist/plugin.py,sha256=rJWdqwQBLohqROVgksRAiuQMMcjdLKHn0iOCIc-TMu4,9932
+xdist/remote.py,sha256=6FT1axN4yCrLCXFAzCq_9JNNhf94_PbRKFuKGGlrugs,9993
+xdist/report.py,sha256=7yVt5vSEmEaj7YYFX8uiQ86N797g7-C_dfopGI4Pw5Q,742
+xdist/scheduler/__init__.py,sha256=ioKSpZLsaplDz8pwTmO6zNyDQN-rmOnpkHKWYO_FDXA,308
+xdist/scheduler/__pycache__/__init__.cpython-310.pyc,,
+xdist/scheduler/__pycache__/each.cpython-310.pyc,,
+xdist/scheduler/__pycache__/load.cpython-310.pyc,,
+xdist/scheduler/__pycache__/loadfile.cpython-310.pyc,,
+xdist/scheduler/__pycache__/loadgroup.cpython-310.pyc,,
+xdist/scheduler/__pycache__/loadscope.cpython-310.pyc,,
+xdist/scheduler/each.py,sha256=0vPUq-Nn_X5eCf7iHyEAebLT4SgH9BcAbktODAAqYXA,5136
+xdist/scheduler/load.py,sha256=9JakdbCQen8nU4hhJxAZY_Ysb-DyGoOyyM_rYPJOi-s,10997
+xdist/scheduler/loadfile.py,sha256=V0Wbrq1YpThMHjyaeyAOFEXThI3iYQLDApyv6mrgntE,2166
+xdist/scheduler/loadgroup.py,sha256=7mg35LUJA-AVYBHXwcsSXX1PBgT0_KAWeCv3Vjh4oR0,2161
+xdist/scheduler/loadscope.py,sha256=RNXFA4zXV7CjUbfk68kFTKvlvWxyb9AzO1gqgc0NEHM,14217
+xdist/workermanage.py,sha256=ApsJZu_gWhlc2mqGo6DcoskbR1XkAPrQVFYCJ_W7Ehk,16019 diff --git a/venv/lib/python3.10/site-packages/pytest_xdist-2.5.0.dist-info/REQUESTED b/venv/lib/python3.10/site-packages/pytest_xdist-2.5.0.dist-info/REQUESTED new file mode 100644 index 0000000..e69de29 diff --git a/venv/lib/python3.10/site-packages/pytest_xdist-2.5.0.dist-info/WHEEL b/venv/lib/python3.10/site-packages/pytest_xdist-2.5.0.dist-info/WHEEL new file mode 100644 index 0000000..5bad85f --- /dev/null +++ b/venv/lib/python3.10/site-packages/pytest_xdist-2.5.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.37.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/venv/lib/python3.10/site-packages/pytest_xdist-2.5.0.dist-info/entry_points.txt b/venv/lib/python3.10/site-packages/pytest_xdist-2.5.0.dist-info/entry_points.txt new file mode 100644 index 0000000..dcb013b --- /dev/null +++ b/venv/lib/python3.10/site-packages/pytest_xdist-2.5.0.dist-info/entry_points.txt @@ -0,0 +1,4 @@ +[pytest11] +xdist = xdist.plugin +xdist.looponfail = xdist.looponfail + diff --git a/venv/lib/python3.10/site-packages/pytest_xdist-2.5.0.dist-info/top_level.txt b/venv/lib/python3.10/site-packages/pytest_xdist-2.5.0.dist-info/top_level.txt new file mode 100644 index 0000000..ad07247 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pytest_xdist-2.5.0.dist-info/top_level.txt @@ -0,0 +1 @@ +xdist diff --git a/venv/lib/python3.10/site-packages/setuptools-65.5.1.dist-info/METADATA b/venv/lib/python3.10/site-packages/setuptools-65.5.1.dist-info/METADATA deleted file mode 100644 index 7b12160..0000000 --- a/venv/lib/python3.10/site-packages/setuptools-65.5.1.dist-info/METADATA +++ /dev/null @@ -1,144 +0,0 @@ -Metadata-Version: 2.1 -Name: setuptools -Version: 65.5.1 -Summary: Easily download, build, install, upgrade, and uninstall Python packages -Home-page: https://github.com/pypa/setuptools -Author: Python Packaging Authority -Author-email: distutils-sig@python.org -Project-URL: Documentation, https://setuptools.pypa.io/ -Project-URL: Changelog, https://setuptools.pypa.io/en/stable/history.html -Keywords: CPAN PyPI distutils eggs package management -Classifier: Development Status :: 5 - Production/Stable -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: MIT License -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3 :: Only -Classifier: Topic :: Software Development :: Libraries :: Python Modules -Classifier: Topic :: System :: Archiving :: Packaging -Classifier: Topic :: System :: Systems Administration -Classifier: Topic :: Utilities -Requires-Python: >=3.7 -License-File: LICENSE -Provides-Extra: certs -Provides-Extra: docs -Requires-Dist: sphinx (>=3.5) ; extra == 'docs' -Requires-Dist: jaraco.packaging (>=9) ; extra == 'docs' -Requires-Dist: rst.linker (>=1.9) ; extra == 'docs' -Requires-Dist: furo ; extra == 'docs' -Requires-Dist: jaraco.tidelift (>=1.4) ; extra == 'docs' -Requires-Dist: pygments-github-lexers (==0.0.5) ; extra == 'docs' -Requires-Dist: sphinx-favicon ; extra == 'docs' -Requires-Dist: sphinx-inline-tabs ; extra == 'docs' -Requires-Dist: sphinx-reredirects ; extra == 'docs' -Requires-Dist: sphinxcontrib-towncrier ; extra == 'docs' -Requires-Dist: sphinx-notfound-page (==0.8.3) ; extra == 'docs' -Requires-Dist: sphinx-hoverxref (<2) ; extra == 'docs' -Provides-Extra: ssl -Provides-Extra: testing -Requires-Dist: pytest (>=6) ; extra == 'testing' -Requires-Dist: pytest-checkdocs (>=2.4) ; 
extra == 'testing' -Requires-Dist: pytest-flake8 ; extra == 'testing' -Requires-Dist: flake8 (<5) ; extra == 'testing' -Requires-Dist: pytest-enabler (>=1.3) ; extra == 'testing' -Requires-Dist: pytest-perf ; extra == 'testing' -Requires-Dist: flake8-2020 ; extra == 'testing' -Requires-Dist: virtualenv (>=13.0.0) ; extra == 'testing' -Requires-Dist: wheel ; extra == 'testing' -Requires-Dist: pip (>=19.1) ; extra == 'testing' -Requires-Dist: jaraco.envs (>=2.2) ; extra == 'testing' -Requires-Dist: pytest-xdist ; extra == 'testing' -Requires-Dist: jaraco.path (>=3.2.0) ; extra == 'testing' -Requires-Dist: build[virtualenv] ; extra == 'testing' -Requires-Dist: filelock (>=3.4.0) ; extra == 'testing' -Requires-Dist: pip-run (>=8.8) ; extra == 'testing' -Requires-Dist: ini2toml[lite] (>=0.9) ; extra == 'testing' -Requires-Dist: tomli-w (>=1.0.0) ; extra == 'testing' -Requires-Dist: pytest-timeout ; extra == 'testing' -Provides-Extra: testing-integration -Requires-Dist: pytest ; extra == 'testing-integration' -Requires-Dist: pytest-xdist ; extra == 'testing-integration' -Requires-Dist: pytest-enabler ; extra == 'testing-integration' -Requires-Dist: virtualenv (>=13.0.0) ; extra == 'testing-integration' -Requires-Dist: tomli ; extra == 'testing-integration' -Requires-Dist: wheel ; extra == 'testing-integration' -Requires-Dist: jaraco.path (>=3.2.0) ; extra == 'testing-integration' -Requires-Dist: jaraco.envs (>=2.2) ; extra == 'testing-integration' -Requires-Dist: build[virtualenv] ; extra == 'testing-integration' -Requires-Dist: filelock (>=3.4.0) ; extra == 'testing-integration' -Requires-Dist: pytest-black (>=0.3.7) ; (platform_python_implementation != "PyPy") and extra == 'testing' -Requires-Dist: pytest-cov ; (platform_python_implementation != "PyPy") and extra == 'testing' -Requires-Dist: pytest-mypy (>=0.9.1) ; (platform_python_implementation != "PyPy") and extra == 'testing' - -.. image:: https://raw.githubusercontent.com/pypa/setuptools/main/docs/images/banner-640x320.svg - :align: center - -| - -.. image:: https://img.shields.io/pypi/v/setuptools.svg - :target: `PyPI link`_ - -.. image:: https://img.shields.io/pypi/pyversions/setuptools.svg - :target: `PyPI link`_ - -.. _PyPI link: https://pypi.org/project/setuptools - -.. image:: https://github.com/pypa/setuptools/workflows/tests/badge.svg - :target: https://github.com/pypa/setuptools/actions?query=workflow%3A%22tests%22 - :alt: tests - -.. image:: https://img.shields.io/badge/code%20style-black-000000.svg - :target: https://github.com/psf/black - :alt: Code style: Black - -.. image:: https://img.shields.io/readthedocs/setuptools/latest.svg - :target: https://setuptools.pypa.io - -.. image:: https://img.shields.io/badge/skeleton-2022-informational - :target: https://blog.jaraco.com/skeleton - -.. image:: https://img.shields.io/codecov/c/github/pypa/setuptools/master.svg?logo=codecov&logoColor=white - :target: https://codecov.io/gh/pypa/setuptools - -.. image:: https://tidelift.com/badges/github/pypa/setuptools?style=flat - :target: https://tidelift.com/subscription/pkg/pypi-setuptools?utm_source=pypi-setuptools&utm_medium=readme - -.. image:: https://img.shields.io/discord/803025117553754132 - :target: https://discord.com/channels/803025117553754132/815945031150993468 - :alt: Discord - -See the `Installation Instructions -`_ in the Python Packaging -User's Guide for instructions on installing, upgrading, and uninstalling -Setuptools. - -Questions and comments should be directed to `GitHub Discussions -`_. 
-Bug reports and especially tested patches may be -submitted directly to the `bug tracker -`_. - - -Code of Conduct -=============== - -Everyone interacting in the setuptools project's codebases, issue trackers, -chat rooms, and fora is expected to follow the -`PSF Code of Conduct `_. - - -For Enterprise -============== - -Available as part of the Tidelift Subscription. - -Setuptools and the maintainers of thousands of other packages are working with Tidelift to deliver one enterprise subscription that covers all of the open source you use. - -`Learn more `_. - - -Security Contact -================ - -To report a security vulnerability, please use the -`Tidelift security contact `_. -Tidelift will coordinate the fix and disclosure. diff --git a/venv/lib/python3.10/site-packages/setuptools-65.5.1.dist-info/RECORD b/venv/lib/python3.10/site-packages/setuptools-65.5.1.dist-info/RECORD deleted file mode 100644 index 51fe3dd..0000000 --- a/venv/lib/python3.10/site-packages/setuptools-65.5.1.dist-info/RECORD +++ /dev/null @@ -1,495 +0,0 @@ -distutils-precedence.pth,sha256=JjjOniUA5XKl4N5_rtZmHrVp0baW_LoHsN0iPaX10iQ,151 -_distutils_hack/__init__.py,sha256=TSekhUW1fdE3rjU3b88ybSBkJxCEpIeWBob4cEuU3ko,6128 -_distutils_hack/override.py,sha256=Eu_s-NF6VIZ4Cqd0tbbA5wtWky2IZPNd8et6GLt1mzo,44 -pkg_resources/__init__.py,sha256=fT5Y3P1tcSX8sJomClUU10WHeFmvqyNZM4UZHzdpAvg,108568 -pkg_resources/_vendor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -pkg_resources/_vendor/appdirs.py,sha256=MievUEuv3l_mQISH5SF0shDk_BNhHHzYiAPrT3ITN4I,24701 -pkg_resources/_vendor/zipp.py,sha256=ajztOH-9I7KA_4wqDYygtHa6xUBVZgFpmZ8FE74HHHI,8425 -pkg_resources/_vendor/importlib_resources/__init__.py,sha256=evPm12kLgYqTm-pbzm60bOuumumT8IpBNWFp0uMyrzE,506 -pkg_resources/_vendor/importlib_resources/_adapters.py,sha256=o51tP2hpVtohP33gSYyAkGNpLfYDBqxxYsadyiRZi1E,4504 -pkg_resources/_vendor/importlib_resources/_common.py,sha256=iIxAaQhotSh6TLLUEfL_ynU2fzEeyHMz9JcL46mUhLg,2741 -pkg_resources/_vendor/importlib_resources/_compat.py,sha256=nFBCGMvImglrqgYkb9aPgOj68-h6xbw-ca94XOv1-zs,2706 -pkg_resources/_vendor/importlib_resources/_itertools.py,sha256=WCdJ1Gs_kNFwKENyIG7TO0Y434IWCu0zjVVSsSbZwU8,884 -pkg_resources/_vendor/importlib_resources/_legacy.py,sha256=TMLkx6aEM6U8xIREPXqGZrMbUhTiPUuPl6ESD7RdYj4,3494 -pkg_resources/_vendor/importlib_resources/abc.py,sha256=MvTJJXajbl74s36Gyeesf76egtbFnh-TMtzQMVhFWXo,3886 -pkg_resources/_vendor/importlib_resources/readers.py,sha256=_9QLGQ5AzrED3PY8S2Zf8V6yLR0-nqqYqtQmgleDJzY,3566 -pkg_resources/_vendor/importlib_resources/simple.py,sha256=xt0qhXbwt3bZ86zuaaKbTiE9A0mDbwu0saRjUq_pcY0,2836 -pkg_resources/_vendor/jaraco/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -pkg_resources/_vendor/jaraco/context.py,sha256=7X1tpCLc5EN45iWGzGcsH0Unx62REIkvtRvglj0SiUA,5420 -pkg_resources/_vendor/jaraco/functools.py,sha256=eLwPh8FWY7rQ_cj1YxCekUkibTuerwyoJ_41H7Q7oWM,13515 -pkg_resources/_vendor/jaraco/text/__init__.py,sha256=cN55bFcceW4wTHG5ruv5IuEDRarP-4hBYX8zl94_c30,15526 -pkg_resources/_vendor/more_itertools/__init__.py,sha256=ZQYu_9H6stSG7viUgT32TFqslqcZwq82kWRZooKiI8Y,83 -pkg_resources/_vendor/more_itertools/more.py,sha256=oave_26jctLsuF30e1SOWMgW0bEuwS-t08wkaLUwvXc,132569 -pkg_resources/_vendor/more_itertools/recipes.py,sha256=N6aCDwoIPvE-aiqpGU-nbFwqiM3X8MKRcxBM84naW88,18410 -pkg_resources/_vendor/packaging/__about__.py,sha256=ugASIO2w1oUyH8_COqQ2X_s0rDhjbhQC3yJocD03h2c,661 -pkg_resources/_vendor/packaging/__init__.py,sha256=b9Kk5MF7KxhhLgcDmiUWukN-LatWFxPdNug0joPhHSk,497 
-pkg_resources/_vendor/packaging/_manylinux.py,sha256=XcbiXB-qcjv3bcohp6N98TMpOP4_j3m-iOA8ptK2GWY,11488 -pkg_resources/_vendor/packaging/_musllinux.py,sha256=_KGgY_qc7vhMGpoqss25n2hiLCNKRtvz9mCrS7gkqyc,4378 -pkg_resources/_vendor/packaging/_structures.py,sha256=q3eVNmbWJGG_S0Dit_S3Ao8qQqz_5PYTXFAKBZe5yr4,1431 -pkg_resources/_vendor/packaging/markers.py,sha256=gFSKoBTb0sKDw1v_apJy15lPr0v2mEvuEkfooTtcWx4,8496 -pkg_resources/_vendor/packaging/requirements.py,sha256=uJ4cjwm3_nrfHJLCcGU9mT5aw8SXfw8v1aBUD7OFuVs,4706 -pkg_resources/_vendor/packaging/specifiers.py,sha256=LRQ0kFsHrl5qfcFNEEJrIFYsnIHQUJXY9fIsakTrrqE,30110 -pkg_resources/_vendor/packaging/tags.py,sha256=lmsnGNiJ8C4D_Pf9PbM0qgbZvD9kmB9lpZBQUZa3R_Y,15699 -pkg_resources/_vendor/packaging/utils.py,sha256=dJjeat3BS-TYn1RrUFVwufUMasbtzLfYRoy_HXENeFQ,4200 -pkg_resources/_vendor/packaging/version.py,sha256=_fLRNrFrxYcHVfyo8vk9j8s6JM8N_xsSxVFr6RJyco8,14665 -pkg_resources/_vendor/pyparsing/__init__.py,sha256=52QH3lgPbJhba0estckoGPHRH8JvQSSCGoWiEn2m0bU,9159 -pkg_resources/_vendor/pyparsing/actions.py,sha256=wU9i32e0y1ymxKE3OUwSHO-SFIrt1h_wv6Ws0GQjpNU,6426 -pkg_resources/_vendor/pyparsing/common.py,sha256=lFL97ooIeR75CmW5hjURZqwDCTgruqltcTCZ-ulLO2Q,12936 -pkg_resources/_vendor/pyparsing/core.py,sha256=u8GptQE_H6wMkl8OZhxeK1aAPIDXXNgwdShORBwBVS4,213310 -pkg_resources/_vendor/pyparsing/exceptions.py,sha256=3LbSafD32NYb1Tzt85GHNkhEAU1eZkTtNSk24cPMemo,9023 -pkg_resources/_vendor/pyparsing/helpers.py,sha256=QpUOjW0-psvueMwWb9bQpU2noqKCv98_wnw1VSzSdVo,39129 -pkg_resources/_vendor/pyparsing/results.py,sha256=HgNvWVXBdQP-Q6PtJfoCEeOJk2nwEvG-2KVKC5sGA30,25341 -pkg_resources/_vendor/pyparsing/testing.py,sha256=7tu4Abp4uSeJV0N_yEPRmmNUhpd18ZQP3CrX41DM814,13402 -pkg_resources/_vendor/pyparsing/unicode.py,sha256=fwuhMj30SQ165Cv7HJpu-rSxGbRm93kN9L4Ei7VGc1Y,10787 -pkg_resources/_vendor/pyparsing/util.py,sha256=kq772O5YSeXOSdP-M31EWpbH_ayj7BMHImBYo9xPD5M,6805 -pkg_resources/_vendor/pyparsing/diagram/__init__.py,sha256=f_EfxahqrdkRVahmTwLJXkZ9EEDKNd-O7lBbpJYlE1g,23668 -pkg_resources/extern/__init__.py,sha256=inFoCK9jn_yRFqkbNSOxOYyZD0aB3awch_xtbwIW_-Y,2426 -setuptools/__init__.py,sha256=DqL4WTwyXFp0OakiBKz0HfB0nH4Fm06b3PX8sJWUg88,8429 -setuptools/_deprecation_warning.py,sha256=jU9-dtfv6cKmtQJOXN8nP1mm7gONw5kKEtiPtbwnZyI,218 -setuptools/_entry_points.py,sha256=5rRyEuiC0tdEsoCRJ6NWii5RET134mtDtjoSTFdLCwA,1972 -setuptools/_imp.py,sha256=HmF91IbitRfsD5z-g4_wmcuH-RahyIONbPgiCOFgtzA,2392 -setuptools/_importlib.py,sha256=1RLRzpNCPKEJRbUPVIPU1-H9dzUXulyL6N_ryxnjEwc,1311 -setuptools/_itertools.py,sha256=pZAgXNz6tRPUFnHAaKJ90xAgD0gLPemcE1396Zgz73o,675 -setuptools/_path.py,sha256=9GdbEur6f_lWmokar-Y-DDyds-XmzYnXrcBy0DExwDw,749 -setuptools/_reqs.py,sha256=ApdTOmDFyK7hbHDnAH8VwhtVD5kvnOthyMNTmrUeFXs,501 -setuptools/archive_util.py,sha256=6WShpDR_uGZOaORRfzBmJyTYtX9xtrhmXTFPqE8kL8s,7346 -setuptools/build_meta.py,sha256=Lw6LmKQVASeUGcSsRa7fQl3RZNkxNgSk4eRRUwcuJGs,19539 -setuptools/cli-32.exe,sha256=dfEuovMNnA2HLa3jRfMPVi5tk4R7alCbpTvuxtCyw0Y,65536 -setuptools/cli-64.exe,sha256=KLABu5pyrnokJCv6skjXZ6GsXeyYHGcqOUT3oHI3Xpo,74752 -setuptools/cli-arm64.exe,sha256=o9amxowudZ98NvNWh_a2DRY8LhoIRqTAekxABqltiMc,137216 -setuptools/cli.exe,sha256=dfEuovMNnA2HLa3jRfMPVi5tk4R7alCbpTvuxtCyw0Y,65536 -setuptools/dep_util.py,sha256=BDx1BkzNQntvAB4alypHbW5UVBzjqths000PrUL4Zqc,949 -setuptools/depends.py,sha256=QYQIadr5DwLxPzkErhNt5hmRhvGhWxoXZMRXCm_jcQ0,5499 -setuptools/discovery.py,sha256=UZCeULUrV21xBTFBTTLNbta_rq2yjKa9kRwNXUIafRA,20799 
# name of dll/exe file - ld_args.extend([',', output_filename]) + ld_args.extend([',',output_filename]) # no map file and start libraries ld_args.append(',,') @@ -294,23 +276,24 @@ def link( # noqa: C901 ld_args.append(libfile) # some default libraries - ld_args.append('import32') - ld_args.append('cw32mt') + ld_args.append ('import32') + ld_args.append ('cw32mt') # def file for export symbols - ld_args.extend([',', def_file]) + ld_args.extend([',',def_file]) # add resource files ld_args.append(',') ld_args.extend(resources) + if extra_preargs: ld_args[:0] = extra_preargs if extra_postargs: ld_args.extend(extra_postargs) - self.mkpath(os.path.dirname(output_filename)) + self.mkpath (os.path.dirname (output_filename)) try: - self.spawn([self.linker] + ld_args) + self.spawn ([self.linker] + ld_args) except DistutilsExecError as msg: raise LinkError(msg) @@ -321,7 +304,8 @@ def link( # noqa: C901 # -- Miscellaneous methods ----------------------------------------- - def find_library_file(self, dirs, lib, debug=0): + + def find_library_file (self, dirs, lib, debug=0): # List of effective library names to try, in order of preference: # xxx_bcpp.lib is better than xxx.lib # and xxx_d.lib is better than xxx.lib if debug is set @@ -332,7 +316,7 @@ def find_library_file(self, dirs, lib, debug=0): # compiler they care about, since (almost?) every Windows compiler # seems to have a different format for static libraries. if debug: - dlib = lib + "_d" + dlib = (lib + "_d") try_names = (dlib + "_bcpp", lib + "_bcpp", dlib, lib) else: try_names = (lib + "_bcpp", lib) @@ -347,42 +331,43 @@ def find_library_file(self, dirs, lib, debug=0): return None # overwrite the one from CCompiler to support rc and res-files - def object_filenames(self, source_filenames, strip_dir=0, output_dir=''): - if output_dir is None: - output_dir = '' + def object_filenames (self, + source_filenames, + strip_dir=0, + output_dir=''): + if output_dir is None: output_dir = '' obj_names = [] for src_name in source_filenames: # use normcase to make sure '.rc' is really '.rc' and not '.RC' - (base, ext) = os.path.splitext(os.path.normcase(src_name)) - if ext not in (self.src_extensions + ['.rc', '.res']): - raise UnknownFileError( - "unknown file type '{}' (from '{}')".format(ext, src_name) - ) + (base, ext) = os.path.splitext (os.path.normcase(src_name)) + if ext not in (self.src_extensions + ['.rc','.res']): + raise UnknownFileError("unknown file type '%s' (from '%s')" % \ + (ext, src_name)) if strip_dir: - base = os.path.basename(base) + base = os.path.basename (base) if ext == '.res': # these can go unchanged - obj_names.append(os.path.join(output_dir, base + ext)) + obj_names.append (os.path.join (output_dir, base + ext)) elif ext == '.rc': # these need to be compiled to .res-files - obj_names.append(os.path.join(output_dir, base + '.res')) + obj_names.append (os.path.join (output_dir, base + '.res')) else: - obj_names.append(os.path.join(output_dir, base + self.obj_extension)) + obj_names.append (os.path.join (output_dir, + base + self.obj_extension)) return obj_names # object_filenames () - def preprocess( - self, - source, - output_file=None, - macros=None, - include_dirs=None, - extra_preargs=None, - extra_postargs=None, - ): - - (_, macros, include_dirs) = self._fix_compile_args(None, macros, include_dirs) + def preprocess (self, + source, + output_file=None, + macros=None, + include_dirs=None, + extra_preargs=None, + extra_postargs=None): + + (_, macros, include_dirs) = \ + self._fix_compile_args(None, macros, include_dirs) 
pp_opts = gen_preprocess_options(macros, include_dirs) pp_args = ['cpp32.exe'] + pp_opts if output_file is not None: diff --git a/venv/lib/python3.10/site-packages/setuptools/_distutils/ccompiler.py b/venv/lib/python3.10/site-packages/setuptools/_distutils/ccompiler.py index 97551c9..777fc66 100644 --- a/venv/lib/python3.10/site-packages/setuptools/_distutils/ccompiler.py +++ b/venv/lib/python3.10/site-packages/setuptools/_distutils/ccompiler.py @@ -3,17 +3,8 @@ Contains CCompiler, an abstract base class that defines the interface for the Distutils compiler abstraction model.""" -import sys -import os -import re - -from distutils.errors import ( - CompileError, - LinkError, - UnknownFileError, - DistutilsPlatformError, - DistutilsModuleError, -) +import sys, os, re +from distutils.errors import * from distutils.spawn import spawn from distutils.file_util import move_file from distutils.dir_util import mkpath @@ -21,7 +12,6 @@ from distutils.util import split_quoted, execute from distutils import log - class CCompiler: """Abstract base class to define the interface that must be implemented by real compiler classes. Also has some utility methods used by @@ -66,16 +56,17 @@ class CCompiler: # think this is useless without the ability to null out the # library search path anyways. + # Subclasses that rely on the standard filename generation methods # implemented below should override these; see the comment near # those methods ('object_filenames()' et. al.) for details: - src_extensions = None # list of strings - obj_extension = None # string + src_extensions = None # list of strings + obj_extension = None # string static_lib_extension = None - shared_lib_extension = None # string - static_lib_format = None # format string - shared_lib_format = None # prob. same as static_lib_format - exe_extension = None # string + shared_lib_extension = None # string + static_lib_format = None # format string + shared_lib_format = None # prob. same as static_lib_format + exe_extension = None # string # Default language settings. language_map is used to detect a source # file or Extension target language, checking source filenames. @@ -83,25 +74,14 @@ class CCompiler: # what language to use when mixing source types. For example, if some # extension has two files with ".c" extension, and one with ".cpp", it # is still linked as c++. - language_map = { - ".c": "c", - ".cc": "c++", - ".cpp": "c++", - ".cxx": "c++", - ".m": "objc", - } + language_map = {".c" : "c", + ".cc" : "c++", + ".cpp" : "c++", + ".cxx" : "c++", + ".m" : "objc", + } language_order = ["c++", "objc", "c"] - include_dirs = [] - """ - include dirs specific to this compiler class - """ - - library_dirs = [] - """ - library dirs specific to this compiler class - """ - def __init__(self, verbose=0, dry_run=0, force=0): self.dry_run = dry_run self.force = force @@ -166,10 +146,8 @@ class (via the 'executables' class attribute), but most will have: for key in kwargs: if key not in self.executables: - raise ValueError( - "unknown executable '%s' for class %s" - % (key, self.__class__.__name__) - ) + raise ValueError("unknown executable '%s' for class %s" % + (key, self.__class__.__name__)) self.set_executable(key, kwargs[key]) def set_executable(self, key, value): @@ -192,19 +170,14 @@ def _check_macro_definitions(self, definitions): nothing if all definitions are OK, raise TypeError otherwise. 
""" for defn in definitions: - if not ( - isinstance(defn, tuple) - and ( - len(defn) in (1, 2) - and (isinstance(defn[1], str) or defn[1] is None) - ) - and isinstance(defn[0], str) - ): - raise TypeError( - ("invalid macro definition '%s': " % defn) - + "must be tuple (string,), (string, string), or " - + "(string, None)" - ) + if not (isinstance(defn, tuple) and + (len(defn) in (1, 2) and + (isinstance (defn[1], str) or defn[1] is None)) and + isinstance (defn[0], str)): + raise TypeError(("invalid macro definition '%s': " % defn) + \ + "must be tuple (string,), (string, string), or " + \ + "(string, None)") + # -- Bookkeeping methods ------------------------------------------- @@ -217,7 +190,7 @@ def define_macro(self, name, value=None): """ # Delete from the list of macro definitions/undefinitions if # already there (so that this one will take precedence). - i = self._find_macro(name) + i = self._find_macro (name) if i is not None: del self.macros[i] @@ -234,7 +207,7 @@ def undefine_macro(self, name): """ # Delete from the list of macro definitions/undefinitions if # already there (so that this one will take precedence). - i = self._find_macro(name) + i = self._find_macro (name) if i is not None: del self.macros[i] @@ -328,20 +301,41 @@ def set_link_objects(self, objects): """ self.objects = objects[:] + # -- Private utility methods -------------------------------------- # (here for the convenience of subclasses) # Helper method to prep compiler in subclass compile() methods - def _setup_compile(self, outdir, macros, incdirs, sources, depends, extra): + def _setup_compile(self, outdir, macros, incdirs, sources, depends, + extra): """Process arguments and decide which source files to compile.""" - outdir, macros, incdirs = self._fix_compile_args(outdir, macros, incdirs) + if outdir is None: + outdir = self.output_dir + elif not isinstance(outdir, str): + raise TypeError("'output_dir' must be a string or None") + + if macros is None: + macros = self.macros + elif isinstance(macros, list): + macros = macros + (self.macros or []) + else: + raise TypeError("'macros' (if supplied) must be a list of tuples") + + if incdirs is None: + incdirs = self.include_dirs + elif isinstance(incdirs, (list, tuple)): + incdirs = list(incdirs) + (self.include_dirs or []) + else: + raise TypeError( + "'include_dirs' (if supplied) must be a list of strings") if extra is None: extra = [] # Get the list of expected output (object) files - objects = self.object_filenames(sources, strip_dir=0, output_dir=outdir) + objects = self.object_filenames(sources, strip_dir=0, + output_dir=outdir) assert len(objects) == len(sources) pp_opts = gen_preprocess_options(macros, incdirs) @@ -392,10 +386,8 @@ def _fix_compile_args(self, output_dir, macros, include_dirs): elif isinstance(include_dirs, (list, tuple)): include_dirs = list(include_dirs) + (self.include_dirs or []) else: - raise TypeError("'include_dirs' (if supplied) must be a list of strings") - - # add include dirs for class - include_dirs += self.__class__.include_dirs + raise TypeError( + "'include_dirs' (if supplied) must be a list of strings") return output_dir, macros, include_dirs @@ -442,30 +434,27 @@ def _fix_lib_args(self, libraries, library_dirs, runtime_library_dirs): if libraries is None: libraries = self.libraries elif isinstance(libraries, (list, tuple)): - libraries = list(libraries) + (self.libraries or []) + libraries = list (libraries) + (self.libraries or []) else: - raise TypeError("'libraries' (if supplied) must be a list of strings") + raise 
TypeError( + "'libraries' (if supplied) must be a list of strings") if library_dirs is None: library_dirs = self.library_dirs elif isinstance(library_dirs, (list, tuple)): - library_dirs = list(library_dirs) + (self.library_dirs or []) + library_dirs = list (library_dirs) + (self.library_dirs or []) else: - raise TypeError("'library_dirs' (if supplied) must be a list of strings") - - # add library dirs for class - library_dirs += self.__class__.library_dirs + raise TypeError( + "'library_dirs' (if supplied) must be a list of strings") if runtime_library_dirs is None: runtime_library_dirs = self.runtime_library_dirs elif isinstance(runtime_library_dirs, (list, tuple)): - runtime_library_dirs = list(runtime_library_dirs) + ( - self.runtime_library_dirs or [] - ) + runtime_library_dirs = (list(runtime_library_dirs) + + (self.runtime_library_dirs or [])) else: - raise TypeError( - "'runtime_library_dirs' (if supplied) " "must be a list of strings" - ) + raise TypeError("'runtime_library_dirs' (if supplied) " + "must be a list of strings") return (libraries, library_dirs, runtime_library_dirs) @@ -477,9 +466,9 @@ def _need_link(self, objects, output_file): return True else: if self.dry_run: - newer = newer_group(objects, output_file, missing='newer') + newer = newer_group (objects, output_file, missing='newer') else: - newer = newer_group(objects, output_file) + newer = newer_group (objects, output_file) return newer def detect_language(self, sources): @@ -502,18 +491,12 @@ def detect_language(self, sources): pass return lang + # -- Worker methods ------------------------------------------------ # (must be implemented by subclasses) - def preprocess( - self, - source, - output_file=None, - macros=None, - include_dirs=None, - extra_preargs=None, - extra_postargs=None, - ): + def preprocess(self, source, output_file=None, macros=None, + include_dirs=None, extra_preargs=None, extra_postargs=None): """Preprocess a single C/C++ source file, named in 'source'. Output will be written to file named 'output_file', or stdout if 'output_file' not supplied. 'macros' is a list of macro @@ -525,17 +508,9 @@ def preprocess( """ pass - def compile( - self, - sources, - output_dir=None, - macros=None, - include_dirs=None, - debug=0, - extra_preargs=None, - extra_postargs=None, - depends=None, - ): + def compile(self, sources, output_dir=None, macros=None, + include_dirs=None, debug=0, extra_preargs=None, + extra_postargs=None, depends=None): """Compile one or more source files. 'sources' must be a list of filenames, most likely C/C++ @@ -586,9 +561,9 @@ def compile( """ # A concrete compiler class can either override this method # entirely or implement _compile(). - macros, objects, extra_postargs, pp_opts, build = self._setup_compile( - output_dir, macros, include_dirs, sources, depends, extra_postargs - ) + macros, objects, extra_postargs, pp_opts, build = \ + self._setup_compile(output_dir, macros, include_dirs, sources, + depends, extra_postargs) cc_args = self._get_cc_args(pp_opts, debug, extra_preargs) for obj in objects: @@ -607,9 +582,8 @@ def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts): # should implement _compile(). pass - def create_static_lib( - self, objects, output_libname, output_dir=None, debug=0, target_lang=None - ): + def create_static_lib(self, objects, output_libname, output_dir=None, + debug=0, target_lang=None): """Link a bunch of stuff together to create a static library file. 
The "bunch of stuff" consists of the list of object files supplied as 'objects', the extra object files supplied to @@ -634,27 +608,26 @@ def create_static_lib( """ pass + # values for target_desc parameter in link() SHARED_OBJECT = "shared_object" SHARED_LIBRARY = "shared_library" EXECUTABLE = "executable" - def link( - self, - target_desc, - objects, - output_filename, - output_dir=None, - libraries=None, - library_dirs=None, - runtime_library_dirs=None, - export_symbols=None, - debug=0, - extra_preargs=None, - extra_postargs=None, - build_temp=None, - target_lang=None, - ): + def link(self, + target_desc, + objects, + output_filename, + output_dir=None, + libraries=None, + library_dirs=None, + runtime_library_dirs=None, + export_symbols=None, + debug=0, + extra_preargs=None, + extra_postargs=None, + build_temp=None, + target_lang=None): """Link a bunch of stuff together to create an executable or shared library file. @@ -700,98 +673,66 @@ def link( """ raise NotImplementedError + # Old 'link_*()' methods, rewritten to use the new 'link()' method. - def link_shared_lib( - self, - objects, - output_libname, - output_dir=None, - libraries=None, - library_dirs=None, - runtime_library_dirs=None, - export_symbols=None, - debug=0, - extra_preargs=None, - extra_postargs=None, - build_temp=None, - target_lang=None, - ): - self.link( - CCompiler.SHARED_LIBRARY, - objects, - self.library_filename(output_libname, lib_type='shared'), - output_dir, - libraries, - library_dirs, - runtime_library_dirs, - export_symbols, - debug, - extra_preargs, - extra_postargs, - build_temp, - target_lang, - ) - - def link_shared_object( - self, - objects, - output_filename, - output_dir=None, - libraries=None, - library_dirs=None, - runtime_library_dirs=None, - export_symbols=None, - debug=0, - extra_preargs=None, - extra_postargs=None, - build_temp=None, - target_lang=None, - ): - self.link( - CCompiler.SHARED_OBJECT, - objects, - output_filename, - output_dir, - libraries, - library_dirs, - runtime_library_dirs, - export_symbols, - debug, - extra_preargs, - extra_postargs, - build_temp, - target_lang, - ) - - def link_executable( - self, - objects, - output_progname, - output_dir=None, - libraries=None, - library_dirs=None, - runtime_library_dirs=None, - debug=0, - extra_preargs=None, - extra_postargs=None, - target_lang=None, - ): - self.link( - CCompiler.EXECUTABLE, - objects, - self.executable_filename(output_progname), - output_dir, - libraries, - library_dirs, - runtime_library_dirs, - None, - debug, - extra_preargs, - extra_postargs, - None, - target_lang, - ) + def link_shared_lib(self, + objects, + output_libname, + output_dir=None, + libraries=None, + library_dirs=None, + runtime_library_dirs=None, + export_symbols=None, + debug=0, + extra_preargs=None, + extra_postargs=None, + build_temp=None, + target_lang=None): + self.link(CCompiler.SHARED_LIBRARY, objects, + self.library_filename(output_libname, lib_type='shared'), + output_dir, + libraries, library_dirs, runtime_library_dirs, + export_symbols, debug, + extra_preargs, extra_postargs, build_temp, target_lang) + + + def link_shared_object(self, + objects, + output_filename, + output_dir=None, + libraries=None, + library_dirs=None, + runtime_library_dirs=None, + export_symbols=None, + debug=0, + extra_preargs=None, + extra_postargs=None, + build_temp=None, + target_lang=None): + self.link(CCompiler.SHARED_OBJECT, objects, + output_filename, output_dir, + libraries, library_dirs, runtime_library_dirs, + export_symbols, debug, + extra_preargs, 
extra_postargs, build_temp, target_lang) + + + def link_executable(self, + objects, + output_progname, + output_dir=None, + libraries=None, + library_dirs=None, + runtime_library_dirs=None, + debug=0, + extra_preargs=None, + extra_postargs=None, + target_lang=None): + self.link(CCompiler.EXECUTABLE, objects, + self.executable_filename(output_progname), output_dir, + libraries, library_dirs, runtime_library_dirs, None, + debug, extra_preargs, extra_postargs, None, target_lang) + # -- Miscellaneous methods ----------------------------------------- # These are all used by the 'gen_lib_options() function; there is @@ -816,14 +757,8 @@ def library_option(self, lib): """ raise NotImplementedError - def has_function( # noqa: C901 - self, - funcname, - includes=None, - include_dirs=None, - libraries=None, - library_dirs=None, - ): + def has_function(self, funcname, includes=None, include_dirs=None, + libraries=None, library_dirs=None): """Return a boolean indicating whether funcname is supported on the current platform. The optional arguments can be used to augment the compilation environment. @@ -832,7 +767,6 @@ def has_function( # noqa: C901 # import math which might not be available at that point - maybe # the necessary logic should just be inlined? import tempfile - if includes is None: includes = [] if include_dirs is None: @@ -846,15 +780,12 @@ def has_function( # noqa: C901 try: for incl in includes: f.write("""#include "%s"\n""" % incl) - f.write( - """\ + f.write("""\ int main (int argc, char **argv) { %s(); return 0; } -""" - % funcname - ) +""" % funcname) finally: f.close() try: @@ -865,9 +796,9 @@ def has_function( # noqa: C901 os.remove(fname) try: - self.link_executable( - objects, "a.out", libraries=libraries, library_dirs=library_dirs - ) + self.link_executable(objects, "a.out", + libraries=libraries, + library_dirs=library_dirs) except (LinkError, TypeError): return False else: @@ -877,7 +808,7 @@ def has_function( # noqa: C901 os.remove(fn) return True - def find_library_file(self, dirs, lib, debug=0): + def find_library_file (self, dirs, lib, debug=0): """Search the specified list of directories for a static or shared library file 'lib' and return the full path to that file. If 'debug' true, look for a debugging version (if that makes sense on @@ -923,39 +854,19 @@ def find_library_file(self, dirs, lib, debug=0): def object_filenames(self, source_filenames, strip_dir=0, output_dir=''): if output_dir is None: output_dir = '' - return list( - self._make_out_path(output_dir, strip_dir, src_name) - for src_name in source_filenames - ) - - @property - def out_extensions(self): - return dict.fromkeys(self.src_extensions, self.obj_extension) - - def _make_out_path(self, output_dir, strip_dir, src_name): - base, ext = os.path.splitext(src_name) - base = self._make_relative(base) - try: - new_ext = self.out_extensions[ext] - except LookupError: - raise UnknownFileError( - "unknown file type '{}' (from '{}')".format(ext, src_name) - ) - if strip_dir: - base = os.path.basename(base) - return os.path.join(output_dir, base + new_ext) - - @staticmethod - def _make_relative(base): - """ - In order to ensure that a filename always honors the - indicated output_dir, make sure it's relative. - Ref python/cpython#37775. 
- """ - # Chop off the drive - no_drive = os.path.splitdrive(base)[1] - # If abs, chop off leading / - return no_drive[os.path.isabs(no_drive) :] + obj_names = [] + for src_name in source_filenames: + base, ext = os.path.splitext(src_name) + base = os.path.splitdrive(base)[1] # Chop off the drive + base = base[os.path.isabs(base):] # If abs, chop off leading / + if ext not in self.src_extensions: + raise UnknownFileError( + "unknown file type '%s' (from '%s')" % (ext, src_name)) + if strip_dir: + base = os.path.basename(base) + obj_names.append(os.path.join(output_dir, + base + self.obj_extension)) + return obj_names def shared_object_filename(self, basename, strip_dir=0, output_dir=''): assert output_dir is not None @@ -969,13 +880,12 @@ def executable_filename(self, basename, strip_dir=0, output_dir=''): basename = os.path.basename(basename) return os.path.join(output_dir, basename + (self.exe_extension or '')) - def library_filename( - self, libname, lib_type='static', strip_dir=0, output_dir='' # or 'shared' - ): + def library_filename(self, libname, lib_type='static', # or 'shared' + strip_dir=0, output_dir=''): assert output_dir is not None - expected = '"static", "shared", "dylib", "xcode_stub"' - if lib_type not in eval(expected): - raise ValueError(f"'lib_type' must be {expected}") + if lib_type not in ("static", "shared", "dylib", "xcode_stub"): + raise ValueError( + "'lib_type' must be \"static\", \"shared\", \"dylib\", or \"xcode_stub\"") fmt = getattr(self, lib_type + "_lib_format") ext = getattr(self, lib_type + "_lib_extension") @@ -986,6 +896,7 @@ def library_filename( return os.path.join(output_dir, dir, filename) + # -- Utility methods ----------------------------------------------- def announce(self, msg, level=1): @@ -993,7 +904,6 @@ def announce(self, msg, level=1): def debug_print(self, msg): from distutils.debug import DEBUG - if DEBUG: print(msg) @@ -1009,7 +919,7 @@ def spawn(self, cmd, **kwargs): def move_file(self, src, dst): return move_file(src, dst, dry_run=self.dry_run) - def mkpath(self, name, mode=0o777): + def mkpath (self, name, mode=0o777): mkpath(name, mode, dry_run=self.dry_run) @@ -1018,59 +928,54 @@ def mkpath(self, name, mode=0o777): # patterns. Order is important; platform mappings are preferred over # OS names. _default_compilers = ( + # Platform string mappings + # on a cygwin built python we can use gcc like an ordinary UNIXish # compiler ('cygwin.*', 'unix'), + # OS name mappings ('posix', 'unix'), ('nt', 'msvc'), -) + ) def get_default_compiler(osname=None, platform=None): """Determine the default compiler to use for the given platform. - osname should be one of the standard Python OS names (i.e. the - ones returned by os.name) and platform the common value - returned by sys.platform for the platform in question. + osname should be one of the standard Python OS names (i.e. the + ones returned by os.name) and platform the common value + returned by sys.platform for the platform in question. - The default values are os.name and sys.platform in case the - parameters are not given. + The default values are os.name and sys.platform in case the + parameters are not given. 
""" if osname is None: osname = os.name if platform is None: platform = sys.platform for pattern, compiler in _default_compilers: - if ( - re.match(pattern, platform) is not None - or re.match(pattern, osname) is not None - ): + if re.match(pattern, platform) is not None or \ + re.match(pattern, osname) is not None: return compiler # Default to Unix compiler return 'unix' - # Map compiler types to (module_name, class_name) pairs -- ie. where to # find the code that implements an interface to this compiler. (The module # is assumed to be in the 'distutils' package.) -compiler_class = { - 'unix': ('unixccompiler', 'UnixCCompiler', "standard UNIX-style compiler"), - 'msvc': ('_msvccompiler', 'MSVCCompiler', "Microsoft Visual C++"), - 'cygwin': ( - 'cygwinccompiler', - 'CygwinCCompiler', - "Cygwin port of GNU C Compiler for Win32", - ), - 'mingw32': ( - 'cygwinccompiler', - 'Mingw32CCompiler', - "Mingw32 port of GNU C Compiler for Win32", - ), - 'bcpp': ('bcppcompiler', 'BCPPCompiler', "Borland C++ Compiler"), -} - +compiler_class = { 'unix': ('unixccompiler', 'UnixCCompiler', + "standard UNIX-style compiler"), + 'msvc': ('_msvccompiler', 'MSVCCompiler', + "Microsoft Visual C++"), + 'cygwin': ('cygwinccompiler', 'CygwinCCompiler', + "Cygwin port of GNU C Compiler for Win32"), + 'mingw32': ('cygwinccompiler', 'Mingw32CCompiler', + "Mingw32 port of GNU C Compiler for Win32"), + 'bcpp': ('bcppcompiler', 'BCPPCompiler', + "Borland C++ Compiler"), + } def show_compilers(): """Print list of available compilers (used by the "--help-compiler" @@ -1080,10 +985,10 @@ def show_compilers(): # "--compiler", which just happens to be the case for the three # commands that use it. from distutils.fancy_getopt import FancyGetopt - compilers = [] for compiler in compiler_class.keys(): - compilers.append(("compiler=" + compiler, None, compiler_class[compiler][2])) + compilers.append(("compiler="+compiler, None, + compiler_class[compiler][2])) compilers.sort() pretty_printer = FancyGetopt(compilers) pretty_printer.print_help("List of available compilers:") @@ -1116,18 +1021,17 @@ def new_compiler(plat=None, compiler=None, verbose=0, dry_run=0, force=0): try: module_name = "distutils." 
+ module_name - __import__(module_name) + __import__ (module_name) module = sys.modules[module_name] klass = vars(module)[class_name] except ImportError: raise DistutilsModuleError( - "can't compile C/C++ code: unable to load module '%s'" % module_name - ) + "can't compile C/C++ code: unable to load module '%s'" % \ + module_name) except KeyError: raise DistutilsModuleError( - "can't compile C/C++ code: unable to find class '%s' " - "in module '%s'" % (class_name, module_name) - ) + "can't compile C/C++ code: unable to find class '%s' " + "in module '%s'" % (class_name, module_name)) # XXX The None is necessary to preserve backwards compatibility # with classes that expect verbose to be the first positional @@ -1160,14 +1064,14 @@ def gen_preprocess_options(macros, include_dirs): for macro in macros: if not (isinstance(macro, tuple) and 1 <= len(macro) <= 2): raise TypeError( - "bad macro definition '%s': " - "each element of 'macros' list must be a 1- or 2-tuple" % macro - ) + "bad macro definition '%s': " + "each element of 'macros' list must be a 1- or 2-tuple" + % macro) - if len(macro) == 1: # undefine this macro + if len(macro) == 1: # undefine this macro pp_opts.append("-U%s" % macro[0]) elif len(macro) == 2: - if macro[1] is None: # define with no explicit value + if macro[1] is None: # define with no explicit value pp_opts.append("-D%s" % macro[0]) else: # XXX *don't* need to be clever about quoting the @@ -1180,7 +1084,7 @@ def gen_preprocess_options(macros, include_dirs): return pp_opts -def gen_lib_options(compiler, library_dirs, runtime_library_dirs, libraries): +def gen_lib_options (compiler, library_dirs, runtime_library_dirs, libraries): """Generate linker options for searching library directories and linking with specific libraries. 'libraries' and 'library_dirs' are, respectively, lists of library names (not filenames!) and search @@ -1212,9 +1116,8 @@ def gen_lib_options(compiler, library_dirs, runtime_library_dirs, libraries): if lib_file: lib_opts.append(lib_file) else: - compiler.warn( - "no library file corresponding to " "'%s' found (skipping)" % lib - ) + compiler.warn("no library file corresponding to " + "'%s' found (skipping)" % lib) else: - lib_opts.append(compiler.library_option(lib)) + lib_opts.append(compiler.library_option (lib)) return lib_opts diff --git a/venv/lib/python3.10/site-packages/setuptools/_distutils/cmd.py b/venv/lib/python3.10/site-packages/setuptools/_distutils/cmd.py index 68a9267..dba3191 100644 --- a/venv/lib/python3.10/site-packages/setuptools/_distutils/cmd.py +++ b/venv/lib/python3.10/site-packages/setuptools/_distutils/cmd.py @@ -4,14 +4,11 @@ in the distutils.command package. """ -import sys -import os -import re +import sys, os, re from distutils.errors import DistutilsOptionError from distutils import util, dir_util, file_util, archive_util, dep_util from distutils import log - class Command: """Abstract base class for defining command classes, the "worker bees" of the Distutils. A useful analogy for command classes is to think of @@ -44,6 +41,7 @@ class Command: # defined. The canonical example is the "install" command. sub_commands = [] + # -- Creation/initialization methods ------------------------------- def __init__(self, dist): @@ -132,9 +130,8 @@ def initialize_options(self): This method must be implemented by all command classes. 
""" - raise RuntimeError( - "abstract method -- subclass %s must override" % self.__class__ - ) + raise RuntimeError("abstract method -- subclass %s must override" + % self.__class__) def finalize_options(self): """Set final values for all the options that this command supports. @@ -147,13 +144,12 @@ def finalize_options(self): This method must be implemented by all command classes. """ - raise RuntimeError( - "abstract method -- subclass %s must override" % self.__class__ - ) + raise RuntimeError("abstract method -- subclass %s must override" + % self.__class__) + def dump_options(self, header=None, indent=""): from distutils.fancy_getopt import longopt_xlate - if header is None: header = "command options for '%s':" % self.get_command_name() self.announce(indent + header, level=log.INFO) @@ -163,7 +159,8 @@ def dump_options(self, header=None, indent=""): if option[-1] == "=": option = option[:-1] value = getattr(self, option) - self.announce(indent + "{} = {}".format(option, value), level=log.INFO) + self.announce(indent + "%s = %s" % (option, value), + level=log.INFO) def run(self): """A command's raison d'etre: carry out the action it exists to @@ -175,9 +172,8 @@ def run(self): This method must be implemented by all command classes. """ - raise RuntimeError( - "abstract method -- subclass %s must override" % self.__class__ - ) + raise RuntimeError("abstract method -- subclass %s must override" + % self.__class__) def announce(self, msg, level=1): """If the current verbosity level is of greater than or equal to @@ -190,11 +186,11 @@ def debug_print(self, msg): DISTUTILS_DEBUG environment variable) flag is true. """ from distutils.debug import DEBUG - if DEBUG: print(msg) sys.stdout.flush() + # -- Option validation methods ------------------------------------- # (these are very handy in writing the 'finalize_options()' method) # @@ -214,9 +210,8 @@ def _ensure_stringlike(self, option, what, default=None): setattr(self, option, default) return default elif not isinstance(val, str): - raise DistutilsOptionError( - "'{}' must be a {} (got `{}`)".format(option, what, val) - ) + raise DistutilsOptionError("'%s' must be a %s (got `%s`)" + % (option, what, val)) return val def ensure_string(self, option, default=None): @@ -243,29 +238,27 @@ def ensure_string_list(self, option): ok = False if not ok: raise DistutilsOptionError( - "'{}' must be a list of strings (got {!r})".format(option, val) - ) + "'%s' must be a list of strings (got %r)" + % (option, val)) - def _ensure_tested_string(self, option, tester, what, error_fmt, default=None): + def _ensure_tested_string(self, option, tester, what, error_fmt, + default=None): val = self._ensure_stringlike(option, what, default) if val is not None and not tester(val): - raise DistutilsOptionError( - ("error in '%s' option: " + error_fmt) % (option, val) - ) + raise DistutilsOptionError(("error in '%s' option: " + error_fmt) + % (option, val)) def ensure_filename(self, option): """Ensure that 'option' is the name of an existing file.""" - self._ensure_tested_string( - option, os.path.isfile, "filename", "'%s' does not exist or is not a file" - ) + self._ensure_tested_string(option, os.path.isfile, + "filename", + "'%s' does not exist or is not a file") def ensure_dirname(self, option): - self._ensure_tested_string( - option, - os.path.isdir, - "directory name", - "'%s' does not exist or is not a directory", - ) + self._ensure_tested_string(option, os.path.isdir, + "directory name", + "'%s' does not exist or is not a directory") + # -- Convenience 
methods for commands ------------------------------ @@ -309,7 +302,8 @@ def get_finalized_command(self, command, create=1): # XXX rename to 'get_reinitialized_command()'? (should do the # same in dist.py, if so) def reinitialize_command(self, command, reinit_subcommands=0): - return self.distribution.reinitialize_command(command, reinit_subcommands) + return self.distribution.reinitialize_command(command, + reinit_subcommands) def run_command(self, command): """Run some other command: uses the 'run_command()' method of @@ -331,6 +325,7 @@ def get_sub_commands(self): commands.append(cmd_name) return commands + # -- External world manipulation ----------------------------------- def warn(self, msg): @@ -342,70 +337,41 @@ def execute(self, func, args, msg=None, level=1): def mkpath(self, name, mode=0o777): dir_util.mkpath(name, mode, dry_run=self.dry_run) - def copy_file( - self, infile, outfile, preserve_mode=1, preserve_times=1, link=None, level=1 - ): + def copy_file(self, infile, outfile, preserve_mode=1, preserve_times=1, + link=None, level=1): """Copy a file respecting verbose, dry-run and force flags. (The former two default to whatever is in the Distribution object, and the latter defaults to false for commands that don't define it.)""" - return file_util.copy_file( - infile, - outfile, - preserve_mode, - preserve_times, - not self.force, - link, - dry_run=self.dry_run, - ) - - def copy_tree( - self, - infile, - outfile, - preserve_mode=1, - preserve_times=1, - preserve_symlinks=0, - level=1, - ): + return file_util.copy_file(infile, outfile, preserve_mode, + preserve_times, not self.force, link, + dry_run=self.dry_run) + + def copy_tree(self, infile, outfile, preserve_mode=1, preserve_times=1, + preserve_symlinks=0, level=1): """Copy an entire directory tree respecting verbose, dry-run, and force flags. """ - return dir_util.copy_tree( - infile, - outfile, - preserve_mode, - preserve_times, - preserve_symlinks, - not self.force, - dry_run=self.dry_run, - ) - - def move_file(self, src, dst, level=1): + return dir_util.copy_tree(infile, outfile, preserve_mode, + preserve_times, preserve_symlinks, + not self.force, dry_run=self.dry_run) + + def move_file (self, src, dst, level=1): """Move a file respecting dry-run flag.""" return file_util.move_file(src, dst, dry_run=self.dry_run) def spawn(self, cmd, search_path=1, level=1): """Spawn an external command respecting dry-run flag.""" from distutils.spawn import spawn - spawn(cmd, search_path, dry_run=self.dry_run) - def make_archive( - self, base_name, format, root_dir=None, base_dir=None, owner=None, group=None - ): - return archive_util.make_archive( - base_name, - format, - root_dir, - base_dir, - dry_run=self.dry_run, - owner=owner, - group=group, - ) - - def make_file( - self, infiles, outfile, func, args, exec_msg=None, skip_msg=None, level=1 - ): + def make_archive(self, base_name, format, root_dir=None, base_dir=None, + owner=None, group=None): + return archive_util.make_archive(base_name, format, root_dir, base_dir, + dry_run=self.dry_run, + owner=owner, group=group) + + def make_file(self, infiles, outfile, func, args, + exec_msg=None, skip_msg=None, level=1): """Special case of 'execute()' for operations that process one or more input files and generate one output file. 
Works just like 'execute()', except the operation is skipped and a different @@ -421,10 +387,11 @@ def make_file( if isinstance(infiles, str): infiles = (infiles,) elif not isinstance(infiles, (list, tuple)): - raise TypeError("'infiles' must be a string, or a list or tuple of strings") + raise TypeError( + "'infiles' must be a string, or a list or tuple of strings") if exec_msg is None: - exec_msg = "generating {} from {}".format(outfile, ', '.join(infiles)) + exec_msg = "generating %s from %s" % (outfile, ', '.join(infiles)) # If 'outfile' must be regenerated (either because it doesn't # exist, is out-of-date, or the 'force' flag is true) then diff --git a/venv/lib/python3.10/site-packages/setuptools/_distutils/command/__init__.py b/venv/lib/python3.10/site-packages/setuptools/_distutils/command/__init__.py index 028dcfa..481eea9 100644 --- a/venv/lib/python3.10/site-packages/setuptools/_distutils/command/__init__.py +++ b/venv/lib/python3.10/site-packages/setuptools/_distutils/command/__init__.py @@ -3,23 +3,29 @@ Package containing implementation of all the standard Distutils commands.""" -__all__ = [ # noqa: F822 - 'build', - 'build_py', - 'build_ext', - 'build_clib', - 'build_scripts', - 'clean', - 'install', - 'install_lib', - 'install_headers', - 'install_scripts', - 'install_data', - 'sdist', - 'register', - 'bdist', - 'bdist_dumb', - 'bdist_rpm', - 'check', - 'upload', -] +__all__ = ['build', + 'build_py', + 'build_ext', + 'build_clib', + 'build_scripts', + 'clean', + 'install', + 'install_lib', + 'install_headers', + 'install_scripts', + 'install_data', + 'sdist', + 'register', + 'bdist', + 'bdist_dumb', + 'bdist_rpm', + 'bdist_wininst', + 'check', + 'upload', + # These two are reserved for future use: + #'bdist_sdux', + #'bdist_pkgtool', + # Note: + # bdist_packager is not included because it only provides + # an abstract base class + ] diff --git a/venv/lib/python3.10/site-packages/setuptools/_distutils/command/_framework_compat.py b/venv/lib/python3.10/site-packages/setuptools/_distutils/command/_framework_compat.py deleted file mode 100644 index cffa27c..0000000 --- a/venv/lib/python3.10/site-packages/setuptools/_distutils/command/_framework_compat.py +++ /dev/null @@ -1,55 +0,0 @@ -""" -Backward compatibility for homebrew builds on macOS. -""" - - -import sys -import os -import functools -import subprocess -import sysconfig - - -@functools.lru_cache() -def enabled(): - """ - Only enabled for Python 3.9 framework homebrew builds - except ensurepip and venv. 
- """ - PY39 = (3, 9) < sys.version_info < (3, 10) - framework = sys.platform == 'darwin' and sys._framework - homebrew = "Cellar" in sysconfig.get_config_var('projectbase') - venv = sys.prefix != sys.base_prefix - ensurepip = os.environ.get("ENSUREPIP_OPTIONS") - return PY39 and framework and homebrew and not venv and not ensurepip - - -schemes = dict( - osx_framework_library=dict( - stdlib='{installed_base}/{platlibdir}/python{py_version_short}', - platstdlib='{platbase}/{platlibdir}/python{py_version_short}', - purelib='{homebrew_prefix}/lib/python{py_version_short}/site-packages', - platlib='{homebrew_prefix}/{platlibdir}/python{py_version_short}/site-packages', - include='{installed_base}/include/python{py_version_short}{abiflags}', - platinclude='{installed_platbase}/include/python{py_version_short}{abiflags}', - scripts='{homebrew_prefix}/bin', - data='{homebrew_prefix}', - ) -) - - -@functools.lru_cache() -def vars(): - if not enabled(): - return {} - homebrew_prefix = subprocess.check_output(['brew', '--prefix'], text=True).strip() - return locals() - - -def scheme(name): - """ - Override the selected scheme for posix_prefix. - """ - if not enabled() or not name.endswith('_prefix'): - return name - return 'osx_framework_library' diff --git a/venv/lib/python3.10/site-packages/setuptools/_distutils/command/bdist.py b/venv/lib/python3.10/site-packages/setuptools/_distutils/command/bdist.py index de37dae..014871d 100644 --- a/venv/lib/python3.10/site-packages/setuptools/_distutils/command/bdist.py +++ b/venv/lib/python3.10/site-packages/setuptools/_distutils/command/bdist.py @@ -4,93 +4,79 @@ distribution).""" import os -import warnings - from distutils.core import Command -from distutils.errors import DistutilsPlatformError, DistutilsOptionError +from distutils.errors import * from distutils.util import get_platform def show_formats(): - """Print list of available formats (arguments to "--format" option).""" + """Print list of available formats (arguments to "--format" option). + """ from distutils.fancy_getopt import FancyGetopt - formats = [] for format in bdist.format_commands: - formats.append(("formats=" + format, None, bdist.format_commands[format][1])) + formats.append(("formats=" + format, None, + bdist.format_command[format][1])) pretty_printer = FancyGetopt(formats) pretty_printer.print_help("List of available distribution formats:") -class ListCompat(dict): - # adapter to allow for Setuptools compatibility in format_commands - def append(self, item): - warnings.warn( - """format_commands is now a dict. 
append is deprecated.""", - DeprecationWarning, - stacklevel=2, - ) - - class bdist(Command): description = "create a built (binary) distribution" - user_options = [ - ('bdist-base=', 'b', "temporary directory for creating built distributions"), - ( - 'plat-name=', - 'p', - "platform name to embed in generated filenames " - "(default: %s)" % get_platform(), - ), - ('formats=', None, "formats for distribution (comma-separated list)"), - ( - 'dist-dir=', - 'd', - "directory to put final built distributions in " "[default: dist]", - ), - ('skip-build', None, "skip rebuilding everything (for testing/debugging)"), - ( - 'owner=', - 'u', - "Owner name used when creating a tar file" " [default: current user]", - ), - ( - 'group=', - 'g', - "Group name used when creating a tar file" " [default: current group]", - ), - ] + user_options = [('bdist-base=', 'b', + "temporary directory for creating built distributions"), + ('plat-name=', 'p', + "platform name to embed in generated filenames " + "(default: %s)" % get_platform()), + ('formats=', None, + "formats for distribution (comma-separated list)"), + ('dist-dir=', 'd', + "directory to put final built distributions in " + "[default: dist]"), + ('skip-build', None, + "skip rebuilding everything (for testing/debugging)"), + ('owner=', 'u', + "Owner name used when creating a tar file" + " [default: current user]"), + ('group=', 'g', + "Group name used when creating a tar file" + " [default: current group]"), + ] boolean_options = ['skip-build'] help_options = [ - ('help-formats', None, "lists available distribution formats", show_formats), - ] + ('help-formats', None, + "lists available distribution formats", show_formats), + ] # The following commands do not take a format option from bdist no_format_option = ('bdist_rpm',) # This won't do in reality: will need to distinguish RPM-ish Linux, # Debian-ish Linux, Solaris, FreeBSD, ..., Windows, Mac OS. - default_format = {'posix': 'gztar', 'nt': 'zip'} - - # Define commands in preferred order for the --help-formats option - format_commands = ListCompat( - { - 'rpm': ('bdist_rpm', "RPM distribution"), - 'gztar': ('bdist_dumb', "gzip'ed tar file"), - 'bztar': ('bdist_dumb', "bzip2'ed tar file"), - 'xztar': ('bdist_dumb', "xz'ed tar file"), - 'ztar': ('bdist_dumb', "compressed tar file"), - 'tar': ('bdist_dumb', "tar file"), - 'zip': ('bdist_dumb', "ZIP file"), - } - ) - - # for compatibility until consumers only reference format_commands - format_command = format_commands + default_format = {'posix': 'gztar', + 'nt': 'zip'} + + # Establish the preferred order (for the --help-formats option). + format_commands = ['rpm', 'gztar', 'bztar', 'xztar', 'ztar', 'tar', + 'wininst', 'zip', 'msi'] + + # And the real information. + format_command = {'rpm': ('bdist_rpm', "RPM distribution"), + 'gztar': ('bdist_dumb', "gzip'ed tar file"), + 'bztar': ('bdist_dumb', "bzip2'ed tar file"), + 'xztar': ('bdist_dumb', "xz'ed tar file"), + 'ztar': ('bdist_dumb', "compressed tar file"), + 'tar': ('bdist_dumb', "tar file"), + 'wininst': ('bdist_wininst', + "Windows executable installer"), + 'zip': ('bdist_dumb', "ZIP file"), + 'msi': ('bdist_msi', "Microsoft Installer") + } + def initialize_options(self): self.bdist_base = None @@ -114,7 +100,8 @@ def finalize_options(self): # "build/bdist./dumb", "build/bdist./rpm", etc.) if self.bdist_base is None: build_base = self.get_finalized_command('build').build_base - self.bdist_base = os.path.join(build_base, 'bdist.' 
+ self.plat_name) + self.bdist_base = os.path.join(build_base, + 'bdist.' + self.plat_name) self.ensure_string_list('formats') if self.formats is None: @@ -122,9 +109,8 @@ def finalize_options(self): self.formats = [self.default_format[os.name]] except KeyError: raise DistutilsPlatformError( - "don't know how to create built distributions " - "on platform %s" % os.name - ) + "don't know how to create built distributions " + "on platform %s" % os.name) if self.dist_dir is None: self.dist_dir = "dist" @@ -134,7 +120,7 @@ def run(self): commands = [] for format in self.formats: try: - commands.append(self.format_commands[format][0]) + commands.append(self.format_command[format][0]) except KeyError: raise DistutilsOptionError("invalid format '%s'" % format) @@ -152,6 +138,6 @@ def run(self): # If we're going to need to run this command again, tell it to # keep its temporary files around so subsequent runs go faster. - if cmd_name in commands[i + 1 :]: + if cmd_name in commands[i+1:]: sub_cmd.keep_temp = 1 self.run_command(cmd_name) diff --git a/venv/lib/python3.10/site-packages/setuptools/_distutils/command/bdist_dumb.py b/venv/lib/python3.10/site-packages/setuptools/_distutils/command/bdist_dumb.py index 0f52330..f0d6b5b 100644 --- a/venv/lib/python3.10/site-packages/setuptools/_distutils/command/bdist_dumb.py +++ b/venv/lib/python3.10/site-packages/setuptools/_distutils/command/bdist_dumb.py @@ -8,56 +8,44 @@ from distutils.core import Command from distutils.util import get_platform from distutils.dir_util import remove_tree, ensure_relative -from distutils.errors import DistutilsPlatformError +from distutils.errors import * from distutils.sysconfig import get_python_version from distutils import log - class bdist_dumb(Command): description = "create a \"dumb\" built distribution" - user_options = [ - ('bdist-dir=', 'd', "temporary directory for creating the distribution"), - ( - 'plat-name=', - 'p', - "platform name to embed in generated filenames " - "(default: %s)" % get_platform(), - ), - ( - 'format=', - 'f', - "archive format to create (tar, gztar, bztar, xztar, " "ztar, zip)", - ), - ( - 'keep-temp', - 'k', - "keep the pseudo-installation tree around after " - + "creating the distribution archive", - ), - ('dist-dir=', 'd', "directory to put final built distributions in"), - ('skip-build', None, "skip rebuilding everything (for testing/debugging)"), - ( - 'relative', - None, - "build the archive using relative paths " "(default: false)", - ), - ( - 'owner=', - 'u', - "Owner name used when creating a tar file" " [default: current user]", - ), - ( - 'group=', - 'g', - "Group name used when creating a tar file" " [default: current group]", - ), - ] + user_options = [('bdist-dir=', 'd', + "temporary directory for creating the distribution"), + ('plat-name=', 'p', + "platform name to embed in generated filenames " + "(default: %s)" % get_platform()), + ('format=', 'f', + "archive format to create (tar, gztar, bztar, xztar, " + "ztar, zip)"), + ('keep-temp', 'k', + "keep the pseudo-installation tree around after " + + "creating the distribution archive"), + ('dist-dir=', 'd', + "directory to put final built distributions in"), + ('skip-build', None, + "skip rebuilding everything (for testing/debugging)"), + ('relative', None, + "build the archive using relative paths " + "(default: false)"), + ('owner=', 'u', + "Owner name used when creating a tar file" + " [default: current user]"), + ('group=', 'g', + "Group name used when creating a tar file" + " [default: current group]"), + ] 
boolean_options = ['keep-temp', 'skip-build', 'relative'] - default_format = {'posix': 'gztar', 'nt': 'zip'} + default_format = { 'posix': 'gztar', + 'nt': 'zip' } def initialize_options(self): self.bdist_dir = None @@ -80,16 +68,13 @@ def finalize_options(self): self.format = self.default_format[os.name] except KeyError: raise DistutilsPlatformError( - "don't know how to create dumb built distributions " - "on platform %s" % os.name - ) + "don't know how to create dumb built distributions " + "on platform %s" % os.name) - self.set_undefined_options( - 'bdist', - ('dist_dir', 'dist_dir'), - ('plat_name', 'plat_name'), - ('skip_build', 'skip_build'), - ) + self.set_undefined_options('bdist', + ('dist_dir', 'dist_dir'), + ('plat_name', 'plat_name'), + ('skip_build', 'skip_build')) def run(self): if not self.skip_build: @@ -105,40 +90,34 @@ def run(self): # And make an archive relative to the root of the # pseudo-installation tree. - archive_basename = "{}.{}".format( - self.distribution.get_fullname(), self.plat_name - ) + archive_basename = "%s.%s" % (self.distribution.get_fullname(), + self.plat_name) pseudoinstall_root = os.path.join(self.dist_dir, archive_basename) if not self.relative: archive_root = self.bdist_dir else: - if self.distribution.has_ext_modules() and ( - install.install_base != install.install_platbase - ): + if (self.distribution.has_ext_modules() and + (install.install_base != install.install_platbase)): raise DistutilsPlatformError( - "can't make a dumb built distribution where " - "base and platbase are different (%s, %s)" - % (repr(install.install_base), repr(install.install_platbase)) - ) + "can't make a dumb built distribution where " + "base and platbase are different (%s, %s)" + % (repr(install.install_base), + repr(install.install_platbase))) else: - archive_root = os.path.join( - self.bdist_dir, ensure_relative(install.install_base) - ) + archive_root = os.path.join(self.bdist_dir, + ensure_relative(install.install_base)) # Make the archive - filename = self.make_archive( - pseudoinstall_root, - self.format, - root_dir=archive_root, - owner=self.owner, - group=self.group, - ) + filename = self.make_archive(pseudoinstall_root, + self.format, root_dir=archive_root, + owner=self.owner, group=self.group) if self.distribution.has_ext_modules(): pyversion = get_python_version() else: pyversion = 'any' - self.distribution.dist_files.append(('bdist_dumb', pyversion, filename)) + self.distribution.dist_files.append(('bdist_dumb', pyversion, + filename)) if not self.keep_temp: remove_tree(self.bdist_dir, dry_run=self.dry_run) diff --git a/venv/lib/python3.10/site-packages/setuptools/_distutils/command/bdist_rpm.py b/venv/lib/python3.10/site-packages/setuptools/_distutils/command/bdist_rpm.py index 6a50ef3..550cbfa 100644 --- a/venv/lib/python3.10/site-packages/setuptools/_distutils/command/bdist_rpm.py +++ b/venv/lib/python3.10/site-packages/setuptools/_distutils/command/bdist_rpm.py @@ -3,153 +3,134 @@ Implements the Distutils 'bdist_rpm' command (create RPM source and binary distributions).""" -import subprocess -import sys -import os - +import subprocess, sys, os from distutils.core import Command from distutils.debug import DEBUG from distutils.file_util import write_file -from distutils.errors import ( - DistutilsOptionError, - DistutilsPlatformError, - DistutilsFileError, - DistutilsExecError, -) +from distutils.errors import * from distutils.sysconfig import get_python_version from distutils import log - class bdist_rpm(Command): description = "create an RPM 
distribution" user_options = [ - ('bdist-base=', None, "base directory for creating built distributions"), - ( - 'rpm-base=', - None, - "base directory for creating RPMs (defaults to \"rpm\" under " - "--bdist-base; must be specified for RPM 2)", - ), - ( - 'dist-dir=', - 'd', - "directory to put final RPM files in " "(and .spec files if --spec-only)", - ), - ( - 'python=', - None, - "path to Python interpreter to hard-code in the .spec file " - "(default: \"python\")", - ), - ( - 'fix-python', - None, - "hard-code the exact path to the current Python interpreter in " - "the .spec file", - ), - ('spec-only', None, "only regenerate spec file"), - ('source-only', None, "only generate source RPM"), - ('binary-only', None, "only generate binary RPM"), - ('use-bzip2', None, "use bzip2 instead of gzip to create source distribution"), + ('bdist-base=', None, + "base directory for creating built distributions"), + ('rpm-base=', None, + "base directory for creating RPMs (defaults to \"rpm\" under " + "--bdist-base; must be specified for RPM 2)"), + ('dist-dir=', 'd', + "directory to put final RPM files in " + "(and .spec files if --spec-only)"), + ('python=', None, + "path to Python interpreter to hard-code in the .spec file " + "(default: \"python\")"), + ('fix-python', None, + "hard-code the exact path to the current Python interpreter in " + "the .spec file"), + ('spec-only', None, + "only regenerate spec file"), + ('source-only', None, + "only generate source RPM"), + ('binary-only', None, + "only generate binary RPM"), + ('use-bzip2', None, + "use bzip2 instead of gzip to create source distribution"), + # More meta-data: too RPM-specific to put in the setup script, # but needs to go in the .spec file -- so we make these options # to "bdist_rpm". The idea is that packagers would put this # info in setup.cfg, although they are of course free to # supply it on the command line. - ( - 'distribution-name=', - None, - "name of the (Linux) distribution to which this " - "RPM applies (*not* the name of the module distribution!)", - ), - ('group=', None, "package classification [default: \"Development/Libraries\"]"), - ('release=', None, "RPM release number"), - ('serial=', None, "RPM serial number"), - ( - 'vendor=', - None, - "RPM \"vendor\" (eg. \"Joe Blow \") " - "[default: maintainer or author from setup script]", - ), - ( - 'packager=', - None, - "RPM packager (eg. \"Jane Doe \") " "[default: vendor]", - ), - ('doc-files=', None, "list of documentation files (space or comma-separated)"), - ('changelog=', None, "RPM changelog"), - ('icon=', None, "name of icon file"), - ('provides=', None, "capabilities provided by this package"), - ('requires=', None, "capabilities required by this package"), - ('conflicts=', None, "capabilities which conflict with this package"), - ('build-requires=', None, "capabilities required to build this package"), - ('obsoletes=', None, "capabilities made obsolete by this package"), - ('no-autoreq', None, "do not automatically calculate dependencies"), + ('distribution-name=', None, + "name of the (Linux) distribution to which this " + "RPM applies (*not* the name of the module distribution!)"), + ('group=', None, + "package classification [default: \"Development/Libraries\"]"), + ('release=', None, + "RPM release number"), + ('serial=', None, + "RPM serial number"), + ('vendor=', None, + "RPM \"vendor\" (eg. \"Joe Blow \") " + "[default: maintainer or author from setup script]"), + ('packager=', None, + "RPM packager (eg. 
\"Jane Doe \") " + "[default: vendor]"), + ('doc-files=', None, + "list of documentation files (space or comma-separated)"), + ('changelog=', None, + "RPM changelog"), + ('icon=', None, + "name of icon file"), + ('provides=', None, + "capabilities provided by this package"), + ('requires=', None, + "capabilities required by this package"), + ('conflicts=', None, + "capabilities which conflict with this package"), + ('build-requires=', None, + "capabilities required to build this package"), + ('obsoletes=', None, + "capabilities made obsolete by this package"), + ('no-autoreq', None, + "do not automatically calculate dependencies"), + # Actions to take when building RPM - ('keep-temp', 'k', "don't clean up RPM build directory"), - ('no-keep-temp', None, "clean up RPM build directory [default]"), - ( - 'use-rpm-opt-flags', - None, - "compile with RPM_OPT_FLAGS when building from source RPM", - ), - ('no-rpm-opt-flags', None, "do not pass any RPM CFLAGS to compiler"), - ('rpm3-mode', None, "RPM 3 compatibility mode (default)"), - ('rpm2-mode', None, "RPM 2 compatibility mode"), + ('keep-temp', 'k', + "don't clean up RPM build directory"), + ('no-keep-temp', None, + "clean up RPM build directory [default]"), + ('use-rpm-opt-flags', None, + "compile with RPM_OPT_FLAGS when building from source RPM"), + ('no-rpm-opt-flags', None, + "do not pass any RPM CFLAGS to compiler"), + ('rpm3-mode', None, + "RPM 3 compatibility mode (default)"), + ('rpm2-mode', None, + "RPM 2 compatibility mode"), + # Add the hooks necessary for specifying custom scripts - ('prep-script=', None, "Specify a script for the PREP phase of RPM building"), - ('build-script=', None, "Specify a script for the BUILD phase of RPM building"), - ( - 'pre-install=', - None, - "Specify a script for the pre-INSTALL phase of RPM building", - ), - ( - 'install-script=', - None, - "Specify a script for the INSTALL phase of RPM building", - ), - ( - 'post-install=', - None, - "Specify a script for the post-INSTALL phase of RPM building", - ), - ( - 'pre-uninstall=', - None, - "Specify a script for the pre-UNINSTALL phase of RPM building", - ), - ( - 'post-uninstall=', - None, - "Specify a script for the post-UNINSTALL phase of RPM building", - ), - ('clean-script=', None, "Specify a script for the CLEAN phase of RPM building"), - ( - 'verify-script=', - None, - "Specify a script for the VERIFY phase of the RPM build", - ), + ('prep-script=', None, + "Specify a script for the PREP phase of RPM building"), + ('build-script=', None, + "Specify a script for the BUILD phase of RPM building"), + + ('pre-install=', None, + "Specify a script for the pre-INSTALL phase of RPM building"), + ('install-script=', None, + "Specify a script for the INSTALL phase of RPM building"), + ('post-install=', None, + "Specify a script for the post-INSTALL phase of RPM building"), + + ('pre-uninstall=', None, + "Specify a script for the pre-UNINSTALL phase of RPM building"), + ('post-uninstall=', None, + "Specify a script for the post-UNINSTALL phase of RPM building"), + + ('clean-script=', None, + "Specify a script for the CLEAN phase of RPM building"), + + ('verify-script=', None, + "Specify a script for the VERIFY phase of the RPM build"), + # Allow a packager to explicitly force an architecture - ('force-arch=', None, "Force an architecture onto the RPM build process"), - ('quiet', 'q', "Run the INSTALL phase of RPM building in quiet mode"), - ] - - boolean_options = [ - 'keep-temp', - 'use-rpm-opt-flags', - 'rpm3-mode', - 'no-autoreq', - 'quiet', - ] - - 
negative_opt = { - 'no-keep-temp': 'keep-temp', - 'no-rpm-opt-flags': 'use-rpm-opt-flags', - 'rpm2-mode': 'rpm3-mode', - } + ('force-arch=', None, + "Force an architecture onto the RPM build process"), + + ('quiet', 'q', + "Run the INSTALL phase of RPM building in quiet mode"), + ] + + boolean_options = ['keep-temp', 'use-rpm-opt-flags', 'rpm3-mode', + 'no-autoreq', 'quiet'] + + negative_opt = {'no-keep-temp': 'keep-temp', + 'no-rpm-opt-flags': 'use-rpm-opt-flags', + 'rpm2-mode': 'rpm3-mode'} + def initialize_options(self): self.bdist_base = None @@ -200,7 +181,8 @@ def finalize_options(self): self.set_undefined_options('bdist', ('bdist_base', 'bdist_base')) if self.rpm_base is None: if not self.rpm3_mode: - raise DistutilsOptionError("you must specify --rpm-base in RPM 2 mode") + raise DistutilsOptionError( + "you must specify --rpm-base in RPM 2 mode") self.rpm_base = os.path.join(self.bdist_base, "rpm") if self.python is None: @@ -210,17 +192,14 @@ def finalize_options(self): self.python = "python3" elif self.fix_python: raise DistutilsOptionError( - "--python and --fix-python are mutually exclusive options" - ) + "--python and --fix-python are mutually exclusive options") if os.name != 'posix': - raise DistutilsPlatformError( - "don't know how to create RPM " "distributions on platform %s" % os.name - ) + raise DistutilsPlatformError("don't know how to create RPM " + "distributions on platform %s" % os.name) if self.binary_only and self.source_only: raise DistutilsOptionError( - "cannot supply both '--source-only' and '--binary-only'" - ) + "cannot supply both '--source-only' and '--binary-only'") # don't pass CFLAGS to pure python distributions if not self.distribution.has_ext_modules(): @@ -231,11 +210,9 @@ def finalize_options(self): def finalize_package_data(self): self.ensure_string('group', "Development/Libraries") - self.ensure_string( - 'vendor', - "%s <%s>" - % (self.distribution.get_contact(), self.distribution.get_contact_email()), - ) + self.ensure_string('vendor', + "%s <%s>" % (self.distribution.get_contact(), + self.distribution.get_contact_email())) self.ensure_string('packager') self.ensure_string_list('doc_files') if isinstance(self.doc_files, list): @@ -244,12 +221,12 @@ def finalize_package_data(self): self.doc_files.append(readme) self.ensure_string('release', "1") - self.ensure_string('serial') # should it be an int? + self.ensure_string('serial') # should it be an int? self.ensure_string('distribution_name') self.ensure_string('changelog') - # Format changelog correctly + # Format changelog correctly self.changelog = self._format_changelog(self.changelog) self.ensure_filename('icon') @@ -276,7 +253,7 @@ def finalize_package_data(self): self.ensure_string('force_arch') - def run(self): # noqa: C901 + def run(self): if DEBUG: print("before _get_package_data():") print("vendor =", self.vendor) @@ -297,12 +274,14 @@ def run(self): # noqa: C901 # Spec file goes into 'dist_dir' if '--spec-only specified', # build/rpm. otherwise. 
- spec_path = os.path.join(spec_dir, "%s.spec" % self.distribution.get_name()) - self.execute( - write_file, (spec_path, self._make_spec_file()), "writing '%s'" % spec_path - ) - - if self.spec_only: # stop if requested + spec_path = os.path.join(spec_dir, + "%s.spec" % self.distribution.get_name()) + self.execute(write_file, + (spec_path, + self._make_spec_file()), + "writing '%s'" % spec_path) + + if self.spec_only: # stop if requested return # Make a source distribution and copy to SOURCES directory with @@ -324,13 +303,14 @@ def run(self): # noqa: C901 if os.path.exists(self.icon): self.copy_file(self.icon, source_dir) else: - raise DistutilsFileError("icon file '%s' does not exist" % self.icon) + raise DistutilsFileError( + "icon file '%s' does not exist" % self.icon) # build package log.info("building RPMs") rpm_cmd = ['rpmbuild'] - if self.source_only: # what kind of RPMs? + if self.source_only: # what kind of RPMs? rpm_cmd.append('-bs') elif self.binary_only: rpm_cmd.append('-bb') @@ -338,7 +318,8 @@ def run(self): # noqa: C901 rpm_cmd.append('-ba') rpm_cmd.extend(['--define', '__python %s' % self.python]) if self.rpm3_mode: - rpm_cmd.extend(['--define', '_topdir %s' % os.path.abspath(self.rpm_base)]) + rpm_cmd.extend(['--define', + '_topdir %s' % os.path.abspath(self.rpm_base)]) if not self.keep_temp: rpm_cmd.append('--clean') @@ -353,11 +334,8 @@ def run(self): # noqa: C901 nvr_string = "%{name}-%{version}-%{release}" src_rpm = nvr_string + ".src.rpm" non_src_rpm = "%{arch}/" + nvr_string + ".%{arch}.rpm" - q_cmd = r"rpm -q --qf '{} {}\n' --specfile '{}'".format( - src_rpm, - non_src_rpm, - spec_path, - ) + q_cmd = r"rpm -q --qf '%s %s\n' --specfile '%s'" % ( + src_rpm, non_src_rpm, spec_path) out = os.popen(q_cmd) try: @@ -367,12 +345,12 @@ def run(self): # noqa: C901 line = out.readline() if not line: break - ell = line.strip().split() - assert len(ell) == 2 - binary_rpms.append(ell[1]) + l = line.strip().split() + assert(len(l) == 2) + binary_rpms.append(l[1]) # The source rpm is named after the first entry in the spec file if source_rpm is None: - source_rpm = ell[0] + source_rpm = l[0] status = out.close() if status: @@ -391,37 +369,38 @@ def run(self): # noqa: C901 if not self.binary_only: srpm = os.path.join(rpm_dir['SRPMS'], source_rpm) - assert os.path.exists(srpm) + assert(os.path.exists(srpm)) self.move_file(srpm, self.dist_dir) filename = os.path.join(self.dist_dir, source_rpm) - self.distribution.dist_files.append(('bdist_rpm', pyversion, filename)) + self.distribution.dist_files.append( + ('bdist_rpm', pyversion, filename)) if not self.source_only: for rpm in binary_rpms: rpm = os.path.join(rpm_dir['RPMS'], rpm) if os.path.exists(rpm): self.move_file(rpm, self.dist_dir) - filename = os.path.join(self.dist_dir, os.path.basename(rpm)) + filename = os.path.join(self.dist_dir, + os.path.basename(rpm)) self.distribution.dist_files.append( - ('bdist_rpm', pyversion, filename) - ) + ('bdist_rpm', pyversion, filename)) def _dist_path(self, path): return os.path.join(self.dist_dir, os.path.basename(path)) - def _make_spec_file(self): # noqa: C901 + def _make_spec_file(self): """Generate the text of an RPM spec file and return it as a list of strings (one per line). 
""" # definitions and headers spec_file = [ '%define name ' + self.distribution.get_name(), - '%define version ' + self.distribution.get_version().replace('-', '_'), + '%define version ' + self.distribution.get_version().replace('-','_'), '%define unmangled_version ' + self.distribution.get_version(), - '%define release ' + self.release.replace('-', '_'), + '%define release ' + self.release.replace('-','_'), '', - 'Summary: ' + (self.distribution.get_description() or "UNKNOWN"), - ] + 'Summary: ' + self.distribution.get_description(), + ] # Workaround for #14443 which affects some RPM based systems such as # RHEL6 (and probably derivatives) @@ -429,9 +408,8 @@ def _make_spec_file(self): # noqa: C901 # Generate a potential replacement value for __os_install_post (whilst # normalizing the whitespace to simplify the test for whether the # invocation of brp-python-bytecompile passes in __python): - vendor_hook = '\n'.join( - [' %s \\' % line.strip() for line in vendor_hook.splitlines()] - ) + vendor_hook = '\n'.join([' %s \\' % line.strip() + for line in vendor_hook.splitlines()]) problem = "brp-python-bytecompile \\\n" fixed = "brp-python-bytecompile %{__python} \\\n" fixed_hook = vendor_hook.replace(problem, fixed) @@ -442,17 +420,14 @@ def _make_spec_file(self): # noqa: C901 # put locale summaries into spec file # XXX not supported for now (hard to put a dictionary # in a config file -- arg!) - # for locale in self.summaries.keys(): + #for locale in self.summaries.keys(): # spec_file.append('Summary(%s): %s' % (locale, # self.summaries[locale])) - spec_file.extend( - [ - 'Name: %{name}', - 'Version: %{version}', - 'Release: %{release}', - ] - ) + spec_file.extend([ + 'Name: %{name}', + 'Version: %{version}', + 'Release: %{release}',]) # XXX yuck! this filename is available from the "sdist" command, # but only after it has run: and we create the spec file before @@ -462,44 +437,42 @@ def _make_spec_file(self): # noqa: C901 else: spec_file.append('Source0: %{name}-%{unmangled_version}.tar.gz') - spec_file.extend( - [ - 'License: ' + (self.distribution.get_license() or "UNKNOWN"), - 'Group: ' + self.group, - 'BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-buildroot', - 'Prefix: %{_prefix}', - ] - ) + spec_file.extend([ + 'License: ' + self.distribution.get_license(), + 'Group: ' + self.group, + 'BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-buildroot', + 'Prefix: %{_prefix}', ]) if not self.force_arch: # noarch if no extension modules if not self.distribution.has_ext_modules(): spec_file.append('BuildArch: noarch') else: - spec_file.append('BuildArch: %s' % self.force_arch) - - for field in ( - 'Vendor', - 'Packager', - 'Provides', - 'Requires', - 'Conflicts', - 'Obsoletes', - ): + spec_file.append( 'BuildArch: %s' % self.force_arch ) + + for field in ('Vendor', + 'Packager', + 'Provides', + 'Requires', + 'Conflicts', + 'Obsoletes', + ): val = getattr(self, field.lower()) if isinstance(val, list): - spec_file.append('{}: {}'.format(field, ' '.join(val))) + spec_file.append('%s: %s' % (field, ' '.join(val))) elif val is not None: - spec_file.append('{}: {}'.format(field, val)) + spec_file.append('%s: %s' % (field, val)) - if self.distribution.get_url(): + + if self.distribution.get_url() != 'UNKNOWN': spec_file.append('Url: ' + self.distribution.get_url()) if self.distribution_name: spec_file.append('Distribution: ' + self.distribution_name) if self.build_requires: - spec_file.append('BuildRequires: ' + ' '.join(self.build_requires)) + spec_file.append('BuildRequires: ' + + ' 
'.join(self.build_requires)) if self.icon: spec_file.append('Icon: ' + os.path.basename(self.icon)) @@ -507,18 +480,16 @@ def _make_spec_file(self): # noqa: C901 if self.no_autoreq: spec_file.append('AutoReq: 0') - spec_file.extend( - [ - '', - '%description', - self.distribution.get_long_description() or "", - ] - ) + spec_file.extend([ + '', + '%description', + self.distribution.get_long_description() + ]) # put locale descriptions into spec file # XXX again, suppressed because config file syntax doesn't # easily support this ;-( - # for locale in self.descriptions.keys(): + #for locale in self.descriptions.keys(): # spec_file.extend([ # '', # '%description -l ' + locale, @@ -527,7 +498,7 @@ def _make_spec_file(self): # noqa: C901 # rpm scripts # figure out default build script - def_setup_call = "{} {}".format(self.python, os.path.basename(sys.argv[0])) + def_setup_call = "%s %s" % (self.python,os.path.basename(sys.argv[0])) def_build = "%s build" % def_setup_call if self.use_rpm_opt_flags: def_build = 'env CFLAGS="$RPM_OPT_FLAGS" ' + def_build @@ -538,9 +509,8 @@ def _make_spec_file(self): # noqa: C901 # that we open and interpolate into the spec file, but the defaults # are just text that we drop in as-is. Hmmm. - install_cmd = ( - '%s install -O1 --root=$RPM_BUILD_ROOT ' '--record=INSTALLED_FILES' - ) % def_setup_call + install_cmd = ('%s install -O1 --root=$RPM_BUILD_ROOT ' + '--record=INSTALLED_FILES') % def_setup_call script_options = [ ('prep', 'prep_script', "%setup -n %{name}-%{unmangled_version}"), @@ -559,43 +529,37 @@ def _make_spec_file(self): # noqa: C901 # use 'default' as contents of script val = getattr(self, attr) if val or default: - spec_file.extend( - [ - '', - '%' + rpm_opt, - ] - ) + spec_file.extend([ + '', + '%' + rpm_opt,]) if val: with open(val) as f: spec_file.extend(f.read().split('\n')) else: spec_file.append(default) + # files section - spec_file.extend( - [ - '', - '%files -f INSTALLED_FILES', - '%defattr(-,root,root)', - ] - ) + spec_file.extend([ + '', + '%files -f INSTALLED_FILES', + '%defattr(-,root,root)', + ]) if self.doc_files: spec_file.append('%doc ' + ' '.join(self.doc_files)) if self.changelog: - spec_file.extend( - [ - '', - '%changelog', - ] - ) + spec_file.extend([ + '', + '%changelog',]) spec_file.extend(self.changelog) return spec_file def _format_changelog(self, changelog): - """Format the changelog correctly and convert it to a list of strings""" + """Format the changelog correctly and convert it to a list of strings + """ if not changelog: return changelog new_changelog = [] diff --git a/venv/lib/python3.10/site-packages/setuptools/_distutils/command/build.py b/venv/lib/python3.10/site-packages/setuptools/_distutils/command/build.py index 6d45341..4355a63 100644 --- a/venv/lib/python3.10/site-packages/setuptools/_distutils/command/build.py +++ b/venv/lib/python3.10/site-packages/setuptools/_distutils/command/build.py @@ -2,8 +2,7 @@ Implements the Distutils 'build' command.""" -import sys -import os +import sys, os from distutils.core import Command from distutils.errors import DistutilsOptionError from distutils.util import get_platform @@ -11,7 +10,6 @@ def show_compilers(): from distutils.ccompiler import show_compilers - show_compilers() @@ -20,35 +18,40 @@ class build(Command): description = "build everything needed to install" user_options = [ - ('build-base=', 'b', "base directory for build library"), - ('build-purelib=', None, "build directory for platform-neutral distributions"), - ('build-platlib=', None, "build directory for 
platform-specific distributions"), - ( - 'build-lib=', - None, - "build directory for all distribution (defaults to either " - + "build-purelib or build-platlib", - ), - ('build-scripts=', None, "build directory for scripts"), - ('build-temp=', 't', "temporary build directory"), - ( - 'plat-name=', - 'p', - "platform name to build for, if supported " - "(default: %s)" % get_platform(), - ), - ('compiler=', 'c', "specify the compiler type"), - ('parallel=', 'j', "number of parallel build jobs"), - ('debug', 'g', "compile extensions and libraries with debugging information"), - ('force', 'f', "forcibly build everything (ignore file timestamps)"), - ('executable=', 'e', "specify final destination interpreter path (build.py)"), - ] + ('build-base=', 'b', + "base directory for build library"), + ('build-purelib=', None, + "build directory for platform-neutral distributions"), + ('build-platlib=', None, + "build directory for platform-specific distributions"), + ('build-lib=', None, + "build directory for all distribution (defaults to either " + + "build-purelib or build-platlib"), + ('build-scripts=', None, + "build directory for scripts"), + ('build-temp=', 't', + "temporary build directory"), + ('plat-name=', 'p', + "platform name to build for, if supported " + "(default: %s)" % get_platform()), + ('compiler=', 'c', + "specify the compiler type"), + ('parallel=', 'j', + "number of parallel build jobs"), + ('debug', 'g', + "compile extensions and libraries with debugging information"), + ('force', 'f', + "forcibly build everything (ignore file timestamps)"), + ('executable=', 'e', + "specify final destination interpreter path (build.py)"), + ] boolean_options = ['debug', 'force'] help_options = [ - ('help-compiler', None, "list available compilers", show_compilers), - ] + ('help-compiler', None, + "list available compilers", show_compilers), + ] def initialize_options(self): self.build_base = 'build' @@ -66,7 +69,7 @@ def initialize_options(self): self.executable = None self.parallel = None - def finalize_options(self): # noqa: C901 + def finalize_options(self): if self.plat_name is None: self.plat_name = get_platform() else: @@ -75,11 +78,10 @@ def finalize_options(self): # noqa: C901 # other platforms. if os.name != 'nt': raise DistutilsOptionError( - "--plat-name only supported on Windows (try " - "using './configure --help' on your platform)" - ) + "--plat-name only supported on Windows (try " + "using './configure --help' on your platform)") - plat_specifier = ".{}-{}".format(self.plat_name, sys.implementation.cache_tag) + plat_specifier = ".%s-%d.%d" % (self.plat_name, *sys.version_info[:2]) # Make it so Python 2.x and Python 2.x with --with-pydebug don't # share the same build directories. Doing so confuses the build @@ -93,7 +95,8 @@ def finalize_options(self): # noqa: C901 if self.build_purelib is None: self.build_purelib = os.path.join(self.build_base, 'lib') if self.build_platlib is None: - self.build_platlib = os.path.join(self.build_base, 'lib' + plat_specifier) + self.build_platlib = os.path.join(self.build_base, + 'lib' + plat_specifier) # 'build_lib' is the actual directory that we will use for this # particular module distribution -- if user didn't supply it, pick @@ -107,11 +110,11 @@ def finalize_options(self): # noqa: C901 # 'build_temp' -- temporary directory for compiler turds, # "build/temp." 
if self.build_temp is None: - self.build_temp = os.path.join(self.build_base, 'temp' + plat_specifier) + self.build_temp = os.path.join(self.build_base, + 'temp' + plat_specifier) if self.build_scripts is None: - self.build_scripts = os.path.join( - self.build_base, 'scripts-%d.%d' % sys.version_info[:2] - ) + self.build_scripts = os.path.join(self.build_base, + 'scripts-%d.%d' % sys.version_info[:2]) if self.executable is None and sys.executable: self.executable = os.path.normpath(sys.executable) @@ -131,6 +134,7 @@ def run(self): for cmd_name in self.get_sub_commands(): self.run_command(cmd_name) + # -- Predicates for the sub-command list --------------------------- def has_pure_modules(self): @@ -145,9 +149,9 @@ def has_ext_modules(self): def has_scripts(self): return self.distribution.has_scripts() - sub_commands = [ - ('build_py', has_pure_modules), - ('build_clib', has_c_libraries), - ('build_ext', has_ext_modules), - ('build_scripts', has_scripts), - ] + + sub_commands = [('build_py', has_pure_modules), + ('build_clib', has_c_libraries), + ('build_ext', has_ext_modules), + ('build_scripts', has_scripts), + ] diff --git a/venv/lib/python3.10/site-packages/setuptools/_distutils/command/build_clib.py b/venv/lib/python3.10/site-packages/setuptools/_distutils/command/build_clib.py index 50bb9bb..3e20ef2 100644 --- a/venv/lib/python3.10/site-packages/setuptools/_distutils/command/build_clib.py +++ b/venv/lib/python3.10/site-packages/setuptools/_distutils/command/build_clib.py @@ -16,14 +16,12 @@ import os from distutils.core import Command -from distutils.errors import DistutilsSetupError +from distutils.errors import * from distutils.sysconfig import customize_compiler from distutils import log - def show_compilers(): from distutils.ccompiler import show_compilers - show_compilers() @@ -32,18 +30,24 @@ class build_clib(Command): description = "build C/C++ libraries used by Python extensions" user_options = [ - ('build-clib=', 'b', "directory to build C/C++ libraries to"), - ('build-temp=', 't', "directory to put temporary build by-products"), - ('debug', 'g', "compile with debugging information"), - ('force', 'f', "forcibly build everything (ignore file timestamps)"), - ('compiler=', 'c', "specify the compiler type"), - ] + ('build-clib=', 'b', + "directory to build C/C++ libraries to"), + ('build-temp=', 't', + "directory to put temporary build by-products"), + ('debug', 'g', + "compile with debugging information"), + ('force', 'f', + "forcibly build everything (ignore file timestamps)"), + ('compiler=', 'c', + "specify the compiler type"), + ] boolean_options = ['debug', 'force'] help_options = [ - ('help-compiler', None, "list available compilers", show_compilers), - ] + ('help-compiler', None, + "list available compilers", show_compilers), + ] def initialize_options(self): self.build_clib = None @@ -60,20 +64,19 @@ def initialize_options(self): self.force = 0 self.compiler = None + def finalize_options(self): # This might be confusing: both build-clib and build-temp default # to build-temp as defined by the "build" command. This is because # I think that C libraries are really just temporary build # by-products, at least from the point of view of building Python # extensions -- but I want to keep my options open. 
- self.set_undefined_options( - 'build', - ('build_temp', 'build_clib'), - ('build_temp', 'build_temp'), - ('compiler', 'compiler'), - ('debug', 'debug'), - ('force', 'force'), - ) + self.set_undefined_options('build', + ('build_temp', 'build_clib'), + ('build_temp', 'build_temp'), + ('compiler', 'compiler'), + ('debug', 'debug'), + ('force', 'force')) self.libraries = self.distribution.libraries if self.libraries: @@ -87,23 +90,23 @@ def finalize_options(self): # XXX same as for build_ext -- what about 'self.define' and # 'self.undef' ? + def run(self): if not self.libraries: return # Yech -- this is cut 'n pasted from build_ext.py! from distutils.ccompiler import new_compiler - - self.compiler = new_compiler( - compiler=self.compiler, dry_run=self.dry_run, force=self.force - ) + self.compiler = new_compiler(compiler=self.compiler, + dry_run=self.dry_run, + force=self.force) customize_compiler(self.compiler) if self.include_dirs is not None: self.compiler.set_include_dirs(self.include_dirs) if self.define is not None: # 'define' option is a list of (name,value) tuples - for (name, value) in self.define: + for (name,value) in self.define: self.compiler.define_macro(name, value) if self.undef is not None: for macro in self.undef: @@ -111,6 +114,7 @@ def run(self): self.build_libraries(self.libraries) + def check_library_list(self, libraries): """Ensure that the list of libraries is valid. @@ -122,31 +126,30 @@ def check_library_list(self, libraries): just returns otherwise. """ if not isinstance(libraries, list): - raise DistutilsSetupError("'libraries' option must be a list of tuples") + raise DistutilsSetupError( + "'libraries' option must be a list of tuples") for lib in libraries: if not isinstance(lib, tuple) and len(lib) != 2: - raise DistutilsSetupError("each element of 'libraries' must a 2-tuple") + raise DistutilsSetupError( + "each element of 'libraries' must a 2-tuple") name, build_info = lib if not isinstance(name, str): raise DistutilsSetupError( - "first element of each tuple in 'libraries' " - "must be a string (the library name)" - ) + "first element of each tuple in 'libraries' " + "must be a string (the library name)") if '/' in name or (os.sep != '/' and os.sep in name): - raise DistutilsSetupError( - "bad library name '%s': " - "may not contain directory separators" % lib[0] - ) + raise DistutilsSetupError("bad library name '%s': " + "may not contain directory separators" % lib[0]) if not isinstance(build_info, dict): raise DistutilsSetupError( - "second element of each tuple in 'libraries' " - "must be a dictionary (build info)" - ) + "second element of each tuple in 'libraries' " + "must be a dictionary (build info)") + def get_library_names(self): # Assume the library list is valid -- 'check_library_list()' is @@ -159,6 +162,7 @@ def get_library_names(self): lib_names.append(lib_name) return lib_names + def get_source_files(self): self.check_library_list(self.libraries) filenames = [] @@ -166,23 +170,22 @@ def get_source_files(self): sources = build_info.get('sources') if sources is None or not isinstance(sources, (list, tuple)): raise DistutilsSetupError( - "in 'libraries' option (library '%s'), " - "'sources' must be present and must be " - "a list of source filenames" % lib_name - ) + "in 'libraries' option (library '%s'), " + "'sources' must be present and must be " + "a list of source filenames" % lib_name) filenames.extend(sources) return filenames + def build_libraries(self, libraries): for (lib_name, build_info) in libraries: sources = 
build_info.get('sources') if sources is None or not isinstance(sources, (list, tuple)): raise DistutilsSetupError( - "in 'libraries' option (library '%s'), " - "'sources' must be present and must be " - "a list of source filenames" % lib_name - ) + "in 'libraries' option (library '%s'), " + "'sources' must be present and must be " + "a list of source filenames" % lib_name) sources = list(sources) log.info("building '%s' library", lib_name) @@ -192,17 +195,15 @@ def build_libraries(self, libraries): # files in a temporary build directory.) macros = build_info.get('macros') include_dirs = build_info.get('include_dirs') - objects = self.compiler.compile( - sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - ) + objects = self.compiler.compile(sources, + output_dir=self.build_temp, + macros=macros, + include_dirs=include_dirs, + debug=self.debug) # Now "link" the object files together into a static library. # (On Unix at least, this isn't really linking -- it just # builds an archive. Whatever.) - self.compiler.create_static_lib( - objects, lib_name, output_dir=self.build_clib, debug=self.debug - ) + self.compiler.create_static_lib(objects, lib_name, + output_dir=self.build_clib, + debug=self.debug) diff --git a/venv/lib/python3.10/site-packages/setuptools/_distutils/command/build_ext.py b/venv/lib/python3.10/site-packages/setuptools/_distutils/command/build_ext.py index 3c6cee7..181671b 100644 --- a/venv/lib/python3.10/site-packages/setuptools/_distutils/command/build_ext.py +++ b/venv/lib/python3.10/site-packages/setuptools/_distutils/command/build_ext.py @@ -9,14 +9,7 @@ import re import sys from distutils.core import Command -from distutils.errors import ( - DistutilsOptionError, - DistutilsSetupError, - CCompilerError, - DistutilsError, - CompileError, - DistutilsPlatformError, -) +from distutils.errors import * from distutils.sysconfig import customize_compiler, get_python_version from distutils.sysconfig import get_config_h_filename from distutils.dep_util import newer_group @@ -29,12 +22,12 @@ # An extension name is just a dot-separated list of Python NAMEs (ie. # the same as a fully-qualified module name). 
-extension_name_re = re.compile(r'^[a-zA-Z_][a-zA-Z_0-9]*(\.[a-zA-Z_][a-zA-Z_0-9]*)*$') +extension_name_re = re.compile \ + (r'^[a-zA-Z_][a-zA-Z_0-9]*(\.[a-zA-Z_][a-zA-Z_0-9]*)*$') -def show_compilers(): +def show_compilers (): from distutils.ccompiler import show_compilers - show_compilers() @@ -62,50 +55,54 @@ class build_ext(Command): sep_by = " (separated by '%s')" % os.pathsep user_options = [ - ('build-lib=', 'b', "directory for compiled extension modules"), - ('build-temp=', 't', "directory for temporary files (build by-products)"), - ( - 'plat-name=', - 'p', - "platform name to cross-compile for, if supported " - "(default: %s)" % get_platform(), - ), - ( - 'inplace', - 'i', - "ignore build-lib and put compiled extensions into the source " - + "directory alongside your pure Python modules", - ), - ( - 'include-dirs=', - 'I', - "list of directories to search for header files" + sep_by, - ), - ('define=', 'D', "C preprocessor macros to define"), - ('undef=', 'U', "C preprocessor macros to undefine"), - ('libraries=', 'l', "external C libraries to link with"), - ( - 'library-dirs=', - 'L', - "directories to search for external C libraries" + sep_by, - ), - ('rpath=', 'R', "directories to search for shared C libraries at runtime"), - ('link-objects=', 'O', "extra explicit link objects to include in the link"), - ('debug', 'g', "compile/link with debugging information"), - ('force', 'f', "forcibly build everything (ignore file timestamps)"), - ('compiler=', 'c', "specify the compiler type"), - ('parallel=', 'j', "number of parallel build jobs"), - ('swig-cpp', None, "make SWIG create C++ files (default is C)"), - ('swig-opts=', None, "list of SWIG command line options"), - ('swig=', None, "path to the SWIG executable"), - ('user', None, "add user include, library and rpath"), - ] + ('build-lib=', 'b', + "directory for compiled extension modules"), + ('build-temp=', 't', + "directory for temporary files (build by-products)"), + ('plat-name=', 'p', + "platform name to cross-compile for, if supported " + "(default: %s)" % get_platform()), + ('inplace', 'i', + "ignore build-lib and put compiled extensions into the source " + + "directory alongside your pure Python modules"), + ('include-dirs=', 'I', + "list of directories to search for header files" + sep_by), + ('define=', 'D', + "C preprocessor macros to define"), + ('undef=', 'U', + "C preprocessor macros to undefine"), + ('libraries=', 'l', + "external C libraries to link with"), + ('library-dirs=', 'L', + "directories to search for external C libraries" + sep_by), + ('rpath=', 'R', + "directories to search for shared C libraries at runtime"), + ('link-objects=', 'O', + "extra explicit link objects to include in the link"), + ('debug', 'g', + "compile/link with debugging information"), + ('force', 'f', + "forcibly build everything (ignore file timestamps)"), + ('compiler=', 'c', + "specify the compiler type"), + ('parallel=', 'j', + "number of parallel build jobs"), + ('swig-cpp', None, + "make SWIG create C++ files (default is C)"), + ('swig-opts=', None, + "list of SWIG command line options"), + ('swig=', None, + "path to the SWIG executable"), + ('user', None, + "add user include, library and rpath") + ] boolean_options = ['inplace', 'debug', 'force', 'swig-cpp', 'user'] help_options = [ - ('help-compiler', None, "list available compilers", show_compilers), - ] + ('help-compiler', None, + "list available compilers", show_compilers), + ] def initialize_options(self): self.extensions = None @@ -131,19 +128,18 @@ def 
initialize_options(self): self.user = None self.parallel = None - def finalize_options(self): # noqa: C901 + def finalize_options(self): from distutils import sysconfig - self.set_undefined_options( - 'build', - ('build_lib', 'build_lib'), - ('build_temp', 'build_temp'), - ('compiler', 'compiler'), - ('debug', 'debug'), - ('force', 'force'), - ('parallel', 'parallel'), - ('plat_name', 'plat_name'), - ) + self.set_undefined_options('build', + ('build_lib', 'build_lib'), + ('build_temp', 'build_temp'), + ('compiler', 'compiler'), + ('debug', 'debug'), + ('force', 'force'), + ('parallel', 'parallel'), + ('plat_name', 'plat_name'), + ) if self.package is None: self.package = self.distribution.ext_package @@ -168,7 +164,8 @@ def finalize_options(self): # noqa: C901 # any local include dirs take precedence. self.include_dirs.extend(py_include.split(os.path.pathsep)) if plat_py_include != py_include: - self.include_dirs.extend(plat_py_include.split(os.path.pathsep)) + self.include_dirs.extend( + plat_py_include.split(os.path.pathsep)) self.ensure_string_list('libraries') self.ensure_string_list('link_objects') @@ -223,11 +220,9 @@ def finalize_options(self): # noqa: C901 if sys.platform[:6] == 'cygwin': if not sysconfig.python_build: # building third party extensions - self.library_dirs.append( - os.path.join( - sys.prefix, "lib", "python" + get_python_version(), "config" - ) - ) + self.library_dirs.append(os.path.join(sys.prefix, "lib", + "python" + get_python_version(), + "config")) else: # building python standard extensions self.library_dirs.append('.') @@ -235,7 +230,7 @@ def finalize_options(self): # noqa: C901 # For building extensions with a shared Python library, # Python's library directory must be appended to library_dirs # See Issues: #1600860, #4366 - if sysconfig.get_config_var('Py_ENABLE_SHARED'): + if (sysconfig.get_config_var('Py_ENABLE_SHARED')): if not sysconfig.python_build: # building third party extensions self.library_dirs.append(sysconfig.get_config_var('LIBDIR')) @@ -279,7 +274,7 @@ def finalize_options(self): # noqa: C901 except ValueError: raise DistutilsOptionError("parallel should be an integer") - def run(self): # noqa: C901 + def run(self): from distutils.ccompiler import new_compiler # 'self.extensions', as supplied by setup.py, is a list of @@ -307,12 +302,10 @@ def run(self): # noqa: C901 # Setup the CCompiler object that we'll use to do all the # compiling and linking - self.compiler = new_compiler( - compiler=self.compiler, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force, - ) + self.compiler = new_compiler(compiler=self.compiler, + verbose=self.verbose, + dry_run=self.dry_run, + force=self.force) customize_compiler(self.compiler) # If we are cross-compiling, init the compiler now (if we are not # cross-compiling, init would not hurt, but people may rely on @@ -345,7 +338,7 @@ def run(self): # noqa: C901 # Now actually compile and link everything. self.build_extensions() - def check_extensions_list(self, extensions): # noqa: C901 + def check_extensions_list(self, extensions): """Ensure that the list of extensions (presumably provided as a command option 'extensions') is valid, i.e. it is a list of Extension objects. 
We also support the old-style list of 2-tuples, @@ -357,40 +350,34 @@ def check_extensions_list(self, extensions): # noqa: C901 """ if not isinstance(extensions, list): raise DistutilsSetupError( - "'ext_modules' option must be a list of Extension instances" - ) + "'ext_modules' option must be a list of Extension instances") for i, ext in enumerate(extensions): if isinstance(ext, Extension): - continue # OK! (assume type-checking done - # by Extension constructor) + continue # OK! (assume type-checking done + # by Extension constructor) if not isinstance(ext, tuple) or len(ext) != 2: raise DistutilsSetupError( - "each element of 'ext_modules' option must be an " - "Extension instance or 2-tuple" - ) + "each element of 'ext_modules' option must be an " + "Extension instance or 2-tuple") ext_name, build_info = ext - log.warn( - "old-style (ext_name, build_info) tuple found in " - "ext_modules for extension '%s' " - "-- please convert to Extension instance", - ext_name, - ) + log.warn("old-style (ext_name, build_info) tuple found in " + "ext_modules for extension '%s' " + "-- please convert to Extension instance", ext_name) - if not (isinstance(ext_name, str) and extension_name_re.match(ext_name)): + if not (isinstance(ext_name, str) and + extension_name_re.match(ext_name)): raise DistutilsSetupError( - "first element of each tuple in 'ext_modules' " - "must be the extension name (a string)" - ) + "first element of each tuple in 'ext_modules' " + "must be the extension name (a string)") if not isinstance(build_info, dict): raise DistutilsSetupError( - "second element of each tuple in 'ext_modules' " - "must be a dictionary (build info)" - ) + "second element of each tuple in 'ext_modules' " + "must be a dictionary (build info)") # OK, the (ext_name, build_info) dict is type-safe: convert it # to an Extension instance. @@ -398,14 +385,9 @@ def check_extensions_list(self, extensions): # noqa: C901 # Easy stuff: one-to-one mapping from dict elements to # instance attributes. - for key in ( - 'include_dirs', - 'library_dirs', - 'libraries', - 'extra_objects', - 'extra_compile_args', - 'extra_link_args', - ): + for key in ('include_dirs', 'library_dirs', 'libraries', + 'extra_objects', 'extra_compile_args', + 'extra_link_args'): val = build_info.get(key) if val is not None: setattr(ext, key, val) @@ -413,7 +395,8 @@ def check_extensions_list(self, extensions): # noqa: C901 # Medium-easy stuff: same syntax/semantics, different names. ext.runtime_library_dirs = build_info.get('rpath') if 'def_file' in build_info: - log.warn("'def_file' element of build info dict " "no longer supported") + log.warn("'def_file' element of build info dict " + "no longer supported") # Non-trivial stuff: 'macros' split into 'define_macros' # and 'undef_macros'. 
@@ -424,9 +407,8 @@ def check_extensions_list(self, extensions): # noqa: C901 for macro in macros: if not (isinstance(macro, tuple) and len(macro) in (1, 2)): raise DistutilsSetupError( - "'macros' element of build info dict " - "must be 1- or 2-tuple" - ) + "'macros' element of build info dict " + "must be 1- or 2-tuple") if len(macro) == 1: ext.undef_macros.append(macro[0]) elif len(macro) == 2: @@ -479,9 +461,8 @@ def _build_extensions_parallel(self): return with ThreadPoolExecutor(max_workers=workers) as executor: - futures = [ - executor.submit(self.build_extension, ext) for ext in self.extensions - ] + futures = [executor.submit(self.build_extension, ext) + for ext in self.extensions] for ext, fut in zip(self.extensions, futures): with self._filter_build_errors(ext): fut.result() @@ -498,16 +479,16 @@ def _filter_build_errors(self, ext): except (CCompilerError, DistutilsError, CompileError) as e: if not ext.optional: raise - self.warn('building extension "{}" failed: {}'.format(ext.name, e)) + self.warn('building extension "%s" failed: %s' % + (ext.name, e)) def build_extension(self, ext): sources = ext.sources if sources is None or not isinstance(sources, (list, tuple)): raise DistutilsSetupError( - "in 'ext_modules' option (extension '%s'), " - "'sources' must be present and must be " - "a list of source filenames" % ext.name - ) + "in 'ext_modules' option (extension '%s'), " + "'sources' must be present and must be " + "a list of source filenames" % ext.name) # sort to make the resulting .so file build reproducible sources = sorted(sources) @@ -544,15 +525,13 @@ def build_extension(self, ext): for undef in ext.undef_macros: macros.append((undef,)) - objects = self.compiler.compile( - sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=ext.include_dirs, - debug=self.debug, - extra_postargs=extra_args, - depends=ext.depends, - ) + objects = self.compiler.compile(sources, + output_dir=self.build_temp, + macros=macros, + include_dirs=ext.include_dirs, + debug=self.debug, + extra_postargs=extra_args, + depends=ext.depends) # XXX outdated variable, kept here in case third-part code # needs it. 
@@ -569,8 +548,7 @@ def build_extension(self, ext): language = ext.language or self.compiler.detect_language(sources) self.compiler.link_shared_object( - objects, - ext_path, + objects, ext_path, libraries=self.get_libraries(ext), library_dirs=ext.library_dirs, runtime_library_dirs=ext.runtime_library_dirs, @@ -578,8 +556,7 @@ def build_extension(self, ext): export_symbols=self.get_export_symbols(ext), debug=self.debug, build_temp=self.build_temp, - target_lang=language, - ) + target_lang=language) def swig_sources(self, sources, extension): """Walk the list of source files in 'sources', looking for SWIG @@ -599,18 +576,15 @@ def swig_sources(self, sources, extension): if self.swig_cpp: log.warn("--swig-cpp is deprecated - use --swig-opts=-c++") - if ( - self.swig_cpp - or ('-c++' in self.swig_opts) - or ('-c++' in extension.swig_opts) - ): + if self.swig_cpp or ('-c++' in self.swig_opts) or \ + ('-c++' in extension.swig_opts): target_ext = '.cpp' else: target_ext = '.c' for source in sources: (base, ext) = os.path.splitext(source) - if ext == ".i": # SWIG interface file + if ext == ".i": # SWIG interface file new_sources.append(base + '_wrap' + target_ext) swig_sources.append(source) swig_targets[source] = new_sources[-1] @@ -657,9 +631,8 @@ def find_swig(self): return "swig.exe" else: raise DistutilsPlatformError( - "I don't know how to find (much less run) SWIG " - "on platform '%s'" % os.name - ) + "I don't know how to find (much less run) SWIG " + "on platform '%s'" % os.name) # -- Name generators ----------------------------------------------- # (extension names, filenames, whatever) @@ -677,7 +650,7 @@ def get_ext_fullpath(self, ext_name): # no further work needed # returning : # build_dir/package/path/filename - filename = os.path.join(*modpath[:-1] + [filename]) + filename = os.path.join(*modpath[:-1]+[filename]) return os.path.join(self.build_lib, filename) # the inplace option requires to find the package directory @@ -705,7 +678,6 @@ def get_ext_filename(self, ext_name): "foo\bar.pyd"). """ from distutils.sysconfig import get_config_var - ext_path = ext_name.split('.') ext_suffix = get_config_var('EXT_SUFFIX') return os.path.join(*ext_path) + ext_suffix @@ -731,7 +703,7 @@ def get_export_symbols(self, ext): ext.export_symbols.append(initfunc_name) return ext.export_symbols - def get_libraries(self, ext): # noqa: C901 + def get_libraries(self, ext): """Return the list of libraries to link against when building a shared extension. On most platforms, this is just 'ext.libraries'; on Windows, we add the Python library (eg. python20.dll). @@ -743,15 +715,12 @@ def get_libraries(self, ext): # noqa: C901 # Append '_d' to the python import library on debug builds. if sys.platform == "win32": from distutils._msvccompiler import MSVCCompiler - if not isinstance(self.compiler, MSVCCompiler): template = "python%d%d" if self.debug: template = template + '_d' - pythonlib = template % ( - sys.hexversion >> 24, - (sys.hexversion >> 16) & 0xFF, - ) + pythonlib = (template % + (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff)) # don't extend ext.libraries, it may be shared with other # extensions, it is a reference to the original list return ext.libraries + [pythonlib] @@ -765,7 +734,6 @@ def get_libraries(self, ext): # noqa: C901 # Windows like MinGW) it is simply necessary that all symbols in # shared libraries are resolved at link time. 
from distutils.sysconfig import get_config_var - link_libpython = False if get_config_var('Py_ENABLE_SHARED'): # A native build on an Android device or on Cygwin diff --git a/venv/lib/python3.10/site-packages/setuptools/_distutils/command/build_py.py b/venv/lib/python3.10/site-packages/setuptools/_distutils/command/build_py.py index 47c6158..7ef9bce 100644 --- a/venv/lib/python3.10/site-packages/setuptools/_distutils/command/build_py.py +++ b/venv/lib/python3.10/site-packages/setuptools/_distutils/command/build_py.py @@ -8,12 +8,11 @@ import glob from distutils.core import Command -from distutils.errors import DistutilsOptionError, DistutilsFileError +from distutils.errors import * from distutils.util import convert_path from distutils import log - -class build_py(Command): +class build_py (Command): description = "\"build\" pure Python modules (copy to build directory)" @@ -21,17 +20,14 @@ class build_py(Command): ('build-lib=', 'd', "directory to \"build\" (copy) to"), ('compile', 'c', "compile .py to .pyc"), ('no-compile', None, "don't compile .py files [default]"), - ( - 'optimize=', - 'O', - "also compile with optimization: -O1 for \"python -O\", " - "-O2 for \"python -OO\", and -O0 to disable [default: -O0]", - ), + ('optimize=', 'O', + "also compile with optimization: -O1 for \"python -O\", " + "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"), ('force', 'f', "forcibly build everything (ignore file timestamps)"), - ] + ] boolean_options = ['compile', 'force'] - negative_opt = {'no-compile': 'compile'} + negative_opt = {'no-compile' : 'compile'} def initialize_options(self): self.build_lib = None @@ -44,9 +40,9 @@ def initialize_options(self): self.force = None def finalize_options(self): - self.set_undefined_options( - 'build', ('build_lib', 'build_lib'), ('force', 'force') - ) + self.set_undefined_options('build', + ('build_lib', 'build_lib'), + ('force', 'force')) # Get the distribution options that are aliases for build_py # options -- list of packages and list of modules. 
@@ -113,42 +109,42 @@ def get_data_files(self): # Length of path to strip from found files plen = 0 if src_dir: - plen = len(src_dir) + 1 + plen = len(src_dir)+1 # Strip directory from globbed filenames - filenames = [file[plen:] for file in self.find_data_files(package, src_dir)] + filenames = [ + file[plen:] for file in self.find_data_files(package, src_dir) + ] data.append((package, src_dir, build_dir, filenames)) return data def find_data_files(self, package, src_dir): """Return filenames for package's data files in 'src_dir'""" - globs = self.package_data.get('', []) + self.package_data.get(package, []) + globs = (self.package_data.get('', []) + + self.package_data.get(package, [])) files = [] for pattern in globs: # Each pattern has to be converted to a platform-specific path - filelist = glob.glob( - os.path.join(glob.escape(src_dir), convert_path(pattern)) - ) + filelist = glob.glob(os.path.join(glob.escape(src_dir), convert_path(pattern))) # Files that match more than one pattern are only added once - files.extend( - [fn for fn in filelist if fn not in files and os.path.isfile(fn)] - ) + files.extend([fn for fn in filelist if fn not in files + and os.path.isfile(fn)]) return files def build_package_data(self): """Copy data files into build directory""" + lastdir = None for package, src_dir, build_dir, filenames in self.data_files: for filename in filenames: target = os.path.join(build_dir, filename) self.mkpath(os.path.dirname(target)) - self.copy_file( - os.path.join(src_dir, filename), target, preserve_mode=False - ) + self.copy_file(os.path.join(src_dir, filename), target, + preserve_mode=False) def get_package_dir(self, package): """Return the directory, relative to the top of the source - distribution, where package 'package' should be found - (at least according to the 'package_dir' option, if any).""" + distribution, where package 'package' should be found + (at least according to the 'package_dir' option, if any).""" path = package.split('.') if not self.package_dir: @@ -192,19 +188,20 @@ def check_package(self, package, package_dir): if package_dir != "": if not os.path.exists(package_dir): raise DistutilsFileError( - "package directory '%s' does not exist" % package_dir - ) + "package directory '%s' does not exist" % package_dir) if not os.path.isdir(package_dir): raise DistutilsFileError( - "supposed package directory '%s' exists, " - "but is not a directory" % package_dir - ) + "supposed package directory '%s' exists, " + "but is not a directory" % package_dir) - # Directories without __init__.py are namespace packages (PEP 420). + # Require __init__.py for all but the "root package" if package: init_py = os.path.join(package_dir, "__init__.py") if os.path.isfile(init_py): return init_py + else: + log.warn(("package init file '%s' not found " + + "(or not a regular file)"), init_py) # Either not in a package at all (__init__.py not expected), or # __init__.py doesn't exist -- so don't return the filename. 
@@ -316,21 +313,17 @@ def get_outputs(self, include_bytecode=1): outputs.append(filename) if include_bytecode: if self.compile: - outputs.append( - importlib.util.cache_from_source(filename, optimization='') - ) + outputs.append(importlib.util.cache_from_source( + filename, optimization='')) if self.optimize > 0: - outputs.append( - importlib.util.cache_from_source( - filename, optimization=self.optimize - ) - ) + outputs.append(importlib.util.cache_from_source( + filename, optimization=self.optimize)) outputs += [ os.path.join(build_dir, filename) for package, src_dir, build_dir, filenames in self.data_files for filename in filenames - ] + ] return outputs @@ -339,8 +332,7 @@ def build_module(self, module, module_file, package): package = package.split('.') elif not isinstance(package, (list, tuple)): raise TypeError( - "'package' must be a string (dot-separated), list, or tuple" - ) + "'package' must be a string (dot-separated), list, or tuple") # Now put the module source file into the "build" area -- this is # easy, we just copy it somewhere under self.build_lib (the build @@ -385,7 +377,6 @@ def byte_compile(self, files): return from distutils.util import byte_compile - prefix = self.build_lib if prefix[-1] != os.sep: prefix = prefix + os.sep @@ -394,14 +385,8 @@ def byte_compile(self, files): # method of the "install_lib" command, except for the determination # of the 'prefix' string. Hmmm. if self.compile: - byte_compile( - files, optimize=0, force=self.force, prefix=prefix, dry_run=self.dry_run - ) + byte_compile(files, optimize=0, + force=self.force, prefix=prefix, dry_run=self.dry_run) if self.optimize > 0: - byte_compile( - files, - optimize=self.optimize, - force=self.force, - prefix=prefix, - dry_run=self.dry_run, - ) + byte_compile(files, optimize=self.optimize, + force=self.force, prefix=prefix, dry_run=self.dry_run) diff --git a/venv/lib/python3.10/site-packages/setuptools/_distutils/command/build_scripts.py b/venv/lib/python3.10/site-packages/setuptools/_distutils/command/build_scripts.py index 2cc5d1e..e3312cf 100644 --- a/venv/lib/python3.10/site-packages/setuptools/_distutils/command/build_scripts.py +++ b/venv/lib/python3.10/site-packages/setuptools/_distutils/command/build_scripts.py @@ -2,8 +2,7 @@ Implements the Distutils 'build_scripts' command.""" -import os -import re +import os, re from stat import ST_MODE from distutils import sysconfig from distutils.core import Command @@ -12,14 +11,8 @@ from distutils import log import tokenize -shebang_pattern = re.compile('^#!.*python[0-9.]*([ \t].*)?$') -""" -Pattern matching a Python interpreter indicated in first line of a script. 
-""" - -# for Setuptools compatibility -first_line_re = shebang_pattern - +# check if Python is called on the first line with this expression +first_line_re = re.compile(b'^#!.*python[0-9.]*([ \t].*)?$') class build_scripts(Command): @@ -29,23 +22,23 @@ class build_scripts(Command): ('build-dir=', 'd', "directory to \"build\" (copy) to"), ('force', 'f', "forcibly build everything (ignore file timestamps"), ('executable=', 'e', "specify final destination interpreter path"), - ] + ] boolean_options = ['force'] + def initialize_options(self): self.build_dir = None self.scripts = None self.force = None self.executable = None + self.outfiles = None def finalize_options(self): - self.set_undefined_options( - 'build', - ('build_scripts', 'build_dir'), - ('force', 'force'), - ('executable', 'executable'), - ) + self.set_undefined_options('build', + ('build_scripts', 'build_dir'), + ('force', 'force'), + ('executable', 'executable')) self.scripts = self.distribution.scripts def get_source_files(self): @@ -56,118 +49,104 @@ def run(self): return self.copy_scripts() - def copy_scripts(self): - """ - Copy each script listed in ``self.scripts``. - If a script is marked as a Python script (first line matches - 'shebang_pattern', i.e. starts with ``#!`` and contains - "python"), then adjust in the copy the first line to refer to - the current Python interpreter. + def copy_scripts(self): + r"""Copy each script listed in 'self.scripts'; if it's marked as a + Python script in the Unix way (first line matches 'first_line_re', + ie. starts with "\#!" and contains "python"), then adjust the first + line to refer to the current Python interpreter as we copy. """ self.mkpath(self.build_dir) outfiles = [] updated_files = [] for script in self.scripts: - self._copy_script(script, outfiles, updated_files) - - self._change_modes(outfiles) - - return outfiles, updated_files - - def _copy_script(self, script, outfiles, updated_files): # noqa: C901 - shebang_match = None - script = convert_path(script) - outfile = os.path.join(self.build_dir, os.path.basename(script)) - outfiles.append(outfile) - - if not self.force and not newer(script, outfile): - log.debug("not copying %s (up-to-date)", script) - return - - # Always open the file, but ignore failures in dry-run mode - # in order to attempt to copy directly. - try: - f = tokenize.open(script) - except OSError: - if not self.dry_run: - raise - f = None - else: - first_line = f.readline() - if not first_line: - self.warn("%s is an empty file (skipping)" % script) - return - - shebang_match = shebang_pattern.match(first_line) - - updated_files.append(outfile) - if shebang_match: - log.info("copying and adjusting %s -> %s", script, self.build_dir) - if not self.dry_run: - if not sysconfig.python_build: - executable = self.executable + adjust = False + script = convert_path(script) + outfile = os.path.join(self.build_dir, os.path.basename(script)) + outfiles.append(outfile) + + if not self.force and not newer(script, outfile): + log.debug("not copying %s (up-to-date)", script) + continue + + # Always open the file, but ignore failures in dry-run mode -- + # that way, we'll get accurate feedback if we can read the + # script. 
+ try: + f = open(script, "rb") + except OSError: + if not self.dry_run: + raise + f = None + else: + encoding, lines = tokenize.detect_encoding(f.readline) + f.seek(0) + first_line = f.readline() + if not first_line: + self.warn("%s is an empty file (skipping)" % script) + continue + + match = first_line_re.match(first_line) + if match: + adjust = True + post_interp = match.group(1) or b'' + + if adjust: + log.info("copying and adjusting %s -> %s", script, + self.build_dir) + updated_files.append(outfile) + if not self.dry_run: + if not sysconfig.python_build: + executable = self.executable + else: + executable = os.path.join( + sysconfig.get_config_var("BINDIR"), + "python%s%s" % (sysconfig.get_config_var("VERSION"), + sysconfig.get_config_var("EXE"))) + executable = os.fsencode(executable) + shebang = b"#!" + executable + post_interp + b"\n" + # Python parser starts to read a script using UTF-8 until + # it gets a #coding:xxx cookie. The shebang has to be the + # first line of a file, the #coding:xxx cookie cannot be + # written before. So the shebang has to be decodable from + # UTF-8. + try: + shebang.decode('utf-8') + except UnicodeDecodeError: + raise ValueError( + "The shebang ({!r}) is not decodable " + "from utf-8".format(shebang)) + # If the script is encoded to a custom encoding (use a + # #coding:xxx cookie), the shebang has to be decodable from + # the script encoding too. + try: + shebang.decode(encoding) + except UnicodeDecodeError: + raise ValueError( + "The shebang ({!r}) is not decodable " + "from the script encoding ({})" + .format(shebang, encoding)) + with open(outfile, "wb") as outf: + outf.write(shebang) + outf.writelines(f.readlines()) + if f: + f.close() + else: + if f: + f.close() + updated_files.append(outfile) + self.copy_file(script, outfile) + + if os.name == 'posix': + for file in outfiles: + if self.dry_run: + log.info("changing mode of %s", file) else: - executable = os.path.join( - sysconfig.get_config_var("BINDIR"), - "python%s%s" - % ( - sysconfig.get_config_var("VERSION"), - sysconfig.get_config_var("EXE"), - ), - ) - post_interp = shebang_match.group(1) or '' - shebang = "#!" + executable + post_interp + "\n" - self._validate_shebang(shebang, f.encoding) - with open(outfile, "w", encoding=f.encoding) as outf: - outf.write(shebang) - outf.writelines(f.readlines()) - if f: - f.close() - else: - if f: - f.close() - self.copy_file(script, outfile) - - def _change_modes(self, outfiles): - if os.name != 'posix': - return - - for file in outfiles: - self._change_mode(file) - - def _change_mode(self, file): - if self.dry_run: - log.info("changing mode of %s", file) - return - - oldmode = os.stat(file)[ST_MODE] & 0o7777 - newmode = (oldmode | 0o555) & 0o7777 - if newmode != oldmode: - log.info("changing mode of %s from %o to %o", file, oldmode, newmode) - os.chmod(file, newmode) - - @staticmethod - def _validate_shebang(shebang, encoding): - # Python parser starts to read a script using UTF-8 until - # it gets a #coding:xxx cookie. The shebang has to be the - # first line of a file, the #coding:xxx cookie cannot be - # written before. So the shebang has to be encodable to - # UTF-8. - try: - shebang.encode('utf-8') - except UnicodeEncodeError: - raise ValueError( - "The shebang ({!r}) is not encodable " "to utf-8".format(shebang) - ) - - # If the script is encoded to a custom encoding (use a - # #coding:xxx cookie), the shebang has to be encodable to - # the script encoding too. 
- try: - shebang.encode(encoding) - except UnicodeEncodeError: - raise ValueError( - "The shebang ({!r}) is not encodable " - "to the script encoding ({})".format(shebang, encoding) - ) + oldmode = os.stat(file)[ST_MODE] & 0o7777 + newmode = (oldmode | 0o555) & 0o7777 + if newmode != oldmode: + log.info("changing mode of %s from %o to %o", + file, oldmode, newmode) + os.chmod(file, newmode) + # XXX should we modify self.outfiles? + return outfiles, updated_files diff --git a/venv/lib/python3.10/site-packages/setuptools/_distutils/command/check.py b/venv/lib/python3.10/site-packages/setuptools/_distutils/command/check.py index 539481c..ada2500 100644 --- a/venv/lib/python3.10/site-packages/setuptools/_distutils/command/check.py +++ b/venv/lib/python3.10/site-packages/setuptools/_distutils/command/check.py @@ -2,56 +2,46 @@ Implements the Distutils 'check' command. """ -import contextlib - from distutils.core import Command from distutils.errors import DistutilsSetupError -with contextlib.suppress(ImportError): - import docutils.utils - import docutils.parsers.rst - import docutils.frontend - import docutils.nodes - - class SilentReporter(docutils.utils.Reporter): - def __init__( - self, - source, - report_level, - halt_level, - stream=None, - debug=0, - encoding='ascii', - error_handler='replace', - ): +try: + # docutils is installed + from docutils.utils import Reporter + from docutils.parsers.rst import Parser + from docutils import frontend + from docutils import nodes + + class SilentReporter(Reporter): + + def __init__(self, source, report_level, halt_level, stream=None, + debug=0, encoding='ascii', error_handler='replace'): self.messages = [] - super().__init__( - source, report_level, halt_level, stream, debug, encoding, error_handler - ) + Reporter.__init__(self, source, report_level, halt_level, stream, + debug, encoding, error_handler) def system_message(self, level, message, *children, **kwargs): self.messages.append((level, message, children, kwargs)) - return docutils.nodes.system_message( - message, level=level, type=self.levels[level], *children, **kwargs - ) + return nodes.system_message(message, level=level, + type=self.levels[level], + *children, **kwargs) + HAS_DOCUTILS = True +except Exception: + # Catch all exceptions because exceptions besides ImportError probably + # indicate that docutils is not ported to Py3k. + HAS_DOCUTILS = False class check(Command): - """This command checks the meta-data of the package.""" - - description = "perform some checks on the package" - user_options = [ - ('metadata', 'm', 'Verify meta-data'), - ( - 'restructuredtext', - 'r', - ( - 'Checks if long string meta-data syntax ' - 'are reStructuredText-compliant' - ), - ), - ('strict', 's', 'Will exit with an error if a check fails'), - ] + """This command checks the meta-data of the package. 
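
Back in build_scripts, the chmod arithmetic near the end of copy_scripts guarantees the read/execute permission bits without disturbing anything else; pure integer arithmetic, no filesystem involved:

oldmode = 0o644                       # rw-r--r--
newmode = (oldmode | 0o555) & 0o7777  # force r-x for user, group and other
assert oct(newmode) == '0o755'        # rwxr-xr-x; existing write bits are kept as-is
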
+ """ + description = ("perform some checks on the package") + user_options = [('metadata', 'm', 'Verify meta-data'), + ('restructuredtext', 'r', + ('Checks if long string meta-data syntax ' + 'are reStructuredText-compliant')), + ('strict', 's', + 'Will exit with an error if a check fails')] boolean_options = ['metadata', 'restructuredtext', 'strict'] @@ -76,11 +66,8 @@ def run(self): if self.metadata: self.check_metadata() if self.restructuredtext: - if 'docutils' in globals(): - try: - self.check_restructuredtext() - except TypeError as exc: - raise DistutilsSetupError(str(exc)) + if HAS_DOCUTILS: + self.check_restructuredtext() elif self.strict: raise DistutilsSetupError('The docutils package is needed.') @@ -93,19 +80,34 @@ def check_metadata(self): """Ensures that all required elements of meta-data are supplied. Required fields: - name, version + name, version, URL + + Recommended fields: + (author and author_email) or (maintainer and maintainer_email)) Warns if any are missing. """ metadata = self.distribution.metadata missing = [] - for attr in 'name', 'version': - if not getattr(metadata, attr, None): + for attr in ('name', 'version', 'url'): + if not (hasattr(metadata, attr) and getattr(metadata, attr)): missing.append(attr) if missing: - self.warn("missing required meta-data: %s" % ', '.join(missing)) + self.warn("missing required meta-data: %s" % ', '.join(missing)) + if metadata.author: + if not metadata.author_email: + self.warn("missing meta-data: if 'author' supplied, " + + "'author_email' should be supplied too") + elif metadata.maintainer: + if not metadata.maintainer_email: + self.warn("missing meta-data: if 'maintainer' supplied, " + + "'maintainer_email' should be supplied too") + else: + self.warn("missing meta-data: either (author and author_email) " + + "or (maintainer and maintainer_email) " + + "should be supplied") def check_restructuredtext(self): """Checks if the long string fields are reST-compliant.""" @@ -115,37 +117,32 @@ def check_restructuredtext(self): if line is None: warning = warning[1] else: - warning = '{} (line {})'.format(warning[1], line) + warning = '%s (line %s)' % (warning[1], line) self.warn(warning) def _check_rst_data(self, data): """Returns warnings when the provided data doesn't compile.""" # the include and csv_table directives need this to be a path source_path = self.distribution.script_name or 'setup.py' - parser = docutils.parsers.rst.Parser() - settings = docutils.frontend.OptionParser( - components=(docutils.parsers.rst.Parser,) - ).get_default_values() + parser = Parser() + settings = frontend.OptionParser(components=(Parser,)).get_default_values() settings.tab_width = 4 settings.pep_references = None settings.rfc_references = None - reporter = SilentReporter( - source_path, - settings.report_level, - settings.halt_level, - stream=settings.warning_stream, - debug=settings.debug, - encoding=settings.error_encoding, - error_handler=settings.error_encoding_error_handler, - ) - - document = docutils.nodes.document(settings, reporter, source=source_path) + reporter = SilentReporter(source_path, + settings.report_level, + settings.halt_level, + stream=settings.warning_stream, + debug=settings.debug, + encoding=settings.error_encoding, + error_handler=settings.error_encoding_error_handler) + + document = nodes.document(settings, reporter, source=source_path) document.note_source(source_path, -1) try: parser.parse(data, document) except AttributeError as e: reporter.messages.append( - (-1, 'Could not finish the parsing: %s.' 
% e, '', {}) - ) + (-1, 'Could not finish the parsing: %s.' % e, '', {})) return reporter.messages diff --git a/venv/lib/python3.10/site-packages/setuptools/_distutils/command/clean.py b/venv/lib/python3.10/site-packages/setuptools/_distutils/command/clean.py index b731b60..0cb2701 100644 --- a/venv/lib/python3.10/site-packages/setuptools/_distutils/command/clean.py +++ b/venv/lib/python3.10/site-packages/setuptools/_distutils/command/clean.py @@ -9,25 +9,22 @@ from distutils.dir_util import remove_tree from distutils import log - class clean(Command): description = "clean up temporary files from 'build' command" user_options = [ - ('build-base=', 'b', "base build directory (default: 'build.build-base')"), - ( - 'build-lib=', - None, - "build directory for all modules (default: 'build.build-lib')", - ), - ('build-temp=', 't', "temporary build directory (default: 'build.build-temp')"), - ( - 'build-scripts=', - None, - "build directory for scripts (default: 'build.build-scripts')", - ), - ('bdist-base=', None, "temporary directory for built distributions"), - ('all', 'a', "remove all build output, not just temporary by-products"), + ('build-base=', 'b', + "base build directory (default: 'build.build-base')"), + ('build-lib=', None, + "build directory for all modules (default: 'build.build-lib')"), + ('build-temp=', 't', + "temporary build directory (default: 'build.build-temp')"), + ('build-scripts=', None, + "build directory for scripts (default: 'build.build-scripts')"), + ('bdist-base=', None, + "temporary directory for built distributions"), + ('all', 'a', + "remove all build output, not just temporary by-products") ] boolean_options = ['all'] @@ -41,14 +38,13 @@ def initialize_options(self): self.all = None def finalize_options(self): - self.set_undefined_options( - 'build', - ('build_base', 'build_base'), - ('build_lib', 'build_lib'), - ('build_scripts', 'build_scripts'), - ('build_temp', 'build_temp'), - ) - self.set_undefined_options('bdist', ('bdist_base', 'bdist_base')) + self.set_undefined_options('build', + ('build_base', 'build_base'), + ('build_lib', 'build_lib'), + ('build_scripts', 'build_scripts'), + ('build_temp', 'build_temp')) + self.set_undefined_options('bdist', + ('bdist_base', 'bdist_base')) def run(self): # remove the build/temp. directory (unless it's already @@ -56,15 +52,19 @@ def run(self): if os.path.exists(self.build_temp): remove_tree(self.build_temp, dry_run=self.dry_run) else: - log.debug("'%s' does not exist -- can't clean it", self.build_temp) + log.debug("'%s' does not exist -- can't clean it", + self.build_temp) if self.all: # remove build directories - for directory in (self.build_lib, self.bdist_base, self.build_scripts): + for directory in (self.build_lib, + self.bdist_base, + self.build_scripts): if os.path.exists(directory): remove_tree(directory, dry_run=self.dry_run) else: - log.warn("'%s' does not exist -- can't clean it", directory) + log.warn("'%s' does not exist -- can't clean it", + directory) # just for the heck of it, try to remove the base build directory: # we might have emptied it right now, but if not we don't care diff --git a/venv/lib/python3.10/site-packages/setuptools/_distutils/command/config.py b/venv/lib/python3.10/site-packages/setuptools/_distutils/command/config.py index 4492c89..aeda408 100644 --- a/venv/lib/python3.10/site-packages/setuptools/_distutils/command/config.py +++ b/venv/lib/python3.10/site-packages/setuptools/_distutils/command/config.py @@ -9,8 +9,7 @@ this header file lives". 
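
For reference, the reST validation in _check_rst_data above can be reproduced in a few lines. A condensed sketch -- it requires docutils (the releases this code targets) and substitutes the utils.new_document helper for the hand-built document node:

from docutils import frontend, utils
from docutils.parsers.rst import Parser

parser = Parser()
settings = frontend.OptionParser(components=(Parser,)).get_default_values()
settings.tab_width = 4
document = utils.new_document('<long_description>', settings)
parser.parse('Title\n=====\n\nSome *emphasis*.\n', document)  # malformed reST emits system messages
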
""" -import os -import re +import os, re from distutils.core import Command from distutils.errors import DistutilsExecError @@ -19,26 +18,32 @@ LANG_EXT = {"c": ".c", "c++": ".cxx"} - class config(Command): description = "prepare to build" user_options = [ - ('compiler=', None, "specify the compiler type"), - ('cc=', None, "specify the compiler executable"), - ('include-dirs=', 'I', "list of directories to search for header files"), - ('define=', 'D', "C preprocessor macros to define"), - ('undef=', 'U', "C preprocessor macros to undefine"), - ('libraries=', 'l', "external C libraries to link with"), - ('library-dirs=', 'L', "directories to search for external C libraries"), - ('noisy', None, "show every action (compile, link, run, ...) taken"), - ( - 'dump-source', - None, - "dump generated source files before attempting to compile them", - ), - ] + ('compiler=', None, + "specify the compiler type"), + ('cc=', None, + "specify the compiler executable"), + ('include-dirs=', 'I', + "list of directories to search for header files"), + ('define=', 'D', + "C preprocessor macros to define"), + ('undef=', 'U', + "C preprocessor macros to undefine"), + ('libraries=', 'l', + "external C libraries to link with"), + ('library-dirs=', 'L', + "directories to search for external C libraries"), + + ('noisy', None, + "show every action (compile, link, run, ...) taken"), + ('dump-source', None, + "dump generated source files before attempting to compile them"), + ] + # The three standard command methods: since the "config" command # does nothing by default, these are empty. @@ -88,11 +93,9 @@ def _check_compiler(self): # We do this late, and only on-demand, because this is an expensive # import. from distutils.ccompiler import CCompiler, new_compiler - if not isinstance(self.compiler, CCompiler): - self.compiler = new_compiler( - compiler=self.compiler, dry_run=self.dry_run, force=1 - ) + self.compiler = new_compiler(compiler=self.compiler, + dry_run=self.dry_run, force=1) customize_compiler(self.compiler) if self.include_dirs: self.compiler.set_include_dirs(self.include_dirs) @@ -129,16 +132,14 @@ def _compile(self, body, headers, include_dirs, lang): self.compiler.compile([src], include_dirs=include_dirs) return (src, obj) - def _link(self, body, headers, include_dirs, libraries, library_dirs, lang): + def _link(self, body, headers, include_dirs, libraries, library_dirs, + lang): (src, obj) = self._compile(body, headers, include_dirs, lang) prog = os.path.splitext(os.path.basename(src))[0] - self.compiler.link_executable( - [obj], - prog, - libraries=libraries, - library_dirs=library_dirs, - target_lang=lang, - ) + self.compiler.link_executable([obj], prog, + libraries=libraries, + library_dirs=library_dirs, + target_lang=lang) if self.compiler.exe_extension is not None: prog = prog + self.compiler.exe_extension @@ -157,6 +158,7 @@ def _clean(self, *filenames): except OSError: pass + # XXX these ignore the dry-run flag: what to do, what to do? even if # you want a dry-run build, you still need some sort of configuration # info. My inclination is to make it up to the real config command to @@ -175,7 +177,6 @@ def try_cpp(self, body=None, headers=None, include_dirs=None, lang="c"): ('body' probably isn't of much use, but what the heck.) 
""" from distutils.ccompiler import CompileError - self._check_compiler() ok = True try: @@ -186,7 +187,8 @@ def try_cpp(self, body=None, headers=None, include_dirs=None, lang="c"): self._clean() return ok - def search_cpp(self, pattern, body=None, headers=None, include_dirs=None, lang="c"): + def search_cpp(self, pattern, body=None, headers=None, include_dirs=None, + lang="c"): """Construct a source file (just like 'try_cpp()'), run it through the preprocessor, and return true if any line of the output matches 'pattern'. 'pattern' should either be a compiled regex object or a @@ -218,7 +220,6 @@ def try_compile(self, body, headers=None, include_dirs=None, lang="c"): Return true on success, false otherwise. """ from distutils.ccompiler import CompileError - self._check_compiler() try: self._compile(body, headers, include_dirs, lang) @@ -230,24 +231,17 @@ def try_compile(self, body, headers=None, include_dirs=None, lang="c"): self._clean() return ok - def try_link( - self, - body, - headers=None, - include_dirs=None, - libraries=None, - library_dirs=None, - lang="c", - ): + def try_link(self, body, headers=None, include_dirs=None, libraries=None, + library_dirs=None, lang="c"): """Try to compile and link a source file, built from 'body' and 'headers', to executable form. Return true on success, false otherwise. """ from distutils.ccompiler import CompileError, LinkError - self._check_compiler() try: - self._link(body, headers, include_dirs, libraries, library_dirs, lang) + self._link(body, headers, include_dirs, + libraries, library_dirs, lang) ok = True except (CompileError, LinkError): ok = False @@ -256,26 +250,17 @@ def try_link( self._clean() return ok - def try_run( - self, - body, - headers=None, - include_dirs=None, - libraries=None, - library_dirs=None, - lang="c", - ): + def try_run(self, body, headers=None, include_dirs=None, libraries=None, + library_dirs=None, lang="c"): """Try to compile, link to an executable, and run a program built from 'body' and 'headers'. Return true on success, false otherwise. """ from distutils.ccompiler import CompileError, LinkError - self._check_compiler() try: - src, obj, exe = self._link( - body, headers, include_dirs, libraries, library_dirs, lang - ) + src, obj, exe = self._link(body, headers, include_dirs, + libraries, library_dirs, lang) self.spawn([exe]) ok = True except (CompileError, LinkError, DistutilsExecError): @@ -285,20 +270,13 @@ def try_run( self._clean() return ok + # -- High-level methods -------------------------------------------- # (these are the ones that are actually likely to be useful # when implementing a real-world config command!) - def check_func( - self, - func, - headers=None, - include_dirs=None, - libraries=None, - library_dirs=None, - decl=0, - call=0, - ): + def check_func(self, func, headers=None, include_dirs=None, + libraries=None, library_dirs=None, decl=0, call=0): """Determine if function 'func' is available by constructing a source file that refers to 'func', and compiles and links it. If everything succeeds, returns true; otherwise returns false. 
@@ -324,16 +302,11 @@ def check_func( body.append("}") body = "\n".join(body) + "\n" - return self.try_link(body, headers, include_dirs, libraries, library_dirs) + return self.try_link(body, headers, include_dirs, + libraries, library_dirs) - def check_lib( - self, - library, - library_dirs=None, - headers=None, - include_dirs=None, - other_libraries=[], - ): + def check_lib(self, library, library_dirs=None, headers=None, + include_dirs=None, other_libraries=[]): """Determine if 'library' is available to be linked against, without actually checking that any particular symbols are provided by it. 'headers' will be used in constructing the source file to @@ -343,23 +316,17 @@ def check_lib( has symbols that depend on other libraries. """ self._check_compiler() - return self.try_link( - "int main (void) { }", - headers, - include_dirs, - [library] + other_libraries, - library_dirs, - ) - - def check_header(self, header, include_dirs=None, library_dirs=None, lang="c"): + return self.try_link("int main (void) { }", headers, include_dirs, + [library] + other_libraries, library_dirs) + + def check_header(self, header, include_dirs=None, library_dirs=None, + lang="c"): """Determine if the system header file named by 'header_file' exists and can be found by the preprocessor; return true if so, false otherwise. """ - return self.try_cpp( - body="/* No body */", headers=[header], include_dirs=include_dirs - ) - + return self.try_cpp(body="/* No body */", headers=[header], + include_dirs=include_dirs) def dump_file(filename, head=None): """Dumps a file content into log.info. diff --git a/venv/lib/python3.10/site-packages/setuptools/_distutils/command/install.py b/venv/lib/python3.10/site-packages/setuptools/_distutils/command/install.py index a38cddc..18b352f 100644 --- a/venv/lib/python3.10/site-packages/setuptools/_distutils/command/install.py +++ b/venv/lib/python3.10/site-packages/setuptools/_distutils/command/install.py @@ -12,16 +12,14 @@ from distutils.core import Command from distutils.debug import DEBUG from distutils.sysconfig import get_config_vars +from distutils.errors import DistutilsPlatformError from distutils.file_util import write_file from distutils.util import convert_path, subst_vars, change_root from distutils.util import get_platform -from distutils.errors import DistutilsOptionError, DistutilsPlatformError -from . import _framework_compat as fw -from .. 
import _collections +from distutils.errors import DistutilsOptionError from site import USER_BASE from site import USER_SITE - HAS_USER_SITE = True WINDOWS_SCHEME = { @@ -29,66 +27,59 @@ 'platlib': '{base}/Lib/site-packages', 'headers': '{base}/Include/{dist_name}', 'scripts': '{base}/Scripts', - 'data': '{base}', + 'data' : '{base}', } INSTALL_SCHEMES = { 'posix_prefix': { 'purelib': '{base}/lib/{implementation_lower}{py_version_short}/site-packages', - 'platlib': '{platbase}/{platlibdir}/{implementation_lower}' - '{py_version_short}/site-packages', - 'headers': '{base}/include/{implementation_lower}' - '{py_version_short}{abiflags}/{dist_name}', + 'platlib': '{platbase}/{platlibdir}/{implementation_lower}{py_version_short}/site-packages', + 'headers': '{base}/include/{implementation_lower}{py_version_short}{abiflags}/{dist_name}', 'scripts': '{base}/bin', - 'data': '{base}', - }, + 'data' : '{base}', + }, 'posix_home': { 'purelib': '{base}/lib/{implementation_lower}', 'platlib': '{base}/{platlibdir}/{implementation_lower}', 'headers': '{base}/include/{implementation_lower}/{dist_name}', 'scripts': '{base}/bin', - 'data': '{base}', - }, + 'data' : '{base}', + }, 'nt': WINDOWS_SCHEME, 'pypy': { 'purelib': '{base}/site-packages', 'platlib': '{base}/site-packages', 'headers': '{base}/include/{dist_name}', 'scripts': '{base}/bin', - 'data': '{base}', - }, + 'data' : '{base}', + }, 'pypy_nt': { 'purelib': '{base}/site-packages', 'platlib': '{base}/site-packages', 'headers': '{base}/include/{dist_name}', 'scripts': '{base}/Scripts', - 'data': '{base}', - }, -} + 'data' : '{base}', + }, + } # user site schemes if HAS_USER_SITE: INSTALL_SCHEMES['nt_user'] = { 'purelib': '{usersite}', 'platlib': '{usersite}', - 'headers': '{userbase}/{implementation}{py_version_nodot_plat}' - '/Include/{dist_name}', - 'scripts': '{userbase}/{implementation}{py_version_nodot_plat}/Scripts', - 'data': '{userbase}', - } + 'headers': '{userbase}/{implementation}{py_version_nodot}/Include/{dist_name}', + 'scripts': '{userbase}/{implementation}{py_version_nodot}/Scripts', + 'data' : '{userbase}', + } INSTALL_SCHEMES['posix_user'] = { 'purelib': '{usersite}', 'platlib': '{usersite}', - 'headers': '{userbase}/include/{implementation_lower}' - '{py_version_short}{abiflags}/{dist_name}', + 'headers': + '{userbase}/include/{implementation_lower}{py_version_short}{abiflags}/{dist_name}', 'scripts': '{userbase}/bin', - 'data': '{userbase}', - } - - -INSTALL_SCHEMES.update(fw.schemes) - + 'data' : '{userbase}', + } # The keys to an installation scheme; if any new types of files are to be # installed, be sure to add an entry to every installation scheme above, @@ -127,131 +118,78 @@ def _get_implementation(): return 'Python' -def _select_scheme(ob, name): - scheme = _inject_headers(name, _load_scheme(_resolve_scheme(name))) - vars(ob).update(_remove_set(ob, _scheme_attrs(scheme))) - - -def _remove_set(ob, attrs): - """ - Include only attrs that are None in ob. - """ - return {key: value for key, value in attrs.items() if getattr(ob, key) is None} - - -def _resolve_scheme(name): - os_name, sep, key = name.partition('_') - try: - resolved = sysconfig.get_preferred_scheme(key) - except Exception: - resolved = fw.scheme(_pypy_hack(name)) - return resolved - - -def _load_scheme(name): - return _load_schemes()[name] - - -def _inject_headers(name, scheme): - """ - Given a scheme name and the resolved scheme, - if the scheme does not include headers, resolve - the fallback scheme for the name and use headers - from it. 
pypa/distutils#88 - """ - # Bypass the preferred scheme, which may not - # have defined headers. - fallback = _load_scheme(_pypy_hack(name)) - scheme.setdefault('headers', fallback['headers']) - return scheme - - -def _scheme_attrs(scheme): - """Resolve install directories by applying the install schemes.""" - return {f'install_{key}': scheme[key] for key in SCHEME_KEYS} - - -def _pypy_hack(name): - PY37 = sys.version_info < (3, 8) - old_pypy = hasattr(sys, 'pypy_version_info') and PY37 - prefix = not name.endswith(('_user', '_home')) - pypy_name = 'pypy' + '_nt' * (os.name == 'nt') - return pypy_name if old_pypy and prefix else name - - class install(Command): description = "install everything from build directory" user_options = [ # Select installation scheme and set base director(y|ies) - ('prefix=', None, "installation prefix"), - ('exec-prefix=', None, "(Unix only) prefix for platform-specific files"), - ('home=', None, "(Unix only) home directory to install under"), + ('prefix=', None, + "installation prefix"), + ('exec-prefix=', None, + "(Unix only) prefix for platform-specific files"), + ('home=', None, + "(Unix only) home directory to install under"), + # Or, just set the base director(y|ies) - ( - 'install-base=', - None, - "base installation directory (instead of --prefix or --home)", - ), - ( - 'install-platbase=', - None, - "base installation directory for platform-specific files " - + "(instead of --exec-prefix or --home)", - ), - ('root=', None, "install everything relative to this alternate root directory"), + ('install-base=', None, + "base installation directory (instead of --prefix or --home)"), + ('install-platbase=', None, + "base installation directory for platform-specific files " + + "(instead of --exec-prefix or --home)"), + ('root=', None, + "install everything relative to this alternate root directory"), + # Or, explicitly set the installation scheme - ( - 'install-purelib=', - None, - "installation directory for pure Python module distributions", - ), - ( - 'install-platlib=', - None, - "installation directory for non-pure module distributions", - ), - ( - 'install-lib=', - None, - "installation directory for all module distributions " - + "(overrides --install-purelib and --install-platlib)", - ), - ('install-headers=', None, "installation directory for C/C++ headers"), - ('install-scripts=', None, "installation directory for Python scripts"), - ('install-data=', None, "installation directory for data files"), + ('install-purelib=', None, + "installation directory for pure Python module distributions"), + ('install-platlib=', None, + "installation directory for non-pure module distributions"), + ('install-lib=', None, + "installation directory for all module distributions " + + "(overrides --install-purelib and --install-platlib)"), + + ('install-headers=', None, + "installation directory for C/C++ headers"), + ('install-scripts=', None, + "installation directory for Python scripts"), + ('install-data=', None, + "installation directory for data files"), + # Byte-compilation options -- see install_lib.py for details, as # these are duplicated from there (but only install_lib does # anything with them). 
('compile', 'c', "compile .py to .pyc [default]"), ('no-compile', None, "don't compile .py files"), - ( - 'optimize=', - 'O', - "also compile with optimization: -O1 for \"python -O\", " - "-O2 for \"python -OO\", and -O0 to disable [default: -O0]", - ), + ('optimize=', 'O', + "also compile with optimization: -O1 for \"python -O\", " + "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"), + # Miscellaneous control options - ('force', 'f', "force installation (overwrite any existing files)"), - ('skip-build', None, "skip rebuilding everything (for testing/debugging)"), + ('force', 'f', + "force installation (overwrite any existing files)"), + ('skip-build', None, + "skip rebuilding everything (for testing/debugging)"), + # Where to install documentation (eventually!) - # ('doc-format=', None, "format of documentation to generate"), - # ('install-man=', None, "directory for Unix man pages"), - # ('install-html=', None, "directory for HTML documentation"), - # ('install-info=', None, "directory for GNU info files"), - ('record=', None, "filename in which to record list of installed files"), - ] + #('doc-format=', None, "format of documentation to generate"), + #('install-man=', None, "directory for Unix man pages"), + #('install-html=', None, "directory for HTML documentation"), + #('install-info=', None, "directory for GNU info files"), + + ('record=', None, + "filename in which to record list of installed files"), + ] boolean_options = ['compile', 'force', 'skip-build'] if HAS_USER_SITE: - user_options.append( - ('user', None, "install in user site-package '%s'" % USER_SITE) - ) + user_options.append(('user', None, + "install in user site-package '%s'" % USER_SITE)) boolean_options.append('user') - negative_opt = {'no-compile': 'compile'} + negative_opt = {'no-compile' : 'compile'} + def initialize_options(self): """Initializes options.""" @@ -273,10 +211,10 @@ def initialize_options(self): # supplied by the user, they are filled in using the installation # scheme implied by prefix/exec-prefix/home and the contents of # that installation scheme. - self.install_purelib = None # for pure module distributions - self.install_platlib = None # non-pure (dists w/ extensions) - self.install_headers = None # for C/C++ headers - self.install_lib = None # set to either purelib or platlib + self.install_purelib = None # for pure module distributions + self.install_platlib = None # non-pure (dists w/ extensions) + self.install_headers = None # for C/C++ headers + self.install_lib = None # set to either purelib or platlib self.install_scripts = None self.install_data = None self.install_userbase = USER_BASE @@ -318,19 +256,20 @@ def initialize_options(self): # Not defined yet because we don't know anything about # documentation yet. - # self.install_man = None - # self.install_html = None - # self.install_info = None + #self.install_man = None + #self.install_html = None + #self.install_info = None self.record = None + # -- Option finalizing methods ------------------------------------- # (This is rather more involved than for most commands, # because this is where the policy for installing third- # party Python modules on various platforms given a wide # array of user input is decided. Yes, it's quite complex!) 
- def finalize_options(self): # noqa: C901 + def finalize_options(self): """Finalizes options.""" # This method (and its helpers, like 'finalize_unix()', # 'finalize_other()', and 'select_scheme()') is where the default @@ -346,30 +285,20 @@ def finalize_options(self): # noqa: C901 # Check for errors/inconsistencies in the options; first, stuff # that's wrong on any platform. - if (self.prefix or self.exec_prefix or self.home) and ( - self.install_base or self.install_platbase - ): + if ((self.prefix or self.exec_prefix or self.home) and + (self.install_base or self.install_platbase)): raise DistutilsOptionError( - "must supply either prefix/exec-prefix/home or " - + "install-base/install-platbase -- not both" - ) + "must supply either prefix/exec-prefix/home or " + + "install-base/install-platbase -- not both") if self.home and (self.prefix or self.exec_prefix): raise DistutilsOptionError( - "must supply either home or prefix/exec-prefix -- not both" - ) + "must supply either home or prefix/exec-prefix -- not both") - if self.user and ( - self.prefix - or self.exec_prefix - or self.home - or self.install_base - or self.install_platbase - ): - raise DistutilsOptionError( - "can't combine user with prefix, " - "exec_prefix/home, or install_(plat)base" - ) + if self.user and (self.prefix or self.exec_prefix or self.home or + self.install_base or self.install_platbase): + raise DistutilsOptionError("can't combine user with prefix, " + "exec_prefix/home, or install_(plat)base") # Next, stuff that's wrong (or dubious) only on certain platforms. if os.name != "posix": @@ -406,36 +335,25 @@ def finalize_options(self): # noqa: C901 except AttributeError: # sys.abiflags may not be defined on all platforms. abiflags = '' - local_vars = { - 'dist_name': self.distribution.get_name(), - 'dist_version': self.distribution.get_version(), - 'dist_fullname': self.distribution.get_fullname(), - 'py_version': py_version, - 'py_version_short': '%d.%d' % sys.version_info[:2], - 'py_version_nodot': '%d%d' % sys.version_info[:2], - 'sys_prefix': prefix, - 'prefix': prefix, - 'sys_exec_prefix': exec_prefix, - 'exec_prefix': exec_prefix, - 'abiflags': abiflags, - 'platlibdir': getattr(sys, 'platlibdir', 'lib'), - 'implementation_lower': _get_implementation().lower(), - 'implementation': _get_implementation(), - } - - # vars for compatibility on older Pythons - compat_vars = dict( - # Python 3.9 and earlier - py_version_nodot_plat=getattr(sys, 'winver', '').replace('.', ''), - ) + self.config_vars = {'dist_name': self.distribution.get_name(), + 'dist_version': self.distribution.get_version(), + 'dist_fullname': self.distribution.get_fullname(), + 'py_version': py_version, + 'py_version_short': '%d.%d' % sys.version_info[:2], + 'py_version_nodot': '%d%d' % sys.version_info[:2], + 'sys_prefix': prefix, + 'prefix': prefix, + 'sys_exec_prefix': exec_prefix, + 'exec_prefix': exec_prefix, + 'abiflags': abiflags, + 'platlibdir': getattr(sys, 'platlibdir', 'lib'), + 'implementation_lower': _get_implementation().lower(), + 'implementation': _get_implementation(), + } if HAS_USER_SITE: - local_vars['userbase'] = self.install_userbase - local_vars['usersite'] = self.install_usersite - - self.config_vars = _collections.DictStack( - [fw.vars(), compat_vars, sysconfig.get_config_vars(), local_vars] - ) + self.config_vars['userbase'] = self.install_userbase + self.config_vars['usersite'] = self.install_usersite self.expand_basedirs() @@ -443,14 +361,15 @@ def finalize_options(self): # noqa: C901 # Now define config vars for the 
base directories so we can expand # everything else. - local_vars['base'] = self.install_base - local_vars['platbase'] = self.install_platbase + self.config_vars['base'] = self.install_base + self.config_vars['platbase'] = self.install_platbase + self.config_vars['installed_base'] = ( + sysconfig.get_config_vars()['installed_base']) if DEBUG: from pprint import pprint - print("config vars:") - pprint(dict(self.config_vars)) + pprint(self.config_vars) # Expand "~" and configuration variables in the installation # directories. @@ -467,23 +386,17 @@ def finalize_options(self): # noqa: C901 # module distribution is pure or not. Of course, if the user # already specified install_lib, use their selection. if self.install_lib is None: - if self.distribution.has_ext_modules(): # has extensions: non-pure + if self.distribution.has_ext_modules(): # has extensions: non-pure self.install_lib = self.install_platlib else: self.install_lib = self.install_purelib + # Convert directories from Unix /-separated syntax to the local # convention. - self.convert_paths( - 'lib', - 'purelib', - 'platlib', - 'scripts', - 'data', - 'headers', - 'userbase', - 'usersite', - ) + self.convert_paths('lib', 'purelib', 'platlib', + 'scripts', 'data', 'headers', + 'userbase', 'usersite') # Deprecated # Well, we're not actually fully completely finalized yet: we still @@ -491,22 +404,21 @@ def finalize_options(self): # noqa: C901 # non-packagized module distributions (hello, Numerical Python!) to # get their own directories. self.handle_extra_path() - self.install_libbase = self.install_lib # needed for .pth file + self.install_libbase = self.install_lib # needed for .pth file self.install_lib = os.path.join(self.install_lib, self.extra_dirs) # If a new root directory was supplied, make all the installation # dirs relative to it. if self.root is not None: - self.change_roots( - 'libbase', 'lib', 'purelib', 'platlib', 'scripts', 'data', 'headers' - ) + self.change_roots('libbase', 'lib', 'purelib', 'platlib', + 'scripts', 'data', 'headers') self.dump_dirs("after prepending root") # Find out the build directories, ie. where to install from. - self.set_undefined_options( - 'build', ('build_base', 'build_base'), ('build_lib', 'build_lib') - ) + self.set_undefined_options('build', + ('build_base', 'build_base'), + ('build_lib', 'build_lib')) # Punt on doc directories for now -- after all, we're punting on # documentation completely! 
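
The brace-style scheme templates earlier in this file are expanded against config_vars (then run through os.path.expanduser); the substitution step is essentially str.format_map. For example, with made-up values:

template = '{base}/lib/{implementation_lower}{py_version_short}/site-packages'
print(template.format_map({
    'base': '/usr/local',
    'implementation_lower': 'python',
    'py_version_short': '3.10',
}))
# /usr/local/lib/python3.10/site-packages
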
@@ -516,7 +428,6 @@ def dump_dirs(self, msg): if not DEBUG: return from distutils.fancy_getopt import longopt_xlate - log.debug(msg + ":") for opt in self.user_options: opt_name = opt[0] @@ -534,26 +445,21 @@ def dump_dirs(self, msg): def finalize_unix(self): """Finalizes options for posix platforms.""" if self.install_base is not None or self.install_platbase is not None: - incomplete_scheme = ( - ( - self.install_lib is None - and self.install_purelib is None - and self.install_platlib is None - ) - or self.install_headers is None - or self.install_scripts is None - or self.install_data is None - ) - if incomplete_scheme: + if ((self.install_lib is None and + self.install_purelib is None and + self.install_platlib is None) or + self.install_headers is None or + self.install_scripts is None or + self.install_data is None): raise DistutilsOptionError( - "install-base or install-platbase supplied, but " - "installation scheme is incomplete" - ) + "install-base or install-platbase supplied, but " + "installation scheme is incomplete") return if self.user: if self.install_userbase is None: - raise DistutilsPlatformError("User base directory is not specified") + raise DistutilsPlatformError( + "User base directory is not specified") self.install_base = self.install_platbase = self.install_userbase self.select_scheme("posix_user") elif self.home is not None: @@ -563,14 +469,15 @@ def finalize_unix(self): if self.prefix is None: if self.exec_prefix is not None: raise DistutilsOptionError( - "must not supply exec-prefix without prefix" - ) + "must not supply exec-prefix without prefix") # Allow Fedora to add components to the prefix _prefix_addition = getattr(sysconfig, '_prefix_addition', "") - self.prefix = os.path.normpath(sys.prefix) + _prefix_addition - self.exec_prefix = os.path.normpath(sys.exec_prefix) + _prefix_addition + self.prefix = ( + os.path.normpath(sys.prefix) + _prefix_addition) + self.exec_prefix = ( + os.path.normpath(sys.exec_prefix) + _prefix_addition) else: if self.exec_prefix is None: @@ -584,7 +491,8 @@ def finalize_other(self): """Finalizes options for non-posix platforms""" if self.user: if self.install_userbase is None: - raise DistutilsPlatformError("User base directory is not specified") + raise DistutilsPlatformError( + "User base directory is not specified") self.install_base = self.install_platbase = self.install_userbase self.select_scheme(os.name + "_user") elif self.home is not None: @@ -599,11 +507,23 @@ def finalize_other(self): self.select_scheme(os.name) except KeyError: raise DistutilsPlatformError( - "I don't know how to install stuff on '%s'" % os.name - ) + "I don't know how to install stuff on '%s'" % os.name) def select_scheme(self, name): - _select_scheme(self, name) + """Sets the install directories by applying the install schemes.""" + # it's the caller's problem if they supply a bad name! 
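
Before the body of select_scheme continues below: the name it receives follows the branches of finalize_unix/finalize_other above, roughly this sketch:

def pick_scheme_name(user, home, os_name='posix'):
    # mirrors finalize_unix / finalize_other; prefix installs fall through
    if user:
        return os_name + '_user'   # e.g. 'posix_user', 'nt_user'
    if home is not None:
        return 'posix_home'        # home installs always use the posix_home scheme
    return 'posix_prefix' if os_name == 'posix' else os_name
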
+ if (hasattr(sys, 'pypy_version_info') and + sys.version_info < (3, 8) and + not name.endswith(('_user', '_home'))): + if os.name == 'nt': + name = 'pypy_nt' + else: + name = 'pypy' + scheme = _load_schemes()[name] + for key in SCHEME_KEYS: + attrname = 'install_' + key + if getattr(self, attrname) is None: + setattr(self, attrname, scheme[key]) def _expand_attrs(self, attrs): for attr in attrs: @@ -621,16 +541,9 @@ def expand_basedirs(self): def expand_dirs(self): """Calls `os.path.expanduser` on install dirs.""" - self._expand_attrs( - [ - 'install_purelib', - 'install_platlib', - 'install_lib', - 'install_headers', - 'install_scripts', - 'install_data', - ] - ) + self._expand_attrs(['install_purelib', 'install_platlib', + 'install_lib', 'install_headers', + 'install_scripts', 'install_data',]) def convert_paths(self, *names): """Call `convert_path` over `names`.""" @@ -657,9 +570,8 @@ def handle_extra_path(self): path_file, extra_dirs = self.extra_path else: raise DistutilsOptionError( - "'extra_path' option must be a list, tuple, or " - "comma-separated string with 1 or 2 elements" - ) + "'extra_path' option must be a list, tuple, or " + "comma-separated string with 1 or 2 elements") # convert to local form in case Unix notation used (as it # should be in setup scripts) @@ -685,7 +597,7 @@ def create_home_path(self): return home = convert_path(os.path.expanduser("~")) for name, path in self.config_vars.items(): - if str(path).startswith(home) and not os.path.isdir(path): + if path.startswith(home) and not os.path.isdir(path): self.debug_print("os.makedirs('%s', 0o700)" % path) os.makedirs(path, 0o700) @@ -702,7 +614,8 @@ def run(self): # internally, and not to sys.path, so we don't check the platform # matches what we are running. if self.warn_dir and build_plat != get_platform(): - raise DistutilsPlatformError("Can't install when " "cross-compiling") + raise DistutilsPlatformError("Can't install when " + "cross-compiling") # Run all sub-commands (at least those that need to be run) for cmd_name in self.get_sub_commands(): @@ -714,43 +627,38 @@ def run(self): # write list of installed files, if requested. 
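
The record handling just below first rebases every path under --root (via distutils.util.change_root during finalization) and then strips that prefix off again when writing the record file; on POSIX, change_root is a plain prefix graft:

from distutils.util import change_root

staged = change_root('/tmp/stage', '/usr/local/lib/python3.10/site-packages')
print(staged)                       # /tmp/stage/usr/local/lib/python3.10/site-packages
print(staged[len('/tmp/stage'):])   # back to /usr/local/... -- what run() records
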
if self.record: outputs = self.get_outputs() - if self.root: # strip any package prefix + if self.root: # strip any package prefix root_len = len(self.root) for counter in range(len(outputs)): outputs[counter] = outputs[counter][root_len:] - self.execute( - write_file, - (self.record, outputs), - "writing list of installed files to '%s'" % self.record, - ) + self.execute(write_file, + (self.record, outputs), + "writing list of installed files to '%s'" % + self.record) sys_path = map(os.path.normpath, sys.path) sys_path = map(os.path.normcase, sys_path) install_lib = os.path.normcase(os.path.normpath(self.install_lib)) - if ( - self.warn_dir - and not (self.path_file and self.install_path_file) - and install_lib not in sys_path - ): - log.debug( - ( - "modules installed to '%s', which is not in " - "Python's module search path (sys.path) -- " - "you'll have to change the search path yourself" - ), - self.install_lib, - ) + if (self.warn_dir and + not (self.path_file and self.install_path_file) and + install_lib not in sys_path): + log.debug(("modules installed to '%s', which is not in " + "Python's module search path (sys.path) -- " + "you'll have to change the search path yourself"), + self.install_lib) def create_path_file(self): """Creates the .pth file""" - filename = os.path.join(self.install_libbase, self.path_file + ".pth") + filename = os.path.join(self.install_libbase, + self.path_file + ".pth") if self.install_path_file: - self.execute( - write_file, (filename, [self.extra_dirs]), "creating %s" % filename - ) + self.execute(write_file, + (filename, [self.extra_dirs]), + "creating %s" % filename) else: self.warn("path file '%s' not created" % filename) + # -- Reporting methods --------------------------------------------- def get_outputs(self): @@ -765,7 +673,8 @@ def get_outputs(self): outputs.append(filename) if self.path_file and self.install_path_file: - outputs.append(os.path.join(self.install_libbase, self.path_file + ".pth")) + outputs.append(os.path.join(self.install_libbase, + self.path_file + ".pth")) return outputs @@ -784,9 +693,8 @@ def get_inputs(self): def has_lib(self): """Returns true if the current distribution has any Python modules to install.""" - return ( - self.distribution.has_pure_modules() or self.distribution.has_ext_modules() - ) + return (self.distribution.has_pure_modules() or + self.distribution.has_ext_modules()) def has_headers(self): """Returns true if the current distribution has any headers to @@ -805,10 +713,9 @@ def has_data(self): # 'sub_commands': a list of commands this command might have to run to # get its work done. See cmd.py for more info. 
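
create_path_file above amounts to writing a one-line text file that site.py later folds into sys.path; a sketch with hypothetical names:

import os

install_libbase = '.'                         # hypothetical; really the site-packages dir
path_file, extra_dirs = 'mypkg', 'mypkg-1.0'  # hypothetical values
with open(os.path.join(install_libbase, path_file + '.pth'), 'w') as f:
    f.write(extra_dirs + '\n')  # site.py appends each listed directory to sys.path
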
- sub_commands = [ - ('install_lib', has_lib), - ('install_headers', has_headers), - ('install_scripts', has_scripts), - ('install_data', has_data), - ('install_egg_info', lambda self: True), - ] + sub_commands = [('install_lib', has_lib), + ('install_headers', has_headers), + ('install_scripts', has_scripts), + ('install_data', has_data), + ('install_egg_info', lambda self:True), + ] diff --git a/venv/lib/python3.10/site-packages/setuptools/_distutils/command/install_data.py b/venv/lib/python3.10/site-packages/setuptools/_distutils/command/install_data.py index 23d91ad..947cd76 100644 --- a/venv/lib/python3.10/site-packages/setuptools/_distutils/command/install_data.py +++ b/venv/lib/python3.10/site-packages/setuptools/_distutils/command/install_data.py @@ -9,21 +9,18 @@ from distutils.core import Command from distutils.util import change_root, convert_path - class install_data(Command): description = "install data files" user_options = [ - ( - 'install-dir=', - 'd', - "base directory for installing data files " - "(default: installation base dir)", - ), - ('root=', None, "install everything relative to this alternate root directory"), + ('install-dir=', 'd', + "base directory for installing data files " + "(default: installation base dir)"), + ('root=', None, + "install everything relative to this alternate root directory"), ('force', 'f', "force installation (overwrite existing files)"), - ] + ] boolean_options = ['force'] @@ -36,12 +33,11 @@ def initialize_options(self): self.warn_dir = 1 def finalize_options(self): - self.set_undefined_options( - 'install', - ('install_data', 'install_dir'), - ('root', 'root'), - ('force', 'force'), - ) + self.set_undefined_options('install', + ('install_data', 'install_dir'), + ('root', 'root'), + ('force', 'force'), + ) def run(self): self.mkpath(self.install_dir) @@ -50,10 +46,9 @@ def run(self): # it's a simple file, so copy it f = convert_path(f) if self.warn_dir: - self.warn( - "setup script did not provide a directory for " - "'%s' -- installing right in '%s'" % (f, self.install_dir) - ) + self.warn("setup script did not provide a directory for " + "'%s' -- installing right in '%s'" % + (f, self.install_dir)) (out, _) = self.copy_file(f, self.install_dir) self.outfiles.append(out) else: diff --git a/venv/lib/python3.10/site-packages/setuptools/_distutils/command/install_egg_info.py b/venv/lib/python3.10/site-packages/setuptools/_distutils/command/install_egg_info.py index d5e68a6..adc0323 100644 --- a/venv/lib/python3.10/site-packages/setuptools/_distutils/command/install_egg_info.py +++ b/venv/lib/python3.10/site-packages/setuptools/_distutils/command/install_egg_info.py @@ -1,17 +1,12 @@ -""" -distutils.command.install_egg_info +"""distutils.command.install_egg_info Implements the Distutils 'install_egg_info' command, for installing -a package's PKG-INFO metadata. 
-""" +a package's PKG-INFO metadata.""" -import os -import sys -import re from distutils.cmd import Command from distutils import log, dir_util - +import os, sys, re class install_egg_info(Command): """Install an .egg-info file for the package""" @@ -33,11 +28,11 @@ def basename(self): return "%s-%s-py%d.%d.egg-info" % ( to_filename(safe_name(self.distribution.get_name())), to_filename(safe_version(self.distribution.get_version())), - *sys.version_info[:2], + *sys.version_info[:2] ) def finalize_options(self): - self.set_undefined_options('install_lib', ('install_dir', 'install_dir')) + self.set_undefined_options('install_lib',('install_dir','install_dir')) self.target = os.path.join(self.install_dir, self.basename) self.outputs = [self.target] @@ -46,11 +41,10 @@ def run(self): if os.path.isdir(target) and not os.path.islink(target): dir_util.remove_tree(target, dry_run=self.dry_run) elif os.path.exists(target): - self.execute(os.unlink, (self.target,), "Removing " + target) + self.execute(os.unlink,(self.target,),"Removing "+target) elif not os.path.isdir(self.install_dir): - self.execute( - os.makedirs, (self.install_dir,), "Creating " + self.install_dir - ) + self.execute(os.makedirs, (self.install_dir,), + "Creating "+self.install_dir) log.info("Writing %s", target) if not self.dry_run: with open(target, 'w', encoding='UTF-8') as f: @@ -64,7 +58,6 @@ def get_outputs(self): # can be replaced by importing them from pkg_resources once it is included # in the stdlib. - def safe_name(name): """Convert an arbitrary string to a standard distribution name @@ -79,7 +72,7 @@ def safe_version(version): Spaces become dots, and all other non-alphanumeric characters become dashes, with runs of multiple dashes condensed to a single dash. """ - version = version.replace(' ', '.') + version = version.replace(' ','.') return re.sub('[^A-Za-z0-9.]+', '-', version) @@ -88,4 +81,4 @@ def to_filename(name): Any '-' characters are currently replaced with '_'. 
""" - return name.replace('-', '_') + return name.replace('-','_') diff --git a/venv/lib/python3.10/site-packages/setuptools/_distutils/command/install_headers.py b/venv/lib/python3.10/site-packages/setuptools/_distutils/command/install_headers.py index 87046ab..9bb0b18 100644 --- a/venv/lib/python3.10/site-packages/setuptools/_distutils/command/install_headers.py +++ b/venv/lib/python3.10/site-packages/setuptools/_distutils/command/install_headers.py @@ -11,10 +11,11 @@ class install_headers(Command): description = "install C/C++ header files" - user_options = [ - ('install-dir=', 'd', "directory to install header files to"), - ('force', 'f', "force installation (overwrite existing files)"), - ] + user_options = [('install-dir=', 'd', + "directory to install header files to"), + ('force', 'f', + "force installation (overwrite existing files)"), + ] boolean_options = ['force'] @@ -24,9 +25,10 @@ def initialize_options(self): self.outfiles = [] def finalize_options(self): - self.set_undefined_options( - 'install', ('install_headers', 'install_dir'), ('force', 'force') - ) + self.set_undefined_options('install', + ('install_headers', 'install_dir'), + ('force', 'force')) + def run(self): headers = self.distribution.headers diff --git a/venv/lib/python3.10/site-packages/setuptools/_distutils/command/install_lib.py b/venv/lib/python3.10/site-packages/setuptools/_distutils/command/install_lib.py index ad3089c..6154cf0 100644 --- a/venv/lib/python3.10/site-packages/setuptools/_distutils/command/install_lib.py +++ b/venv/lib/python3.10/site-packages/setuptools/_distutils/command/install_lib.py @@ -14,7 +14,6 @@ # Extension for Python source files. PYTHON_SOURCE_EXTENSION = ".py" - class install_lib(Command): description = "install all Python modules (extensions and pure Python)" @@ -36,21 +35,18 @@ class install_lib(Command): user_options = [ ('install-dir=', 'd', "directory to install to"), - ('build-dir=', 'b', "build directory (where to install from)"), + ('build-dir=','b', "build directory (where to install from)"), ('force', 'f', "force installation (overwrite existing files)"), ('compile', 'c', "compile .py to .pyc [default]"), ('no-compile', None, "don't compile .py files"), - ( - 'optimize=', - 'O', - "also compile with optimization: -O1 for \"python -O\", " - "-O2 for \"python -OO\", and -O0 to disable [default: -O0]", - ), + ('optimize=', 'O', + "also compile with optimization: -O1 for \"python -O\", " + "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"), ('skip-build', None, "skip the build steps"), - ] + ] boolean_options = ['force', 'compile', 'skip-build'] - negative_opt = {'no-compile': 'compile'} + negative_opt = {'no-compile' : 'compile'} def initialize_options(self): # let the 'install' command dictate our installation directory @@ -65,15 +61,14 @@ def finalize_options(self): # Get all the information we need to install pure Python modules # from the umbrella 'install' command -- build (source) directory, # install (target) directory, and whether to compile .py files. 
- self.set_undefined_options( - 'install', - ('build_lib', 'build_dir'), - ('install_lib', 'install_dir'), - ('force', 'force'), - ('compile', 'compile'), - ('optimize', 'optimize'), - ('skip_build', 'skip_build'), - ) + self.set_undefined_options('install', + ('build_lib', 'build_dir'), + ('install_lib', 'install_dir'), + ('force', 'force'), + ('compile', 'compile'), + ('optimize', 'optimize'), + ('skip_build', 'skip_build'), + ) if self.compile is None: self.compile = True @@ -115,9 +110,8 @@ def install(self): if os.path.isdir(self.build_dir): outfiles = self.copy_tree(self.build_dir, self.install_dir) else: - self.warn( - "'%s' does not exist -- no Python modules to install" % self.build_dir - ) + self.warn("'%s' does not exist -- no Python modules to install" % + self.build_dir) return return outfiles @@ -135,22 +129,14 @@ def byte_compile(self, files): install_root = self.get_finalized_command('install').root if self.compile: - byte_compile( - files, - optimize=0, - force=self.force, - prefix=install_root, - dry_run=self.dry_run, - ) + byte_compile(files, optimize=0, + force=self.force, prefix=install_root, + dry_run=self.dry_run) if self.optimize > 0: - byte_compile( - files, - optimize=self.optimize, - force=self.force, - prefix=install_root, - verbose=self.verbose, - dry_run=self.dry_run, - ) + byte_compile(files, optimize=self.optimize, + force=self.force, prefix=install_root, + verbose=self.verbose, dry_run=self.dry_run) + # -- Utility methods ----------------------------------------------- @@ -179,18 +165,15 @@ def _bytecode_filenames(self, py_filenames): if ext != PYTHON_SOURCE_EXTENSION: continue if self.compile: - bytecode_files.append( - importlib.util.cache_from_source(py_file, optimization='') - ) + bytecode_files.append(importlib.util.cache_from_source( + py_file, optimization='')) if self.optimize > 0: - bytecode_files.append( - importlib.util.cache_from_source( - py_file, optimization=self.optimize - ) - ) + bytecode_files.append(importlib.util.cache_from_source( + py_file, optimization=self.optimize)) return bytecode_files + # -- External interface -------------------------------------------- # (called by outsiders) @@ -199,23 +182,19 @@ def get_outputs(self): were actually run. Not affected by the "dry-run" flag or whether modules have actually been built yet. 
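
The two byte_compile calls above differ only in the optimization level; the call shape, with a hypothetical staged file and dry_run=1 so nothing is actually compiled:

from distutils.util import byte_compile

byte_compile(['build/lib/mypkg/__init__.py'],  # hypothetical staged source
             optimize=0, force=1,
             prefix='build/lib/',               # stripped before the path is embedded in the .pyc
             dry_run=1)
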
""" - pure_outputs = self._mutate_outputs( - self.distribution.has_pure_modules(), - 'build_py', - 'build_lib', - self.install_dir, - ) + pure_outputs = \ + self._mutate_outputs(self.distribution.has_pure_modules(), + 'build_py', 'build_lib', + self.install_dir) if self.compile: bytecode_outputs = self._bytecode_filenames(pure_outputs) else: bytecode_outputs = [] - ext_outputs = self._mutate_outputs( - self.distribution.has_ext_modules(), - 'build_ext', - 'build_lib', - self.install_dir, - ) + ext_outputs = \ + self._mutate_outputs(self.distribution.has_ext_modules(), + 'build_ext', 'build_lib', + self.install_dir) return pure_outputs + bytecode_outputs + ext_outputs diff --git a/venv/lib/python3.10/site-packages/setuptools/_distutils/command/install_scripts.py b/venv/lib/python3.10/site-packages/setuptools/_distutils/command/install_scripts.py index f09bd64..31a1130 100644 --- a/venv/lib/python3.10/site-packages/setuptools/_distutils/command/install_scripts.py +++ b/venv/lib/python3.10/site-packages/setuptools/_distutils/command/install_scripts.py @@ -17,7 +17,7 @@ class install_scripts(Command): user_options = [ ('install-dir=', 'd', "directory to install scripts to"), - ('build-dir=', 'b', "build directory (where to install from)"), + ('build-dir=','b', "build directory (where to install from)"), ('force', 'f', "force installation (overwrite existing files)"), ('skip-build', None, "skip the build steps"), ] @@ -32,12 +32,11 @@ def initialize_options(self): def finalize_options(self): self.set_undefined_options('build', ('build_scripts', 'build_dir')) - self.set_undefined_options( - 'install', - ('install_scripts', 'install_dir'), - ('force', 'force'), - ('skip_build', 'skip_build'), - ) + self.set_undefined_options('install', + ('install_scripts', 'install_dir'), + ('force', 'force'), + ('skip_build', 'skip_build'), + ) def run(self): if not self.skip_build: diff --git a/venv/lib/python3.10/site-packages/setuptools/_distutils/command/py37compat.py b/venv/lib/python3.10/site-packages/setuptools/_distutils/command/py37compat.py index aa0c0a7..754715a 100644 --- a/venv/lib/python3.10/site-packages/setuptools/_distutils/command/py37compat.py +++ b/venv/lib/python3.10/site-packages/setuptools/_distutils/command/py37compat.py @@ -7,13 +7,12 @@ def _pythonlib_compat(): library. See pypa/distutils#9. 
""" from distutils import sysconfig - if not sysconfig.get_config_var('Py_ENABLED_SHARED'): return yield 'python{}.{}{}'.format( sys.hexversion >> 24, - (sys.hexversion >> 16) & 0xFF, + (sys.hexversion >> 16) & 0xff, sysconfig.get_config_var('ABIFLAGS'), ) diff --git a/venv/lib/python3.10/site-packages/setuptools/_distutils/command/register.py b/venv/lib/python3.10/site-packages/setuptools/_distutils/command/register.py index c140265..0fac94e 100644 --- a/venv/lib/python3.10/site-packages/setuptools/_distutils/command/register.py +++ b/venv/lib/python3.10/site-packages/setuptools/_distutils/command/register.py @@ -7,30 +7,24 @@ import getpass import io -import urllib.parse -import urllib.request +import urllib.parse, urllib.request from warnings import warn from distutils.core import PyPIRCCommand +from distutils.errors import * from distutils import log - class register(PyPIRCCommand): - description = "register the distribution with the Python package index" + description = ("register the distribution with the Python package index") user_options = PyPIRCCommand.user_options + [ - ('list-classifiers', None, 'list the valid Trove classifiers'), - ( - 'strict', - None, - 'Will stop the registering if the meta-data are not fully compliant', - ), - ] + ('list-classifiers', None, + 'list the valid Trove classifiers'), + ('strict', None , + 'Will stop the registering if the meta-data are not fully compliant') + ] boolean_options = PyPIRCCommand.boolean_options + [ - 'verify', - 'list-classifiers', - 'strict', - ] + 'verify', 'list-classifiers', 'strict'] sub_commands = [('check', lambda self: True)] @@ -42,10 +36,8 @@ def initialize_options(self): def finalize_options(self): PyPIRCCommand.finalize_options(self) # setting options for the `check` subcommand - check_options = { - 'strict': ('register', self.strict), - 'restructuredtext': ('register', 1), - } + check_options = {'strict': ('register', self.strict), + 'restructuredtext': ('register', 1)} self.distribution.command_options['check'] = check_options def run(self): @@ -65,11 +57,8 @@ def run(self): def check_metadata(self): """Deprecated API.""" - warn( - "distutils.command.register.check_metadata is deprecated; " - "use the check command instead", - DeprecationWarning, - ) + warn("distutils.command.register.check_metadata is deprecated, \ + use the check command instead", PendingDeprecationWarning) check = self.distribution.get_command_obj('check') check.ensure_finalized() check.strict = self.strict @@ -77,7 +66,8 @@ def check_metadata(self): check.run() def _set_config(self): - '''Reads the configuration file and set attributes.''' + ''' Reads the configuration file and set attributes. + ''' config = self._read_pypirc() if config != {}: self.username = config['username'] @@ -93,43 +83,45 @@ def _set_config(self): self.has_config = False def classifiers(self): - '''Fetch the list of classifiers from the server.''' - url = self.repository + '?:action=list_classifiers' + ''' Fetch the list of classifiers from the server. + ''' + url = self.repository+'?:action=list_classifiers' response = urllib.request.urlopen(url) log.info(self._read_pypi_response(response)) def verify_metadata(self): - '''Send the metadata to the package index server to be checked.''' + ''' Send the metadata to the package index server to be checked. 
+ ''' # send the info to the server and report the result (code, result) = self.post_to_server(self.build_post_data('verify')) log.info('Server response (%s): %s', code, result) - def send_metadata(self): # noqa: C901 - '''Send the metadata to the package index server. + def send_metadata(self): + ''' Send the metadata to the package index server. - Well, do the following: - 1. figure who the user is, and then - 2. send the data as a Basic auth'ed POST. + Well, do the following: + 1. figure who the user is, and then + 2. send the data as a Basic auth'ed POST. - First we try to read the username/password from $HOME/.pypirc, - which is a ConfigParser-formatted file with a section - [distutils] containing username and password entries (both - in clear text). Eg: + First we try to read the username/password from $HOME/.pypirc, + which is a ConfigParser-formatted file with a section + [distutils] containing username and password entries (both + in clear text). Eg: - [distutils] - index-servers = - pypi + [distutils] + index-servers = + pypi - [pypi] - username: fred - password: sekrit + [pypi] + username: fred + password: sekrit - Otherwise, to figure who the user is, we offer the user three - choices: + Otherwise, to figure who the user is, we offer the user three + choices: - 1. use existing login, - 2. register as a new user, or - 3. set the password to a random string and email the user. + 1. use existing login, + 2. register as a new user, or + 3. set the password to a random string and email the user. ''' # see if we can short-cut and get the username/password from the @@ -145,16 +137,13 @@ def send_metadata(self): # noqa: C901 # get the user's login info choices = '1 2 3 4'.split() while choice not in choices: - self.announce( - '''\ + self.announce('''\ We need to know who you are, so please choose either: 1. use your existing login, 2. register as a new user, 3. have the server generate a new password for you (and email it to you), or 4. quit -Your selection [default 1]: ''', - log.INFO, - ) +Your selection [default 1]: ''', log.INFO) choice = input() if not choice: choice = '1' @@ -173,8 +162,10 @@ def send_metadata(self): # noqa: C901 host = urllib.parse.urlparse(self.repository)[1] auth.add_password(self.realm, host, username, password) # send the info to the server and report the result - code, result = self.post_to_server(self.build_post_data('submit'), auth) - self.announce('Server response ({}): {}'.format(code, result), log.INFO) + code, result = self.post_to_server(self.build_post_data('submit'), + auth) + self.announce('Server response (%s): %s' % (code, result), + log.INFO) # possibly save the login if code == 200: @@ -183,17 +174,10 @@ def send_metadata(self): # noqa: C901 # so the upload command can reuse it self.distribution.password = password else: - self.announce( - ( - 'I can store your PyPI login so future ' - 'submissions will be faster.' 
- ), - log.INFO, - ) - self.announce( - '(the login will be stored in %s)' % self._get_rc_file(), - log.INFO, - ) + self.announce(('I can store your PyPI login so future ' + 'submissions will be faster.'), log.INFO) + self.announce('(the login will be stored in %s)' % \ + self._get_rc_file(), log.INFO) choice = 'X' while choice.lower() not in 'yn': choice = input('Save your login (y/N)?') @@ -224,7 +208,8 @@ def send_metadata(self): # noqa: C901 log.info('Server response (%s): %s', code, result) else: log.info('You will receive an email shortly.') - log.info('Follow the instructions in it to ' 'complete registration.') + log.info(('Follow the instructions in it to ' + 'complete registration.')) elif choice == '3': data = {':action': 'password_reset'} data['email'] = '' @@ -239,7 +224,7 @@ def build_post_data(self, action): meta = self.distribution.metadata data = { ':action': action, - 'metadata_version': '1.0', + 'metadata_version' : '1.0', 'name': meta.get_name(), 'version': meta.get_version(), 'summary': meta.get_description(), @@ -261,12 +246,13 @@ def build_post_data(self, action): data['metadata_version'] = '1.1' return data - def post_to_server(self, data, auth=None): # noqa: C901 - '''Post a query to the server, and return a string response.''' + def post_to_server(self, data, auth=None): + ''' Post a query to the server, and return a string response. + ''' if 'name' in data: - self.announce( - 'Registering {} to {}'.format(data['name'], self.repository), log.INFO - ) + self.announce('Registering %s to %s' % (data['name'], + self.repository), + log.INFO) # Build up the MIME payload for the urllib2 POST data boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254' sep_boundary = '\n--' + boundary @@ -274,12 +260,12 @@ def post_to_server(self, data, auth=None): # noqa: C901 body = io.StringIO() for key, value in data.items(): # handle multiple entries for the same name - if type(value) not in (type([]), type(())): + if type(value) not in (type([]), type( () )): value = [value] for value in value: value = str(value) body.write(sep_boundary) - body.write('\nContent-Disposition: form-data; name="%s"' % key) + body.write('\nContent-Disposition: form-data; name="%s"'%key) body.write("\n\n") body.write(value) if value and value[-1] == '\r': @@ -290,9 +276,8 @@ def post_to_server(self, data, auth=None): # noqa: C901 # build the Request headers = { - 'Content-type': 'multipart/form-data; boundary=%s; charset=utf-8' - % boundary, - 'Content-length': str(len(body)), + 'Content-type': 'multipart/form-data; boundary=%s; charset=utf-8'%boundary, + 'Content-length': str(len(body)) } req = urllib.request.Request(self.repository, body, headers) diff --git a/venv/lib/python3.10/site-packages/setuptools/_distutils/command/sdist.py b/venv/lib/python3.10/site-packages/setuptools/_distutils/command/sdist.py index d6e9489..b4996fc 100644 --- a/venv/lib/python3.10/site-packages/setuptools/_distutils/command/sdist.py +++ b/venv/lib/python3.10/site-packages/setuptools/_distutils/command/sdist.py @@ -15,7 +15,7 @@ from distutils.filelist import FileList from distutils import log from distutils.util import convert_path -from distutils.errors import DistutilsOptionError, DistutilsTemplateError +from distutils.errors import DistutilsTemplateError, DistutilsOptionError def show_formats(): @@ -24,12 +24,13 @@ def show_formats(): """ from distutils.fancy_getopt import FancyGetopt from distutils.archive_util import ARCHIVE_FORMATS - formats = [] for format in ARCHIVE_FORMATS.keys(): - 
formats.append(("formats=" + format, None, ARCHIVE_FORMATS[format][2])) + formats.append(("formats=" + format, None, + ARCHIVE_FORMATS[format][2])) formats.sort() - FancyGetopt(formats).print_help("List of available source distribution formats:") + FancyGetopt(formats).print_help( + "List of available source distribution formats:") class sdist(Command): @@ -43,77 +44,55 @@ def checking_metadata(self): return self.metadata_check user_options = [ - ('template=', 't', "name of manifest template file [default: MANIFEST.in]"), - ('manifest=', 'm', "name of manifest file [default: MANIFEST]"), - ( - 'use-defaults', - None, - "include the default file set in the manifest " - "[default; disable with --no-defaults]", - ), - ('no-defaults', None, "don't include the default file set"), - ( - 'prune', - None, - "specifically exclude files/directories that should not be " - "distributed (build tree, RCS/CVS dirs, etc.) " - "[default; disable with --no-prune]", - ), - ('no-prune', None, "don't automatically exclude anything"), - ( - 'manifest-only', - 'o', - "just regenerate the manifest and then stop " "(implies --force-manifest)", - ), - ( - 'force-manifest', - 'f', - "forcibly regenerate the manifest and carry on as usual. " - "Deprecated: now the manifest is always regenerated.", - ), - ('formats=', None, "formats for source distribution (comma-separated list)"), - ( - 'keep-temp', - 'k', - "keep the distribution tree around after creating " + "archive file(s)", - ), - ( - 'dist-dir=', - 'd', - "directory to put the source distribution archive(s) in " "[default: dist]", - ), - ( - 'metadata-check', - None, - "Ensure that all required elements of meta-data " - "are supplied. Warn if any missing. [default]", - ), - ( - 'owner=', - 'u', - "Owner name used when creating a tar file [default: current user]", - ), - ( - 'group=', - 'g', - "Group name used when creating a tar file [default: current group]", - ), - ] - - boolean_options = [ - 'use-defaults', - 'prune', - 'manifest-only', - 'force-manifest', - 'keep-temp', - 'metadata-check', - ] + ('template=', 't', + "name of manifest template file [default: MANIFEST.in]"), + ('manifest=', 'm', + "name of manifest file [default: MANIFEST]"), + ('use-defaults', None, + "include the default file set in the manifest " + "[default; disable with --no-defaults]"), + ('no-defaults', None, + "don't include the default file set"), + ('prune', None, + "specifically exclude files/directories that should not be " + "distributed (build tree, RCS/CVS dirs, etc.) " + "[default; disable with --no-prune]"), + ('no-prune', None, + "don't automatically exclude anything"), + ('manifest-only', 'o', + "just regenerate the manifest and then stop " + "(implies --force-manifest)"), + ('force-manifest', 'f', + "forcibly regenerate the manifest and carry on as usual. " + "Deprecated: now the manifest is always regenerated."), + ('formats=', None, + "formats for source distribution (comma-separated list)"), + ('keep-temp', 'k', + "keep the distribution tree around after creating " + + "archive file(s)"), + ('dist-dir=', 'd', + "directory to put the source distribution archive(s) in " + "[default: dist]"), + ('metadata-check', None, + "Ensure that all required elements of meta-data " + "are supplied. Warn if any missing. 
[default]"), + ('owner=', 'u', + "Owner name used when creating a tar file [default: current user]"), + ('group=', 'g', + "Group name used when creating a tar file [default: current group]"), + ] + + boolean_options = ['use-defaults', 'prune', + 'manifest-only', 'force-manifest', + 'keep-temp', 'metadata-check'] help_options = [ - ('help-formats', None, "list available distribution formats", show_formats), - ] + ('help-formats', None, + "list available distribution formats", show_formats), + ] - negative_opt = {'no-defaults': 'use-defaults', 'no-prune': 'prune'} + negative_opt = {'no-defaults': 'use-defaults', + 'no-prune': 'prune' } sub_commands = [('check', checking_metadata)] @@ -152,7 +131,8 @@ def finalize_options(self): bad_format = archive_util.check_archive_formats(self.formats) if bad_format: - raise DistutilsOptionError("unknown archive format '%s'" % bad_format) + raise DistutilsOptionError( + "unknown archive format '%s'" % bad_format) if self.dist_dir is None: self.dist_dir = "dist" @@ -181,11 +161,8 @@ def run(self): def check_metadata(self): """Deprecated API.""" - warn( - "distutils.command.sdist.check_metadata is deprecated, \ - use the check command instead", - PendingDeprecationWarning, - ) + warn("distutils.command.sdist.check_metadata is deprecated, \ + use the check command instead", PendingDeprecationWarning) check = self.distribution.get_command_obj('check') check.ensure_finalized() check.run() @@ -212,10 +189,9 @@ def get_file_list(self): return if not template_exists: - self.warn( - ("manifest template '%s' does not exist " + "(using default file list)") - % self.template - ) + self.warn(("manifest template '%s' does not exist " + + "(using default file list)") % + self.template) self.filelist.findall() if self.use_defaults: @@ -283,9 +259,8 @@ def _add_defaults_standards(self): break if not got_it: - self.warn( - "standard file not found: should have one of " + ', '.join(alts) - ) + self.warn("standard file not found: should have one of " + + ', '.join(alts)) else: if self._cs_path_exists(fn): self.filelist.append(fn) @@ -353,20 +328,14 @@ def read_template(self): 'self.filelist', which updates itself accordingly. """ log.info("reading manifest template '%s'", self.template) - template = TextFile( - self.template, - strip_comments=1, - skip_blanks=1, - join_lines=1, - lstrip_ws=1, - rstrip_ws=1, - collapse_join=1, - ) + template = TextFile(self.template, strip_comments=1, skip_blanks=1, + join_lines=1, lstrip_ws=1, rstrip_ws=1, + collapse_join=1) try: while True: line = template.readline() - if line is None: # end of file + if line is None: # end of file break try: @@ -375,10 +344,9 @@ def read_template(self): # malformed lines, or a ValueError from the lower-level # convert_path function except (DistutilsTemplateError, ValueError) as msg: - self.warn( - "%s, line %d: %s" - % (template.filename, template.current_line, msg) - ) + self.warn("%s, line %d: %s" % (template.filename, + template.current_line, + msg)) finally: template.close() @@ -401,8 +369,9 @@ def prune_file_list(self): else: seps = '/' - vcs_dirs = ['RCS', 'CVS', r'\.svn', r'\.hg', r'\.git', r'\.bzr', '_darcs'] - vcs_ptrn = r'(^|{})({})({}).*'.format(seps, '|'.join(vcs_dirs), seps) + vcs_dirs = ['RCS', 'CVS', r'\.svn', r'\.hg', r'\.git', r'\.bzr', + '_darcs'] + vcs_ptrn = r'(^|%s)(%s)(%s).*' % (seps, '|'.join(vcs_dirs), seps) self.filelist.exclude_pattern(vcs_ptrn, is_regex=1) def write_manifest(self): @@ -411,19 +380,14 @@ def write_manifest(self): named by 'self.manifest'. 
""" if self._manifest_is_not_generated(): - log.info( - "not writing to manually maintained " - "manifest file '%s'" % self.manifest - ) + log.info("not writing to manually maintained " + "manifest file '%s'" % self.manifest) return content = self.filelist.files[:] content.insert(0, '# file GENERATED by distutils, do NOT edit') - self.execute( - file_util.write_file, - (self.manifest, content), - "writing manifest file '%s'" % self.manifest, - ) + self.execute(file_util.write_file, (self.manifest, content), + "writing manifest file '%s'" % self.manifest) def _manifest_is_not_generated(self): # check for special comment used in 3.1.3 and higher @@ -473,10 +437,10 @@ def make_release_tree(self, base_dir, files): # out-of-date, because by default we blow away 'base_dir' when # we're done making the distribution archives.) - if hasattr(os, 'link'): # can make hard links on this system + if hasattr(os, 'link'): # can make hard links on this system link = 'hard' msg = "making hard links in %s..." % base_dir - else: # nope, have to copy + else: # nope, have to copy link = None msg = "copying files to %s..." % base_dir @@ -507,15 +471,14 @@ def make_distribution(self): base_name = os.path.join(self.dist_dir, base_dir) self.make_release_tree(base_dir, self.filelist.files) - archive_files = [] # remember names of files we create + archive_files = [] # remember names of files we create # tar archive must be created last to avoid overwrite and remove if 'tar' in self.formats: self.formats.append(self.formats.pop(self.formats.index('tar'))) for fmt in self.formats: - file = self.make_archive( - base_name, fmt, base_dir=base_dir, owner=self.owner, group=self.group - ) + file = self.make_archive(base_name, fmt, base_dir=base_dir, + owner=self.owner, group=self.group) archive_files.append(file) self.distribution.dist_files.append(('sdist', '', file)) diff --git a/venv/lib/python3.10/site-packages/setuptools/_distutils/command/upload.py b/venv/lib/python3.10/site-packages/setuptools/_distutils/command/upload.py index 6af5394..95e9fda 100644 --- a/venv/lib/python3.10/site-packages/setuptools/_distutils/command/upload.py +++ b/venv/lib/python3.10/site-packages/setuptools/_distutils/command/upload.py @@ -31,9 +31,10 @@ class upload(PyPIRCCommand): description = "upload binary package to PyPI" user_options = PyPIRCCommand.user_options + [ - ('sign', 's', 'sign files to upload using gpg'), + ('sign', 's', + 'sign files to upload using gpg'), ('identity=', 'i', 'GPG identity used to sign files'), - ] + ] boolean_options = PyPIRCCommand.boolean_options + ['sign'] @@ -48,7 +49,9 @@ def initialize_options(self): def finalize_options(self): PyPIRCCommand.finalize_options(self) if self.identity and not self.sign: - raise DistutilsOptionError("Must use --sign for --identity to have meaning") + raise DistutilsOptionError( + "Must use --sign for --identity to have meaning" + ) config = self._read_pypirc() if config != {}: self.username = config['username'] @@ -63,17 +66,16 @@ def finalize_options(self): def run(self): if not self.distribution.dist_files: - msg = ( - "Must create and upload files in one command " - "(e.g. setup.py sdist upload)" - ) + msg = ("Must create and upload files in one command " + "(e.g. 
setup.py sdist upload)") raise DistutilsOptionError(msg) for command, pyversion, filename in self.distribution.dist_files: self.upload_file(command, pyversion, filename) - def upload_file(self, command, pyversion, filename): # noqa: C901 + def upload_file(self, command, pyversion, filename): # Makes sure the repository URL is compliant - schema, netloc, url, params, query, fragments = urlparse(self.repository) + schema, netloc, url, params, query, fragments = \ + urlparse(self.repository) if params or query or fragments: raise AssertionError("Incompatible url %s" % self.repository) @@ -85,11 +87,12 @@ def upload_file(self, command, pyversion, filename): # noqa: C901 gpg_args = ["gpg", "--detach-sign", "-a", filename] if self.identity: gpg_args[2:2] = ["--local-user", self.identity] - spawn(gpg_args, dry_run=self.dry_run) + spawn(gpg_args, + dry_run=self.dry_run) # Fill in the data - send all the meta-data in case we need to # register a new release - f = open(filename, 'rb') + f = open(filename,'rb') try: content = f.read() finally: @@ -100,13 +103,16 @@ def upload_file(self, command, pyversion, filename): # noqa: C901 # action ':action': 'file_upload', 'protocol_version': '1', + # identify release 'name': meta.get_name(), 'version': meta.get_version(), + # file content - 'content': (os.path.basename(filename), content), + 'content': (os.path.basename(filename),content), 'filetype': command, 'pyversion': pyversion, + # additional meta-data 'metadata_version': '1.0', 'summary': meta.get_description(), @@ -123,7 +129,7 @@ def upload_file(self, command, pyversion, filename): # noqa: C901 'provides': meta.get_provides(), 'requires': meta.get_requires(), 'obsoletes': meta.get_obsoletes(), - } + } data['comment'] = '' @@ -139,7 +145,8 @@ def upload_file(self, command, pyversion, filename): # noqa: C901 if self.sign: with open(filename + ".asc", "rb") as f: - data['gpg_signature'] = (os.path.basename(filename) + ".asc", f.read()) + data['gpg_signature'] = (os.path.basename(filename) + ".asc", + f.read()) # set up the authentication user_pass = (self.username + ":" + self.password).encode('ascii') @@ -170,7 +177,7 @@ def upload_file(self, command, pyversion, filename): # noqa: C901 body.write(end_boundary) body = body.getvalue() - msg = "Submitting {} to {}".format(filename, self.repository) + msg = "Submitting %s to %s" % (filename, self.repository) self.announce(msg, log.INFO) # build the Request @@ -180,7 +187,8 @@ def upload_file(self, command, pyversion, filename): # noqa: C901 'Authorization': auth, } - request = Request(self.repository, data=body, headers=headers) + request = Request(self.repository, data=body, + headers=headers) # send the data try: result = urlopen(request) @@ -194,12 +202,13 @@ def upload_file(self, command, pyversion, filename): # noqa: C901 raise if status == 200: - self.announce('Server response ({}): {}'.format(status, reason), log.INFO) + self.announce('Server response (%s): %s' % (status, reason), + log.INFO) if self.show_response: text = self._read_pypi_response(result) msg = '\n'.join(('-' * 75, text, '-' * 75)) self.announce(msg, log.INFO) else: - msg = 'Upload failed ({}): {}'.format(status, reason) + msg = 'Upload failed (%s): %s' % (status, reason) self.announce(msg, log.ERROR) raise DistutilsError(msg) diff --git a/venv/lib/python3.10/site-packages/setuptools/_distutils/config.py b/venv/lib/python3.10/site-packages/setuptools/_distutils/config.py index 6e0c3a7..2171abd 100644 --- a/venv/lib/python3.10/site-packages/setuptools/_distutils/config.py +++ 
b/venv/lib/python3.10/site-packages/setuptools/_distutils/config.py @@ -18,19 +18,20 @@ password:%s """ - class PyPIRCCommand(Command): - """Base command that knows how to handle the .pypirc file""" - + """Base command that knows how to handle the .pypirc file + """ DEFAULT_REPOSITORY = 'https://upload.pypi.org/legacy/' DEFAULT_REALM = 'pypi' repository = None realm = None user_options = [ - ('repository=', 'r', "url of repository [default: %s]" % DEFAULT_REPOSITORY), - ('show-response', None, 'display full response text from server'), - ] + ('repository=', 'r', + "url of repository [default: %s]" % \ + DEFAULT_REPOSITORY), + ('show-response', None, + 'display full response text from server')] boolean_options = ['show-response'] @@ -44,7 +45,7 @@ def _store_pypirc(self, username, password): with os.fdopen(os.open(rc, os.O_CREAT | os.O_WRONLY, 0o600), 'w') as f: f.write(DEFAULT_PYPIRC % (username, password)) - def _read_pypirc(self): # noqa: C901 + def _read_pypirc(self): """Reads the .pypirc file.""" rc = self._get_rc_file() if os.path.exists(rc): @@ -57,11 +58,9 @@ def _read_pypirc(self): # noqa: C901 if 'distutils' in sections: # let's get the list of servers index_servers = config.get('distutils', 'index-servers') - _servers = [ - server.strip() - for server in index_servers.split('\n') - if server.strip() != '' - ] + _servers = [server.strip() for server in + index_servers.split('\n') + if server.strip() != ''] if _servers == []: # nothing set, let's try to get the default pypi if 'pypi' in sections: @@ -75,11 +74,10 @@ def _read_pypirc(self): # noqa: C901 current['username'] = config.get(server, 'username') # optional params - for key, default in ( - ('repository', self.DEFAULT_REPOSITORY), - ('realm', self.DEFAULT_REALM), - ('password', None), - ): + for key, default in (('repository', + self.DEFAULT_REPOSITORY), + ('realm', self.DEFAULT_REALM), + ('password', None)): if config.has_option(server, key): current[key] = config.get(server, key) else: @@ -88,17 +86,13 @@ def _read_pypirc(self): # noqa: C901 # work around people having "repository" for the "pypi" # section of their config set to the HTTP (rather than # HTTPS) URL - if server == 'pypi' and repository in ( - self.DEFAULT_REPOSITORY, - 'pypi', - ): + if (server == 'pypi' and + repository in (self.DEFAULT_REPOSITORY, 'pypi')): current['repository'] = self.DEFAULT_REPOSITORY return current - if ( - current['server'] == repository - or current['repository'] == repository - ): + if (current['server'] == repository or + current['repository'] == repository): return current elif 'server-login' in sections: # old format @@ -107,20 +101,17 @@ def _read_pypirc(self): # noqa: C901 repository = config.get(server, 'repository') else: repository = self.DEFAULT_REPOSITORY - return { - 'username': config.get(server, 'username'), - 'password': config.get(server, 'password'), - 'repository': repository, - 'server': server, - 'realm': self.DEFAULT_REALM, - } + return {'username': config.get(server, 'username'), + 'password': config.get(server, 'password'), + 'repository': repository, + 'server': server, + 'realm': self.DEFAULT_REALM} return {} def _read_pypi_response(self, response): """Read and decode a PyPI HTTP response.""" import cgi - content_type = response.getheader('content-type', 'text/plain') encoding = cgi.parse_header(content_type)[1].get('charset', 'ascii') return response.read().decode(encoding) diff --git a/venv/lib/python3.10/site-packages/setuptools/_distutils/core.py 
b/venv/lib/python3.10/site-packages/setuptools/_distutils/core.py index de13978..f43888e 100644 --- a/venv/lib/python3.10/site-packages/setuptools/_distutils/core.py +++ b/venv/lib/python3.10/site-packages/setuptools/_distutils/core.py @@ -11,12 +11,7 @@ import tokenize from distutils.debug import DEBUG -from distutils.errors import ( - DistutilsSetupError, - DistutilsError, - CCompilerError, - DistutilsArgError, -) +from distutils.errors import * # Mainly import these so setup scripts can "from distutils.core import" them. from distutils.dist import Distribution @@ -24,9 +19,6 @@ from distutils.config import PyPIRCCommand from distutils.extension import Extension - -__all__ = ['Distribution', 'Command', 'PyPIRCCommand', 'Extension', 'setup'] - # This is a barebones help message generated displayed when the user # runs the setup script with no arguments at all. More useful help # is generated with various --help options: global help, list commands, @@ -38,10 +30,9 @@ or: %(script)s cmd --help """ - -def gen_usage(script_name): +def gen_usage (script_name): script = os.path.basename(script_name) - return USAGE % locals() + return USAGE % vars() # Some mild magic to control the behaviour of 'setup()' from 'run_setup()'. @@ -49,51 +40,22 @@ def gen_usage(script_name): _setup_distribution = None # Legal keyword arguments for the setup() function -setup_keywords = ( - 'distclass', - 'script_name', - 'script_args', - 'options', - 'name', - 'version', - 'author', - 'author_email', - 'maintainer', - 'maintainer_email', - 'url', - 'license', - 'description', - 'long_description', - 'keywords', - 'platforms', - 'classifiers', - 'download_url', - 'requires', - 'provides', - 'obsoletes', -) +setup_keywords = ('distclass', 'script_name', 'script_args', 'options', + 'name', 'version', 'author', 'author_email', + 'maintainer', 'maintainer_email', 'url', 'license', + 'description', 'long_description', 'keywords', + 'platforms', 'classifiers', 'download_url', + 'requires', 'provides', 'obsoletes', + ) # Legal keyword arguments for the Extension constructor -extension_keywords = ( - 'name', - 'sources', - 'include_dirs', - 'define_macros', - 'undef_macros', - 'library_dirs', - 'libraries', - 'runtime_library_dirs', - 'extra_objects', - 'extra_compile_args', - 'extra_link_args', - 'swig_opts', - 'export_symbols', - 'depends', - 'language', -) - - -def setup(**attrs): # noqa: C901 +extension_keywords = ('name', 'sources', 'include_dirs', + 'define_macros', 'undef_macros', + 'library_dirs', 'libraries', 'runtime_library_dirs', + 'extra_objects', 'extra_compile_args', 'extra_link_args', + 'swig_opts', 'export_symbols', 'depends', 'language') + +def setup (**attrs): """The gateway to the Distutils: do everything your setup script needs to do, in a highly flexible and user-driven way. 
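# [Illustrative aside] The smallest possible script exercising the setup()
# entry point documented below; the package name and module are placeholders,
# not taken from this repository:
from distutils.core import setup

setup(
    name='example-pkg',      # hypothetical
    version='0.1',
    py_modules=['example'],  # hypothetical single-module distribution
)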
Briefly: create a Distribution instance; find and parse config files; parse the command @@ -138,7 +100,7 @@ class found in 'cmdclass' is used in place of the default, which is if 'script_name' not in attrs: attrs['script_name'] = os.path.basename(sys.argv[0]) - if 'script_args' not in attrs: + if 'script_args' not in attrs: attrs['script_args'] = sys.argv[1:] # Create the Distribution instance, using the remaining arguments @@ -149,7 +111,8 @@ class found in 'cmdclass' is used in place of the default, which is if 'name' not in attrs: raise SystemExit("error in setup command: %s" % msg) else: - raise SystemExit("error in {} setup command: {}".format(attrs['name'], msg)) + raise SystemExit("error in %s setup command: %s" % \ + (attrs['name'], msg)) if _setup_stop_after == "init": return dist @@ -186,11 +149,10 @@ class found in 'cmdclass' is used in place of the default, which is return dist - # setup () -def run_commands(dist): +def run_commands (dist): """Given a Distribution object run all the commands, raising ``SystemExit`` errors in the case of failure. @@ -203,12 +165,13 @@ def run_commands(dist): raise SystemExit("interrupted") except OSError as exc: if DEBUG: - sys.stderr.write("error: {}\n".format(exc)) + sys.stderr.write("error: %s\n" % (exc,)) raise else: - raise SystemExit("error: {}".format(exc)) + raise SystemExit("error: %s" % (exc,)) - except (DistutilsError, CCompilerError) as msg: + except (DistutilsError, + CCompilerError) as msg: if DEBUG: raise else: @@ -217,7 +180,7 @@ def run_commands(dist): return dist -def run_setup(script_name, script_args=None, stop_after="run"): +def run_setup (script_name, script_args=None, stop_after="run"): """Run a setup script in a somewhat controlled environment, and return the Distribution instance that drives things. This is useful if you need to find out the distribution meta-data (passed as @@ -249,7 +212,7 @@ def run_setup(script_name, script_args=None, stop_after="run"): used to drive the Distutils. """ if stop_after not in ('init', 'config', 'commandline', 'run'): - raise ValueError("invalid value for 'stop_after': {!r}".format(stop_after)) + raise ValueError("invalid value for 'stop_after': %r" % (stop_after,)) global _setup_stop_after, _setup_distribution _setup_stop_after = stop_after @@ -274,18 +237,13 @@ def run_setup(script_name, script_args=None, stop_after="run"): pass if _setup_distribution is None: - raise RuntimeError( - ( - "'distutils.core.setup()' was never called -- " - "perhaps '%s' is not a Distutils setup script?" - ) - % script_name - ) + raise RuntimeError(("'distutils.core.setup()' was never called -- " + "perhaps '%s' is not a Distutils setup script?") % \ + script_name) # I wonder if the setup script's namespace -- g and l -- would be of # any interest to callers? - # print "_setup_distribution:", _setup_distribution + #print "_setup_distribution:", _setup_distribution return _setup_distribution - # run_setup () diff --git a/venv/lib/python3.10/site-packages/setuptools/_distutils/cygwinccompiler.py b/venv/lib/python3.10/site-packages/setuptools/_distutils/cygwinccompiler.py index 2c4da5b..ad6cc44 100644 --- a/venv/lib/python3.10/site-packages/setuptools/_distutils/cygwinccompiler.py +++ b/venv/lib/python3.10/site-packages/setuptools/_distutils/cygwinccompiler.py @@ -6,23 +6,60 @@ cygwin in no-cygwin mode). """ +# problems: +# +# * if you use a msvc compiled python version (1.5.2) +# 1. you have to insert a __GNUC__ section in its config.h +# 2. 
you have to generate an import library for its dll +# - create a def-file for python??.dll +# - create an import library using +# dlltool --dllname python15.dll --def python15.def \ +# --output-lib libpython15.a +# +# see also http://starship.python.net/crew/kernr/mingw32/Notes.html +# +# * We put export_symbols in a def-file, and don't use +# --export-all-symbols because it doesn't worked reliable in some +# tested configurations. And because other windows compilers also +# need their symbols specified this no serious problem. +# +# tested configurations: +# +# * cygwin gcc 2.91.57/ld 2.9.4/dllwrap 0.2.4 works +# (after patching python's config.h and for C++ some other include files) +# see also http://starship.python.net/crew/kernr/mingw32/Notes.html +# * mingw32 gcc 2.95.2/ld 2.9.4/dllwrap 0.2.4 works +# (ld doesn't support -shared, so we use dllwrap) +# * cygwin gcc 2.95.2/ld 2.10.90/dllwrap 2.10.90 works now +# - its dllwrap doesn't work, there is a bug in binutils 2.10.90 +# see also http://sources.redhat.com/ml/cygwin/2000-06/msg01274.html +# - using gcc -mdll instead dllwrap doesn't work without -static because +# it tries to link against dlls instead their import libraries. (If +# it finds the dll first.) +# By specifying -static we force ld to link against the import libraries, +# this is windows standard and there are normally not the necessary symbols +# in the dlls. +# *** only the version of June 2000 shows these problems +# * cygwin gcc 3.2/ld 2.13.90 works +# (ld supports -shared) +# * mingw gcc 3.2/ld 2.13 works +# (ld supports -shared) +# * llvm-mingw with Clang 11 works +# (lld supports -shared) + import os import sys import copy -import shlex -import warnings -from subprocess import check_output +from subprocess import Popen, PIPE, check_output +import re +import distutils.version from distutils.unixccompiler import UnixCCompiler from distutils.file_util import write_file -from distutils.errors import ( - DistutilsExecError, - DistutilsPlatformError, - CCompilerError, - CompileError, -) -from distutils.version import LooseVersion, suppress_known_deprecation - +from distutils.errors import (DistutilsExecError, CCompilerError, + CompileError, UnknownFileError) +from distutils.version import LooseVersion +from distutils.spawn import find_executable def get_msvcr(): """Include the appropriate MSVC runtime library if Python was built @@ -30,7 +67,7 @@ def get_msvcr(): """ msc_pos = sys.version.find('MSC v.') if msc_pos != -1: - msc_ver = sys.version[msc_pos + 6 : msc_pos + 10] + msc_ver = sys.version[msc_pos+6:msc_pos+10] if msc_ver == '1300': # MSVC 7.0 return ['msvcr70'] @@ -54,77 +91,85 @@ def get_msvcr(): return ['msvcr120'] elif 1900 <= int(msc_ver) < 2000: # VS2015 / MSVC 14.0 - return ['ucrt', 'vcruntime140'] + return ['ucrt', 'vcruntime140'] else: raise ValueError("Unknown MS Compiler version %s " % msc_ver) -_runtime_library_dirs_msg = ( - "Unable to set runtime library search path on Windows, " - "usually indicated by `runtime_library_dirs` parameter to Extension" -) - - class CygwinCCompiler(UnixCCompiler): - """Handles the Cygwin port of the GNU C compiler to Windows.""" - + """ Handles the Cygwin port of the GNU C compiler to Windows. 
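# [Illustrative aside] get_msvcr() above locates the MSVC build number inside
# sys.version, which on Windows CPython contains a marker like 'MSC v.1929'.
# The same slice, standalone (prints None on non-MSVC builds such as this
# Linux venv):
import sys

msc_pos = sys.version.find('MSC v.')
msc_ver = sys.version[msc_pos + 6:msc_pos + 10] if msc_pos != -1 else None
print(msc_ver)  # e.g. '1929' for VS2019, mapping to ['ucrt', 'vcruntime140']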
+ """ compiler_type = 'cygwin' obj_extension = ".o" static_lib_extension = ".a" - shared_lib_extension = ".dll.a" - dylib_lib_extension = ".dll" + shared_lib_extension = ".dll" static_lib_format = "lib%s%s" - shared_lib_format = "lib%s%s" - dylib_lib_format = "cyg%s%s" + shared_lib_format = "%s%s" exe_extension = ".exe" def __init__(self, verbose=0, dry_run=0, force=0): - super().__init__(verbose, dry_run, force) + UnixCCompiler.__init__(self, verbose, dry_run, force) status, details = check_config_h() - self.debug_print( - "Python's GCC status: {} (details: {})".format(status, details) - ) + self.debug_print("Python's GCC status: %s (details: %s)" % + (status, details)) if status is not CONFIG_H_OK: self.warn( "Python's pyconfig.h doesn't seem to support your compiler. " "Reason: %s. " - "Compiling may fail because of undefined preprocessor macros." % details - ) + "Compiling may fail because of undefined preprocessor macros." + % details) self.cc = os.environ.get('CC', 'gcc') self.cxx = os.environ.get('CXX', 'g++') - self.linker_dll = self.cc - shared_option = "-shared" - - self.set_executables( - compiler='%s -mcygwin -O -Wall' % self.cc, - compiler_so='%s -mcygwin -mdll -O -Wall' % self.cc, - compiler_cxx='%s -mcygwin -O -Wall' % self.cxx, - linker_exe='%s -mcygwin' % self.cc, - linker_so=('{} -mcygwin {}'.format(self.linker_dll, shared_option)), - ) - - # Include the appropriate MSVC runtime library if Python was built - # with MSVC 7.0 or later. - self.dll_libraries = get_msvcr() + if ('gcc' in self.cc): # Start gcc workaround + self.gcc_version, self.ld_version, self.dllwrap_version = \ + get_versions() + self.debug_print(self.compiler_type + ": gcc %s, ld %s, dllwrap %s\n" % + (self.gcc_version, + self.ld_version, + self.dllwrap_version) ) + + # ld_version >= "2.10.90" and < "2.13" should also be able to use + # gcc -mdll instead of dllwrap + # Older dllwraps had own version numbers, newer ones use the + # same as the rest of binutils ( also ld ) + # dllwrap 2.10.90 is buggy + if self.ld_version >= "2.10.90": + self.linker_dll = self.cc + else: + self.linker_dll = "dllwrap" - @property - def gcc_version(self): - # Older numpy dependend on this existing to check for ancient - # gcc versions. This doesn't make much sense with clang etc so - # just hardcode to something recent. - # https://github.com/numpy/numpy/pull/20333 - warnings.warn( - "gcc_version attribute of CygwinCCompiler is deprecated. 
" - "Instead of returning actual gcc version a fixed value 11.2.0 is returned.", - DeprecationWarning, - stacklevel=2, - ) - with suppress_known_deprecation(): - return LooseVersion("11.2.0") + # ld_version >= "2.13" support -shared so use it instead of + # -mdll -static + if self.ld_version >= "2.13": + shared_option = "-shared" + else: + shared_option = "-mdll -static" + else: # Assume linker is up to date + self.linker_dll = self.cc + shared_option = "-shared" + + self.set_executables(compiler='%s -mcygwin -O -Wall' % self.cc, + compiler_so='%s -mcygwin -mdll -O -Wall' % self.cc, + compiler_cxx='%s -mcygwin -O -Wall' % self.cxx, + linker_exe='%s -mcygwin' % self.cc, + linker_so=('%s -mcygwin %s' % + (self.linker_dll, shared_option))) + + # cygwin and mingw32 need different sets of libraries + if ('gcc' in self.cc and self.gcc_version == "2.91.57"): + # cygwin shouldn't need msvcrt, but without the dlls will crash + # (gcc version 2.91.57) -- perhaps something about initialization + self.dll_libraries=["msvcrt"] + self.warn( + "Consider upgrading to a newer version of gcc") + else: + # Include the appropriate MSVC runtime library if Python was built + # with MSVC 7.0 or later. + self.dll_libraries = get_msvcr() def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts): """Compiles the source by spawning GCC and windres if needed.""" @@ -134,47 +179,30 @@ def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts): self.spawn(["windres", "-i", src, "-o", obj]) except DistutilsExecError as msg: raise CompileError(msg) - else: # for other files use the C-compiler + else: # for other files use the C-compiler try: - self.spawn( - self.compiler_so + cc_args + [src, '-o', obj] + extra_postargs - ) + self.spawn(self.compiler_so + cc_args + [src, '-o', obj] + + extra_postargs) except DistutilsExecError as msg: raise CompileError(msg) - def link( - self, - target_desc, - objects, - output_filename, - output_dir=None, - libraries=None, - library_dirs=None, - runtime_library_dirs=None, - export_symbols=None, - debug=0, - extra_preargs=None, - extra_postargs=None, - build_temp=None, - target_lang=None, - ): + def link(self, target_desc, objects, output_filename, output_dir=None, + libraries=None, library_dirs=None, runtime_library_dirs=None, + export_symbols=None, debug=0, extra_preargs=None, + extra_postargs=None, build_temp=None, target_lang=None): """Link the objects.""" # use separate copies, so we can modify the lists extra_preargs = copy.copy(extra_preargs or []) libraries = copy.copy(libraries or []) objects = copy.copy(objects or []) - if runtime_library_dirs: - self.warn(_runtime_library_dirs_msg) - # Additional libraries libraries.extend(self.dll_libraries) # handle export symbols by creating a def-file # with executables this only works with gcc/ld as linker - if (export_symbols is not None) and ( - target_desc != self.EXECUTABLE or self.linker_dll == "gcc" - ): + if ((export_symbols is not None) and + (target_desc != self.EXECUTABLE or self.linker_dll == "gcc")): # (The linker doesn't do anything if output is up-to-date. 
# So it would probably better to check if we really need this, # but for this we had to insert some unchanged parts of @@ -186,115 +214,124 @@ def link( temp_dir = os.path.dirname(objects[0]) # name of dll to give the helper files the same base name (dll_name, dll_extension) = os.path.splitext( - os.path.basename(output_filename) - ) + os.path.basename(output_filename)) # generate the filenames for these files def_file = os.path.join(temp_dir, dll_name + ".def") + lib_file = os.path.join(temp_dir, 'lib' + dll_name + ".a") # Generate .def file - contents = ["LIBRARY %s" % os.path.basename(output_filename), "EXPORTS"] + contents = [ + "LIBRARY %s" % os.path.basename(output_filename), + "EXPORTS"] for sym in export_symbols: contents.append(sym) - self.execute(write_file, (def_file, contents), "writing %s" % def_file) + self.execute(write_file, (def_file, contents), + "writing %s" % def_file) - # next add options for def-file + # next add options for def-file and to creating import libraries - # for gcc/ld the def-file is specified as any object files - objects.append(def_file) + # dllwrap uses different options than gcc/ld + if self.linker_dll == "dllwrap": + extra_preargs.extend(["--output-lib", lib_file]) + # for dllwrap we have to use a special option + extra_preargs.extend(["--def", def_file]) + # we use gcc/ld here and can be sure ld is >= 2.9.10 + else: + # doesn't work: bfd_close build\...\libfoo.a: Invalid operation + #extra_preargs.extend(["-Wl,--out-implib,%s" % lib_file]) + # for gcc/ld the def-file is specified as any object files + objects.append(def_file) - # end: if ((export_symbols is not None) and + #end: if ((export_symbols is not None) and # (target_desc != self.EXECUTABLE or self.linker_dll == "gcc")): # who wants symbols and a many times larger output file # should explicitly switch the debug mode on - # otherwise we let ld strip the output file + # otherwise we let dllwrap/ld strip the output file # (On my machine: 10KiB < stripped_file < ??100KiB # unstripped_file = stripped_file + XXX KiB # ( XXX=254 for a typical python extension)) if not debug: extra_preargs.append("-s") - UnixCCompiler.link( - self, - target_desc, - objects, - output_filename, - output_dir, - libraries, - library_dirs, - runtime_library_dirs, - None, # export_symbols, we do this in our def-file - debug, - extra_preargs, - extra_postargs, - build_temp, - target_lang, - ) - - def runtime_library_dir_option(self, dir): - # cygwin doesn't support rpath. While in theory we could error - # out like MSVC does, code might expect it to work like on Unix, so - # just warn and hope for the best. - self.warn(_runtime_library_dirs_msg) - return [] + UnixCCompiler.link(self, target_desc, objects, output_filename, + output_dir, libraries, library_dirs, + runtime_library_dirs, + None, # export_symbols, we do this in our def-file + debug, extra_preargs, extra_postargs, build_temp, + target_lang) # -- Miscellaneous methods ----------------------------------------- - def _make_out_path(self, output_dir, strip_dir, src_name): - # use normcase to make sure '.rc' is really '.rc' and not '.RC' - norm_src_name = os.path.normcase(src_name) - return super()._make_out_path(output_dir, strip_dir, norm_src_name) - - @property - def out_extensions(self): - """ - Add support for rc and res files. 
- """ - return { - **super().out_extensions, - **{ext: ext + self.obj_extension for ext in ('.res', '.rc')}, - } - + def object_filenames(self, source_filenames, strip_dir=0, output_dir=''): + """Adds supports for rc and res files.""" + if output_dir is None: + output_dir = '' + obj_names = [] + for src_name in source_filenames: + # use normcase to make sure '.rc' is really '.rc' and not '.RC' + base, ext = os.path.splitext(os.path.normcase(src_name)) + if ext not in (self.src_extensions + ['.rc','.res']): + raise UnknownFileError("unknown file type '%s' (from '%s')" % \ + (ext, src_name)) + if strip_dir: + base = os.path.basename (base) + if ext in ('.res', '.rc'): + # these need to be compiled to object files + obj_names.append (os.path.join(output_dir, + base + ext + self.obj_extension)) + else: + obj_names.append (os.path.join(output_dir, + base + self.obj_extension)) + return obj_names # the same as cygwin plus some additional parameters class Mingw32CCompiler(CygwinCCompiler): - """Handles the Mingw32 port of the GNU C compiler to Windows.""" - + """ Handles the Mingw32 port of the GNU C compiler to Windows. + """ compiler_type = 'mingw32' def __init__(self, verbose=0, dry_run=0, force=0): - super().__init__(verbose, dry_run, force) - - shared_option = "-shared" + CygwinCCompiler.__init__ (self, verbose, dry_run, force) - if is_cygwincc(self.cc): - raise CCompilerError('Cygwin gcc cannot be used with --compiler=mingw32') + # ld_version >= "2.13" support -shared so use it instead of + # -mdll -static + if ('gcc' in self.cc and self.ld_version < "2.13"): + shared_option = "-mdll -static" + else: + shared_option = "-shared" - self.set_executables( - compiler='%s -O -Wall' % self.cc, - compiler_so='%s -mdll -O -Wall' % self.cc, - compiler_cxx='%s -O -Wall' % self.cxx, - linker_exe='%s' % self.cc, - linker_so='{} {}'.format(self.linker_dll, shared_option), - ) + # A real mingw32 doesn't need to specify a different entry point, + # but cygwin 2.91.57 in no-cygwin-mode needs it. + if ('gcc' in self.cc and self.gcc_version <= "2.91.57"): + entry_point = '--entry _DllMain@12' + else: + entry_point = '' + if is_cygwincc(self.cc): + raise CCompilerError( + 'Cygwin gcc cannot be used with --compiler=mingw32') + + self.set_executables(compiler='%s -O -Wall' % self.cc, + compiler_so='%s -mdll -O -Wall' % self.cc, + compiler_cxx='%s -O -Wall' % self.cxx, + linker_exe='%s' % self.cc, + linker_so='%s %s %s' + % (self.linker_dll, shared_option, + entry_point)) # Maybe we should also append -mthreads, but then the finished # dlls need another dll (mingwm10.dll see Mingw32 docs) # (-mthreads: Support thread-safe exception handling on `Mingw32') # no additional libraries needed - self.dll_libraries = [] + self.dll_libraries=[] # Include the appropriate MSVC runtime library if Python was built # with MSVC 7.0 or later. self.dll_libraries = get_msvcr() - def runtime_library_dir_option(self, dir): - raise DistutilsPlatformError(_runtime_library_dirs_msg) - - # Because these compilers aren't configured in Python's pyconfig.h file by # default, we should at least warn the user if he is using an unmodified # version. @@ -303,7 +340,6 @@ def runtime_library_dir_option(self, dir): CONFIG_H_NOTOK = "not ok" CONFIG_H_UNCERTAIN = "uncertain" - def check_config_h(): """Check if the current Python installation appears amenable to building extensions with GCC. 
@@ -348,17 +384,42 @@ def check_config_h(): finally: config_h.close() except OSError as exc: - return (CONFIG_H_UNCERTAIN, "couldn't read '{}': {}".format(fn, exc.strerror)) + return (CONFIG_H_UNCERTAIN, + "couldn't read '%s': %s" % (fn, exc.strerror)) +RE_VERSION = re.compile(br'(\d+\.\d+(\.\d+)*)') + +def _find_exe_version(cmd): + """Find the version of an executable by running `cmd` in the shell. + + If the command is not found, or the output does not match + `RE_VERSION`, returns None. + """ + executable = cmd.split()[0] + if find_executable(executable) is None: + return None + out = Popen(cmd, shell=True, stdout=PIPE).stdout + try: + out_string = out.read() + finally: + out.close() + result = RE_VERSION.search(out_string) + if result is None: + return None + # LooseVersion works with strings; decode + ver_str = result.group(1).decode() + with distutils.version.suppress_known_deprecation(): + return LooseVersion(ver_str) + +def get_versions(): + """ Try to find out the versions of gcc, ld and dllwrap. + + If not possible it returns None for it. + """ + commands = ['gcc -dumpversion', 'ld -v', 'dllwrap --version'] + return tuple([_find_exe_version(cmd) for cmd in commands]) def is_cygwincc(cc): '''Try to determine if the compiler that would be used is from cygwin.''' - out_string = check_output(shlex.split(cc) + ['-dumpmachine']) + out_string = check_output([cc, '-dumpmachine']) return out_string.strip().endswith(b'cygwin') - - -get_versions = None -""" -A stand-in for the previous get_versions() function to prevent failures -when monkeypatched. See pypa/setuptools#2969. -""" diff --git a/venv/lib/python3.10/site-packages/setuptools/_distutils/dep_util.py b/venv/lib/python3.10/site-packages/setuptools/_distutils/dep_util.py index db1fa01..d74f5e4 100644 --- a/venv/lib/python3.10/site-packages/setuptools/_distutils/dep_util.py +++ b/venv/lib/python3.10/site-packages/setuptools/_distutils/dep_util.py @@ -8,29 +8,28 @@ from distutils.errors import DistutilsFileError -def newer(source, target): +def newer (source, target): """Return true if 'source' exists and is more recently modified than 'target', or if 'source' exists and 'target' doesn't. Return false if both exist and 'target' is the same age or younger than 'source'. Raise DistutilsFileError if 'source' does not exist. """ if not os.path.exists(source): - raise DistutilsFileError("file '%s' does not exist" % os.path.abspath(source)) + raise DistutilsFileError("file '%s' does not exist" % + os.path.abspath(source)) if not os.path.exists(target): return 1 from stat import ST_MTIME - mtime1 = os.stat(source)[ST_MTIME] mtime2 = os.stat(target)[ST_MTIME] return mtime1 > mtime2 - # newer () -def newer_pairwise(sources, targets): +def newer_pairwise (sources, targets): """Walk two filename lists in parallel, testing if each source is newer than its corresponding target. Return a pair of lists (sources, targets) where source is newer than target, according to the semantics @@ -49,11 +48,10 @@ def newer_pairwise(sources, targets): return (n_sources, n_targets) - # newer_pairwise () -def newer_group(sources, target, missing='error'): +def newer_group (sources, target, missing='error'): """Return true if 'target' is out-of-date with respect to any file listed in 'sources'. In other words, if 'target' exists and is newer than every file in 'sources', return false; otherwise return true. @@ -75,16 +73,15 @@ def newer_group(sources, target, missing='error'): # we can immediately return true. 
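# [Illustrative aside] newer() above boils down to an st_mtime comparison. A
# stdlib-only sketch of the same rule (assumes `source` exists, unlike the
# distutils version, which raises DistutilsFileError):
import os

def is_newer(source, target):
    """True if source was modified after target, or target is missing."""
    if not os.path.exists(target):
        return True
    return os.stat(source).st_mtime > os.stat(target).st_mtime

# typical use: skip work when the output is already up to date
# if is_newer('module.py', 'module.pyc'): rebuild()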
If we fall through to the end # of the loop, then 'target' is up-to-date and we return false. from stat import ST_MTIME - target_mtime = os.stat(target)[ST_MTIME] for source in sources: if not os.path.exists(source): - if missing == 'error': # blow up when we stat() the file + if missing == 'error': # blow up when we stat() the file pass - elif missing == 'ignore': # missing source dropped from - continue # target's dependency list - elif missing == 'newer': # missing source means target is - return 1 # out-of-date + elif missing == 'ignore': # missing source dropped from + continue # target's dependency list + elif missing == 'newer': # missing source means target is + return 1 # out-of-date source_mtime = os.stat(source)[ST_MTIME] if source_mtime > target_mtime: @@ -92,5 +89,4 @@ def newer_group(sources, target, missing='error'): else: return 0 - # newer_group () diff --git a/venv/lib/python3.10/site-packages/setuptools/_distutils/dir_util.py b/venv/lib/python3.10/site-packages/setuptools/_distutils/dir_util.py index 6f0bb8a..d5cd8e3 100644 --- a/venv/lib/python3.10/site-packages/setuptools/_distutils/dir_util.py +++ b/venv/lib/python3.10/site-packages/setuptools/_distutils/dir_util.py @@ -4,15 +4,17 @@ import os import errno -from distutils.errors import DistutilsInternalError, DistutilsFileError +from distutils.errors import DistutilsFileError, DistutilsInternalError from distutils import log # cache for by mkpath() -- in addition to cheapening redundant calls, # eliminates redundant "creating /foo/bar/baz" messages in dry-run mode _path_created = {} - -def mkpath(name, mode=0o777, verbose=1, dry_run=0): # noqa: C901 +# I don't use os.makedirs because a) it's new to Python 1.5.2, and +# b) it blows up if the directory already exists (I want to silently +# succeed in that case). +def mkpath(name, mode=0o777, verbose=1, dry_run=0): """Create a directory and any missing ancestor directories. If the directory already exists (or if 'name' is the empty string, which @@ -21,12 +23,6 @@ def mkpath(name, mode=0o777, verbose=1, dry_run=0): # noqa: C901 (eg. some sub-path exists, but is a file rather than a directory). If 'verbose' is true, print a one-line summary of each mkdir to stdout. Return the list of directories actually created. - - os.makedirs is not used because: - - a) It's new to Python 1.5.2, and - b) it blows up if the directory already exists (in which case it should - silently succeed). """ global _path_created @@ -34,8 +30,7 @@ def mkpath(name, mode=0o777, verbose=1, dry_run=0): # noqa: C901 # Detect a common bug -- name is None if not isinstance(name, str): raise DistutilsInternalError( - "mkpath: 'name' must be a string (got {!r})".format(name) - ) + "mkpath: 'name' must be a string (got %r)" % (name,)) # XXX what's the better way to handle verbosity? 
print as we create # each directory in the path (the current behaviour), or only announce @@ -50,17 +45,17 @@ def mkpath(name, mode=0o777, verbose=1, dry_run=0): # noqa: C901 return created_dirs (head, tail) = os.path.split(name) - tails = [tail] # stack of lone dirs to create + tails = [tail] # stack of lone dirs to create while head and tail and not os.path.isdir(head): (head, tail) = os.path.split(head) - tails.insert(0, tail) # push next higher dir onto stack + tails.insert(0, tail) # push next higher dir onto stack # now 'head' contains the deepest directory that already exists # (that is, the child of 'head' in 'name' is the highest directory # that does *not* exist) for d in tails: - # print "head = %s, d = %s: " % (head, d), + #print "head = %s, d = %s: " % (head, d), head = os.path.join(head, d) abs_head = os.path.abspath(head) @@ -76,14 +71,12 @@ def mkpath(name, mode=0o777, verbose=1, dry_run=0): # noqa: C901 except OSError as exc: if not (exc.errno == errno.EEXIST and os.path.isdir(head)): raise DistutilsFileError( - "could not create '{}': {}".format(head, exc.args[-1]) - ) + "could not create '%s': %s" % (head, exc.args[-1])) created_dirs.append(head) _path_created[abs_head] = 1 return created_dirs - def create_tree(base_dir, files, mode=0o777, verbose=1, dry_run=0): """Create all the empty directories under 'base_dir' needed to put 'files' there. @@ -103,17 +96,8 @@ def create_tree(base_dir, files, mode=0o777, verbose=1, dry_run=0): for dir in sorted(need_dir): mkpath(dir, mode, verbose=verbose, dry_run=dry_run) - -def copy_tree( # noqa: C901 - src, - dst, - preserve_mode=1, - preserve_times=1, - preserve_symlinks=0, - update=0, - verbose=1, - dry_run=0, -): +def copy_tree(src, dst, preserve_mode=1, preserve_times=1, + preserve_symlinks=0, update=0, verbose=1, dry_run=0): """Copy an entire directory tree 'src' to a new location 'dst'. Both 'src' and 'dst' must be directory names. 
If 'src' is not a @@ -136,7 +120,8 @@ def copy_tree( # noqa: C901 from distutils.file_util import copy_file if not dry_run and not os.path.isdir(src): - raise DistutilsFileError("cannot copy tree '%s': not a directory" % src) + raise DistutilsFileError( + "cannot copy tree '%s': not a directory" % src) try: names = os.listdir(src) except OSError as e: @@ -144,8 +129,7 @@ def copy_tree( # noqa: C901 names = [] else: raise DistutilsFileError( - "error listing files in '{}': {}".format(src, e.strerror) - ) + "error listing files in '%s': %s" % (src, e.strerror)) if not dry_run: mkpath(dst, verbose=verbose) @@ -170,43 +154,27 @@ def copy_tree( # noqa: C901 elif os.path.isdir(src_name): outputs.extend( - copy_tree( - src_name, - dst_name, - preserve_mode, - preserve_times, - preserve_symlinks, - update, - verbose=verbose, - dry_run=dry_run, - ) - ) + copy_tree(src_name, dst_name, preserve_mode, + preserve_times, preserve_symlinks, update, + verbose=verbose, dry_run=dry_run)) else: - copy_file( - src_name, - dst_name, - preserve_mode, - preserve_times, - update, - verbose=verbose, - dry_run=dry_run, - ) + copy_file(src_name, dst_name, preserve_mode, + preserve_times, update, verbose=verbose, + dry_run=dry_run) outputs.append(dst_name) return outputs - def _build_cmdtuple(path, cmdtuples): """Helper for remove_tree().""" for f in os.listdir(path): - real_f = os.path.join(path, f) + real_f = os.path.join(path,f) if os.path.isdir(real_f) and not os.path.islink(real_f): _build_cmdtuple(real_f, cmdtuples) else: cmdtuples.append((os.remove, real_f)) cmdtuples.append((os.rmdir, path)) - def remove_tree(directory, verbose=1, dry_run=0): """Recursively remove an entire directory tree. @@ -231,7 +199,6 @@ def remove_tree(directory, verbose=1, dry_run=0): except OSError as exc: log.warn("error removing %s: %s", directory, exc) - def ensure_relative(path): """Take the full path 'path', and make it a relative path. 
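For context on the dir_util hunks above: mkpath() silently succeeds when the directory already exists and returns only the directories it actually created, while copy_tree() returns the list of files it copied. A minimal sketch, not part of the diff, using throwaway temporary paths (file names here are illustrative; note distutils is deprecated since Python 3.10 in favour of shutil):

import os
import tempfile
from distutils.dir_util import mkpath, copy_tree

base = tempfile.mkdtemp()
print(mkpath(os.path.join(base, 'a', 'b', 'c')))  # lists the dirs just created
print(mkpath(os.path.join(base, 'a', 'b', 'c')))  # [] -- already exists, no error
open(os.path.join(base, 'a', 'b', 'c', 'data.txt'), 'w').close()
print(copy_tree(os.path.join(base, 'a'), os.path.join(base, 'mirror')))  # copied files
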
diff --git a/venv/lib/python3.10/site-packages/setuptools/_distutils/dist.py b/venv/lib/python3.10/site-packages/setuptools/_distutils/dist.py index 917cd94..37db4d6 100644 --- a/venv/lib/python3.10/site-packages/setuptools/_distutils/dist.py +++ b/venv/lib/python3.10/site-packages/setuptools/_distutils/dist.py @@ -7,8 +7,6 @@ import sys import os import re -import pathlib -import contextlib from email import message_from_file try: @@ -16,12 +14,7 @@ except ImportError: warnings = None -from distutils.errors import ( - DistutilsOptionError, - DistutilsModuleError, - DistutilsArgError, - DistutilsClassError, -) +from distutils.errors import * from distutils.fancy_getopt import FancyGetopt, translate_longopt from distutils.util import check_environ, strtobool, rfc822_escape from distutils import log @@ -76,7 +69,8 @@ class Distribution: ('quiet', 'q', "run quietly (turns verbosity off)"), ('dry-run', 'n', "don't actually do anything"), ('help', 'h', "show detailed help message"), - ('no-user-cfg', None, 'ignore pydistutils.cfg in your home directory'), + ('no-user-cfg', None, + 'ignore pydistutils.cfg in your home directory'), ] # 'common_usage' is a short (2-3 line) string describing the common @@ -90,32 +84,49 @@ class Distribution: # options that are not propagated to the commands display_options = [ - ('help-commands', None, "list all available commands"), - ('name', None, "print package name"), - ('version', 'V', "print package version"), - ('fullname', None, "print -"), - ('author', None, "print the author's name"), - ('author-email', None, "print the author's email address"), - ('maintainer', None, "print the maintainer's name"), - ('maintainer-email', None, "print the maintainer's email address"), - ('contact', None, "print the maintainer's name if known, else the author's"), - ( - 'contact-email', - None, - "print the maintainer's email address if known, else the author's", - ), - ('url', None, "print the URL for this package"), - ('license', None, "print the license of the package"), - ('licence', None, "alias for --license"), - ('description', None, "print the package description"), - ('long-description', None, "print the long package description"), - ('platforms', None, "print the list of platforms"), - ('classifiers', None, "print the list of classifiers"), - ('keywords', None, "print the list of keywords"), - ('provides', None, "print the list of packages/modules provided"), - ('requires', None, "print the list of packages/modules required"), - ('obsoletes', None, "print the list of packages/modules made obsolete"), - ] + ('help-commands', None, + "list all available commands"), + ('name', None, + "print package name"), + ('version', 'V', + "print package version"), + ('fullname', None, + "print -"), + ('author', None, + "print the author's name"), + ('author-email', None, + "print the author's email address"), + ('maintainer', None, + "print the maintainer's name"), + ('maintainer-email', None, + "print the maintainer's email address"), + ('contact', None, + "print the maintainer's name if known, else the author's"), + ('contact-email', None, + "print the maintainer's email address if known, else the author's"), + ('url', None, + "print the URL for this package"), + ('license', None, + "print the license of the package"), + ('licence', None, + "alias for --license"), + ('description', None, + "print the package description"), + ('long-description', None, + "print the long package description"), + ('platforms', None, + "print the list of platforms"), + ('classifiers', None, + 
"print the list of classifiers"), + ('keywords', None, + "print the list of keywords"), + ('provides', None, + "print the list of packages/modules provided"), + ('requires', None, + "print the list of packages/modules required"), + ('obsoletes', None, + "print the list of packages/modules made obsolete") + ] display_option_names = [translate_longopt(x[0]) for x in display_options] # negative options are options that exclude other options @@ -123,7 +134,7 @@ class Distribution: # -- Creation/initialization methods ------------------------------- - def __init__(self, attrs=None): # noqa: C901 + def __init__(self, attrs=None): """Construct a new Distribution instance: initialize all the attributes of a Distribution, and then use 'attrs' (a dictionary mapping attribute names to values) to assign some of those @@ -295,7 +306,7 @@ def get_option_dict(self, command): def dump_option_dicts(self, header=None, commands=None, indent=""): from pprint import pformat - if commands is None: # dump all command option dicts + if commands is None: # dump all command option dicts commands = sorted(self.command_options.keys()) if header is not None: @@ -309,9 +320,11 @@ def dump_option_dicts(self, header=None, commands=None, indent=""): for cmd_name in commands: opt_dict = self.command_options.get(cmd_name) if opt_dict is None: - self.announce(indent + "no option dict for '%s' command" % cmd_name) + self.announce(indent + + "no option dict for '%s' command" % cmd_name) else: - self.announce(indent + "option dict for '%s' command:" % cmd_name) + self.announce(indent + + "option dict for '%s' command:" % cmd_name) out = pformat(opt_dict) for line in out.split('\n'): self.announce(indent + " " + line) @@ -324,61 +337,58 @@ def find_config_files(self): should be parsed. The filenames returned are guaranteed to exist (modulo nasty race conditions). - There are multiple possible config files: - - distutils.cfg in the Distutils installation directory (i.e. - where the top-level Distutils __inst__.py file lives) - - a file in the user's home directory named .pydistutils.cfg - on Unix and pydistutils.cfg on Windows/Mac; may be disabled - with the ``--no-user-cfg`` option - - setup.cfg in the current directory - - a file named by an environment variable + There are three possible config files: distutils.cfg in the + Distutils installation directory (ie. where the top-level + Distutils __inst__.py file lives), a file in the user's home + directory named .pydistutils.cfg on Unix and pydistutils.cfg + on Windows/Mac; and setup.cfg in the current directory. + + The file in the user's home directory can be disabled with the + --no-user-cfg option. """ + files = [] check_environ() - files = [str(path) for path in self._gen_paths() if os.path.isfile(path)] - if DEBUG: - self.announce("using config files: %s" % ', '.join(files)) + # Where to look for the system-wide Distutils config file + sys_dir = os.path.dirname(sys.modules['distutils'].__file__) - return files + # Look for the system config file + sys_file = os.path.join(sys_dir, "distutils.cfg") + if os.path.isfile(sys_file): + files.append(sys_file) - def _gen_paths(self): - # The system-wide Distutils config file - sys_dir = pathlib.Path(sys.modules['distutils'].__file__).parent - yield sys_dir / "distutils.cfg" + # What to call the per-user config file + if os.name == 'posix': + user_filename = ".pydistutils.cfg" + else: + user_filename = "pydistutils.cfg" - # The per-user config file - prefix = '.' 
* (os.name == 'posix') - filename = prefix + 'pydistutils.cfg' + # And look for the user config file if self.want_user_cfg: - yield pathlib.Path('~').expanduser() / filename + user_file = os.path.join(os.path.expanduser('~'), user_filename) + if os.path.isfile(user_file): + files.append(user_file) # All platforms support local setup.cfg - yield pathlib.Path('setup.cfg') + local_file = "setup.cfg" + if os.path.isfile(local_file): + files.append(local_file) - # Additional config indicated in the environment - with contextlib.suppress(TypeError): - yield pathlib.Path(os.getenv("DIST_EXTRA_CONFIG")) + if DEBUG: + self.announce("using config files: %s" % ', '.join(files)) + + return files - def parse_config_files(self, filenames=None): # noqa: C901 + def parse_config_files(self, filenames=None): from configparser import ConfigParser # Ignore install directory options if we have a venv if sys.prefix != sys.base_prefix: ignore_options = [ - 'install-base', - 'install-platbase', - 'install-lib', - 'install-platlib', - 'install-purelib', - 'install-headers', - 'install-scripts', - 'install-data', - 'prefix', - 'exec-prefix', - 'home', - 'user', - 'root', - ] + 'install-base', 'install-platbase', 'install-lib', + 'install-platlib', 'install-purelib', 'install-headers', + 'install-scripts', 'install-data', 'prefix', 'exec-prefix', + 'home', 'user', 'root'] else: ignore_options = [] @@ -401,7 +411,7 @@ def parse_config_files(self, filenames=None): # noqa: C901 for opt in options: if opt != '__name__' and opt not in ignore_options: - val = parser.get(section, opt) + val = parser.get(section,opt) opt = opt.replace('-', '_') opt_dict[opt] = (filename, val) @@ -418,7 +428,7 @@ def parse_config_files(self, filenames=None): # noqa: C901 try: if alias: setattr(self, alias, not strtobool(val)) - elif opt in ('verbose', 'dry_run'): # ugh! + elif opt in ('verbose', 'dry_run'): # ugh! setattr(self, opt, strtobool(val)) else: setattr(self, opt, val) @@ -472,7 +482,7 @@ def parse_command_line(self): return while args: args = self._parse_command_opts(parser, args) - if args is None: # user asked for help (and got it) + if args is None: # user asked for help (and got it) return # Handle the cases of --help as a "global" option, ie. @@ -482,9 +492,9 @@ def parse_command_line(self): # latter, we omit the display-only options and show help for # each command listed on the command line. if self.help: - self._show_help( - parser, display_options=len(self.commands) == 0, commands=self.commands - ) + self._show_help(parser, + display_options=len(self.commands) == 0, + commands=self.commands) return # Oops, no commands found -- an end-user error @@ -501,14 +511,11 @@ def _get_toplevel_options(self): level as well as options recognized for commands. """ return self.global_options + [ - ( - "command-packages=", - None, - "list of packages that provide distutils commands", - ), - ] + ("command-packages=", None, + "list of packages that provide distutils commands"), + ] - def _parse_command_opts(self, parser, args): # noqa: C901 + def _parse_command_opts(self, parser, args): """Parse the command-line options for a single command. 'parser' must be a FancyGetopt instance; 'args' must be the list of arguments, starting with the current command (whose options @@ -538,19 +545,14 @@ def _parse_command_opts(self, parser, args): # noqa: C901 # to be sure that the basic "command" interface is implemented. 
if not issubclass(cmd_class, Command): raise DistutilsClassError( - "command class %s must subclass Command" % cmd_class - ) + "command class %s must subclass Command" % cmd_class) # Also make sure that the command object provides a list of its # known options. - if not ( - hasattr(cmd_class, 'user_options') - and isinstance(cmd_class.user_options, list) - ): - msg = ( - "command class %s must provide " - "'user_options' attribute (a list of tuples)" - ) + if not (hasattr(cmd_class, 'user_options') and + isinstance(cmd_class.user_options, list)): + msg = ("command class %s must provide " + "'user_options' attribute (a list of tuples)") raise DistutilsClassError(msg % cmd_class) # If the command class has a list of negative alias options, @@ -562,39 +564,36 @@ def _parse_command_opts(self, parser, args): # noqa: C901 # Check for help_options in command class. They have a different # format (tuple of four) so we need to preprocess them here. - if hasattr(cmd_class, 'help_options') and isinstance( - cmd_class.help_options, list - ): + if (hasattr(cmd_class, 'help_options') and + isinstance(cmd_class.help_options, list)): help_options = fix_help_options(cmd_class.help_options) else: help_options = [] # All commands support the global options too, just by adding # in 'global_options'. - parser.set_option_table( - self.global_options + cmd_class.user_options + help_options - ) + parser.set_option_table(self.global_options + + cmd_class.user_options + + help_options) parser.set_negative_aliases(negative_opt) (args, opts) = parser.getopt(args[1:]) if hasattr(opts, 'help') and opts.help: self._show_help(parser, display_options=0, commands=[cmd_class]) return - if hasattr(cmd_class, 'help_options') and isinstance( - cmd_class.help_options, list - ): - help_option_found = 0 + if (hasattr(cmd_class, 'help_options') and + isinstance(cmd_class.help_options, list)): + help_option_found=0 for (help_option, short, desc, func) in cmd_class.help_options: if hasattr(opts, parser.get_attr_name(help_option)): - help_option_found = 1 + help_option_found=1 if callable(func): func() else: raise DistutilsClassError( "invalid help function %r for help option '%s': " "must be a callable object (function, etc.)" - % (func, help_option) - ) + % (func, help_option)) if help_option_found: return @@ -620,7 +619,8 @@ def finalize_options(self): value = [elm.strip() for elm in value.split(',')] setattr(self.metadata, attr, value) - def _show_help(self, parser, global_options=1, display_options=1, commands=[]): + def _show_help(self, parser, global_options=1, display_options=1, + commands=[]): """Show help for the setup script command-line in the form of several lists of command-line options. 
'parser' should be a FancyGetopt instance; do not expect it to be returned in the @@ -649,9 +649,8 @@ def _show_help(self, parser, global_options=1, display_options=1, commands=[]): if display_options: parser.set_option_table(self.display_options) parser.print_help( - "Information display options (just display " - + "information, ignore any commands)" - ) + "Information display options (just display " + + "information, ignore any commands)") print('') for command in self.commands: @@ -659,10 +658,10 @@ def _show_help(self, parser, global_options=1, display_options=1, commands=[]): klass = command else: klass = self.get_command_class(command) - if hasattr(klass, 'help_options') and isinstance(klass.help_options, list): - parser.set_option_table( - klass.user_options + fix_help_options(klass.help_options) - ) + if (hasattr(klass, 'help_options') and + isinstance(klass.help_options, list)): + parser.set_option_table(klass.user_options + + fix_help_options(klass.help_options)) else: parser.set_option_table(klass.user_options) parser.print_help("Options for '%s' command:" % klass.__name__) @@ -698,10 +697,11 @@ def handle_display_options(self, option_order): for (opt, val) in option_order: if val and is_display_option.get(opt): opt = translate_longopt(opt) - value = getattr(self.metadata, "get_" + opt)() + value = getattr(self.metadata, "get_"+opt)() if opt in ['keywords', 'platforms']: print(','.join(value)) - elif opt in ('classifiers', 'provides', 'requires', 'obsoletes'): + elif opt in ('classifiers', 'provides', 'requires', + 'obsoletes'): print('\n'.join(value)) else: print(value) @@ -735,7 +735,6 @@ def print_commands(self): 'description'. """ import distutils.command - std_commands = distutils.command.__all__ is_std = {} for cmd in std_commands: @@ -747,14 +746,18 @@ def print_commands(self): extra_commands.append(cmd) max_length = 0 - for cmd in std_commands + extra_commands: + for cmd in (std_commands + extra_commands): if len(cmd) > max_length: max_length = len(cmd) - self.print_command_list(std_commands, "Standard commands", max_length) + self.print_command_list(std_commands, + "Standard commands", + max_length) if extra_commands: print() - self.print_command_list(extra_commands, "Extra commands", max_length) + self.print_command_list(extra_commands, + "Extra commands", + max_length) def get_command_list(self): """Get a list of (command, description) tuples. 
@@ -766,7 +769,6 @@ def get_command_list(self): # Currently this is only used on Mac OS, for the Mac-only GUI # Distutils interface (by Jack Jansen) import distutils.command - std_commands = distutils.command.__all__ is_std = {} for cmd in std_commands: @@ -778,7 +780,7 @@ def get_command_list(self): extra_commands.append(cmd) rv = [] - for cmd in std_commands + extra_commands: + for cmd in (std_commands + extra_commands): klass = self.cmdclass.get(cmd) if not klass: klass = self.get_command_class(cmd) @@ -820,7 +822,7 @@ def get_command_class(self, command): return klass for pkgname in self.get_command_packages(): - module_name = "{}.{}".format(pkgname, command) + module_name = "%s.%s" % (pkgname, command) klass_name = command try: @@ -834,8 +836,7 @@ def get_command_class(self, command): except AttributeError: raise DistutilsModuleError( "invalid command '%s' (no class '%s' in module '%s')" - % (command, klass_name, module_name) - ) + % (command, klass_name, module_name)) self.cmdclass[command] = klass return klass @@ -851,10 +852,8 @@ def get_command_obj(self, command, create=1): cmd_obj = self.command_obj.get(command) if not cmd_obj and create: if DEBUG: - self.announce( - "Distribution.get_command_obj(): " - "creating '%s' command object" % command - ) + self.announce("Distribution.get_command_obj(): " + "creating '%s' command object" % command) klass = self.get_command_class(command) cmd_obj = self.command_obj[command] = klass(self) @@ -871,7 +870,7 @@ def get_command_obj(self, command, create=1): return cmd_obj - def _set_command_options(self, command_obj, option_dict=None): # noqa: C901 + def _set_command_options(self, command_obj, option_dict=None): """Set the options for 'command_obj' from 'option_dict'. Basically this means copying elements of a dictionary ('option_dict') to attributes of an instance ('command'). @@ -888,9 +887,11 @@ def _set_command_options(self, command_obj, option_dict=None): # noqa: C901 self.announce(" setting options for '%s' command:" % command_name) for (option, (source, value)) in option_dict.items(): if DEBUG: - self.announce(" {} = {} (from {})".format(option, value, source)) + self.announce(" %s = %s (from %s)" % (option, value, + source)) try: - bool_opts = [translate_longopt(o) for o in command_obj.boolean_options] + bool_opts = [translate_longopt(o) + for o in command_obj.boolean_options] except AttributeError: bool_opts = [] try: @@ -909,8 +910,7 @@ def _set_command_options(self, command_obj, option_dict=None): # noqa: C901 else: raise DistutilsOptionError( "error in %s: command '%s' has no such option '%s'" - % (source, command_name, option) - ) + % (source, command_name, option)) except ValueError as msg: raise DistutilsOptionError(msg) @@ -934,7 +934,6 @@ def reinitialize_command(self, command, reinit_subcommands=0): Returns the reinitialized command object. """ from distutils.cmd import Command - if not isinstance(command, Command): command_name = command command = self.get_command_obj(command_name) @@ -1011,11 +1010,9 @@ def has_data_files(self): return self.data_files and len(self.data_files) > 0 def is_pure(self): - return ( - self.has_pure_modules() - and not self.has_ext_modules() - and not self.has_c_libraries() - ) + return (self.has_pure_modules() and + not self.has_ext_modules() and + not self.has_c_libraries()) # -- Metadata query methods ---------------------------------------- @@ -1024,35 +1021,19 @@ def is_pure(self): # to self.metadata.get_XXX. The actual code is in the # DistributionMetadata class, below. 
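The metadata accessors that the Distribution constructor binds (self.get_XXX delegating to self.metadata.get_XXX) can be exercised directly. A hedged sketch with made-up field values, showing the "UNKNOWN" fallback that the DistributionMetadata hunks below restore for unset fields:

from distutils.dist import Distribution

d = Distribution({'name': 'demo', 'version': '0.1', 'author': 'A. Author'})
print(d.get_fullname())  # demo-0.1
print(d.get_author())    # A. Author
print(d.get_license())   # 'UNKNOWN' with the restored code below; None in newer distutils
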
- class DistributionMetadata: """Dummy class to hold the distribution meta-data: name, version, author, and so forth. """ - _METHOD_BASENAMES = ( - "name", - "version", - "author", - "author_email", - "maintainer", - "maintainer_email", - "url", - "license", - "description", - "long_description", - "keywords", - "platforms", - "fullname", - "contact", - "contact_email", - "classifiers", - "download_url", - # PEP 314 - "provides", - "requires", - "obsoletes", - ) + _METHOD_BASENAMES = ("name", "version", "author", "author_email", + "maintainer", "maintainer_email", "url", + "license", "description", "long_description", + "keywords", "platforms", "fullname", "contact", + "contact_email", "classifiers", "download_url", + # PEP 314 + "provides", "requires", "obsoletes", + ) def __init__(self, path=None): if path is not None: @@ -1083,8 +1064,9 @@ def read_pkg_file(self, file): def _read_field(name): value = msg[name] - if value and value != "UNKNOWN": - return value + if value == 'UNKNOWN': + return None + return value def _read_list(name): values = msg.get_all(name, None) @@ -1129,42 +1111,37 @@ def _read_list(name): self.obsoletes = None def write_pkg_info(self, base_dir): - """Write the PKG-INFO file into the release tree.""" - with open( - os.path.join(base_dir, 'PKG-INFO'), 'w', encoding='UTF-8' - ) as pkg_info: + """Write the PKG-INFO file into the release tree. + """ + with open(os.path.join(base_dir, 'PKG-INFO'), 'w', + encoding='UTF-8') as pkg_info: self.write_pkg_file(pkg_info) def write_pkg_file(self, file): - """Write the PKG-INFO format data to a file object.""" + """Write the PKG-INFO format data to a file object. + """ version = '1.0' - if ( - self.provides - or self.requires - or self.obsoletes - or self.classifiers - or self.download_url - ): + if (self.provides or self.requires or self.obsoletes or + self.classifiers or self.download_url): version = '1.1' - # required fields file.write('Metadata-Version: %s\n' % version) file.write('Name: %s\n' % self.get_name()) file.write('Version: %s\n' % self.get_version()) + file.write('Summary: %s\n' % self.get_description()) + file.write('Home-page: %s\n' % self.get_url()) + file.write('Author: %s\n' % self.get_contact()) + file.write('Author-email: %s\n' % self.get_contact_email()) + file.write('License: %s\n' % self.get_license()) + if self.download_url: + file.write('Download-URL: %s\n' % self.download_url) - def maybe_write(header, val): - if val: - file.write(f"{header}: {val}\n") + long_desc = rfc822_escape(self.get_long_description()) + file.write('Description: %s\n' % long_desc) - # optional fields - maybe_write("Summary", self.get_description()) - maybe_write("Home-page", self.get_url()) - maybe_write("Author", self.get_contact()) - maybe_write("Author-email", self.get_contact_email()) - maybe_write("License", self.get_license()) - maybe_write("Download-URL", self.download_url) - maybe_write("Description", rfc822_escape(self.get_long_description() or "")) - maybe_write("Keywords", ",".join(self.get_keywords())) + keywords = ','.join(self.get_keywords()) + if keywords: + file.write('Keywords: %s\n' % keywords) self._write_list(file, 'Platform', self.get_platforms()) self._write_list(file, 'Classifier', self.get_classifiers()) @@ -1175,9 +1152,8 @@ def maybe_write(header, val): self._write_list(file, 'Obsoletes', self.get_obsoletes()) def _write_list(self, file, name, values): - values = values or [] for value in values: - file.write('{}: {}\n'.format(name, value)) + file.write('%s: %s\n' % (name, value)) # -- Metadata query 
methods ---------------------------------------- @@ -1188,39 +1164,38 @@ def get_version(self): return self.version or "0.0.0" def get_fullname(self): - return "{}-{}".format(self.get_name(), self.get_version()) + return "%s-%s" % (self.get_name(), self.get_version()) def get_author(self): - return self.author + return self.author or "UNKNOWN" def get_author_email(self): - return self.author_email + return self.author_email or "UNKNOWN" def get_maintainer(self): - return self.maintainer + return self.maintainer or "UNKNOWN" def get_maintainer_email(self): - return self.maintainer_email + return self.maintainer_email or "UNKNOWN" def get_contact(self): - return self.maintainer or self.author + return self.maintainer or self.author or "UNKNOWN" def get_contact_email(self): - return self.maintainer_email or self.author_email + return self.maintainer_email or self.author_email or "UNKNOWN" def get_url(self): - return self.url + return self.url or "UNKNOWN" def get_license(self): - return self.license - + return self.license or "UNKNOWN" get_licence = get_license def get_description(self): - return self.description + return self.description or "UNKNOWN" def get_long_description(self): - return self.long_description + return self.long_description or "UNKNOWN" def get_keywords(self): return self.keywords or [] @@ -1229,7 +1204,7 @@ def set_keywords(self, value): self.keywords = _ensure_list(value, 'keywords') def get_platforms(self): - return self.platforms + return self.platforms or ["UNKNOWN"] def set_platforms(self, value): self.platforms = _ensure_list(value, 'platforms') @@ -1241,7 +1216,7 @@ def set_classifiers(self, value): self.classifiers = _ensure_list(value, 'classifiers') def get_download_url(self): - return self.download_url + return self.download_url or "UNKNOWN" # PEP 314 def get_requires(self): @@ -1249,7 +1224,6 @@ def get_requires(self): def set_requires(self, value): import distutils.versionpredicate - for v in value: distutils.versionpredicate.VersionPredicate(v) self.requires = list(value) @@ -1261,7 +1235,6 @@ def set_provides(self, value): value = [v.strip() for v in value] for v in value: import distutils.versionpredicate - distutils.versionpredicate.split_provision(v) self.provides = value @@ -1270,12 +1243,10 @@ def get_obsoletes(self): def set_obsoletes(self, value): import distutils.versionpredicate - for v in value: distutils.versionpredicate.VersionPredicate(v) self.obsoletes = list(value) - def fix_help_options(options): """Convert a 4-tuple 'help_options' list as found in various command classes to the 3-tuple form required by FancyGetopt. diff --git a/venv/lib/python3.10/site-packages/setuptools/_distutils/errors.py b/venv/lib/python3.10/site-packages/setuptools/_distutils/errors.py index 626254c..8b93059 100644 --- a/venv/lib/python3.10/site-packages/setuptools/_distutils/errors.py +++ b/venv/lib/python3.10/site-packages/setuptools/_distutils/errors.py @@ -8,120 +8,90 @@ This module is safe to use in "from ... 
import *" mode; it only exports symbols whose names start with "Distutils" and end with "Error".""" - -class DistutilsError(Exception): +class DistutilsError (Exception): """The root of all Distutils evil.""" - pass - -class DistutilsModuleError(DistutilsError): +class DistutilsModuleError (DistutilsError): """Unable to load an expected module, or to find an expected class within some module (in particular, command modules and classes).""" - pass - -class DistutilsClassError(DistutilsError): +class DistutilsClassError (DistutilsError): """Some command class (or possibly distribution class, if anyone feels a need to subclass Distribution) is found not to be holding up its end of the bargain, ie. implementing some part of the "command "interface.""" - pass - -class DistutilsGetoptError(DistutilsError): +class DistutilsGetoptError (DistutilsError): """The option table provided to 'fancy_getopt()' is bogus.""" - pass - -class DistutilsArgError(DistutilsError): +class DistutilsArgError (DistutilsError): """Raised by fancy_getopt in response to getopt.error -- ie. an error in the command line usage.""" - pass - -class DistutilsFileError(DistutilsError): +class DistutilsFileError (DistutilsError): """Any problems in the filesystem: expected file not found, etc. Typically this is for problems that we detect before OSError could be raised.""" - pass - -class DistutilsOptionError(DistutilsError): +class DistutilsOptionError (DistutilsError): """Syntactic/semantic errors in command options, such as use of mutually conflicting options, or inconsistent options, badly-spelled values, etc. No distinction is made between option values originating in the setup script, the command line, config files, or what-have-you -- but if we *know* something originated in the setup script, we'll raise DistutilsSetupError instead.""" - pass - -class DistutilsSetupError(DistutilsError): +class DistutilsSetupError (DistutilsError): """For errors that can be definitely blamed on the setup script, such as invalid keyword arguments to 'setup()'.""" - pass - -class DistutilsPlatformError(DistutilsError): +class DistutilsPlatformError (DistutilsError): """We don't know how to do something on the current platform (but we do know how to do it on some platform) -- eg. 
trying to compile C files on a platform not supported by a CCompiler subclass.""" - pass - -class DistutilsExecError(DistutilsError): +class DistutilsExecError (DistutilsError): """Any problems executing an external program (such as the C compiler, when compiling C files).""" - pass - -class DistutilsInternalError(DistutilsError): +class DistutilsInternalError (DistutilsError): """Internal inconsistencies or impossibilities (obviously, this should never be seen if the code is working!).""" - pass - -class DistutilsTemplateError(DistutilsError): +class DistutilsTemplateError (DistutilsError): """Syntax error in a file list template.""" - class DistutilsByteCompileError(DistutilsError): """Byte compile error.""" - # Exception classes used by the CCompiler implementation classes -class CCompilerError(Exception): +class CCompilerError (Exception): """Some compile/link operation failed.""" - -class PreprocessError(CCompilerError): +class PreprocessError (CCompilerError): """Failure to preprocess one or more C/C++ files.""" - -class CompileError(CCompilerError): +class CompileError (CCompilerError): """Failure to compile one or more C/C++ source files.""" - -class LibError(CCompilerError): +class LibError (CCompilerError): """Failure to create a static library from one or more C/C++ object files.""" - -class LinkError(CCompilerError): +class LinkError (CCompilerError): """Failure to link one or more C/C++ object files into an executable or shared library file.""" - -class UnknownFileError(CCompilerError): +class UnknownFileError (CCompilerError): """Attempt to process an unknown file type.""" diff --git a/venv/lib/python3.10/site-packages/setuptools/_distutils/extension.py b/venv/lib/python3.10/site-packages/setuptools/_distutils/extension.py index 6b8575d..c507da3 100644 --- a/venv/lib/python3.10/site-packages/setuptools/_distutils/extension.py +++ b/venv/lib/python3.10/site-packages/setuptools/_distutils/extension.py @@ -16,7 +16,6 @@ # import that large-ish module (indirectly, through distutils.core) in # order to do anything. - class Extension: """Just a collection of attributes that describes an extension module and everything needed to build it (hopefully in a portable @@ -84,29 +83,27 @@ class Extension: # When adding arguments to this constructor, be sure to update # setup_keywords in core.py. 
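Before the Extension constructor diff that follows: an Extension is just an attribute bag describing one compiled module and how to build it. A small sketch with illustrative names (none of these source files exist in the diffed project):

from distutils.extension import Extension

ext = Extension(
    'demo._speedups',                  # dotted import name of the built module
    sources=['src/speedups.c'],
    define_macros=[('NDEBUG', None)],  # equivalent to -DNDEBUG
    libraries=['m'],                   # link against libm
)
print(repr(ext), ext.sources)
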
- def __init__( - self, - name, - sources, - include_dirs=None, - define_macros=None, - undef_macros=None, - library_dirs=None, - libraries=None, - runtime_library_dirs=None, - extra_objects=None, - extra_compile_args=None, - extra_link_args=None, - export_symbols=None, - swig_opts=None, - depends=None, - language=None, - optional=None, - **kw # To catch unknown keywords - ): + def __init__(self, name, sources, + include_dirs=None, + define_macros=None, + undef_macros=None, + library_dirs=None, + libraries=None, + runtime_library_dirs=None, + extra_objects=None, + extra_compile_args=None, + extra_link_args=None, + export_symbols=None, + swig_opts = None, + depends=None, + language=None, + optional=None, + **kw # To catch unknown keywords + ): if not isinstance(name, str): raise AssertionError("'name' must be a string") - if not (isinstance(sources, list) and all(isinstance(v, str) for v in sources)): + if not (isinstance(sources, list) and + all(isinstance(v, str) for v in sources)): raise AssertionError("'sources' must be a list of strings") self.name = name @@ -134,17 +131,17 @@ def __init__( warnings.warn(msg) def __repr__(self): - return '<{}.{}({!r}) at {:#x}>'.format( + return '<%s.%s(%r) at %#x>' % ( self.__class__.__module__, self.__class__.__qualname__, self.name, - id(self), - ) + id(self)) -def read_setup_file(filename): # noqa: C901 +def read_setup_file(filename): """Reads a Setup file and returns Extension instances.""" - from distutils.sysconfig import parse_makefile, expand_makefile_vars, _variable_rx + from distutils.sysconfig import (parse_makefile, expand_makefile_vars, + _variable_rx) from distutils.text_file import TextFile from distutils.util import split_quoted @@ -154,22 +151,17 @@ def read_setup_file(filename): # noqa: C901 # Second pass to gobble up the real content: lines of the form # ... [ ...] [ ...] [ ...] - file = TextFile( - filename, - strip_comments=1, - skip_blanks=1, - join_lines=1, - lstrip_ws=1, - rstrip_ws=1, - ) + file = TextFile(filename, + strip_comments=1, skip_blanks=1, join_lines=1, + lstrip_ws=1, rstrip_ws=1) try: extensions = [] while True: line = file.readline() - if line is None: # eof + if line is None: # eof break - if _variable_rx.match(line): # VAR=VALUE, handled in first pass + if _variable_rx.match(line): # VAR=VALUE, handled in first pass continue if line[0] == line[-1] == "*": @@ -196,8 +188,7 @@ def read_setup_file(filename): # noqa: C901 continue suffix = os.path.splitext(word)[1] - switch = word[0:2] - value = word[2:] + switch = word[0:2] ; value = word[2:] if suffix in (".c", ".cc", ".cpp", ".cxx", ".c++", ".m", ".mm"): # hmm, should we do something about C vs. C++ sources? @@ -208,13 +199,14 @@ def read_setup_file(filename): # noqa: C901 ext.include_dirs.append(value) elif switch == "-D": equals = value.find("=") - if equals == -1: # bare "-DFOO" -- no value + if equals == -1: # bare "-DFOO" -- no value ext.define_macros.append((value, None)) - else: # "-DFOO=blah" - ext.define_macros.append((value[0:equals], value[equals + 2 :])) + else: # "-DFOO=blah" + ext.define_macros.append((value[0:equals], + value[equals+2:])) elif switch == "-U": ext.undef_macros.append(value) - elif switch == "-C": # only here 'cause makesetup has it! + elif switch == "-C": # only here 'cause makesetup has it! 
ext.extra_compile_args.append(word) elif switch == "-l": ext.libraries.append(value) diff --git a/venv/lib/python3.10/site-packages/setuptools/_distutils/fancy_getopt.py b/venv/lib/python3.10/site-packages/setuptools/_distutils/fancy_getopt.py index 830f047..7d170dd 100644 --- a/venv/lib/python3.10/site-packages/setuptools/_distutils/fancy_getopt.py +++ b/venv/lib/python3.10/site-packages/setuptools/_distutils/fancy_getopt.py @@ -8,11 +8,9 @@ * options set attributes of a passed-in object """ -import sys -import string -import re +import sys, string, re import getopt -from distutils.errors import DistutilsGetoptError, DistutilsArgError +from distutils.errors import * # Much like command_re in distutils.core, this is close to but not quite # the same as a Python NAME -- except, in the spirit of most GNU @@ -22,13 +20,12 @@ longopt_re = re.compile(r'^%s$' % longopt_pat) # For recognizing "negative alias" options, eg. "quiet=!verbose" -neg_alias_re = re.compile("^({})=!({})$".format(longopt_pat, longopt_pat)) +neg_alias_re = re.compile("^(%s)=!(%s)$" % (longopt_pat, longopt_pat)) # This is used to translate long options to legitimate Python identifiers # (for use as attributes of some object). longopt_xlate = str.maketrans('-', '_') - class FancyGetopt: """Wrapper around the standard 'getopt()' module that provides some handy extra functionality: @@ -93,8 +90,7 @@ def set_option_table(self, option_table): def add_option(self, long_option, short_option=None, help_string=None): if long_option in self.option_index: raise DistutilsGetoptError( - "option conflict: already an option '%s'" % long_option - ) + "option conflict: already an option '%s'" % long_option) else: option = (long_option, short_option, help_string) self.option_table.append(option) @@ -115,15 +111,11 @@ def _check_alias_dict(self, aliases, what): assert isinstance(aliases, dict) for (alias, opt) in aliases.items(): if alias not in self.option_index: - raise DistutilsGetoptError( - ("invalid %s '%s': " "option '%s' not defined") - % (what, alias, alias) - ) + raise DistutilsGetoptError(("invalid %s '%s': " + "option '%s' not defined") % (what, alias, alias)) if opt not in self.option_index: - raise DistutilsGetoptError( - ("invalid %s '%s': " "aliased option '%s' not defined") - % (what, alias, opt) - ) + raise DistutilsGetoptError(("invalid %s '%s': " + "aliased option '%s' not defined") % (what, alias, opt)) def set_aliases(self, alias): """Set the aliases for this option parser.""" @@ -138,7 +130,7 @@ def set_negative_aliases(self, negative_alias): self._check_alias_dict(negative_alias, "negative alias") self.negative_alias = negative_alias - def _grok_option_table(self): # noqa: C901 + def _grok_option_table(self): """Populate the various data structures that keep tabs on the option table. Called by 'getopt()' before it can do anything worthwhile. 
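The option-table format that _grok_option_table() validates is (long, short, help), where a trailing '=' on the long name marks an option that takes a value. A minimal, hypothetical session (the option names are invented for illustration):

from distutils.fancy_getopt import FancyGetopt

parser = FancyGetopt([
    ('verbose', 'v', 'run verbosely'),
    ('output=', 'o', 'write results to FILE'),
])

class Target:
    pass

opts = Target()
rest = parser.getopt(['-v', '--output', 'out.txt', 'leftover'], opts)
print(rest, opts.verbose, opts.output)  # ['leftover'] 1 out.txt
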
@@ -157,27 +149,23 @@ def _grok_option_table(self): # noqa: C901 else: # the option table is part of the code, so simply # assert that it is correct - raise ValueError("invalid option tuple: {!r}".format(option)) + raise ValueError("invalid option tuple: %r" % (option,)) # Type- and value-check the option names if not isinstance(long, str) or len(long) < 2: - raise DistutilsGetoptError( - ("invalid long option '%s': " "must be a string of length >= 2") - % long - ) + raise DistutilsGetoptError(("invalid long option '%s': " + "must be a string of length >= 2") % long) - if not ((short is None) or (isinstance(short, str) and len(short) == 1)): - raise DistutilsGetoptError( - "invalid short option '%s': " - "must a single character or None" % short - ) + if (not ((short is None) or + (isinstance(short, str) and len(short) == 1))): + raise DistutilsGetoptError("invalid short option '%s': " + "must a single character or None" % short) self.repeat[long] = repeat self.long_opts.append(long) - if long[-1] == '=': # option takes an argument? - if short: - short = short + ':' + if long[-1] == '=': # option takes an argument? + if short: short = short + ':' long = long[0:-1] self.takes_arg[long] = 1 else: @@ -187,11 +175,11 @@ def _grok_option_table(self): # noqa: C901 if alias_to is not None: if self.takes_arg[alias_to]: raise DistutilsGetoptError( - "invalid negative alias '%s': " - "aliased option '%s' takes a value" % (long, alias_to) - ) + "invalid negative alias '%s': " + "aliased option '%s' takes a value" + % (long, alias_to)) - self.long_opts[-1] = long # XXX redundant?! + self.long_opts[-1] = long # XXX redundant?! self.takes_arg[long] = 0 # If this is an alias option, make sure its "takes arg" flag is @@ -200,10 +188,10 @@ def _grok_option_table(self): # noqa: C901 if alias_to is not None: if self.takes_arg[long] != self.takes_arg[alias_to]: raise DistutilsGetoptError( - "invalid alias '%s': inconsistent with " - "aliased option '%s' (one of them takes a value, " - "the other doesn't" % (long, alias_to) - ) + "invalid alias '%s': inconsistent with " + "aliased option '%s' (one of them takes a value, " + "the other doesn't" + % (long, alias_to)) # Now enforce some bondage on the long option name, so we can # later translate it to an attribute name on some object. Have @@ -211,16 +199,15 @@ def _grok_option_table(self): # noqa: C901 # '='. if not longopt_re.match(long): raise DistutilsGetoptError( - "invalid long option name '%s' " - "(must be letters, numbers, hyphens only" % long - ) + "invalid long option name '%s' " + "(must be letters, numbers, hyphens only" % long) self.attr_name[long] = self.get_attr_name(long) if short: self.short_opts.append(short) self.short2long[short[0]] = long - def getopt(self, args=None, object=None): # noqa: C901 + def getopt(self, args=None, object=None): """Parse command-line options in args. Store as attributes on object. If 'args' is None or not supplied, uses 'sys.argv[1:]'. If @@ -248,7 +235,7 @@ def getopt(self, args=None, object=None): # noqa: C901 raise DistutilsArgError(msg) for opt, val in opts: - if len(opt) == 2 and opt[0] == '-': # it's a short option + if len(opt) == 2 and opt[0] == '-': # it's a short option opt = self.short2long[opt[1]] else: assert len(opt) > 2 and opt[:2] == '--' @@ -258,7 +245,7 @@ def getopt(self, args=None, object=None): # noqa: C901 if alias: opt = alias - if not self.takes_arg[opt]: # boolean option? + if not self.takes_arg[opt]: # boolean option? 
assert val == '', "boolean option can't have value" alias = self.negative_alias.get(opt) if alias: @@ -291,7 +278,7 @@ def get_option_order(self): else: return self.option_order - def generate_help(self, header=None): # noqa: C901 + def generate_help(self, header=None): """Generate help text (a list of strings, one per suggested line of output) from the option table for this FancyGetopt object. """ @@ -303,15 +290,15 @@ def generate_help(self, header=None): # noqa: C901 for option in self.option_table: long = option[0] short = option[1] - ell = len(long) + l = len(long) if long[-1] == '=': - ell = ell - 1 + l = l - 1 if short is not None: - ell = ell + 5 # " (-x)" where short == 'x' - if ell > max_opt: - max_opt = ell + l = l + 5 # " (-x)" where short == 'x' + if l > max_opt: + max_opt = l - opt_width = max_opt + 2 + 2 + 2 # room for indent + dashes + gutter + opt_width = max_opt + 2 + 2 + 2 # room for indent + dashes + gutter # Typical help block looks like this: # --foo controls foonabulation @@ -359,14 +346,15 @@ def generate_help(self, header=None): # noqa: C901 # Case 2: we have a short option, so we have to include it # just after the long option else: - opt_names = "{} (-{})".format(long, short) + opt_names = "%s (-%s)" % (long, short) if text: - lines.append(" --%-*s %s" % (max_opt, opt_names, text[0])) + lines.append(" --%-*s %s" % + (max_opt, opt_names, text[0])) else: lines.append(" --%-*s" % opt_names) - for ell in text[1:]: - lines.append(big_indent + ell) + for l in text[1:]: + lines.append(big_indent + l) return lines def print_help(self, header=None, file=None): @@ -382,8 +370,7 @@ def fancy_getopt(options, negative_opt, object, args): return parser.getopt(args, object) -WS_TRANS = {ord(_wschar): ' ' for _wschar in string.whitespace} - +WS_TRANS = {ord(_wschar) : ' ' for _wschar in string.whitespace} def wrap_text(text, width): """wrap_text(text : string, width : int) -> [string] @@ -399,26 +386,26 @@ def wrap_text(text, width): text = text.expandtabs() text = text.translate(WS_TRANS) chunks = re.split(r'( +|-+)', text) - chunks = [ch for ch in chunks if ch] # ' - ' results in empty strings + chunks = [ch for ch in chunks if ch] # ' - ' results in empty strings lines = [] while chunks: - cur_line = [] # list of chunks (to-be-joined) - cur_len = 0 # length of current line + cur_line = [] # list of chunks (to-be-joined) + cur_len = 0 # length of current line while chunks: - ell = len(chunks[0]) - if cur_len + ell <= width: # can squeeze (at least) this chunk in + l = len(chunks[0]) + if cur_len + l <= width: # can squeeze (at least) this chunk in cur_line.append(chunks[0]) del chunks[0] - cur_len = cur_len + ell - else: # this line is full + cur_len = cur_len + l + else: # this line is full # drop last chunk if all space if cur_line and cur_line[-1][0] == ' ': del cur_line[-1] break - if chunks: # any chunks left to process? + if chunks: # any chunks left to process? 
# if the current line is still empty, then we had a single # chunk that's too big too fit on a line -- so we break # down and break it up at the line width diff --git a/venv/lib/python3.10/site-packages/setuptools/_distutils/file_util.py b/venv/lib/python3.10/site-packages/setuptools/_distutils/file_util.py index 1f1e444..b3fee35 100644 --- a/venv/lib/python3.10/site-packages/setuptools/_distutils/file_util.py +++ b/venv/lib/python3.10/site-packages/setuptools/_distutils/file_util.py @@ -8,10 +8,12 @@ from distutils import log # for generating verbose output in 'copy_file()' -_copy_action = {None: 'copying', 'hard': 'hard linking', 'sym': 'symbolically linking'} +_copy_action = { None: 'copying', + 'hard': 'hard linking', + 'sym': 'symbolically linking' } -def _copy_file_contents(src, dst, buffer_size=16 * 1024): # noqa: C901 +def _copy_file_contents(src, dst, buffer_size=16*1024): """Copy the file 'src' to 'dst'; both must be filenames. Any error opening either file, reading from 'src', or writing to 'dst', raises DistutilsFileError. Data is read/written in chunks of 'buffer_size' @@ -26,30 +28,27 @@ def _copy_file_contents(src, dst, buffer_size=16 * 1024): # noqa: C901 try: fsrc = open(src, 'rb') except OSError as e: - raise DistutilsFileError("could not open '{}': {}".format(src, e.strerror)) + raise DistutilsFileError("could not open '%s': %s" % (src, e.strerror)) if os.path.exists(dst): try: os.unlink(dst) except OSError as e: raise DistutilsFileError( - "could not delete '{}': {}".format(dst, e.strerror) - ) + "could not delete '%s': %s" % (dst, e.strerror)) try: fdst = open(dst, 'wb') except OSError as e: raise DistutilsFileError( - "could not create '{}': {}".format(dst, e.strerror) - ) + "could not create '%s': %s" % (dst, e.strerror)) while True: try: buf = fsrc.read(buffer_size) except OSError as e: raise DistutilsFileError( - "could not read from '{}': {}".format(src, e.strerror) - ) + "could not read from '%s': %s" % (src, e.strerror)) if not buf: break @@ -58,25 +57,15 @@ def _copy_file_contents(src, dst, buffer_size=16 * 1024): # noqa: C901 fdst.write(buf) except OSError as e: raise DistutilsFileError( - "could not write to '{}': {}".format(dst, e.strerror) - ) + "could not write to '%s': %s" % (dst, e.strerror)) finally: if fdst: fdst.close() if fsrc: fsrc.close() - -def copy_file( # noqa: C901 - src, - dst, - preserve_mode=1, - preserve_times=1, - update=0, - link=None, - verbose=1, - dry_run=0, -): +def copy_file(src, dst, preserve_mode=1, preserve_times=1, update=0, + link=None, verbose=1, dry_run=0): """Copy a file 'src' to 'dst'. If 'dst' is a directory, then 'src' is copied there with the same name; otherwise, it must be a filename. (If the file exists, it will be ruthlessly clobbered.) If 'preserve_mode' @@ -113,8 +102,7 @@ def copy_file( # noqa: C901 if not os.path.isfile(src): raise DistutilsFileError( - "can't copy '%s': doesn't exist or not a regular file" % src - ) + "can't copy '%s': doesn't exist or not a regular file" % src) if os.path.isdir(dst): dir = dst @@ -175,7 +163,9 @@ def copy_file( # noqa: C901 # XXX I suspect this is Unix-specific -- need porting help! -def move_file(src, dst, verbose=1, dry_run=0): # noqa: C901 +def move_file (src, dst, + verbose=1, + dry_run=0): """Move a file 'src' to 'dst'. 
If 'dst' is a directory, the file will be moved into it with the same name; otherwise, 'src' is just renamed @@ -200,13 +190,13 @@ def move_file(src, dst, verbose=1, dry_run=0): # noqa: C901 dst = os.path.join(dst, basename(src)) elif exists(dst): raise DistutilsFileError( - "can't move '{}': destination '{}' already exists".format(src, dst) - ) + "can't move '%s': destination '%s' already exists" % + (src, dst)) if not isdir(dirname(dst)): raise DistutilsFileError( - "can't move '{}': destination '{}' not a valid path".format(src, dst) - ) + "can't move '%s': destination '%s' not a valid path" % + (src, dst)) copy_it = False try: @@ -217,8 +207,7 @@ def move_file(src, dst, verbose=1, dry_run=0): # noqa: C901 copy_it = True else: raise DistutilsFileError( - "couldn't move '{}' to '{}': {}".format(src, dst, msg) - ) + "couldn't move '%s' to '%s': %s" % (src, dst, msg)) if copy_it: copy_file(src, dst, verbose=verbose) @@ -231,13 +220,13 @@ def move_file(src, dst, verbose=1, dry_run=0): # noqa: C901 except OSError: pass raise DistutilsFileError( - "couldn't move '%s' to '%s' by copy/delete: " - "delete '%s' failed: %s" % (src, dst, src, msg) - ) + "couldn't move '%s' to '%s' by copy/delete: " + "delete '%s' failed: %s" + % (src, dst, src, msg)) return dst -def write_file(filename, contents): +def write_file (filename, contents): """Create a file with the specified name and write 'contents' (a sequence of strings without line terminators) to it. """ diff --git a/venv/lib/python3.10/site-packages/setuptools/_distutils/filelist.py b/venv/lib/python3.10/site-packages/setuptools/_distutils/filelist.py index 987931a..82a7738 100644 --- a/venv/lib/python3.10/site-packages/setuptools/_distutils/filelist.py +++ b/venv/lib/python3.10/site-packages/setuptools/_distutils/filelist.py @@ -46,7 +46,6 @@ def debug_print(self, msg): DISTUTILS_DEBUG environment variable) flag is true. """ from distutils.debug import DEBUG - if DEBUG: print(msg) @@ -81,31 +80,29 @@ def _parse_template_line(self, line): patterns = dir = dir_pattern = None - if action in ('include', 'exclude', 'global-include', 'global-exclude'): + if action in ('include', 'exclude', + 'global-include', 'global-exclude'): if len(words) < 2: raise DistutilsTemplateError( - "'%s' expects ..." % action - ) + "'%s' expects ..." % action) patterns = [convert_path(w) for w in words[1:]] elif action in ('recursive-include', 'recursive-exclude'): if len(words) < 3: raise DistutilsTemplateError( - "'%s' expects ..." % action - ) + "'%s' expects ..." % action) dir = convert_path(words[1]) patterns = [convert_path(w) for w in words[2:]] elif action in ('graft', 'prune'): if len(words) != 2: raise DistutilsTemplateError( - "'%s' expects a single " % action - ) + "'%s' expects a single " % action) dir_pattern = convert_path(words[1]) else: raise DistutilsTemplateError("unknown action '%s'" % action) return (action, patterns, dir, dir_pattern) - def process_template_line(self, line): # noqa: C901 + def process_template_line(self, line): # Parse the line: split it up, make sure the right number of words # is there, and return the relevant words. 'action' is always # defined: it's the first word of the line. 
Which of the other @@ -120,82 +117,65 @@ def process_template_line(self, line): # noqa: C901 self.debug_print("include " + ' '.join(patterns)) for pattern in patterns: if not self.include_pattern(pattern, anchor=1): - log.warn("warning: no files found matching '%s'", pattern) + log.warn("warning: no files found matching '%s'", + pattern) elif action == 'exclude': self.debug_print("exclude " + ' '.join(patterns)) for pattern in patterns: if not self.exclude_pattern(pattern, anchor=1): - log.warn( - ( - "warning: no previously-included files " - "found matching '%s'" - ), - pattern, - ) + log.warn(("warning: no previously-included files " + "found matching '%s'"), pattern) elif action == 'global-include': self.debug_print("global-include " + ' '.join(patterns)) for pattern in patterns: if not self.include_pattern(pattern, anchor=0): - log.warn( - ( - "warning: no files found matching '%s' " - "anywhere in distribution" - ), - pattern, - ) + log.warn(("warning: no files found matching '%s' " + "anywhere in distribution"), pattern) elif action == 'global-exclude': self.debug_print("global-exclude " + ' '.join(patterns)) for pattern in patterns: if not self.exclude_pattern(pattern, anchor=0): - log.warn( - ( - "warning: no previously-included files matching " - "'%s' found anywhere in distribution" - ), - pattern, - ) + log.warn(("warning: no previously-included files matching " + "'%s' found anywhere in distribution"), + pattern) elif action == 'recursive-include': - self.debug_print("recursive-include {} {}".format(dir, ' '.join(patterns))) + self.debug_print("recursive-include %s %s" % + (dir, ' '.join(patterns))) for pattern in patterns: if not self.include_pattern(pattern, prefix=dir): msg = ( - "warning: no files found matching '%s' " "under directory '%s'" + "warning: no files found matching '%s' " + "under directory '%s'" ) log.warn(msg, pattern, dir) elif action == 'recursive-exclude': - self.debug_print("recursive-exclude {} {}".format(dir, ' '.join(patterns))) + self.debug_print("recursive-exclude %s %s" % + (dir, ' '.join(patterns))) for pattern in patterns: if not self.exclude_pattern(pattern, prefix=dir): - log.warn( - ( - "warning: no previously-included files matching " - "'%s' found under directory '%s'" - ), - pattern, - dir, - ) + log.warn(("warning: no previously-included files matching " + "'%s' found under directory '%s'"), + pattern, dir) elif action == 'graft': self.debug_print("graft " + dir_pattern) if not self.include_pattern(None, prefix=dir_pattern): - log.warn("warning: no directories found matching '%s'", dir_pattern) + log.warn("warning: no directories found matching '%s'", + dir_pattern) elif action == 'prune': self.debug_print("prune " + dir_pattern) if not self.exclude_pattern(None, prefix=dir_pattern): - log.warn( - ("no previously-included directories found " "matching '%s'"), - dir_pattern, - ) + log.warn(("no previously-included directories found " + "matching '%s'"), dir_pattern) else: raise DistutilsInternalError( - "this cannot happen: invalid action '%s'" % action - ) + "this cannot happen: invalid action '%s'" % action) # Filtering/selection methods @@ -227,7 +207,8 @@ def include_pattern(self, pattern, anchor=1, prefix=None, is_regex=0): # XXX docstring lying about what the special chars are? 
files_found = False pattern_re = translate_pattern(pattern, anchor, prefix, is_regex) - self.debug_print("include_pattern: applying regex r'%s'" % pattern_re.pattern) + self.debug_print("include_pattern: applying regex r'%s'" % + pattern_re.pattern) # delayed loading of allfiles list if self.allfiles is None: @@ -240,7 +221,8 @@ def include_pattern(self, pattern, anchor=1, prefix=None, is_regex=0): files_found = True return files_found - def exclude_pattern(self, pattern, anchor=1, prefix=None, is_regex=0): + def exclude_pattern( + self, pattern, anchor=1, prefix=None, is_regex=0): """Remove strings (presumably filenames) from 'files' that match 'pattern'. Other parameters are the same as for 'include_pattern()', above. @@ -249,8 +231,9 @@ def exclude_pattern(self, pattern, anchor=1, prefix=None, is_regex=0): """ files_found = False pattern_re = translate_pattern(pattern, anchor, prefix, is_regex) - self.debug_print("exclude_pattern: applying regex r'%s'" % pattern_re.pattern) - for i in range(len(self.files) - 1, -1, -1): + self.debug_print("exclude_pattern: applying regex r'%s'" % + pattern_re.pattern) + for i in range(len(self.files)-1, -1, -1): if pattern_re.search(self.files[i]): self.debug_print(" removing " + self.files[i]) del self.files[i] @@ -260,14 +243,15 @@ def exclude_pattern(self, pattern, anchor=1, prefix=None, is_regex=0): # Utility functions - def _find_all_simple(path): """ Find all files under 'path' """ all_unique = _UniqueDirs.filter(os.walk(path, followlinks=True)) results = ( - os.path.join(base, file) for base, dirs, files in all_unique for file in files + os.path.join(base, file) + for base, dirs, files in all_unique + for file in files ) return filter(os.path.isfile, results) @@ -278,7 +262,6 @@ class _UniqueDirs(set): avoiding infinite recursion. Ref https://bugs.python.org/issue44497. """ - def __call__(self, walk_item): """ Given an item from an os.walk result, determine @@ -358,14 +341,15 @@ def translate_pattern(pattern, anchor=1, prefix=None, is_regex=0): if prefix is not None: prefix_re = glob_to_re(prefix) assert prefix_re.startswith(start) and prefix_re.endswith(end) - prefix_re = prefix_re[len(start) : len(prefix_re) - len(end)] + prefix_re = prefix_re[len(start): len(prefix_re) - len(end)] sep = os.sep if os.sep == '\\': sep = r'\\' - pattern_re = pattern_re[len(start) : len(pattern_re) - len(end)] - pattern_re = r'{}\A{}{}.*{}{}'.format(start, prefix_re, sep, pattern_re, end) - else: # no prefix -- respect anchor flag + pattern_re = pattern_re[len(start): len(pattern_re) - len(end)] + pattern_re = r'%s\A%s%s.*%s%s' % ( + start, prefix_re, sep, pattern_re, end) + else: # no prefix -- respect anchor flag if anchor: - pattern_re = r'{}\A{}'.format(start, pattern_re[len(start) :]) + pattern_re = r'%s\A%s' % (start, pattern_re[len(start):]) return re.compile(pattern_re) diff --git a/venv/lib/python3.10/site-packages/setuptools/_distutils/log.py b/venv/lib/python3.10/site-packages/setuptools/_distutils/log.py index be25f6c..8ef6b28 100644 --- a/venv/lib/python3.10/site-packages/setuptools/_distutils/log.py +++ b/venv/lib/python3.10/site-packages/setuptools/_distutils/log.py @@ -3,16 +3,16 @@ # The class here is styled after PEP 282 so that it could later be # replaced with a standard Python logging implementation. 
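The log module diffed next keeps a simple threshold model: a message is printed when its level is at or above the current threshold, and set_verbosity(v) maps v <= 0 to WARN, v == 1 to INFO, and v > 1 to DEBUG. A short sketch, not part of the diff:

from distutils import log

previous = log.set_threshold(log.INFO)  # returns the old threshold
log.info("copying %s", "example.txt")   # printed: INFO >= INFO
log.debug("cache details")              # suppressed: DEBUG < INFO
log.set_threshold(previous)
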
-import sys - DEBUG = 1 INFO = 2 WARN = 3 ERROR = 4 FATAL = 5 +import sys class Log: + def __init__(self, threshold=WARN): self.threshold = threshold @@ -54,7 +54,6 @@ def error(self, msg, *args): def fatal(self, msg, *args): self._log(FATAL, msg, args) - _global_log = Log() log = _global_log.log debug = _global_log.debug @@ -63,14 +62,12 @@ def fatal(self, msg, *args): error = _global_log.error fatal = _global_log.fatal - def set_threshold(level): # return the old threshold for use from tests old = _global_log.threshold _global_log.threshold = level return old - def set_verbosity(v): if v <= 0: set_threshold(WARN) diff --git a/venv/lib/python3.10/site-packages/setuptools/_distutils/msvc9compiler.py b/venv/lib/python3.10/site-packages/setuptools/_distutils/msvc9compiler.py index 2202183..a1b3b02 100644 --- a/venv/lib/python3.10/site-packages/setuptools/_distutils/msvc9compiler.py +++ b/venv/lib/python3.10/site-packages/setuptools/_distutils/msvc9compiler.py @@ -16,41 +16,26 @@ import subprocess import sys import re -import warnings - -from distutils.errors import ( - DistutilsExecError, - DistutilsPlatformError, - CompileError, - LibError, - LinkError, -) + +from distutils.errors import DistutilsExecError, DistutilsPlatformError, \ + CompileError, LibError, LinkError from distutils.ccompiler import CCompiler, gen_lib_options from distutils import log from distutils.util import get_platform import winreg -warnings.warn( - "msvc9compiler is deprecated and slated to be removed " - "in the future. Please discontinue use or file an issue " - "with pypa/distutils describing your use case.", - DeprecationWarning, -) - RegOpenKeyEx = winreg.OpenKeyEx RegEnumKey = winreg.EnumKey RegEnumValue = winreg.EnumValue RegError = winreg.error -HKEYS = ( - winreg.HKEY_USERS, - winreg.HKEY_CURRENT_USER, - winreg.HKEY_LOCAL_MACHINE, - winreg.HKEY_CLASSES_ROOT, -) +HKEYS = (winreg.HKEY_USERS, + winreg.HKEY_CURRENT_USER, + winreg.HKEY_LOCAL_MACHINE, + winreg.HKEY_CLASSES_ROOT) -NATIVE_WIN64 = sys.platform == 'win32' and sys.maxsize > 2**32 +NATIVE_WIN64 = (sys.platform == 'win32' and sys.maxsize > 2**32) if NATIVE_WIN64: # Visual C++ is a 32-bit application, so we need to look in # the corresponding registry branch, if we're running a @@ -67,13 +52,13 @@ # 'vcvarsall.bat'. Note a cross-compile may combine these (eg, 'x86_amd64' is # the param to cross-compile on x86 targeting amd64.) 
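The log.py changes above are purely cosmetic (the import moves below the level constants), so the tiny PEP 282-style API keeps its contract: messages below the current threshold are swallowed. A quick usage sketch:

```python
from distutils import log

old = log.set_threshold(log.DEBUG)   # returns the previous threshold (WARN by default)
log.debug("visible at DEBUG: %s", "details")

log.set_verbosity(0)                 # 0 -> WARN, 1 -> INFO, 2+ -> DEBUG
log.debug("suppressed again")

log.set_threshold(old)               # restore
```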
PLAT_TO_VCVARS = { - 'win32': 'x86', - 'win-amd64': 'amd64', + 'win32' : 'x86', + 'win-amd64' : 'amd64', } - class Reg: - """Helper class to read values from the registry""" + """Helper class to read values from the registry + """ def get_value(cls, path, key): for base in HKEYS: @@ -81,7 +66,6 @@ def get_value(cls, path, key): if d and key in d: return d[key] raise KeyError(key) - get_value = classmethod(get_value) def read_keys(cls, base, key): @@ -100,7 +84,6 @@ def read_keys(cls, base, key): L.append(k) i += 1 return L - read_keys = classmethod(read_keys) def read_values(cls, base, key): @@ -123,7 +106,6 @@ def read_values(cls, base, key): d[cls.convert_mbcs(name)] = cls.convert_mbcs(value) i += 1 return d - read_values = classmethod(read_values) def convert_mbcs(s): @@ -134,11 +116,10 @@ def convert_mbcs(s): except UnicodeError: pass return s - convert_mbcs = staticmethod(convert_mbcs) - class MacroExpander: + def __init__(self, version): self.macros = {} self.vsbase = VS_BASE % version @@ -153,16 +134,16 @@ def load_macros(self, version): self.set_macro("FrameworkDir", NET_BASE, "installroot") try: if version >= 8.0: - self.set_macro("FrameworkSDKDir", NET_BASE, "sdkinstallrootv2.0") + self.set_macro("FrameworkSDKDir", NET_BASE, + "sdkinstallrootv2.0") else: raise KeyError("sdkinstallrootv2.0") except KeyError: raise DistutilsPlatformError( - """Python was built with Visual Studio 2008; + """Python was built with Visual Studio 2008; extensions must be built with a compiler than can generate compatible binaries. Visual Studio 2008 was not found on this system. If you have Cygwin installed, -you can try compiling with MingW32, by passing "-c mingw32" to setup.py.""" - ) +you can try compiling with MingW32, by passing "-c mingw32" to setup.py.""") if version >= 9.0: self.set_macro("FrameworkVersion", self.vsbase, "clr version") @@ -175,7 +156,7 @@ def load_macros(self, version): except RegError: continue key = RegEnumKey(h, 0) - d = Reg.get_value(base, r"{}\{}".format(p, key)) + d = Reg.get_value(base, r"%s\%s" % (p, key)) self.macros["$(FrameworkVersion)"] = d["version"] def sub(self, s): @@ -183,7 +164,6 @@ def sub(self, s): s = s.replace(k, v) return s - def get_build_version(): """Return the version of MSVC that was used to build Python. @@ -209,7 +189,6 @@ def get_build_version(): # else we don't know what version of the compiler this is return None - def normalize_and_reduce_paths(paths): """Return a list of normalized paths with duplicates removed. @@ -224,9 +203,9 @@ def normalize_and_reduce_paths(paths): reduced_paths.append(np) return reduced_paths - def removeDuplicates(variable): - """Remove duplicate values of an environment variable.""" + """Remove duplicate values of an environment variable. 
+ """ oldList = variable.split(os.pathsep) newList = [] for i in oldList: @@ -235,7 +214,6 @@ def removeDuplicates(variable): newVariable = os.pathsep.join(newList) return newVariable - def find_vcvarsall(version): """Find the vcvarsall.bat file @@ -244,7 +222,8 @@ def find_vcvarsall(version): """ vsbase = VS_BASE % version try: - productdir = Reg.get_value(r"%s\Setup\VC" % vsbase, "productdir") + productdir = Reg.get_value(r"%s\Setup\VC" % vsbase, + "productdir") except KeyError: log.debug("Unable to find productdir in registry") productdir = None @@ -270,9 +249,9 @@ def find_vcvarsall(version): log.debug("Unable to find vcvarsall.bat") return None - def query_vcvarsall(version, arch="x86"): - """Launch vcvarsall.bat and read the settings from its environment""" + """Launch vcvarsall.bat and read the settings from its environment + """ vcvarsall = find_vcvarsall(version) interesting = {"include", "lib", "libpath", "path"} result = {} @@ -280,11 +259,9 @@ def query_vcvarsall(version, arch="x86"): if vcvarsall is None: raise DistutilsPlatformError("Unable to find vcvarsall.bat") log.debug("Calling 'vcvarsall.bat %s' (version=%s)", arch, version) - popen = subprocess.Popen( - '"{}" {} & set'.format(vcvarsall, arch), - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - ) + popen = subprocess.Popen('"%s" %s & set' % (vcvarsall, arch), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) try: stdout, stderr = popen.communicate() if popen.wait() != 0: @@ -312,15 +289,15 @@ def query_vcvarsall(version, arch="x86"): return result - # More globals VERSION = get_build_version() +if VERSION < 8.0: + raise DistutilsPlatformError("VC %0.1f is not supported by this module" % VERSION) # MACROS = MacroExpander(VERSION) - -class MSVCCompiler(CCompiler): +class MSVCCompiler(CCompiler) : """Concrete class that implements an interface to Microsoft Visual C++, - as defined by the CCompiler abstract class.""" + as defined by the CCompiler abstract class.""" compiler_type = 'msvc' @@ -339,7 +316,8 @@ class MSVCCompiler(CCompiler): # Needed for the filename generation methods provided by the # base class, CCompiler. - src_extensions = _c_extensions + _cpp_extensions + _rc_extensions + _mc_extensions + src_extensions = (_c_extensions + _cpp_extensions + + _rc_extensions + _mc_extensions) res_extension = '.res' obj_extension = '.obj' static_lib_extension = '.lib' @@ -348,37 +326,28 @@ class MSVCCompiler(CCompiler): exe_extension = '.exe' def __init__(self, verbose=0, dry_run=0, force=0): - super().__init__(verbose, dry_run, force) + CCompiler.__init__ (self, verbose, dry_run, force) self.__version = VERSION self.__root = r"Software\Microsoft\VisualStudio" # self.__macros = MACROS self.__paths = [] # target platform (.plat_name is consistent with 'bdist') self.plat_name = None - self.__arch = None # deprecated name + self.__arch = None # deprecated name self.initialized = False - def initialize(self, plat_name=None): # noqa: C901 + def initialize(self, plat_name=None): # multi-init means we would need to check platform same each time... assert not self.initialized, "don't init multiple times" - if self.__version < 8.0: - raise DistutilsPlatformError( - "VC %0.1f is not supported by this module" % self.__version - ) if plat_name is None: plat_name = get_platform() # sanity check for platforms to prevent obscure errors later. 
ok_plats = 'win32', 'win-amd64' if plat_name not in ok_plats: - raise DistutilsPlatformError( - "--plat-name must be one of {}".format(ok_plats) - ) - - if ( - "DISTUTILS_USE_SDK" in os.environ - and "MSSdk" in os.environ - and self.find_exe("cl.exe") - ): + raise DistutilsPlatformError("--plat-name must be one of %s" % + (ok_plats,)) + + if "DISTUTILS_USE_SDK" in os.environ and "MSSdk" in os.environ and self.find_exe("cl.exe"): # Assume that the SDK set up everything alright; don't try to be # smarter self.cc = "cl.exe" @@ -396,9 +365,8 @@ def initialize(self, plat_name=None): # noqa: C901 plat_spec = PLAT_TO_VCVARS[plat_name] else: # cross compile from win32 -> some 64bit - plat_spec = ( - PLAT_TO_VCVARS[get_platform()] + '_' + PLAT_TO_VCVARS[plat_name] - ) + plat_spec = PLAT_TO_VCVARS[get_platform()] + '_' + \ + PLAT_TO_VCVARS[plat_name] vc_env = query_vcvarsall(VERSION, plat_spec) @@ -407,19 +375,18 @@ def initialize(self, plat_name=None): # noqa: C901 os.environ['include'] = vc_env['include'] if len(self.__paths) == 0: - raise DistutilsPlatformError( - "Python was built with %s, " - "and extensions need to be built with the same " - "version of the compiler, but it isn't installed." % self.__product - ) + raise DistutilsPlatformError("Python was built with %s, " + "and extensions need to be built with the same " + "version of the compiler, but it isn't installed." + % self.__product) self.cc = self.find_exe("cl.exe") self.linker = self.find_exe("link.exe") self.lib = self.find_exe("lib.exe") - self.rc = self.find_exe("rc.exe") # resource compiler - self.mc = self.find_exe("mc.exe") # message compiler - # self.set_path_env_var('lib') - # self.set_path_env_var('include') + self.rc = self.find_exe("rc.exe") # resource compiler + self.mc = self.find_exe("mc.exe") # message compiler + #self.set_path_env_var('lib') + #self.set_path_env_var('include') # extend the MSVC path with the current path try: @@ -432,83 +399,71 @@ def initialize(self, plat_name=None): # noqa: C901 self.preprocess_options = None if self.__arch == "x86": - self.compile_options = ['/nologo', '/O2', '/MD', '/W3', '/DNDEBUG'] - self.compile_options_debug = [ - '/nologo', - '/Od', - '/MDd', - '/W3', - '/Z7', - '/D_DEBUG', - ] + self.compile_options = [ '/nologo', '/O2', '/MD', '/W3', + '/DNDEBUG'] + self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', + '/Z7', '/D_DEBUG'] else: # Win64 - self.compile_options = ['/nologo', '/O2', '/MD', '/W3', '/GS-', '/DNDEBUG'] - self.compile_options_debug = [ - '/nologo', - '/Od', - '/MDd', - '/W3', - '/GS-', - '/Z7', - '/D_DEBUG', - ] + self.compile_options = [ '/nologo', '/O2', '/MD', '/W3', '/GS-' , + '/DNDEBUG'] + self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/GS-', + '/Z7', '/D_DEBUG'] self.ldflags_shared = ['/DLL', '/nologo', '/INCREMENTAL:NO'] if self.__version >= 7: - self.ldflags_shared_debug = ['/DLL', '/nologo', '/INCREMENTAL:no', '/DEBUG'] - self.ldflags_static = ['/nologo'] + self.ldflags_shared_debug = [ + '/DLL', '/nologo', '/INCREMENTAL:no', '/DEBUG' + ] + self.ldflags_static = [ '/nologo'] self.initialized = True # -- Worker methods ------------------------------------------------ - def object_filenames(self, source_filenames, strip_dir=0, output_dir=''): + def object_filenames(self, + source_filenames, + strip_dir=0, + output_dir=''): # Copied from ccompiler.py, extended to return .res as 'object'-file # for .rc input file - if output_dir is None: - output_dir = '' + if output_dir is None: output_dir = '' obj_names = [] for src_name in 
source_filenames: - (base, ext) = os.path.splitext(src_name) - base = os.path.splitdrive(base)[1] # Chop off the drive - base = base[os.path.isabs(base) :] # If abs, chop off leading / + (base, ext) = os.path.splitext (src_name) + base = os.path.splitdrive(base)[1] # Chop off the drive + base = base[os.path.isabs(base):] # If abs, chop off leading / if ext not in self.src_extensions: # Better to raise an exception instead of silently continuing # and later complain about sources and targets having # different lengths - raise CompileError("Don't know how to compile %s" % src_name) + raise CompileError ("Don't know how to compile %s" % src_name) if strip_dir: - base = os.path.basename(base) + base = os.path.basename (base) if ext in self._rc_extensions: - obj_names.append(os.path.join(output_dir, base + self.res_extension)) + obj_names.append (os.path.join (output_dir, + base + self.res_extension)) elif ext in self._mc_extensions: - obj_names.append(os.path.join(output_dir, base + self.res_extension)) + obj_names.append (os.path.join (output_dir, + base + self.res_extension)) else: - obj_names.append(os.path.join(output_dir, base + self.obj_extension)) + obj_names.append (os.path.join (output_dir, + base + self.obj_extension)) return obj_names - def compile( # noqa: C901 - self, - sources, - output_dir=None, - macros=None, - include_dirs=None, - debug=0, - extra_preargs=None, - extra_postargs=None, - depends=None, - ): + + def compile(self, sources, + output_dir=None, macros=None, include_dirs=None, debug=0, + extra_preargs=None, extra_postargs=None, depends=None): if not self.initialized: self.initialize() - compile_info = self._setup_compile( - output_dir, macros, include_dirs, sources, depends, extra_postargs - ) + compile_info = self._setup_compile(output_dir, macros, include_dirs, + sources, depends, extra_postargs) macros, objects, extra_postargs, pp_opts, build = compile_info compile_opts = extra_preargs or [] - compile_opts.append('/c') + compile_opts.append ('/c') if debug: compile_opts.extend(self.compile_options_debug) else: @@ -534,7 +489,8 @@ def compile( # noqa: C901 input_opt = src output_opt = "/fo" + obj try: - self.spawn([self.rc] + pp_opts + [output_opt] + [input_opt]) + self.spawn([self.rc] + pp_opts + + [output_opt] + [input_opt]) except DistutilsExecError as msg: raise CompileError(msg) continue @@ -554,48 +510,50 @@ def compile( # noqa: C901 rc_dir = os.path.dirname(obj) try: # first compile .MC to .RC and .H file - self.spawn([self.mc] + ['-h', h_dir, '-r', rc_dir] + [src]) - base, _ = os.path.splitext(os.path.basename(src)) - rc_file = os.path.join(rc_dir, base + '.rc') + self.spawn([self.mc] + + ['-h', h_dir, '-r', rc_dir] + [src]) + base, _ = os.path.splitext (os.path.basename (src)) + rc_file = os.path.join (rc_dir, base + '.rc') # then compile .RC to .RES file - self.spawn([self.rc] + ["/fo" + obj] + [rc_file]) + self.spawn([self.rc] + + ["/fo" + obj] + [rc_file]) except DistutilsExecError as msg: raise CompileError(msg) continue else: # how to handle this file? 
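Pausing on object_filenames() above (re-spaced, functionally identical in both columns): each source file maps to its build artifact, with .rc/.mc inputs becoming .res and everything else .obj. A standalone sketch of that mapping, with object_name as an invented helper:

```python
import os

def object_name(src, output_dir='', obj_ext='.obj', res_ext='.res'):
    base, ext = os.path.splitext(src)
    base = os.path.splitdrive(base)[1]     # chop off the drive
    base = base[os.path.isabs(base):]      # if absolute, chop the leading separator
    out_ext = res_ext if ext in ('.rc', '.mc') else obj_ext
    return os.path.join(output_dir, base + out_ext)

print(object_name('src/app.c'))   # src/app.obj
print(object_name('res/app.rc'))  # res/app.res
```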
- raise CompileError( - "Don't know how to compile {} to {}".format(src, obj) - ) + raise CompileError("Don't know how to compile %s to %s" + % (src, obj)) output_opt = "/Fo" + obj try: - self.spawn( - [self.cc] - + compile_opts - + pp_opts - + [input_opt, output_opt] - + extra_postargs - ) + self.spawn([self.cc] + compile_opts + pp_opts + + [input_opt, output_opt] + + extra_postargs) except DistutilsExecError as msg: raise CompileError(msg) return objects - def create_static_lib( - self, objects, output_libname, output_dir=None, debug=0, target_lang=None - ): + + def create_static_lib(self, + objects, + output_libname, + output_dir=None, + debug=0, + target_lang=None): if not self.initialized: self.initialize() (objects, output_dir) = self._fix_object_args(objects, output_dir) - output_filename = self.library_filename(output_libname, output_dir=output_dir) + output_filename = self.library_filename(output_libname, + output_dir=output_dir) if self._need_link(objects, output_filename): lib_args = objects + ['/OUT:' + output_filename] if debug: - pass # XXX what goes here? + pass # XXX what goes here? try: self.spawn([self.lib] + lib_args) except DistutilsExecError as msg: @@ -603,36 +561,36 @@ def create_static_lib( else: log.debug("skipping %s (up-to-date)", output_filename) - def link( # noqa: C901 - self, - target_desc, - objects, - output_filename, - output_dir=None, - libraries=None, - library_dirs=None, - runtime_library_dirs=None, - export_symbols=None, - debug=0, - extra_preargs=None, - extra_postargs=None, - build_temp=None, - target_lang=None, - ): + + def link(self, + target_desc, + objects, + output_filename, + output_dir=None, + libraries=None, + library_dirs=None, + runtime_library_dirs=None, + export_symbols=None, + debug=0, + extra_preargs=None, + extra_postargs=None, + build_temp=None, + target_lang=None): if not self.initialized: self.initialize() (objects, output_dir) = self._fix_object_args(objects, output_dir) - fixed_args = self._fix_lib_args(libraries, library_dirs, runtime_library_dirs) + fixed_args = self._fix_lib_args(libraries, library_dirs, + runtime_library_dirs) (libraries, library_dirs, runtime_library_dirs) = fixed_args if runtime_library_dirs: - self.warn( - "I don't know what to do with 'runtime_library_dirs': " - + str(runtime_library_dirs) - ) + self.warn ("I don't know what to do with 'runtime_library_dirs': " + + str (runtime_library_dirs)) - lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs, libraries) + lib_opts = gen_lib_options(self, + library_dirs, runtime_library_dirs, + libraries) if output_dir is not None: output_filename = os.path.join(output_dir, output_filename) @@ -649,12 +607,11 @@ def link( # noqa: C901 ldflags = self.ldflags_shared export_opts = [] - for sym in export_symbols or []: + for sym in (export_symbols or []): export_opts.append("/EXPORT:" + sym) - ld_args = ( - ldflags + lib_opts + export_opts + objects + ['/OUT:' + output_filename] - ) + ld_args = (ldflags + lib_opts + export_opts + + objects + ['/OUT:' + output_filename]) # The MSVC linker generates .lib and .exp files, which cannot be # suppressed by any linker switches. 
The .lib files may even be @@ -664,10 +621,11 @@ def link( # noqa: C901 build_temp = os.path.dirname(objects[0]) if export_symbols is not None: (dll_name, dll_ext) = os.path.splitext( - os.path.basename(output_filename) - ) - implib_file = os.path.join(build_temp, self.library_filename(dll_name)) - ld_args.append('/IMPLIB:' + implib_file) + os.path.basename(output_filename)) + implib_file = os.path.join( + build_temp, + self.library_filename(dll_name)) + ld_args.append ('/IMPLIB:' + implib_file) self.manifest_setup_ldargs(output_filename, build_temp, ld_args) @@ -690,9 +648,10 @@ def link( # noqa: C901 mfinfo = self.manifest_get_embed_info(target_desc, ld_args) if mfinfo is not None: mffilename, mfid = mfinfo - out_arg = '-outputresource:{};{}'.format(output_filename, mfid) + out_arg = '-outputresource:%s;%s' % (output_filename, mfid) try: - self.spawn(['mt.exe', '-nologo', '-manifest', mffilename, out_arg]) + self.spawn(['mt.exe', '-nologo', '-manifest', + mffilename, out_arg]) except DistutilsExecError as msg: raise LinkError(msg) else: @@ -706,8 +665,8 @@ def manifest_setup_ldargs(self, output_filename, build_temp, ld_args): # Ask the linker to generate the manifest in the temp dir, so # we can check it, and possibly embed it, later. temp_manifest = os.path.join( - build_temp, os.path.basename(output_filename) + ".manifest" - ) + build_temp, + os.path.basename(output_filename) + ".manifest") ld_args.append('/MANIFESTFILE:' + temp_manifest) def manifest_get_embed_info(self, target_desc, ld_args): @@ -750,10 +709,9 @@ def _remove_visual_c_ref(self, manifest_file): finally: manifest_f.close() pattern = re.compile( r"""<assemblyIdentity.*?name=("|')Microsoft\.""" r"""VC\d{2}\.CRT("|').*?(/>|</assemblyIdentity>)""", - re.DOTALL, - ) + re.DOTALL) manifest_buf = re.sub(pattern, "", manifest_buf) pattern = r"<dependentAssembly>\s*</dependentAssembly>" manifest_buf = re.sub(pattern, "", manifest_buf) @@ -761,9 +719,7 @@ def _remove_visual_c_ref(self, manifest_file): # don't want a manifest embedded. pattern = re.compile( r"""<assemblyIdentity.*?name=(?:"|')(.+?)(?:"|')""" - r""".*?(?:/>|</assemblyIdentity>)""", - re.DOTALL, - ) + r""".*?(?:/>|</assemblyIdentity>)""", re.DOTALL) if re.search(pattern, manifest_buf) is None: return None @@ -785,12 +741,12 @@ def library_dir_option(self, dir): def runtime_library_dir_option(self, dir): raise DistutilsPlatformError( - "don't know how to set runtime library search path for MSVC++" - ) + "don't know how to set runtime library search path for MSVC++") def library_option(self, lib): return self.library_filename(lib) + def find_library_file(self, dirs, lib, debug=0): # Prefer a debugging library if found (and requested), but deal # with it if we don't have one.
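find_library_file(), whose body continues below, prefers a debug import library when debug=1 and falls back to the release name; this excerpt only shows the release branch (try_names = [lib]), but the surrounding source builds the candidate list roughly like this sketch:

```python
def library_search_names(lib, debug=0):
    # Debug builds try 'foo_d.lib' first, then fall back to 'foo.lib'.
    return [lib + "_d", lib] if debug else [lib]

print(library_search_names("python310", debug=1))  # ['python310_d', 'python310']
```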
@@ -800,7 +756,7 @@ def find_library_file(self, dirs, lib, debug=0): try_names = [lib] for dir in dirs: for name in try_names: - libfile = os.path.join(dir, self.library_filename(name)) + libfile = os.path.join(dir, self.library_filename (name)) if os.path.exists(libfile): return libfile else: @@ -825,7 +781,7 @@ def find_exe(self, exe): # didn't find it; try existing path for p in os.environ['Path'].split(';'): - fn = os.path.join(os.path.abspath(p), exe) + fn = os.path.join(os.path.abspath(p),exe) if os.path.isfile(fn): return fn diff --git a/venv/lib/python3.10/site-packages/setuptools/_distutils/msvccompiler.py b/venv/lib/python3.10/site-packages/setuptools/_distutils/msvccompiler.py index 1069e99..2d447b8 100644 --- a/venv/lib/python3.10/site-packages/setuptools/_distutils/msvccompiler.py +++ b/venv/lib/python3.10/site-packages/setuptools/_distutils/msvccompiler.py @@ -8,17 +8,12 @@ # hacked by Robin Becker and Thomas Heller to do a better job of # finding DevStudio (through the registry) -import sys -import os -import warnings -from distutils.errors import ( - DistutilsExecError, - DistutilsPlatformError, - CompileError, - LibError, - LinkError, -) -from distutils.ccompiler import CCompiler, gen_lib_options +import sys, os +from distutils.errors import \ + DistutilsExecError, DistutilsPlatformError, \ + CompileError, LibError, LinkError +from distutils.ccompiler import \ + CCompiler, gen_lib_options from distutils import log _can_read_reg = False @@ -37,7 +32,6 @@ try: import win32api import win32con - _can_read_reg = True hkey_mod = win32con @@ -46,30 +40,17 @@ RegEnumValue = win32api.RegEnumValue RegError = win32api.error except ImportError: - log.info( - "Warning: Can't read registry to find the " - "necessary compiler setting\n" - "Make sure that Python modules winreg, " - "win32api or win32con are installed." - ) + log.info("Warning: Can't read registry to find the " + "necessary compiler setting\n" + "Make sure that Python modules winreg, " + "win32api or win32con are installed.") pass if _can_read_reg: - HKEYS = ( - hkey_mod.HKEY_USERS, - hkey_mod.HKEY_CURRENT_USER, - hkey_mod.HKEY_LOCAL_MACHINE, - hkey_mod.HKEY_CLASSES_ROOT, - ) - - -warnings.warn( - "msvccompiler is deprecated and slated to be removed " - "in the future. Please discontinue use or file an issue " - "with pypa/distutils describing your use case.", - DeprecationWarning, -) - + HKEYS = (hkey_mod.HKEY_USERS, + hkey_mod.HKEY_CURRENT_USER, + hkey_mod.HKEY_LOCAL_MACHINE, + hkey_mod.HKEY_CLASSES_ROOT) def read_keys(base, key): """Return list of registry keys.""" @@ -88,7 +69,6 @@ def read_keys(base, key): i += 1 return L - def read_values(base, key): """Return dict of registry keys and values. @@ -110,7 +90,6 @@ def read_values(base, key): i += 1 return d - def convert_mbcs(s): dec = getattr(s, "decode", None) if dec is not None: @@ -120,7 +99,6 @@ def convert_mbcs(s): pass return s - class MacroExpander: def __init__(self, version): self.macros = {} @@ -144,13 +122,12 @@ def load_macros(self, version): self.set_macro("FrameworkSDKDir", net, "sdkinstallrootv1.1") else: self.set_macro("FrameworkSDKDir", net, "sdkinstallroot") - except KeyError: + except KeyError as exc: # raise DistutilsPlatformError( - """Python was built with Visual Studio 2003; + """Python was built with Visual Studio 2003; extensions must be built with a compiler than can generate compatible binaries. Visual Studio 2003 was not found on this system. 
If you have Cygwin installed, -you can try compiling with MingW32, by passing "-c mingw32" to setup.py.""" - ) +you can try compiling with MingW32, by passing "-c mingw32" to setup.py.""") p = r"Software\Microsoft\NET Framework Setup\Product" for base in HKEYS: @@ -159,7 +136,7 @@ def load_macros(self, version): except RegError: continue key = RegEnumKey(h, 0) - d = read_values(base, r"{}\{}".format(p, key)) + d = read_values(base, r"%s\%s" % (p, key)) self.macros["$(FrameworkVersion)"] = d["version"] def sub(self, s): @@ -167,7 +144,6 @@ def sub(self, s): s = s.replace(k, v) return s - def get_build_version(): """Return the version of MSVC that was used to build Python. @@ -193,7 +169,6 @@ def get_build_version(): # else we don't know what version of the compiler this is return None - def get_build_architecture(): """Return the processor architecture. @@ -205,8 +180,7 @@ def get_build_architecture(): if i == -1: return "Intel" j = sys.version.find(")", i) - return sys.version[i + len(prefix) : j] - + return sys.version[i+len(prefix):j] def normalize_and_reduce_paths(paths): """Return a list of normalized paths with duplicates removed. @@ -223,9 +197,9 @@ def normalize_and_reduce_paths(paths): return reduced_paths -class MSVCCompiler(CCompiler): +class MSVCCompiler(CCompiler) : """Concrete class that implements an interface to Microsoft Visual C++, - as defined by the CCompiler abstract class.""" + as defined by the CCompiler abstract class.""" compiler_type = 'msvc' @@ -244,7 +218,8 @@ class MSVCCompiler(CCompiler): # Needed for the filename generation methods provided by the # base class, CCompiler. - src_extensions = _c_extensions + _cpp_extensions + _rc_extensions + _mc_extensions + src_extensions = (_c_extensions + _cpp_extensions + + _rc_extensions + _mc_extensions) res_extension = '.res' obj_extension = '.obj' static_lib_extension = '.lib' @@ -253,7 +228,7 @@ class MSVCCompiler(CCompiler): exe_extension = '.exe' def __init__(self, verbose=0, dry_run=0, force=0): - super().__init__(verbose, dry_run, force) + CCompiler.__init__ (self, verbose, dry_run, force) self.__version = get_build_version() self.__arch = get_build_architecture() if self.__arch == "Intel": @@ -272,11 +247,7 @@ def __init__(self, verbose=0, dry_run=0, force=0): def initialize(self): self.__paths = [] - if ( - "DISTUTILS_USE_SDK" in os.environ - and "MSSdk" in os.environ - and self.find_exe("cl.exe") - ): + if "DISTUTILS_USE_SDK" in os.environ and "MSSdk" in os.environ and self.find_exe("cl.exe"): # Assume that the SDK set up everything alright; don't try to be # smarter self.cc = "cl.exe" @@ -288,17 +259,16 @@ def initialize(self): self.__paths = self.get_msvc_paths("path") if len(self.__paths) == 0: - raise DistutilsPlatformError( - "Python was built with %s, " - "and extensions need to be built with the same " - "version of the compiler, but it isn't installed." % self.__product - ) + raise DistutilsPlatformError("Python was built with %s, " + "and extensions need to be built with the same " + "version of the compiler, but it isn't installed." 
+ % self.__product) self.cc = self.find_exe("cl.exe") self.linker = self.find_exe("link.exe") self.lib = self.find_exe("lib.exe") - self.rc = self.find_exe("rc.exe") # resource compiler - self.mc = self.find_exe("mc.exe") # message compiler + self.rc = self.find_exe("rc.exe") # resource compiler + self.mc = self.find_exe("mc.exe") # message compiler self.set_path_env_var('lib') self.set_path_env_var('include') @@ -313,92 +283,75 @@ def initialize(self): self.preprocess_options = None if self.__arch == "Intel": - self.compile_options = ['/nologo', '/O2', '/MD', '/W3', '/GX', '/DNDEBUG'] - self.compile_options_debug = [ - '/nologo', - '/Od', - '/MDd', - '/W3', - '/GX', - '/Z7', - '/D_DEBUG', - ] + self.compile_options = [ '/nologo', '/O2', '/MD', '/W3', '/GX' , + '/DNDEBUG'] + self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/GX', + '/Z7', '/D_DEBUG'] else: # Win64 - self.compile_options = ['/nologo', '/O2', '/MD', '/W3', '/GS-', '/DNDEBUG'] - self.compile_options_debug = [ - '/nologo', - '/Od', - '/MDd', - '/W3', - '/GS-', - '/Z7', - '/D_DEBUG', - ] + self.compile_options = [ '/nologo', '/O2', '/MD', '/W3', '/GS-' , + '/DNDEBUG'] + self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/GS-', + '/Z7', '/D_DEBUG'] self.ldflags_shared = ['/DLL', '/nologo', '/INCREMENTAL:NO'] if self.__version >= 7: - self.ldflags_shared_debug = ['/DLL', '/nologo', '/INCREMENTAL:no', '/DEBUG'] + self.ldflags_shared_debug = [ + '/DLL', '/nologo', '/INCREMENTAL:no', '/DEBUG' + ] else: self.ldflags_shared_debug = [ - '/DLL', - '/nologo', - '/INCREMENTAL:no', - '/pdb:None', - '/DEBUG', - ] - self.ldflags_static = ['/nologo'] + '/DLL', '/nologo', '/INCREMENTAL:no', '/pdb:None', '/DEBUG' + ] + self.ldflags_static = [ '/nologo'] self.initialized = True # -- Worker methods ------------------------------------------------ - def object_filenames(self, source_filenames, strip_dir=0, output_dir=''): + def object_filenames(self, + source_filenames, + strip_dir=0, + output_dir=''): # Copied from ccompiler.py, extended to return .res as 'object'-file # for .rc input file - if output_dir is None: - output_dir = '' + if output_dir is None: output_dir = '' obj_names = [] for src_name in source_filenames: - (base, ext) = os.path.splitext(src_name) - base = os.path.splitdrive(base)[1] # Chop off the drive - base = base[os.path.isabs(base) :] # If abs, chop off leading / + (base, ext) = os.path.splitext (src_name) + base = os.path.splitdrive(base)[1] # Chop off the drive + base = base[os.path.isabs(base):] # If abs, chop off leading / if ext not in self.src_extensions: # Better to raise an exception instead of silently continuing # and later complain about sources and targets having # different lengths - raise CompileError("Don't know how to compile %s" % src_name) + raise CompileError ("Don't know how to compile %s" % src_name) if strip_dir: - base = os.path.basename(base) + base = os.path.basename (base) if ext in self._rc_extensions: - obj_names.append(os.path.join(output_dir, base + self.res_extension)) + obj_names.append (os.path.join (output_dir, + base + self.res_extension)) elif ext in self._mc_extensions: - obj_names.append(os.path.join(output_dir, base + self.res_extension)) + obj_names.append (os.path.join (output_dir, + base + self.res_extension)) else: - obj_names.append(os.path.join(output_dir, base + self.obj_extension)) + obj_names.append (os.path.join (output_dir, + base + self.obj_extension)) return obj_names - def compile( # noqa: C901 - self, - sources, - output_dir=None, - macros=None, 
- include_dirs=None, - debug=0, - extra_preargs=None, - extra_postargs=None, - depends=None, - ): + + def compile(self, sources, + output_dir=None, macros=None, include_dirs=None, debug=0, + extra_preargs=None, extra_postargs=None, depends=None): if not self.initialized: self.initialize() - compile_info = self._setup_compile( - output_dir, macros, include_dirs, sources, depends, extra_postargs - ) + compile_info = self._setup_compile(output_dir, macros, include_dirs, + sources, depends, extra_postargs) macros, objects, extra_postargs, pp_opts, build = compile_info compile_opts = extra_preargs or [] - compile_opts.append('/c') + compile_opts.append ('/c') if debug: compile_opts.extend(self.compile_options_debug) else: @@ -424,7 +377,8 @@ def compile( # noqa: C901 input_opt = src output_opt = "/fo" + obj try: - self.spawn([self.rc] + pp_opts + [output_opt] + [input_opt]) + self.spawn([self.rc] + pp_opts + + [output_opt] + [input_opt]) except DistutilsExecError as msg: raise CompileError(msg) continue @@ -444,48 +398,50 @@ def compile( # noqa: C901 rc_dir = os.path.dirname(obj) try: # first compile .MC to .RC and .H file - self.spawn([self.mc] + ['-h', h_dir, '-r', rc_dir] + [src]) - base, _ = os.path.splitext(os.path.basename(src)) - rc_file = os.path.join(rc_dir, base + '.rc') + self.spawn([self.mc] + + ['-h', h_dir, '-r', rc_dir] + [src]) + base, _ = os.path.splitext (os.path.basename (src)) + rc_file = os.path.join (rc_dir, base + '.rc') # then compile .RC to .RES file - self.spawn([self.rc] + ["/fo" + obj] + [rc_file]) + self.spawn([self.rc] + + ["/fo" + obj] + [rc_file]) except DistutilsExecError as msg: raise CompileError(msg) continue else: # how to handle this file? - raise CompileError( - "Don't know how to compile {} to {}".format(src, obj) - ) + raise CompileError("Don't know how to compile %s to %s" + % (src, obj)) output_opt = "/Fo" + obj try: - self.spawn( - [self.cc] - + compile_opts - + pp_opts - + [input_opt, output_opt] - + extra_postargs - ) + self.spawn([self.cc] + compile_opts + pp_opts + + [input_opt, output_opt] + + extra_postargs) except DistutilsExecError as msg: raise CompileError(msg) return objects - def create_static_lib( - self, objects, output_libname, output_dir=None, debug=0, target_lang=None - ): + + def create_static_lib(self, + objects, + output_libname, + output_dir=None, + debug=0, + target_lang=None): if not self.initialized: self.initialize() (objects, output_dir) = self._fix_object_args(objects, output_dir) - output_filename = self.library_filename(output_libname, output_dir=output_dir) + output_filename = self.library_filename(output_libname, + output_dir=output_dir) if self._need_link(objects, output_filename): lib_args = objects + ['/OUT:' + output_filename] if debug: - pass # XXX what goes here? + pass # XXX what goes here? 
try: self.spawn([self.lib] + lib_args) except DistutilsExecError as msg: @@ -493,36 +449,36 @@ def create_static_lib( else: log.debug("skipping %s (up-to-date)", output_filename) - def link( # noqa: C901 - self, - target_desc, - objects, - output_filename, - output_dir=None, - libraries=None, - library_dirs=None, - runtime_library_dirs=None, - export_symbols=None, - debug=0, - extra_preargs=None, - extra_postargs=None, - build_temp=None, - target_lang=None, - ): + + def link(self, + target_desc, + objects, + output_filename, + output_dir=None, + libraries=None, + library_dirs=None, + runtime_library_dirs=None, + export_symbols=None, + debug=0, + extra_preargs=None, + extra_postargs=None, + build_temp=None, + target_lang=None): if not self.initialized: self.initialize() (objects, output_dir) = self._fix_object_args(objects, output_dir) - fixed_args = self._fix_lib_args(libraries, library_dirs, runtime_library_dirs) + fixed_args = self._fix_lib_args(libraries, library_dirs, + runtime_library_dirs) (libraries, library_dirs, runtime_library_dirs) = fixed_args if runtime_library_dirs: - self.warn( - "I don't know what to do with 'runtime_library_dirs': " - + str(runtime_library_dirs) - ) + self.warn ("I don't know what to do with 'runtime_library_dirs': " + + str (runtime_library_dirs)) - lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs, libraries) + lib_opts = gen_lib_options(self, + library_dirs, runtime_library_dirs, + libraries) if output_dir is not None: output_filename = os.path.join(output_dir, output_filename) @@ -539,12 +495,11 @@ def link( # noqa: C901 ldflags = self.ldflags_shared export_opts = [] - for sym in export_symbols or []: + for sym in (export_symbols or []): export_opts.append("/EXPORT:" + sym) - ld_args = ( - ldflags + lib_opts + export_opts + objects + ['/OUT:' + output_filename] - ) + ld_args = (ldflags + lib_opts + export_opts + + objects + ['/OUT:' + output_filename]) # The MSVC linker generates .lib and .exp files, which cannot be # suppressed by any linker switches. The .lib files may even be @@ -553,12 +508,11 @@ def link( # noqa: C901 # builds, they can go into the same directory. if export_symbols is not None: (dll_name, dll_ext) = os.path.splitext( - os.path.basename(output_filename) - ) + os.path.basename(output_filename)) implib_file = os.path.join( - os.path.dirname(objects[0]), self.library_filename(dll_name) - ) - ld_args.append('/IMPLIB:' + implib_file) + os.path.dirname(objects[0]), + self.library_filename(dll_name)) + ld_args.append ('/IMPLIB:' + implib_file) if extra_preargs: ld_args[:0] = extra_preargs @@ -574,6 +528,7 @@ def link( # noqa: C901 else: log.debug("skipping %s (up-to-date)", output_filename) + # -- Miscellaneous methods ----------------------------------------- # These are all used by the 'gen_lib_options() function, in # ccompiler.py. @@ -583,12 +538,12 @@ def library_dir_option(self, dir): def runtime_library_dir_option(self, dir): raise DistutilsPlatformError( - "don't know how to set runtime library search path for MSVC++" - ) + "don't know how to set runtime library search path for MSVC++") def library_option(self, lib): return self.library_filename(lib) + def find_library_file(self, dirs, lib, debug=0): # Prefer a debugging library if found (and requested), but deal # with it if we don't have one. 
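Backing up to the link() hunks just above: both formattings assemble the same flat argv list for link.exe. An illustrative assembly with invented paths and names:

```python
# Every value below is invented for illustration.
ldflags     = ['/DLL', '/nologo', '/INCREMENTAL:NO']
lib_opts    = ['/LIBPATH:build\\libs', 'python310.lib']
export_opts = ['/EXPORT:PyInit_demo']
objects     = ['build\\temp\\demo.obj']

ld_args = ldflags + lib_opts + export_opts + objects + ['/OUT:build\\demo.pyd']
print(' '.join(ld_args))
```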
@@ -598,7 +553,7 @@ def find_library_file(self, dirs, lib, debug=0): try_names = [lib] for dir in dirs: for name in try_names: - libfile = os.path.join(dir, self.library_filename(name)) + libfile = os.path.join(dir, self.library_filename (name)) if os.path.exists(libfile): return libfile else: @@ -623,7 +578,7 @@ def find_exe(self, exe): # didn't find it; try existing path for p in os.environ['Path'].split(';'): - fn = os.path.join(os.path.abspath(p), exe) + fn = os.path.join(os.path.abspath(p),exe) if os.path.isfile(fn): return fn @@ -640,15 +595,11 @@ def get_msvc_paths(self, path, platform='x86'): path = path + " dirs" if self.__version >= 7: - key = r"{}\{:0.1f}\VC\VC_OBJECTS_PLATFORM_INFO\Win32\Directories".format( - self.__root, - self.__version, - ) + key = (r"%s\%0.1f\VC\VC_OBJECTS_PLATFORM_INFO\Win32\Directories" + % (self.__root, self.__version)) else: - key = ( - r"%s\6.0\Build System\Components\Platforms" - r"\Win32 (%s)\Directories" % (self.__root, platform) - ) + key = (r"%s\6.0\Build System\Components\Platforms" + r"\Win32 (%s)\Directories" % (self.__root, platform)) for base in HKEYS: d = read_values(base, key) @@ -662,12 +613,10 @@ def get_msvc_paths(self, path, platform='x86'): if self.__version == 6: for base in HKEYS: if read_values(base, r"%s\6.0" % self.__root) is not None: - self.warn( - "It seems you have Visual Studio 6 installed, " + self.warn("It seems you have Visual Studio 6 installed, " "but the expected registry settings are not present.\n" "You must at least run the Visual Studio GUI once " - "so that these entries are created." - ) + "so that these entries are created.") break return [] @@ -690,6 +639,5 @@ def set_path_env_var(self, name): log.debug("Importing new compiler from distutils.msvc9compiler") OldMSVCCompiler = MSVCCompiler from distutils.msvc9compiler import MSVCCompiler - # get_build_architecture not really relevant now we support cross-compile - from distutils.msvc9compiler import MacroExpander # noqa: F811 + from distutils.msvc9compiler import MacroExpander diff --git a/venv/lib/python3.10/site-packages/setuptools/_distutils/py38compat.py b/venv/lib/python3.10/site-packages/setuptools/_distutils/py38compat.py index 59224e7..7dbe8ce 100644 --- a/venv/lib/python3.10/site-packages/setuptools/_distutils/py38compat.py +++ b/venv/lib/python3.10/site-packages/setuptools/_distutils/py38compat.py @@ -1,8 +1,7 @@ def aix_platform(osname, version, release): try: import _aix_support - return _aix_support.aix_platform() except ImportError: pass - return "{}-{}.{}".format(osname, version, release) + return "%s-%s.%s" % (osname, version, release) diff --git a/venv/lib/python3.10/site-packages/setuptools/_distutils/py39compat.py b/venv/lib/python3.10/site-packages/setuptools/_distutils/py39compat.py deleted file mode 100644 index c43e5f1..0000000 --- a/venv/lib/python3.10/site-packages/setuptools/_distutils/py39compat.py +++ /dev/null @@ -1,22 +0,0 @@ -import sys -import platform - - -def add_ext_suffix_39(vars): - """ - Ensure vars contains 'EXT_SUFFIX'. pypa/distutils#130 - """ - import _imp - - ext_suffix = _imp.extension_suffixes()[0] - vars.update( - EXT_SUFFIX=ext_suffix, - # sysconfig sets SO to match EXT_SUFFIX, so maintain - # that expectation. 
- # https://github.com/python/cpython/blob/785cc6770588de087d09e89a69110af2542be208/Lib/sysconfig.py#L671-L673 - SO=ext_suffix, - ) - - -needs_ext_suffix = sys.version_info < (3, 10) and platform.system() == 'Windows' -add_ext_suffix = add_ext_suffix_39 if needs_ext_suffix else lambda vars: None diff --git a/venv/lib/python3.10/site-packages/setuptools/_distutils/spawn.py b/venv/lib/python3.10/site-packages/setuptools/_distutils/spawn.py index b18ba9d..6e1c89f 100644 --- a/venv/lib/python3.10/site-packages/setuptools/_distutils/spawn.py +++ b/venv/lib/python3.10/site-packages/setuptools/_distutils/spawn.py @@ -10,12 +10,12 @@ import os import subprocess -from distutils.errors import DistutilsExecError +from distutils.errors import DistutilsPlatformError, DistutilsExecError from distutils.debug import DEBUG from distutils import log -def spawn(cmd, search_path=1, verbose=0, dry_run=0, env=None): # noqa: C901 +def spawn(cmd, search_path=1, verbose=0, dry_run=0, env=None): """Run another program, specified as a command list 'cmd', in a new process. 'cmd' is just the argument list for the new process, ie. @@ -48,7 +48,6 @@ def spawn(cmd, search_path=1, verbose=0, dry_run=0, env=None): # noqa: C901 if sys.platform == 'darwin': from distutils.util import MACOSX_VERSION_VAR, get_macosx_target_ver - macosx_target_ver = get_macosx_target_ver() if macosx_target_ver: env[MACOSX_VERSION_VAR] = macosx_target_ver @@ -61,15 +60,13 @@ def spawn(cmd, search_path=1, verbose=0, dry_run=0, env=None): # noqa: C901 if not DEBUG: cmd = cmd[0] raise DistutilsExecError( - "command {!r} failed: {}".format(cmd, exc.args[-1]) - ) from exc + "command %r failed: %s" % (cmd, exc.args[-1])) from exc if exitcode: if not DEBUG: cmd = cmd[0] raise DistutilsExecError( - "command {!r} failed with exit code {}".format(cmd, exitcode) - ) + "command %r failed with exit code %s" % (cmd, exitcode)) def find_executable(executable, path=None): diff --git a/venv/lib/python3.10/site-packages/setuptools/_distutils/sysconfig.py b/venv/lib/python3.10/site-packages/setuptools/_distutils/sysconfig.py index 6a979f8..d36d94f 100644 --- a/venv/lib/python3.10/site-packages/setuptools/_distutils/sysconfig.py +++ b/venv/lib/python3.10/site-packages/setuptools/_distutils/sysconfig.py @@ -9,15 +9,12 @@ Email: """ +import _imp import os import re import sys -import sysconfig -import pathlib from .errors import DistutilsPlatformError -from . import py39compat -from ._functools import pass_none IS_PYPY = '__pypy__' in sys.builtin_module_names @@ -41,48 +38,31 @@ project_base = os.getcwd() +# python_build: (Boolean) if true, we're either building Python or +# building an extension with an un-installed Python, so we use +# different (hard-wired) directories. def _is_python_source_dir(d): - """ - Return True if the target directory appears to point to an - un-installed Python. - """ - modules = pathlib.Path(d).joinpath('Modules') - return any(modules.joinpath(fn).is_file() for fn in ('Setup', 'Setup.local')) - + for fn in ("Setup", "Setup.local"): + if os.path.isfile(os.path.join(d, "Modules", fn)): + return True + return False _sys_home = getattr(sys, '_home', None) - -def _is_parent(dir_a, dir_b): - """ - Return True if a is a parent of b. - """ - return os.path.normcase(dir_a).startswith(os.path.normcase(dir_b)) - - if os.name == 'nt': - - @pass_none def _fix_pcbuild(d): - # In a venv, sys._home will be inside BASE_PREFIX rather than PREFIX. 
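Before the sysconfig.py diff continues: the spawn.py hunks above keep the module's contract intact. 'cmd' is an argv list, never a shell string, and failures surface as DistutilsExecError. A minimal usage sketch:

```python
from distutils.spawn import spawn, find_executable
from distutils.errors import DistutilsExecError

try:
    spawn(['python', '--version'])   # argv list, not a shell string
except DistutilsExecError as exc:
    print("command failed:", exc)

print(find_executable('python'))     # absolute path, or None if not found
```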
- prefixes = PREFIX, BASE_PREFIX - matched = ( - prefix - for prefix in prefixes - if _is_parent(d, os.path.join(prefix, "PCbuild")) - ) - return next(matched, d) - + if d and os.path.normcase(d).startswith( + os.path.normcase(os.path.join(PREFIX, "PCbuild"))): + return PREFIX + return d project_base = _fix_pcbuild(project_base) _sys_home = _fix_pcbuild(_sys_home) - def _python_build(): if _sys_home: return _is_python_source_dir(_sys_home) return _is_python_source_dir(project_base) - python_build = _python_build() @@ -98,7 +78,6 @@ def _python_build(): # this attribute, which is fine. pass - def get_python_version(): """Return a string containing the major and minor Python version, leaving off the patchlevel. Sample return values could be '1.5' @@ -118,83 +97,36 @@ def get_python_inc(plat_specific=0, prefix=None): If 'prefix' is supplied, use it instead of sys.base_prefix or sys.base_exec_prefix -- i.e., ignore 'plat_specific'. """ - default_prefix = BASE_EXEC_PREFIX if plat_specific else BASE_PREFIX - resolved_prefix = prefix if prefix is not None else default_prefix - try: - getter = globals()[f'_get_python_inc_{os.name}'] - except KeyError: + if prefix is None: + prefix = plat_specific and BASE_EXEC_PREFIX or BASE_PREFIX + if os.name == "posix": + if IS_PYPY and sys.version_info < (3, 8): + return os.path.join(prefix, 'include') + if python_build: + # Assume the executable is in the build directory. The + # pyconfig.h file should be in the same directory. Since + # the build directory may not be the source directory, we + # must use "srcdir" from the makefile to find the "Include" + # directory. + if plat_specific: + return _sys_home or project_base + else: + incdir = os.path.join(get_config_var('srcdir'), 'Include') + return os.path.normpath(incdir) + implementation = 'pypy' if IS_PYPY else 'python' + python_dir = implementation + get_python_version() + build_flags + return os.path.join(prefix, "include", python_dir) + elif os.name == "nt": + if python_build: + # Include both the include and PC dir to ensure we can find + # pyconfig.h + return (os.path.join(prefix, "include") + os.path.pathsep + + os.path.join(prefix, "PC")) + return os.path.join(prefix, "include") + else: raise DistutilsPlatformError( "I don't know where Python installs its C header files " - "on platform '%s'" % os.name - ) - return getter(resolved_prefix, prefix, plat_specific) - - -def _get_python_inc_posix(prefix, spec_prefix, plat_specific): - if IS_PYPY and sys.version_info < (3, 8): - return os.path.join(prefix, 'include') - return ( - _get_python_inc_posix_python(plat_specific) - or _get_python_inc_from_config(plat_specific, spec_prefix) - or _get_python_inc_posix_prefix(prefix) - ) - - -def _get_python_inc_posix_python(plat_specific): - """ - Assume the executable is in the build directory. The - pyconfig.h file should be in the same directory. Since - the build directory may not be the source directory, - use "srcdir" from the makefile to find the "Include" - directory. - """ - if not python_build: - return - if plat_specific: - return _sys_home or project_base - incdir = os.path.join(get_config_var('srcdir'), 'Include') - return os.path.normpath(incdir) - - -def _get_python_inc_from_config(plat_specific, spec_prefix): - """ - If no prefix was explicitly specified, provide the include - directory from the config vars. Useful when - cross-compiling, since the config vars may come from - the host - platform Python installation, while the current Python - executable is from the build platform installation. 
- - >>> monkeypatch = getfixture('monkeypatch') - >>> gpifc = _get_python_inc_from_config - >>> monkeypatch.setitem(gpifc.__globals__, 'get_config_var', str.lower) - >>> gpifc(False, '/usr/bin/') - >>> gpifc(False, '') - >>> gpifc(False, None) - 'includepy' - >>> gpifc(True, None) - 'confincludepy' - """ - if spec_prefix is None: - return get_config_var('CONF' * plat_specific + 'INCLUDEPY') - - -def _get_python_inc_posix_prefix(prefix): - implementation = 'pypy' if IS_PYPY else 'python' - python_dir = implementation + get_python_version() + build_flags - return os.path.join(prefix, "include", python_dir) - - -def _get_python_inc_nt(prefix, spec_prefix, plat_specific): - if python_build: - # Include both the include and PC dir to ensure we can find - # pyconfig.h - return ( - os.path.join(prefix, "include") - + os.path.pathsep - + os.path.join(prefix, "PC") - ) - return os.path.join(prefix, "include") + "on platform '%s'" % os.name) # allow this behavior to be monkey-patched. Ref pypa/distutils#2. @@ -245,7 +177,8 @@ def get_python_lib(plat_specific=0, standard_lib=0, prefix=None): # Pure Python libdir = "lib" implementation = 'pypy' if IS_PYPY else 'python' - libpython = os.path.join(prefix, libdir, implementation + get_python_version()) + libpython = os.path.join(prefix, libdir, + implementation + get_python_version()) return _posix_lib(standard_lib, libpython, early_prefix, prefix) elif os.name == "nt": if standard_lib: @@ -255,11 +188,11 @@ def get_python_lib(plat_specific=0, standard_lib=0, prefix=None): else: raise DistutilsPlatformError( "I don't know where Python installs its library " - "on platform '%s'" % os.name - ) + "on platform '%s'" % os.name) -def customize_compiler(compiler): # noqa: C901 + +def customize_compiler(compiler): """Do any platform-specific customization of a CCompiler instance. Mainly needed on Unix, so we can plug in the information that @@ -279,36 +212,20 @@ def customize_compiler(compiler): # noqa: C901 # Use get_config_var() to ensure _config_vars is initialized. 
if not get_config_var('CUSTOMIZED_OSX_COMPILER'): import _osx_support - _osx_support.customize_compiler(_config_vars) _config_vars['CUSTOMIZED_OSX_COMPILER'] = 'True' - ( - cc, - cxx, - cflags, - ccshared, - ldshared, - shlib_suffix, - ar, - ar_flags, - ) = get_config_vars( - 'CC', - 'CXX', - 'CFLAGS', - 'CCSHARED', - 'LDSHARED', - 'SHLIB_SUFFIX', - 'AR', - 'ARFLAGS', - ) + (cc, cxx, cflags, ccshared, ldshared, shlib_suffix, ar, ar_flags) = \ + get_config_vars('CC', 'CXX', 'CFLAGS', + 'CCSHARED', 'LDSHARED', 'SHLIB_SUFFIX', 'AR', 'ARFLAGS') if 'CC' in os.environ: newcc = os.environ['CC'] - if 'LDSHARED' not in os.environ and ldshared.startswith(cc): + if('LDSHARED' not in os.environ + and ldshared.startswith(cc)): # If CC is overridden, use that as the default # command for LDSHARED as well - ldshared = newcc + ldshared[len(cc) :] + ldshared = newcc + ldshared[len(cc):] cc = newcc if 'CXX' in os.environ: cxx = os.environ['CXX'] @@ -317,7 +234,7 @@ def customize_compiler(compiler): # noqa: C901 if 'CPP' in os.environ: cpp = os.environ['CPP'] else: - cpp = cc + " -E" # not always + cpp = cc + " -E" # not always if 'LDFLAGS' in os.environ: ldshared = ldshared + ' ' + os.environ['LDFLAGS'] if 'CFLAGS' in os.environ: @@ -342,8 +259,7 @@ def customize_compiler(compiler): # noqa: C901 compiler_cxx=cxx, linker_so=ldshared, linker_exe=cc, - archiver=archiver, - ) + archiver=archiver) if 'RANLIB' in os.environ and compiler.executables.get('ranlib', None): compiler.set_executables(ranlib=os.environ['RANLIB']) @@ -358,14 +274,31 @@ def get_config_h_filename(): inc_dir = os.path.join(_sys_home or project_base, "PC") else: inc_dir = _sys_home or project_base - return os.path.join(inc_dir, 'pyconfig.h') else: - return sysconfig.get_config_h_filename() + inc_dir = get_python_inc(plat_specific=1) + + return os.path.join(inc_dir, 'pyconfig.h') + + +# Allow this value to be patched by pkgsrc. Ref pypa/distutils#16. +_makefile_tmpl = 'config-{python_ver}{build_flags}{multiarch}' def get_makefile_filename(): """Return full pathname of installed Makefile from the Python build.""" - return sysconfig.get_makefile_filename() + if python_build: + return os.path.join(_sys_home or project_base, "Makefile") + lib_dir = get_python_lib(plat_specific=0, standard_lib=1) + multiarch = ( + '-%s' % sys.implementation._multiarch + if hasattr(sys.implementation, '_multiarch') else '' + ) + config_file = _makefile_tmpl.format( + python_ver=get_python_version(), + build_flags=build_flags, + multiarch=multiarch, + ) + return os.path.join(lib_dir, config_file, 'Makefile') def parse_config_h(fp, g=None): @@ -375,7 +308,26 @@ def parse_config_h(fp, g=None): optional dictionary is passed in as the second argument, it is used instead of a new dictionary. 
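Backing up to customize_compiler() above: one subtle rule survives both formattings. Overriding CC in the environment also rewrites the head of LDSHARED, unless LDSHARED itself is overridden. Sketched in isolation with dummy values (the real function reads cc and ldshared from get_config_vars()):

```python
import os

cc, ldshared = 'gcc', 'gcc -shared'   # dummy stand-ins for the config vars
os.environ['CC'] = 'clang'            # user override

if 'LDSHARED' not in os.environ and ldshared.startswith(cc):
    ldshared = os.environ['CC'] + ldshared[len(cc):]
cc = os.environ['CC']

print(cc, '|', ldshared)              # clang | clang -shared
```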
""" - return sysconfig.parse_config_h(fp, vars=g) + if g is None: + g = {} + define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n") + undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n") + # + while True: + line = fp.readline() + if not line: + break + m = define_rx.match(line) + if m: + n, v = m.group(1, 2) + try: v = int(v) + except ValueError: pass + g[n] = v + else: + m = undef_rx.match(line) + if m: + g[m.group(1)] = 0 + return g # Regexes needed for parsing Makefile (and similar syntaxes, @@ -384,8 +336,7 @@ def parse_config_h(fp, g=None): _findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)") _findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}") - -def parse_makefile(fn, g=None): # noqa: C901 +def parse_makefile(fn, g=None): """Parse a Makefile-style file. A dictionary containing name/value pairs is returned. If an @@ -393,10 +344,7 @@ def parse_makefile(fn, g=None): # noqa: C901 used instead of a new dictionary. """ from distutils.text_file import TextFile - - fp = TextFile( - fn, strip_comments=1, skip_blanks=1, join_lines=1, errors="surrogateescape" - ) + fp = TextFile(fn, strip_comments=1, skip_blanks=1, join_lines=1, errors="surrogateescape") if g is None: g = {} @@ -405,7 +353,7 @@ def parse_makefile(fn, g=None): # noqa: C901 while True: line = fp.readline() - if line is None: # eof + if line is None: # eof break m = _variable_rx.match(line) if m: @@ -460,20 +408,20 @@ def parse_makefile(fn, g=None): # noqa: C901 else: done[n] = item = "" if found: - after = value[m.end() :] - value = value[: m.start()] + item + after + after = value[m.end():] + value = value[:m.start()] + item + after if "$" in after: notdone[name] = value else: - try: - value = int(value) + try: value = int(value) except ValueError: done[name] = value.strip() else: done[name] = value del notdone[name] - if name.startswith('PY_') and name[3:] in renamed_variables: + if name.startswith('PY_') \ + and name[3:] in renamed_variables: name = name[3:] if name not in done: @@ -522,6 +470,51 @@ def expand_makefile_vars(s, vars): _config_vars = None +_sysconfig_name_tmpl = '_sysconfigdata_{abi}_{platform}_{multiarch}' + + +def _init_posix(): + """Initialize the module as appropriate for POSIX systems.""" + # _sysconfigdata is generated at build time, see the sysconfig module + name = os.environ.get( + '_PYTHON_SYSCONFIGDATA_NAME', + _sysconfig_name_tmpl.format( + abi=sys.abiflags, + platform=sys.platform, + multiarch=getattr(sys.implementation, '_multiarch', ''), + ), + ) + try: + _temp = __import__(name, globals(), locals(), ['build_time_vars'], 0) + except ImportError: + # Python 3.5 and pypy 7.3.1 + _temp = __import__( + '_sysconfigdata', globals(), locals(), ['build_time_vars'], 0) + build_time_vars = _temp.build_time_vars + global _config_vars + _config_vars = {} + _config_vars.update(build_time_vars) + + +def _init_nt(): + """Initialize the module as appropriate for NT""" + g = {} + # set basic install directories + g['LIBDEST'] = get_python_lib(plat_specific=0, standard_lib=1) + g['BINLIBDEST'] = get_python_lib(plat_specific=1, standard_lib=1) + + # XXX hmmm.. 
a normal install puts include files here + g['INCLUDEPY'] = get_python_inc(plat_specific=0) + + g['EXT_SUFFIX'] = _imp.extension_suffixes()[0] + g['EXE'] = ".exe" + g['VERSION'] = get_python_version().replace(".", "") + g['BINDIR'] = os.path.dirname(os.path.abspath(sys.executable)) + + global _config_vars + _config_vars = g + + def get_config_vars(*args): """With no arguments, return a dictionary of all configuration variables relevant for the current platform. Generally this includes @@ -534,8 +527,60 @@ def get_config_vars(*args): """ global _config_vars if _config_vars is None: - _config_vars = sysconfig.get_config_vars().copy() - py39compat.add_ext_suffix(_config_vars) + func = globals().get("_init_" + os.name) + if func: + func() + else: + _config_vars = {} + + # Normalized versions of prefix and exec_prefix are handy to have; + # in fact, these are the standard versions used most places in the + # Distutils. + _config_vars['prefix'] = PREFIX + _config_vars['exec_prefix'] = EXEC_PREFIX + + if not IS_PYPY: + # For backward compatibility, see issue19555 + SO = _config_vars.get('EXT_SUFFIX') + if SO is not None: + _config_vars['SO'] = SO + + # Always convert srcdir to an absolute path + srcdir = _config_vars.get('srcdir', project_base) + if os.name == 'posix': + if python_build: + # If srcdir is a relative path (typically '.' or '..') + # then it should be interpreted relative to the directory + # containing Makefile. + base = os.path.dirname(get_makefile_filename()) + srcdir = os.path.join(base, srcdir) + else: + # srcdir is not meaningful since the installation is + # spread about the filesystem. We choose the + # directory containing the Makefile since we know it + # exists. + srcdir = os.path.dirname(get_makefile_filename()) + _config_vars['srcdir'] = os.path.abspath(os.path.normpath(srcdir)) + + # Convert srcdir into an absolute path if it appears necessary. + # Normally it is relative to the build directory. However, during + # testing, for example, we might be running a non-installed python + # from a different directory. + if python_build and os.name == "posix": + base = project_base + if (not os.path.isabs(_config_vars['srcdir']) and + base != os.getcwd()): + # srcdir is relative and we are not in the same directory + # as the executable. Assume executable is in the build + # directory and make srcdir absolute. + srcdir = os.path.join(base, _config_vars['srcdir']) + _config_vars['srcdir'] = os.path.normpath(srcdir) + + # OS X platforms require special customization to handle + # multi-architecture, multi-os-version installers + if sys.platform == 'darwin': + import _osx_support + _osx_support.customize_config_vars(_config_vars) if args: vals = [] @@ -545,7 +590,6 @@ def get_config_vars(*args): else: return _config_vars - def get_config_var(name): """Return the value of a single variable using the dictionary returned by 'get_config_vars()'. 
Equivalent to @@ -553,6 +597,5 @@ def get_config_var(name): """ if name == 'SO': import warnings - warnings.warn('SO is deprecated, use EXT_SUFFIX', DeprecationWarning, 2) return get_config_vars().get(name) diff --git a/venv/lib/python3.10/site-packages/setuptools/_distutils/text_file.py b/venv/lib/python3.10/site-packages/setuptools/_distutils/text_file.py index 7274d4b..93abad3 100644 --- a/venv/lib/python3.10/site-packages/setuptools/_distutils/text_file.py +++ b/venv/lib/python3.10/site-packages/setuptools/_distutils/text_file.py @@ -4,87 +4,84 @@ that (optionally) takes care of stripping comments, ignoring blank lines, and joining lines with backslashes.""" -import sys +import sys, io class TextFile: """Provides a file-like object that takes care of all the things you - commonly want to do when processing a text file that has some - line-by-line syntax: strip comments (as long as "#" is your - comment character), skip blank lines, join adjacent lines by - escaping the newline (ie. backslash at end of line), strip - leading and/or trailing whitespace. All of these are optional - and independently controllable. - - Provides a 'warn()' method so you can generate warning messages that - report physical line number, even if the logical line in question - spans multiple physical lines. Also provides 'unreadline()' for - implementing line-at-a-time lookahead. - - Constructor is called as: - - TextFile (filename=None, file=None, **options) - - It bombs (RuntimeError) if both 'filename' and 'file' are None; - 'filename' should be a string, and 'file' a file object (or - something that provides 'readline()' and 'close()' methods). It is - recommended that you supply at least 'filename', so that TextFile - can include it in warning messages. If 'file' is not supplied, - TextFile creates its own using 'io.open()'. - - The options are all boolean, and affect the value returned by - 'readline()': - strip_comments [default: true] - strip from "#" to end-of-line, as well as any whitespace - leading up to the "#" -- unless it is escaped by a backslash - lstrip_ws [default: false] - strip leading whitespace from each line before returning it - rstrip_ws [default: true] - strip trailing whitespace (including line terminator!) from - each line before returning it - skip_blanks [default: true} - skip lines that are empty *after* stripping comments and - whitespace. (If both lstrip_ws and rstrip_ws are false, - then some lines may consist of solely whitespace: these will - *not* be skipped, even if 'skip_blanks' is true.) - join_lines [default: false] - if a backslash is the last non-newline character on a line - after stripping comments and whitespace, join the following line - to it to form one "logical line"; if N consecutive lines end - with a backslash, then N+1 physical lines will be joined to - form one logical line. - collapse_join [default: false] - strip leading whitespace from lines that are joined to their - predecessor; only matters if (join_lines and not lstrip_ws) - errors [default: 'strict'] - error handler used to decode the file content - - Note that since 'rstrip_ws' can strip the trailing newline, the - semantics of 'readline()' must differ from those of the builtin file - object's 'readline()' method! 
In particular, 'readline()' returns - None for end-of-file: an empty string might just be a blank line (or - an all-whitespace line), if 'rstrip_ws' is true but 'skip_blanks' is - not.""" - - default_options = { - 'strip_comments': 1, - 'skip_blanks': 1, - 'lstrip_ws': 0, - 'rstrip_ws': 1, - 'join_lines': 0, - 'collapse_join': 0, - 'errors': 'strict', - } + commonly want to do when processing a text file that has some + line-by-line syntax: strip comments (as long as "#" is your + comment character), skip blank lines, join adjacent lines by + escaping the newline (ie. backslash at end of line), strip + leading and/or trailing whitespace. All of these are optional + and independently controllable. + + Provides a 'warn()' method so you can generate warning messages that + report physical line number, even if the logical line in question + spans multiple physical lines. Also provides 'unreadline()' for + implementing line-at-a-time lookahead. + + Constructor is called as: + + TextFile (filename=None, file=None, **options) + + It bombs (RuntimeError) if both 'filename' and 'file' are None; + 'filename' should be a string, and 'file' a file object (or + something that provides 'readline()' and 'close()' methods). It is + recommended that you supply at least 'filename', so that TextFile + can include it in warning messages. If 'file' is not supplied, + TextFile creates its own using 'io.open()'. + + The options are all boolean, and affect the value returned by + 'readline()': + strip_comments [default: true] + strip from "#" to end-of-line, as well as any whitespace + leading up to the "#" -- unless it is escaped by a backslash + lstrip_ws [default: false] + strip leading whitespace from each line before returning it + rstrip_ws [default: true] + strip trailing whitespace (including line terminator!) from + each line before returning it + skip_blanks [default: true} + skip lines that are empty *after* stripping comments and + whitespace. (If both lstrip_ws and rstrip_ws are false, + then some lines may consist of solely whitespace: these will + *not* be skipped, even if 'skip_blanks' is true.) + join_lines [default: false] + if a backslash is the last non-newline character on a line + after stripping comments and whitespace, join the following line + to it to form one "logical line"; if N consecutive lines end + with a backslash, then N+1 physical lines will be joined to + form one logical line. + collapse_join [default: false] + strip leading whitespace from lines that are joined to their + predecessor; only matters if (join_lines and not lstrip_ws) + errors [default: 'strict'] + error handler used to decode the file content + + Note that since 'rstrip_ws' can strip the trailing newline, the + semantics of 'readline()' must differ from those of the builtin file + object's 'readline()' method! In particular, 'readline()' returns + None for end-of-file: an empty string might just be a blank line (or + an all-whitespace line), if 'rstrip_ws' is true but 'skip_blanks' is + not.""" + + default_options = { 'strip_comments': 1, + 'skip_blanks': 1, + 'lstrip_ws': 0, + 'rstrip_ws': 1, + 'join_lines': 0, + 'collapse_join': 0, + 'errors': 'strict', + } def __init__(self, filename=None, file=None, **options): """Construct a new TextFile object. At least one of 'filename' - (a string) and 'file' (a file-like object) must be supplied. - They keyword argument options are described above and affect - the values returned by 'readline()'.""" + (a string) and 'file' (a file-like object) must be supplied. 
+ They keyword argument options are described above and affect + the values returned by 'readline()'.""" if filename is None and file is None: - raise RuntimeError( - "you must supply either or both of 'filename' and 'file'" - ) + raise RuntimeError("you must supply either or both of 'filename' and 'file'") # set values for all options -- either from client option hash # or fallback to default_options @@ -104,7 +101,7 @@ def __init__(self, filename=None, file=None, **options): else: self.filename = filename self.file = file - self.current_line = 0 # assuming that file is at BOF! + self.current_line = 0 # assuming that file is at BOF! # 'linebuf' is a stack of lines that will be emptied before we # actually read from the file; it's only populated by an @@ -113,14 +110,14 @@ def __init__(self, filename=None, file=None, **options): def open(self, filename): """Open a new file named 'filename'. This overrides both the - 'filename' and 'file' arguments to the constructor.""" + 'filename' and 'file' arguments to the constructor.""" self.filename = filename - self.file = open(self.filename, errors=self.errors) + self.file = io.open(self.filename, 'r', errors=self.errors) self.current_line = 0 def close(self): """Close the current file and forget everything we know about it - (filename, current line number).""" + (filename, current line number).""" file = self.file self.file = None self.filename = None @@ -144,24 +141,24 @@ def error(self, msg, line=None): def warn(self, msg, line=None): """Print (to stderr) a warning message tied to the current logical - line in the current file. If the current logical line in the - file spans multiple physical lines, the warning refers to the - whole range, eg. "lines 3-5". If 'line' supplied, it overrides - the current line number; it may be a list or tuple to indicate a - range of physical lines, or an integer for a single physical - line.""" + line in the current file. If the current logical line in the + file spans multiple physical lines, the warning refers to the + whole range, eg. "lines 3-5". If 'line' supplied, it overrides + the current line number; it may be a list or tuple to indicate a + range of physical lines, or an integer for a single physical + line.""" sys.stderr.write("warning: " + self.gen_error(msg, line) + "\n") - def readline(self): # noqa: C901 + def readline(self): """Read and return a single logical line from the current file (or - from an internal buffer if lines have previously been "unread" - with 'unreadline()'). If the 'join_lines' option is true, this - may involve reading multiple physical lines concatenated into a - single string. Updates the current line number, so calling - 'warn()' after 'readline()' emits a warning about the physical - line(s) just read. Returns None on end-of-file, since the empty - string can occur if 'rstrip_ws' is true but 'strip_blanks' is - not.""" + from an internal buffer if lines have previously been "unread" + with 'unreadline()'). If the 'join_lines' option is true, this + may involve reading multiple physical lines concatenated into a + single string. Updates the current line number, so calling + 'warn()' after 'readline()' emits a warning about the physical + line(s) just read. Returns None on end-of-file, since the empty + string can occur if 'rstrip_ws' is true but 'strip_blanks' is + not.""" # If any "unread" lines waiting in 'linebuf', return the top # one. 
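(A minimal sketch exercising the option semantics documented above; the in-memory sample input is illustrative.)

import io
from distutils.text_file import TextFile

sample = io.StringIO(
    "# a comment-only line\n"
    "name = value \\\n"
    "    continued\n"
    "plain line\n"
)
tf = TextFile(file=sample, strip_comments=1, skip_blanks=1, join_lines=1)
while True:
    line = tf.readline()
    if line is None:   # None, not '', signals end-of-file here
        break
    print(repr(line))
# The comment-only line is skipped, the backslash continuation is joined
# into one logical line, and trailing whitespace/newlines are stripped.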
(We don't actually buffer read-ahead data -- lines only # get put in 'linebuf' if the client explicitly does an @@ -190,12 +187,12 @@ def readline(self): # noqa: C901 # lurking in there) and otherwise leave the line alone. pos = line.find("#") - if pos == -1: # no "#" -- no comments + if pos == -1: # no "#" -- no comments pass # It's definitely a comment -- either "#" is the first # character, or it's elsewhere and unescaped. - elif pos == 0 or line[pos - 1] != "\\": + elif pos == 0 or line[pos-1] != "\\": # Have to preserve the trailing newline, because it's # the job of a later step (rstrip_ws) to remove it -- # and if rstrip_ws is false, we'd better preserve it! @@ -214,14 +211,15 @@ def readline(self): # noqa: C901 # result in "hello there". if line.strip() == "": continue - else: # it's an escaped "#" + else: # it's an escaped "#" line = line.replace("\\#", "#") # did previous line end with a backslash? then accumulate if self.join_lines and buildup_line: # oops: end of file if line is None: - self.warn("continuation line immediately precedes " "end-of-file") + self.warn("continuation line immediately precedes " + "end-of-file") return buildup_line if self.collapse_join: @@ -232,10 +230,11 @@ def readline(self): # noqa: C901 if isinstance(self.current_line, list): self.current_line[1] = self.current_line[1] + 1 else: - self.current_line = [self.current_line, self.current_line + 1] + self.current_line = [self.current_line, + self.current_line + 1] # just an ordinary line, read it as usual else: - if line is None: # eof + if line is None: # eof return None # still have to be careful about incrementing the line number! @@ -272,7 +271,7 @@ def readline(self): # noqa: C901 def readlines(self): """Read and return the list of all logical lines remaining in the - current file.""" + current file.""" lines = [] while True: line = self.readline() @@ -282,6 +281,6 @@ def readlines(self): def unreadline(self, line): """Push 'line' (a string) onto an internal buffer that will be - checked by future 'readline()' calls. Handy for implementing - a parser with line-at-a-time lookahead.""" + checked by future 'readline()' calls. Handy for implementing + a parser with line-at-a-time lookahead.""" self.linebuf.append(line) diff --git a/venv/lib/python3.10/site-packages/setuptools/_distutils/unixccompiler.py b/venv/lib/python3.10/site-packages/setuptools/_distutils/unixccompiler.py index 4ab771a..a07e598 100644 --- a/venv/lib/python3.10/site-packages/setuptools/_distutils/unixccompiler.py +++ b/venv/lib/python3.10/site-packages/setuptools/_distutils/unixccompiler.py @@ -13,18 +13,18 @@ * link shared library handled by 'cc -shared' """ -import os -import sys -import re -import shlex -import itertools +import os, sys, re, shlex from distutils import sysconfig from distutils.dep_util import newer -from distutils.ccompiler import CCompiler, gen_preprocess_options, gen_lib_options -from distutils.errors import DistutilsExecError, CompileError, LibError, LinkError +from distutils.ccompiler import \ + CCompiler, gen_preprocess_options, gen_lib_options +from distutils.errors import \ + DistutilsExecError, CompileError, LibError, LinkError from distutils import log -from ._macos_compat import compiler_fixup + +if sys.platform == 'darwin': + import _osx_support # XXX Things not currently handled: # * optimization/debug/warning flags; we just use whatever's in Python's @@ -42,66 +42,6 @@ # options and carry on. 
-def _split_env(cmd): - """ - For macOS, split command into 'env' portion (if any) - and the rest of the linker command. - - >>> _split_env(['a', 'b', 'c']) - ([], ['a', 'b', 'c']) - >>> _split_env(['/usr/bin/env', 'A=3', 'gcc']) - (['/usr/bin/env', 'A=3'], ['gcc']) - """ - pivot = 0 - if os.path.basename(cmd[0]) == "env": - pivot = 1 - while '=' in cmd[pivot]: - pivot += 1 - return cmd[:pivot], cmd[pivot:] - - -def _split_aix(cmd): - """ - AIX platforms prefix the compiler with the ld_so_aix - script, so split that from the linker command. - - >>> _split_aix(['a', 'b', 'c']) - ([], ['a', 'b', 'c']) - >>> _split_aix(['/bin/foo/ld_so_aix', 'gcc']) - (['/bin/foo/ld_so_aix'], ['gcc']) - """ - pivot = os.path.basename(cmd[0]) == 'ld_so_aix' - return cmd[:pivot], cmd[pivot:] - - -def _linker_params(linker_cmd, compiler_cmd): - """ - The linker command usually begins with the compiler - command (possibly multiple elements), followed by zero or more - params for shared library building. - - If the LDSHARED env variable overrides the linker command, - however, the commands may not match. - - Return the best guess of the linker parameters by stripping - the linker command. If the compiler command does not - match the linker command, assume the linker command is - just the first element. - - >>> _linker_params('gcc foo bar'.split(), ['gcc']) - ['foo', 'bar'] - >>> _linker_params('gcc foo bar'.split(), ['other']) - ['foo', 'bar'] - >>> _linker_params('ccache gcc foo bar'.split(), 'ccache gcc'.split()) - ['foo', 'bar'] - >>> _linker_params(['gcc'], ['gcc']) - [] - """ - c_len = len(compiler_cmd) - pivot = c_len if linker_cmd[:c_len] == compiler_cmd else 1 - return linker_cmd[pivot:] - - class UnixCCompiler(CCompiler): compiler_type = 'unix' @@ -112,16 +52,15 @@ class UnixCCompiler(CCompiler): # are pretty generic; they will probably have to be set by an outsider # (eg. using information discovered by the sysconfig about building # Python extensions). - executables = { - 'preprocessor': None, - 'compiler': ["cc"], - 'compiler_so': ["cc"], - 'compiler_cxx': ["cc"], - 'linker_so': ["cc", "-shared"], - 'linker_exe': ["cc"], - 'archiver': ["ar", "-cr"], - 'ranlib': None, - } + executables = {'preprocessor' : None, + 'compiler' : ["cc"], + 'compiler_so' : ["cc"], + 'compiler_cxx' : ["cc"], + 'linker_so' : ["cc", "-shared"], + 'linker_exe' : ["cc"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : None, + } if sys.platform[:6] == "darwin": executables['ranlib'] = ["ranlib"] @@ -132,7 +71,7 @@ class UnixCCompiler(CCompiler): # reasonable common default here, but it's not necessarily used on all # Unices! 
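(A short sketch of how an outsider might override these generic defaults with sysconfig-discovered values, as the comment above suggests; names are illustrative.)

from distutils.unixccompiler import UnixCCompiler
from distutils import sysconfig

cc = UnixCCompiler()
print(cc.executables['linker_so'])        # ['cc', '-shared'] by default

# set_executables() splits string values shell-style, so a multi-token
# CC such as 'gcc -pthread' is handled correctly.
real_cc = sysconfig.get_config_var('CC') or 'cc'
cc.set_executables(compiler=real_cc, compiler_so=real_cc)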
- src_extensions = [".c", ".C", ".cc", ".cxx", ".cpp", ".m"] + src_extensions = [".c",".C",".cc",".cxx",".cpp",".m"] obj_extension = ".o" static_lib_extension = ".a" shared_lib_extension = ".so" @@ -143,15 +82,8 @@ class UnixCCompiler(CCompiler): if sys.platform == "cygwin": exe_extension = ".exe" - def preprocess( - self, - source, - output_file=None, - macros=None, - include_dirs=None, - extra_preargs=None, - extra_postargs=None, - ): + def preprocess(self, source, output_file=None, macros=None, + include_dirs=None, extra_preargs=None, extra_postargs=None): fixed_args = self._fix_compile_args(None, macros, include_dirs) ignore, macros, include_dirs = fixed_args pp_opts = gen_preprocess_options(macros, include_dirs) @@ -164,39 +96,41 @@ def preprocess( pp_args.extend(extra_postargs) pp_args.append(source) - # reasons to preprocess: - # - force is indicated - # - output is directed to stdout - # - source file is newer than the target - preprocess = self.force or output_file is None or newer(source, output_file) - if not preprocess: - return - - if output_file: - self.mkpath(os.path.dirname(output_file)) - - try: - self.spawn(pp_args) - except DistutilsExecError as msg: - raise CompileError(msg) + # We need to preprocess: either we're being forced to, or we're + # generating output to stdout, or there's a target output file and + # the source file is newer than the target (or the target doesn't + # exist). + if self.force or output_file is None or newer(source, output_file): + if output_file: + self.mkpath(os.path.dirname(output_file)) + try: + self.spawn(pp_args) + except DistutilsExecError as msg: + raise CompileError(msg) def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts): - compiler_so = compiler_fixup(self.compiler_so, cc_args + extra_postargs) + compiler_so = self.compiler_so + if sys.platform == 'darwin': + compiler_so = _osx_support.compiler_fixup(compiler_so, + cc_args + extra_postargs) try: - self.spawn(compiler_so + cc_args + [src, '-o', obj] + extra_postargs) + self.spawn(compiler_so + cc_args + [src, '-o', obj] + + extra_postargs) except DistutilsExecError as msg: raise CompileError(msg) - def create_static_lib( - self, objects, output_libname, output_dir=None, debug=0, target_lang=None - ): + def create_static_lib(self, objects, output_libname, + output_dir=None, debug=0, target_lang=None): objects, output_dir = self._fix_object_args(objects, output_dir) - output_filename = self.library_filename(output_libname, output_dir=output_dir) + output_filename = \ + self.library_filename(output_libname, output_dir=output_dir) if self._need_link(objects, output_filename): self.mkpath(os.path.dirname(output_filename)) - self.spawn(self.archiver + [output_filename] + objects + self.objects) + self.spawn(self.archiver + + [output_filename] + + objects + self.objects) # Not many Unices required ranlib anymore -- SunOS 4.x is, I # think the only major Unix that does. 
Maybe we need some @@ -211,34 +145,26 @@ def create_static_lib( else: log.debug("skipping %s (up-to-date)", output_filename) - def link( - self, - target_desc, - objects, - output_filename, - output_dir=None, - libraries=None, - library_dirs=None, - runtime_library_dirs=None, - export_symbols=None, - debug=0, - extra_preargs=None, - extra_postargs=None, - build_temp=None, - target_lang=None, - ): + def link(self, target_desc, objects, + output_filename, output_dir=None, libraries=None, + library_dirs=None, runtime_library_dirs=None, + export_symbols=None, debug=0, extra_preargs=None, + extra_postargs=None, build_temp=None, target_lang=None): objects, output_dir = self._fix_object_args(objects, output_dir) - fixed_args = self._fix_lib_args(libraries, library_dirs, runtime_library_dirs) + fixed_args = self._fix_lib_args(libraries, library_dirs, + runtime_library_dirs) libraries, library_dirs, runtime_library_dirs = fixed_args - lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs, libraries) + lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs, + libraries) if not isinstance(output_dir, (str, type(None))): raise TypeError("'output_dir' must be a string or None") if output_dir is not None: output_filename = os.path.join(output_dir, output_filename) if self._need_link(objects, output_filename): - ld_args = objects + self.objects + lib_opts + ['-o', output_filename] + ld_args = (objects + self.objects + + lib_opts + ['-o', output_filename]) if debug: ld_args[:0] = ['-g'] if extra_preargs: @@ -247,22 +173,33 @@ def link( ld_args.extend(extra_postargs) self.mkpath(os.path.dirname(output_filename)) try: - # Select a linker based on context: linker_exe when - # building an executable or linker_so (with shared options) - # when building a shared library. - building_exe = target_desc == CCompiler.EXECUTABLE - linker = (self.linker_exe if building_exe else self.linker_so)[:] - + if target_desc == CCompiler.EXECUTABLE: + linker = self.linker_exe[:] + else: + linker = self.linker_so[:] if target_lang == "c++" and self.compiler_cxx: - env, linker_ne = _split_env(linker) - aix, linker_na = _split_aix(linker_ne) - _, compiler_cxx_ne = _split_env(self.compiler_cxx) - _, linker_exe_ne = _split_env(self.linker_exe) - - params = _linker_params(linker_na, linker_exe_ne) - linker = env + aix + compiler_cxx_ne + params - - linker = compiler_fixup(linker, ld_args) + # skip over environment variable settings if /usr/bin/env + # is used to set up the linker's environment. + # This is needed on OSX. Note: this assumes that the + # normal and C++ compiler have the same environment + # settings. + i = 0 + if os.path.basename(linker[0]) == "env": + i = 1 + while '=' in linker[i]: + i += 1 + + if os.path.basename(linker[i]) == 'ld_so_aix': + # AIX platforms prefix the compiler with the ld_so_aix + # script, so we need to adjust our linker index + offset = 1 + else: + offset = 0 + + linker[i+offset] = self.compiler_cxx[i] + + if sys.platform == 'darwin': + linker = _osx_support.compiler_fixup(linker, ld_args) self.spawn(linker + ld_args) except DistutilsExecError as msg: @@ -277,10 +214,8 @@ def link( def library_dir_option(self, dir): return "-L" + dir - def _is_gcc(self): - cc_var = sysconfig.get_config_var("CC") - compiler = os.path.basename(shlex.split(cc_var)[0]) - return "gcc" in compiler or "g++" in compiler + def _is_gcc(self, compiler_name): + return "gcc" in compiler_name or "g++" in compiler_name def runtime_library_dir_option(self, dir): # XXX Hackish, at the very least. 
See Python bug #445902: @@ -296,21 +231,20 @@ def runtime_library_dir_option(self, dir): # this time, there's no way to determine this information from # the configuration data stored in the Python installation, so # we use this hack. + compiler = os.path.basename(shlex.split(sysconfig.get_config_var("CC"))[0]) if sys.platform[:6] == "darwin": from distutils.util import get_macosx_target_ver, split_version - macosx_target_ver = get_macosx_target_ver() if macosx_target_ver and split_version(macosx_target_ver) >= [10, 5]: return "-Wl,-rpath," + dir - else: # no support for -rpath on earlier macOS versions + else: # no support for -rpath on earlier macOS versions return "-L" + dir elif sys.platform[:7] == "freebsd": return "-Wl,-rpath=" + dir elif sys.platform[:5] == "hp-ux": - return [ - "-Wl,+s" if self._is_gcc() else "+s", - "-L" + dir, - ] + if self._is_gcc(compiler): + return ["-Wl,+s", "-L" + dir] + return ["+s", "-L" + dir] # For all compilers, `-Wl` is the presumed way to # pass a compiler option to the linker and `-R` is @@ -325,77 +259,67 @@ def runtime_library_dir_option(self, dir): def library_option(self, lib): return "-l" + lib - @staticmethod - def _library_root(dir): - """ - macOS users can specify an alternate SDK using'-isysroot'. - Calculate the SDK root if it is specified. - - Note that, as of Xcode 7, Apple SDKs may contain textual stub - libraries with .tbd extensions rather than the normal .dylib - shared libraries installed in /. The Apple compiler tool - chain handles this transparently but it can cause problems - for programs that are being built with an SDK and searching - for specific libraries. Callers of find_library_file need to - keep in mind that the base filename of the returned SDK library - file might have a different extension from that of the library - file installed on the running system, for example: - /Applications/Xcode.app/Contents/Developer/Platforms/ - MacOSX.platform/Developer/SDKs/MacOSX10.11.sdk/ - usr/lib/libedit.tbd - vs - /usr/lib/libedit.dylib - """ - cflags = sysconfig.get_config_var('CFLAGS') - match = re.search(r'-isysroot\s*(\S+)', cflags) - - apply_root = ( - sys.platform == 'darwin' - and match - and ( - dir.startswith('/System/') - or (dir.startswith('/usr/') and not dir.startswith('/usr/local/')) - ) - ) - - return os.path.join(match.group(1), dir[1:]) if apply_root else dir - def find_library_file(self, dirs, lib, debug=0): - r""" - Second-guess the linker with not much hard - data to go on: GCC seems to prefer the shared library, so - assume that *all* Unix C compilers do, - ignoring even GCC's "-static" option. - - >>> compiler = UnixCCompiler() - >>> compiler._library_root = lambda dir: dir - >>> monkeypatch = getfixture('monkeypatch') - >>> monkeypatch.setattr(os.path, 'exists', lambda d: 'existing' in d) - >>> dirs = ('/foo/bar/missing', '/foo/bar/existing') - >>> compiler.find_library_file(dirs, 'abc').replace('\\', '/') - '/foo/bar/existing/libabc.dylib' - >>> compiler.find_library_file(reversed(dirs), 'abc').replace('\\', '/') - '/foo/bar/existing/libabc.dylib' - >>> monkeypatch.setattr(os.path, 'exists', - ... 
lambda d: 'existing' in d and '.a' in d) - >>> compiler.find_library_file(dirs, 'abc').replace('\\', '/') - '/foo/bar/existing/libabc.a' - >>> compiler.find_library_file(reversed(dirs), 'abc').replace('\\', '/') - '/foo/bar/existing/libabc.a' - """ - lib_names = ( - self.library_filename(lib, lib_type=type) - for type in 'dylib xcode_stub shared static'.split() - ) - - roots = map(self._library_root, dirs) - - searched = ( - os.path.join(root, lib_name) - for root, lib_name in itertools.product(roots, lib_names) - ) - - found = filter(os.path.exists, searched) - - # Return None if it could not be found in any dir. - return next(found, None) + shared_f = self.library_filename(lib, lib_type='shared') + dylib_f = self.library_filename(lib, lib_type='dylib') + xcode_stub_f = self.library_filename(lib, lib_type='xcode_stub') + static_f = self.library_filename(lib, lib_type='static') + + if sys.platform == 'darwin': + # On OSX users can specify an alternate SDK using + # '-isysroot', calculate the SDK root if it is specified + # (and use it further on) + # + # Note that, as of Xcode 7, Apple SDKs may contain textual stub + # libraries with .tbd extensions rather than the normal .dylib + # shared libraries installed in /. The Apple compiler tool + # chain handles this transparently but it can cause problems + # for programs that are being built with an SDK and searching + # for specific libraries. Callers of find_library_file need to + # keep in mind that the base filename of the returned SDK library + # file might have a different extension from that of the library + # file installed on the running system, for example: + # /Applications/Xcode.app/Contents/Developer/Platforms/ + # MacOSX.platform/Developer/SDKs/MacOSX10.11.sdk/ + # usr/lib/libedit.tbd + # vs + # /usr/lib/libedit.dylib + cflags = sysconfig.get_config_var('CFLAGS') + m = re.search(r'-isysroot\s*(\S+)', cflags) + if m is None: + sysroot = '/' + else: + sysroot = m.group(1) + + + + for dir in dirs: + shared = os.path.join(dir, shared_f) + dylib = os.path.join(dir, dylib_f) + static = os.path.join(dir, static_f) + xcode_stub = os.path.join(dir, xcode_stub_f) + + if sys.platform == 'darwin' and ( + dir.startswith('/System/') or ( + dir.startswith('/usr/') and not dir.startswith('/usr/local/'))): + + shared = os.path.join(sysroot, dir[1:], shared_f) + dylib = os.path.join(sysroot, dir[1:], dylib_f) + static = os.path.join(sysroot, dir[1:], static_f) + xcode_stub = os.path.join(sysroot, dir[1:], xcode_stub_f) + + # We're second-guessing the linker here, with not much hard + # data to go on: GCC seems to prefer the shared library, so I'm + # assuming that *all* Unix C compilers do. And of course I'm + # ignoring even GCC's "-static" option. So sue me. + if os.path.exists(dylib): + return dylib + elif os.path.exists(xcode_stub): + return xcode_stub + elif os.path.exists(shared): + return shared + elif os.path.exists(static): + return static + + # Oops, didn't find it in *any* of 'dirs' + return None diff --git a/venv/lib/python3.10/site-packages/setuptools/_distutils/util.py b/venv/lib/python3.10/site-packages/setuptools/_distutils/util.py index 4763202..ac6d446 100644 --- a/venv/lib/python3.10/site-packages/setuptools/_distutils/util.py +++ b/venv/lib/python3.10/site-packages/setuptools/_distutils/util.py @@ -4,87 +4,132 @@ one of the other *util.py modules. 
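(The find_library_file search just shown tries, within each directory, the dylib, xcode_stub, shared, and static names in that order. A compressed sketch, with the extensions hard-coded to common Unix/macOS values rather than taken from the compiler's filename attributes:)

import os

def first_existing_library(dirs, lib, exts=(".dylib", ".tbd", ".so", ".a")):
    # Preference order mirrors find_library_file: dylib, then the Xcode
    # .tbd stub, then shared, then static -- checked within each directory.
    for d in dirs:
        for ext in exts:
            candidate = os.path.join(d, "lib" + lib + ext)
            if os.path.exists(candidate):
                return candidate
    return None  # not found in any directory

print(first_existing_library(["/usr/lib", "/usr/local/lib"], "edit"))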
""" -import importlib.util import os import re +import importlib.util import string -import subprocess import sys -import sysconfig -import functools - -from distutils.errors import DistutilsPlatformError, DistutilsByteCompileError +from distutils.errors import DistutilsPlatformError from distutils.dep_util import newer from distutils.spawn import spawn from distutils import log +from distutils.errors import DistutilsByteCompileError +from .py35compat import _optim_args_from_interpreter_flags def get_host_platform(): - """ - Return a string that identifies the current platform. Use this - function to distinguish platform-specific build directories and - platform-specific built distributions. - """ + """Return a string that identifies the current platform. This is used mainly to + distinguish platform-specific build directories and platform-specific built + distributions. Typically includes the OS name and version and the + architecture (as supplied by 'os.uname()'), although the exact information + included depends on the OS; eg. on Linux, the kernel version isn't + particularly important. - # This function initially exposed platforms as defined in Python 3.9 - # even with older Python versions when distutils was split out. - # Now it delegates to stdlib sysconfig, but maintains compatibility. + Examples of returned values: + linux-i586 + linux-alpha (?) + solaris-2.6-sun4u - if sys.version_info < (3, 8): - if os.name == 'nt': - if '(arm)' in sys.version.lower(): - return 'win-arm32' - if '(arm64)' in sys.version.lower(): - return 'win-arm64' + Windows will return one of: + win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc) + win32 (all others - specifically, sys.platform is returned) - if sys.version_info < (3, 9): - if os.name == "posix" and hasattr(os, 'uname'): - osname, host, release, version, machine = os.uname() - if osname[:3] == "aix": - from .py38compat import aix_platform - - return aix_platform(osname, version, release) - - return sysconfig.get_platform() + For other non-POSIX platforms, currently just returns 'sys.platform'. + """ + if os.name == 'nt': + if 'amd64' in sys.version.lower(): + return 'win-amd64' + if '(arm)' in sys.version.lower(): + return 'win-arm32' + if '(arm64)' in sys.version.lower(): + return 'win-arm64' + return sys.platform + + # Set for cross builds explicitly + if "_PYTHON_HOST_PLATFORM" in os.environ: + return os.environ["_PYTHON_HOST_PLATFORM"] + + if os.name != "posix" or not hasattr(os, 'uname'): + # XXX what about the architecture? NT is Intel or Alpha, + # Mac OS is M68k or PPC, etc. + return sys.platform + + # Try to distinguish various flavours of Unix + + (osname, host, release, version, machine) = os.uname() + + # Convert the OS name to lowercase, remove '/' characters, and translate + # spaces (for "Power Macintosh") + osname = osname.lower().replace('/', '') + machine = machine.replace(' ', '_') + machine = machine.replace('/', '-') + + if osname[:5] == "linux": + # At least on Linux/Intel, 'machine' is the processor -- + # i386, etc. + # XXX what about Alpha, SPARC, etc? + return "%s-%s" % (osname, machine) + elif osname[:5] == "sunos": + if release[0] >= "5": # SunOS 5 == Solaris 2 + osname = "solaris" + release = "%d.%s" % (int(release[0]) - 3, release[2:]) + # We can't use "platform.architecture()[0]" because a + # bootstrap problem. We use a dict to get an error + # if some suspicious happens. 
+ bitness = {2147483647:"32bit", 9223372036854775807:"64bit"} + machine += ".%s" % bitness[sys.maxsize] + # fall through to standard osname-release-machine representation + elif osname[:3] == "aix": + from .py38compat import aix_platform + return aix_platform(osname, version, release) + elif osname[:6] == "cygwin": + osname = "cygwin" + rel_re = re.compile (r'[\d.]+', re.ASCII) + m = rel_re.match(release) + if m: + release = m.group() + elif osname[:6] == "darwin": + import _osx_support, distutils.sysconfig + osname, release, machine = _osx_support.get_platform_osx( + distutils.sysconfig.get_config_vars(), + osname, release, machine) + + return "%s-%s-%s" % (osname, release, machine) def get_platform(): if os.name == 'nt': TARGET_TO_PLAT = { - 'x86': 'win32', - 'x64': 'win-amd64', - 'arm': 'win-arm32', + 'x86' : 'win32', + 'x64' : 'win-amd64', + 'arm' : 'win-arm32', 'arm64': 'win-arm64', } - target = os.environ.get('VSCMD_ARG_TGT_ARCH') - return TARGET_TO_PLAT.get(target) or get_host_platform() - return get_host_platform() + return TARGET_TO_PLAT.get(os.environ.get('VSCMD_ARG_TGT_ARCH')) or get_host_platform() + else: + return get_host_platform() if sys.platform == 'darwin': - _syscfg_macosx_ver = None # cache the version pulled from sysconfig + _syscfg_macosx_ver = None # cache the version pulled from sysconfig MACOSX_VERSION_VAR = 'MACOSX_DEPLOYMENT_TARGET' - def _clear_cached_macosx_ver(): """For testing only. Do not call.""" global _syscfg_macosx_ver _syscfg_macosx_ver = None - def get_macosx_target_ver_from_syscfg(): """Get the version of macOS latched in the Python interpreter configuration. Returns the version as a string or None if can't obtain one. Cached.""" global _syscfg_macosx_ver if _syscfg_macosx_ver is None: from distutils import sysconfig - ver = sysconfig.get_config_var(MACOSX_VERSION_VAR) or '' if ver: _syscfg_macosx_ver = ver return _syscfg_macosx_ver - def get_macosx_target_ver(): """Return the version of macOS for which we are building. @@ -102,16 +147,12 @@ def get_macosx_target_ver(): # ensures extension modules are built with correct compatibility # values, specifically LDSHARED which can use # '-undefined dynamic_lookup' which only works on >= 10.3. - if ( - syscfg_ver - and split_version(syscfg_ver) >= [10, 3] - and split_version(env_ver) < [10, 3] - ): - my_msg = ( - '$' + MACOSX_VERSION_VAR + ' mismatch: ' - 'now "%s" but "%s" during configure; ' - 'must use 10.3 or later' % (env_ver, syscfg_ver) - ) + if syscfg_ver and split_version(syscfg_ver) >= [10, 3] and \ + split_version(env_ver) < [10, 3]: + my_msg = ('$' + MACOSX_VERSION_VAR + ' mismatch: ' + 'now "%s" but "%s" during configure; ' + 'must use 10.3 or later' + % (env_ver, syscfg_ver)) raise DistutilsPlatformError(my_msg) return env_ver return syscfg_ver @@ -122,7 +163,7 @@ def split_version(s): return [int(n) for n in s.split('.')] -def convert_path(pathname): +def convert_path (pathname): """Return 'pathname' as a name that will work on the native filesystem, i.e. split it on '/' and put it back together again using the current directory separator. Needed because filenames in the setup script are @@ -147,11 +188,10 @@ def convert_path(pathname): return os.curdir return os.path.join(*paths) - # convert_path () -def change_root(new_root, pathname): +def change_root (new_root, pathname): """Return 'pathname' with 'new_root' prepended. If 'pathname' is relative, this is equivalent to "os.path.join(new_root,pathname)". 
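(For example, on POSIX, with illustrative paths:)

from distutils.util import change_root

print(change_root('/stage', '/usr/lib/site-packages'))  # '/stage/usr/lib/site-packages'
print(change_root('/stage', 'relative/dir'))            # '/stage/relative/dir'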
Otherwise, it requires making 'pathname' relative and then joining the @@ -169,11 +209,12 @@ def change_root(new_root, pathname): path = path[1:] return os.path.join(new_root, path) - raise DistutilsPlatformError(f"nothing known about platform '{os.name}'") + else: + raise DistutilsPlatformError("nothing known about platform '%s'" % os.name) -@functools.lru_cache() -def check_environ(): +_environ_checked = 0 +def check_environ (): """Ensure that 'os.environ' has all the environment variables we guarantee that users can use in config files, command-line options, etc. Currently this includes: @@ -181,10 +222,13 @@ def check_environ(): PLAT - description of the current platform, including hardware and OS (see 'get_platform()') """ + global _environ_checked + if _environ_checked: + return + if os.name == 'posix' and 'HOME' not in os.environ: try: import pwd - os.environ['HOME'] = pwd.getpwuid(os.getuid())[5] except (ImportError, KeyError): # bpo-10496: if the current user identifier doesn't exist in the @@ -194,8 +238,10 @@ def check_environ(): if 'PLAT' not in os.environ: os.environ['PLAT'] = get_platform() + _environ_checked = 1 -def subst_vars(s, local_vars): + +def subst_vars (s, local_vars): """ Perform variable substitution on 'string'. Variables are indicated by format-style braces ("{var}"). @@ -213,20 +259,19 @@ def subst_vars(s, local_vars): except KeyError as var: raise ValueError(f"invalid variable {var}") +# subst_vars () + def _subst_compat(s): """ Replace shell/Perl-style variable substitution with format-style. For compatibility. """ - def _subst(match): return f'{{{match.group(1)}}}' - repl = re.sub(r'\$([a-zA-Z_][a-zA-Z_0-9]*)', _subst, s) if repl != s: import warnings - warnings.warn( "shell/Perl-style substitions are deprecated", DeprecationWarning, @@ -234,7 +279,7 @@ def _subst(match): return repl -def grok_environment_error(exc, prefix="error: "): +def grok_environment_error (exc, prefix="error: "): # Function kept for backward compatibility. # Used to try clever things with EnvironmentErrors, # but nowadays str(exception) produces good messages. @@ -243,16 +288,13 @@ def grok_environment_error(exc, prefix="error: "): # Needed by 'split_quoted()' _wordchars_re = _squote_re = _dquote_re = None - - def _init_regex(): global _wordchars_re, _squote_re, _dquote_re _wordchars_re = re.compile(r'[^\\\'\"%s ]*' % string.whitespace) _squote_re = re.compile(r"'(?:[^'\\]|\\.)*'") _dquote_re = re.compile(r'"(?:[^"\\]|\\.)*"') - -def split_quoted(s): +def split_quoted (s): """Split a string up according to Unix shell-like rules for quotes and backslashes. In short: words are delimited by spaces, as long as those spaces are not escaped by a backslash, or inside a quoted string. @@ -266,8 +308,7 @@ def split_quoted(s): # This is a nice algorithm for splitting up a single string, since it # doesn't require character-by-character examination. It was a little # bit of a brain-bender to get it working right, though... 
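(A brief demonstration of the quoting rules just described:)

from distutils.util import split_quoted

print(split_quoted('this is "a single word" and\\ another'))
# ['this', 'is', 'a single word', 'and another']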
- if _wordchars_re is None: - _init_regex() + if _wordchars_re is None: _init_regex() s = s.strip() words = [] @@ -280,23 +321,20 @@ def split_quoted(s): words.append(s[:end]) break - if s[end] in string.whitespace: - # unescaped, unquoted whitespace: now - # we definitely have a word delimiter - words.append(s[:end]) + if s[end] in string.whitespace: # unescaped, unquoted whitespace: now + words.append(s[:end]) # we definitely have a word delimiter s = s[end:].lstrip() pos = 0 - elif s[end] == '\\': - # preserve whatever is being escaped; - # will become part of the current word - s = s[:end] + s[end + 1 :] - pos = end + 1 + elif s[end] == '\\': # preserve whatever is being escaped; + # will become part of the current word + s = s[:end] + s[end+1:] + pos = end+1 else: - if s[end] == "'": # slurp singly-quoted string + if s[end] == "'": # slurp singly-quoted string m = _squote_re.match(s, end) - elif s[end] == '"': # slurp doubly-quoted string + elif s[end] == '"': # slurp doubly-quoted string m = _dquote_re.match(s, end) else: raise RuntimeError("this can't happen (bad char '%c')" % s[end]) @@ -305,7 +343,7 @@ def split_quoted(s): raise ValueError("bad string (mismatched %s quotes?)" % s[end]) (beg, end) = m.span() - s = s[:beg] + s[beg + 1 : end - 1] + s[end:] + s = s[:beg] + s[beg+1:end-1] + s[end:] pos = m.end() - 2 if pos >= len(s): @@ -314,11 +352,10 @@ def split_quoted(s): return words - # split_quoted () -def execute(func, args, msg=None, verbose=0, dry_run=0): +def execute (func, args, msg=None, verbose=0, dry_run=0): """Perform some action that affects the outside world (eg. by writing to the filesystem). Such actions are special because they are disabled by the 'dry_run' flag. This method takes care of all @@ -328,8 +365,8 @@ def execute(func, args, msg=None, verbose=0, dry_run=0): print. """ if msg is None: - msg = "{}{!r}".format(func.__name__, args) - if msg[-2:] == ',)': # correct for singleton tuple + msg = "%s%r" % (func.__name__, args) + if msg[-2:] == ',)': # correct for singleton tuple msg = msg[0:-2] + ')' log.info(msg) @@ -337,7 +374,7 @@ def execute(func, args, msg=None, verbose=0, dry_run=0): func(*args) -def strtobool(val): +def strtobool (val): """Convert a string representation of truth to true (1) or false (0). True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values @@ -350,19 +387,14 @@ def strtobool(val): elif val in ('n', 'no', 'f', 'false', 'off', '0'): return 0 else: - raise ValueError("invalid truth value {!r}".format(val)) - - -def byte_compile( # noqa: C901 - py_files, - optimize=0, - force=0, - prefix=None, - base_dir=None, - verbose=1, - dry_run=0, - direct=None, -): + raise ValueError("invalid truth value %r" % (val,)) + + +def byte_compile (py_files, + optimize=0, force=0, + prefix=None, base_dir=None, + verbose=1, dry_run=0, + direct=None): """Byte-compile a collection of Python source files to .pyc files in a __pycache__ subdirectory. 'py_files' is a list of files to compile; any files that don't end in ".py" are silently @@ -392,6 +424,10 @@ def byte_compile( # noqa: C901 it set to None. """ + # Late import to fix a bootstrap issue: _posixsubprocess is built by + # setup.py, but setup.py uses distutils. + import subprocess + # nothing is done if sys.dont_write_bytecode is True if sys.dont_write_bytecode: raise DistutilsByteCompileError('byte-compiling is disabled.') @@ -407,18 +443,16 @@ def byte_compile( # noqa: C901 # optimize mode, or if either optimization level was requested by # the caller. 
if direct is None: - direct = __debug__ and optimize == 0 + direct = (__debug__ and optimize == 0) # "Indirect" byte-compilation: write a temporary script and then # run it with the appropriate flags. if not direct: try: from tempfile import mkstemp - (script_fd, script_name) = mkstemp(".py") except ImportError: from tempfile import mktemp - (script_fd, script_name) = None, mktemp(".py") log.info("writing byte-compilation script '%s'", script_name) if not dry_run: @@ -428,12 +462,10 @@ def byte_compile( # noqa: C901 script = open(script_name, "w") with script: - script.write( - """\ + script.write("""\ from distutils.util import byte_compile files = [ -""" - ) +""") # XXX would be nice to write absolute filenames, just for # safety's sake (script should be more robust in the face of @@ -445,22 +477,24 @@ def byte_compile( # noqa: C901 # problem is that it's really a directory, but I'm treating it # as a dumb string, so trailing slashes and so forth matter. + #py_files = map(os.path.abspath, py_files) + #if prefix: + # prefix = os.path.abspath(prefix) + script.write(",\n".join(map(repr, py_files)) + "]\n") - script.write( - """ + script.write(""" byte_compile(files, optimize=%r, force=%r, prefix=%r, base_dir=%r, verbose=%r, dry_run=0, direct=1) -""" - % (optimize, force, prefix, base_dir, verbose) - ) +""" % (optimize, force, prefix, base_dir, verbose)) cmd = [sys.executable] - cmd.extend(subprocess._optim_args_from_interpreter_flags()) + cmd.extend(_optim_args_from_interpreter_flags()) cmd.append(script_name) spawn(cmd, dry_run=dry_run) - execute(os.remove, (script_name,), "removing %s" % script_name, dry_run=dry_run) + execute(os.remove, (script_name,), "removing %s" % script_name, + dry_run=dry_run) # "Direct" byte-compilation: use the py_compile module to compile # right here, right now. Note that the script generated in indirect @@ -480,17 +514,16 @@ def byte_compile( # noqa: C901 # dfile - purported source filename (same as 'file' by default) if optimize >= 0: opt = '' if optimize == 0 else optimize - cfile = importlib.util.cache_from_source(file, optimization=opt) + cfile = importlib.util.cache_from_source( + file, optimization=opt) else: cfile = importlib.util.cache_from_source(file) dfile = file if prefix: - if file[: len(prefix)] != prefix: - raise ValueError( - "invalid prefix: filename %r doesn't start with %r" - % (file, prefix) - ) - dfile = dfile[len(prefix) :] + if file[:len(prefix)] != prefix: + raise ValueError("invalid prefix: filename %r doesn't start with %r" + % (file, prefix)) + dfile = dfile[len(prefix):] if base_dir: dfile = os.path.join(base_dir, dfile) @@ -501,10 +534,12 @@ def byte_compile( # noqa: C901 if not dry_run: compile(file, cfile, dfile) else: - log.debug("skipping byte-compilation of %s to %s", file, cfile_base) + log.debug("skipping byte-compilation of %s to %s", + file, cfile_base) +# byte_compile () -def rfc822_escape(header): +def rfc822_escape (header): """Return a version of the string escaped for inclusion in an RFC-822 header, by ensuring there are 8 spaces space after each newline. """ diff --git a/venv/lib/python3.10/site-packages/setuptools/_distutils/version.py b/venv/lib/python3.10/site-packages/setuptools/_distutils/version.py index e29e265..35e181d 100644 --- a/venv/lib/python3.10/site-packages/setuptools/_distutils/version.py +++ b/venv/lib/python3.10/site-packages/setuptools/_distutils/version.py @@ -49,18 +49,18 @@ class Version: rich comparisons to _cmp. 
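(The effect of routing the rich comparisons through _cmp() can be seen with the two concrete classes defined below; note that in this vintage of distutils both emit a DeprecationWarning on construction.)

from distutils.version import LooseVersion, StrictVersion

assert StrictVersion('1.5.1a2') < StrictVersion('1.5.1')  # prerelease sorts first
assert LooseVersion('1.5.1') < LooseVersion('1.5.2b2')    # per-component comparison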
""" - def __init__(self, vstring=None): - if vstring: - self.parse(vstring) + def __init__ (self, vstring=None): warnings.warn( "distutils Version classes are deprecated. " "Use packaging.version instead.", DeprecationWarning, stacklevel=2, ) + if vstring: + self.parse(vstring) - def __repr__(self): - return "{} ('{}')".format(self.__class__.__name__, str(self)) + def __repr__ (self): + return "%s ('%s')" % (self.__class__.__name__, str(self)) def __eq__(self, other): c = self._cmp(other) @@ -110,7 +110,7 @@ def __ge__(self, other): # instance of your version class) -class StrictVersion(Version): +class StrictVersion (Version): """Version numbering for anal retentives and software idealists. Implements the standard interface for version number classes as @@ -147,16 +147,17 @@ class StrictVersion(Version): in the distutils documentation. """ - version_re = re.compile( - r'^(\d+) \. (\d+) (\. (\d+))? ([ab](\d+))?$', re.VERBOSE | re.ASCII - ) + version_re = re.compile(r'^(\d+) \. (\d+) (\. (\d+))? ([ab](\d+))?$', + re.VERBOSE | re.ASCII) + - def parse(self, vstring): + def parse (self, vstring): match = self.version_re.match(vstring) if not match: raise ValueError("invalid version number '%s'" % vstring) - (major, minor, patch, prerelease, prerelease_num) = match.group(1, 2, 4, 5, 6) + (major, minor, patch, prerelease, prerelease_num) = \ + match.group(1, 2, 4, 5, 6) if patch: self.version = tuple(map(int, [major, minor, patch])) @@ -168,7 +169,8 @@ def parse(self, vstring): else: self.prerelease = None - def __str__(self): + + def __str__ (self): if self.version[2] == 0: vstring = '.'.join(map(str, self.version[0:2])) @@ -180,7 +182,8 @@ def __str__(self): return vstring - def _cmp(self, other): # noqa: C901 + + def _cmp (self, other): if isinstance(other, str): with suppress_known_deprecation(): other = StrictVersion(other) @@ -201,13 +204,13 @@ def _cmp(self, other): # noqa: C901 # case 3: self doesn't have prerelease, other does: self is greater # case 4: both have prerelease: must compare them! - if not self.prerelease and not other.prerelease: + if (not self.prerelease and not other.prerelease): return 0 - elif self.prerelease and not other.prerelease: + elif (self.prerelease and not other.prerelease): return -1 - elif not self.prerelease and other.prerelease: + elif (not self.prerelease and other.prerelease): return 1 - elif self.prerelease and other.prerelease: + elif (self.prerelease and other.prerelease): if self.prerelease == other.prerelease: return 0 elif self.prerelease < other.prerelease: @@ -217,7 +220,6 @@ def _cmp(self, other): # noqa: C901 else: assert False, "never get here" - # end class StrictVersion @@ -285,8 +287,7 @@ def _cmp(self, other): # noqa: C901 # the Right Thing" (ie. the code matches the conception). But I'd rather # have a conception that matches common notions about version numbers. - -class LooseVersion(Version): +class LooseVersion (Version): """Version numbering for anarchists and software realists. 
Implements the standard interface for version number classes as @@ -321,12 +322,13 @@ class LooseVersion(Version): component_re = re.compile(r'(\d+ | [a-z]+ | \.)', re.VERBOSE) - def parse(self, vstring): + def parse (self, vstring): # I've given up on thinking I can reconstruct the version string # from the parsed tuple -- so I just store the string here for # use by __str__ self.vstring = vstring - components = [x for x in self.component_re.split(vstring) if x and x != '.'] + components = [x for x in self.component_re.split(vstring) + if x and x != '.'] for i, obj in enumerate(components): try: components[i] = int(obj) @@ -335,13 +337,16 @@ def parse(self, vstring): self.version = components - def __str__(self): + + def __str__ (self): return self.vstring - def __repr__(self): + + def __repr__ (self): return "LooseVersion ('%s')" % str(self) - def _cmp(self, other): + + def _cmp (self, other): if isinstance(other, str): other = LooseVersion(other) elif not isinstance(other, LooseVersion): diff --git a/venv/lib/python3.10/site-packages/setuptools/_distutils/versionpredicate.py b/venv/lib/python3.10/site-packages/setuptools/_distutils/versionpredicate.py index 6ea1192..55f25d9 100644 --- a/venv/lib/python3.10/site-packages/setuptools/_distutils/versionpredicate.py +++ b/venv/lib/python3.10/site-packages/setuptools/_distutils/versionpredicate.py @@ -5,10 +5,11 @@ import operator -re_validPackage = re.compile(r"(?i)^\s*([a-z_]\w*(?:\.[a-z_]\w*)*)(.*)", re.ASCII) +re_validPackage = re.compile(r"(?i)^\s*([a-z_]\w*(?:\.[a-z_]\w*)*)(.*)", + re.ASCII) # (package) (rest) -re_paren = re.compile(r"^\s*\((.*)\)\s*$") # (list) inside of parentheses +re_paren = re.compile(r"^\s*\((.*)\)\s*$") # (list) inside of parentheses re_splitComparison = re.compile(r"^\s*(<=|>=|<|>|!=|==)\s*([^\s,]+)\s*$") # (comp) (version) @@ -26,16 +27,8 @@ def splitUp(pred): other = distutils.version.StrictVersion(verStr) return (comp, other) - -compmap = { - "<": operator.lt, - "<=": operator.le, - "==": operator.eq, - ">": operator.gt, - ">=": operator.ge, - "!=": operator.ne, -} - +compmap = {"<": operator.lt, "<=": operator.le, "==": operator.eq, + ">": operator.gt, ">=": operator.ge, "!=": operator.ne} class VersionPredicate: """Parse and test package version predicates. @@ -103,7 +96,8 @@ class VersionPredicate: """ def __init__(self, versionPredicateStr): - """Parse a version predicate string.""" + """Parse a version predicate string. + """ # Fields: # name: package name # pred: list of (comparison string, StrictVersion) @@ -123,7 +117,8 @@ def __init__(self, versionPredicateStr): str = match.groups()[0] self.pred = [splitUp(aPred) for aPred in str.split(",")] if not self.pred: - raise ValueError("empty parenthesized list in %r" % versionPredicateStr) + raise ValueError("empty parenthesized list in %r" + % versionPredicateStr) else: self.pred = [] @@ -147,7 +142,6 @@ def satisfied_by(self, version): _provision_rx = None - def split_provision(value): """Return the name and optional version number of a provision. 
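(Usage of the predicate class above, with an illustrative package name:)

from distutils.versionpredicate import VersionPredicate

pred = VersionPredicate('mypkg (>1.0, <3.0, !=2.5)')
print(pred.name)                 # 'mypkg'
print(pred.satisfied_by('2.0'))  # True
print(pred.satisfied_by('2.5'))  # False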
- """ - - def __init__(self, parent, name): - # type: (ResourceContainer, str) -> None - self.parent = parent - self.name = name # type: ignore - - def is_file(self): - return True - - def is_dir(self): - return False - - def open(self, mode='r', *args, **kwargs): - stream = self.parent.reader.open_binary(self.name) - if 'b' not in mode: - stream = io.TextIOWrapper(*args, **kwargs) - return stream - - def joinpath(self, name): - raise RuntimeError("Cannot traverse into a resource") - - -class ResourceContainer(Traversable): - """ - Traversable container for a package's resources via its reader. - """ - - def __init__(self, reader): - # type: (SimpleReader) -> None - self.reader = reader - - def is_dir(self): - return True - - def is_file(self): - return False - - def iterdir(self): - files = (ResourceHandle(self, name) for name in self.reader.resources) - dirs = map(ResourceContainer, self.reader.children()) - return itertools.chain(files, dirs) - - def open(self, *args, **kwargs): - raise IsADirectoryError() - - def joinpath(self, name): - return next( - traversable for traversable in self.iterdir() if traversable.name == name - ) - - -class TraversableReader(TraversableResources, SimpleReader): - """ - A TraversableResources based on SimpleReader. Resource providers - may derive from this class to provide the TraversableResources - interface by supplying the SimpleReader interface. - """ - - def files(self): - return ResourceContainer(self) diff --git a/venv/lib/python3.10/site-packages/setuptools/_vendor/jaraco/context.py b/venv/lib/python3.10/site-packages/setuptools/_vendor/jaraco/context.py deleted file mode 100644 index 87a4e3d..0000000 --- a/venv/lib/python3.10/site-packages/setuptools/_vendor/jaraco/context.py +++ /dev/null @@ -1,213 +0,0 @@ -import os -import subprocess -import contextlib -import functools -import tempfile -import shutil -import operator - - -@contextlib.contextmanager -def pushd(dir): - orig = os.getcwd() - os.chdir(dir) - try: - yield dir - finally: - os.chdir(orig) - - -@contextlib.contextmanager -def tarball_context(url, target_dir=None, runner=None, pushd=pushd): - """ - Get a tarball, extract it, change to that directory, yield, then - clean up. - `runner` is the function to invoke commands. - `pushd` is a context manager for changing the directory. - """ - if target_dir is None: - target_dir = os.path.basename(url).replace('.tar.gz', '').replace('.tgz', '') - if runner is None: - runner = functools.partial(subprocess.check_call, shell=True) - # In the tar command, use --strip-components=1 to strip the first path and - # then - # use -C to cause the files to be extracted to {target_dir}. This ensures - # that we always know where the files were extracted. - runner('mkdir {target_dir}'.format(**vars())) - try: - getter = 'wget {url} -O -' - extract = 'tar x{compression} --strip-components=1 -C {target_dir}' - cmd = ' | '.join((getter, extract)) - runner(cmd.format(compression=infer_compression(url), **vars())) - with pushd(target_dir): - yield target_dir - finally: - runner('rm -Rf {target_dir}'.format(**vars())) - - -def infer_compression(url): - """ - Given a URL or filename, infer the compression code for tar. - """ - # cheat and just assume it's the last two characters - compression_indicator = url[-2:] - mapping = dict(gz='z', bz='j', xz='J') - # Assume 'z' (gzip) if no match - return mapping.get(compression_indicator, 'z') - - -@contextlib.contextmanager -def temp_dir(remover=shutil.rmtree): - """ - Create a temporary directory context. 
Pass a custom remover - to override the removal behavior. - """ - temp_dir = tempfile.mkdtemp() - try: - yield temp_dir - finally: - remover(temp_dir) - - -@contextlib.contextmanager -def repo_context(url, branch=None, quiet=True, dest_ctx=temp_dir): - """ - Check out the repo indicated by url. - - If dest_ctx is supplied, it should be a context manager - to yield the target directory for the check out. - """ - exe = 'git' if 'git' in url else 'hg' - with dest_ctx() as repo_dir: - cmd = [exe, 'clone', url, repo_dir] - if branch: - cmd.extend(['--branch', branch]) - devnull = open(os.path.devnull, 'w') - stdout = devnull if quiet else None - subprocess.check_call(cmd, stdout=stdout) - yield repo_dir - - -@contextlib.contextmanager -def null(): - yield - - -class ExceptionTrap: - """ - A context manager that will catch certain exceptions and provide an - indication they occurred. - - >>> with ExceptionTrap() as trap: - ... raise Exception() - >>> bool(trap) - True - - >>> with ExceptionTrap() as trap: - ... pass - >>> bool(trap) - False - - >>> with ExceptionTrap(ValueError) as trap: - ... raise ValueError("1 + 1 is not 3") - >>> bool(trap) - True - - >>> with ExceptionTrap(ValueError) as trap: - ... raise Exception() - Traceback (most recent call last): - ... - Exception - - >>> bool(trap) - False - """ - - exc_info = None, None, None - - def __init__(self, exceptions=(Exception,)): - self.exceptions = exceptions - - def __enter__(self): - return self - - @property - def type(self): - return self.exc_info[0] - - @property - def value(self): - return self.exc_info[1] - - @property - def tb(self): - return self.exc_info[2] - - def __exit__(self, *exc_info): - type = exc_info[0] - matches = type and issubclass(type, self.exceptions) - if matches: - self.exc_info = exc_info - return matches - - def __bool__(self): - return bool(self.type) - - def raises(self, func, *, _test=bool): - """ - Wrap func and replace the result with the truth - value of the trap (True if an exception occurred). - - First, give the decorator an alias to support Python 3.8 - Syntax. - - >>> raises = ExceptionTrap(ValueError).raises - - Now decorate a function that always fails. - - >>> @raises - ... def fail(): - ... raise ValueError('failed') - >>> fail() - True - """ - - @functools.wraps(func) - def wrapper(*args, **kwargs): - with ExceptionTrap(self.exceptions) as trap: - func(*args, **kwargs) - return _test(trap) - - return wrapper - - def passes(self, func): - """ - Wrap func and replace the result with the truth - value of the trap (True if no exception). - - First, give the decorator an alias to support Python 3.8 - Syntax. - - >>> passes = ExceptionTrap(ValueError).passes - - Now decorate a function that always fails. - - >>> @passes - ... def fail(): - ... raise ValueError('failed') - - >>> fail() - False - """ - return self.raises(func, _test=operator.not_) - - -class suppress(contextlib.suppress, contextlib.ContextDecorator): - """ - A version of contextlib.suppress with decorator support. - - >>> @suppress(KeyError) - ... def key_error(): - ... 
{}[''] - >>> key_error() - """ diff --git a/venv/lib/python3.10/site-packages/setuptools/_vendor/jaraco/functools.py b/venv/lib/python3.10/site-packages/setuptools/_vendor/jaraco/functools.py deleted file mode 100644 index bbd8b29..0000000 --- a/venv/lib/python3.10/site-packages/setuptools/_vendor/jaraco/functools.py +++ /dev/null @@ -1,525 +0,0 @@ -import functools -import time -import inspect -import collections -import types -import itertools - -import setuptools.extern.more_itertools - -from typing import Callable, TypeVar - - -CallableT = TypeVar("CallableT", bound=Callable[..., object]) - - -def compose(*funcs): - """ - Compose any number of unary functions into a single unary function. - - >>> import textwrap - >>> expected = str.strip(textwrap.dedent(compose.__doc__)) - >>> strip_and_dedent = compose(str.strip, textwrap.dedent) - >>> strip_and_dedent(compose.__doc__) == expected - True - - Compose also allows the innermost function to take arbitrary arguments. - - >>> round_three = lambda x: round(x, ndigits=3) - >>> f = compose(round_three, int.__truediv__) - >>> [f(3*x, x+1) for x in range(1,10)] - [1.5, 2.0, 2.25, 2.4, 2.5, 2.571, 2.625, 2.667, 2.7] - """ - - def compose_two(f1, f2): - return lambda *args, **kwargs: f1(f2(*args, **kwargs)) - - return functools.reduce(compose_two, funcs) - - -def method_caller(method_name, *args, **kwargs): - """ - Return a function that will call a named method on the - target object with optional positional and keyword - arguments. - - >>> lower = method_caller('lower') - >>> lower('MyString') - 'mystring' - """ - - def call_method(target): - func = getattr(target, method_name) - return func(*args, **kwargs) - - return call_method - - -def once(func): - """ - Decorate func so it's only ever called the first time. - - This decorator can ensure that an expensive or non-idempotent function - will not be expensive on subsequent calls and is idempotent. - - >>> add_three = once(lambda a: a+3) - >>> add_three(3) - 6 - >>> add_three(9) - 6 - >>> add_three('12') - 6 - - To reset the stored value, simply clear the property ``saved_result``. - - >>> del add_three.saved_result - >>> add_three(9) - 12 - >>> add_three(8) - 12 - - Or invoke 'reset()' on it. - - >>> add_three.reset() - >>> add_three(-3) - 0 - >>> add_three(0) - 0 - """ - - @functools.wraps(func) - def wrapper(*args, **kwargs): - if not hasattr(wrapper, 'saved_result'): - wrapper.saved_result = func(*args, **kwargs) - return wrapper.saved_result - - wrapper.reset = lambda: vars(wrapper).__delitem__('saved_result') - return wrapper - - -def method_cache( - method: CallableT, - cache_wrapper: Callable[ - [CallableT], CallableT - ] = functools.lru_cache(), # type: ignore[assignment] -) -> CallableT: - """ - Wrap lru_cache to support storing the cache data in the object instances. - - Abstracts the common paradigm where the method explicitly saves an - underscore-prefixed protected property on first call and returns that - subsequently. - - >>> class MyClass: - ... calls = 0 - ... - ... @method_cache - ... def method(self, value): - ... self.calls += 1 - ... return value - - >>> a = MyClass() - >>> a.method(3) - 3 - >>> for x in range(75): - ... res = a.method(x) - >>> a.calls - 75 - - Note that the apparent behavior will be exactly like that of lru_cache - except that the cache is stored on each instance, so values in one - instance will not flush values from another, and when an instance is - deleted, so are the cached values for that instance. 
- - >>> b = MyClass() - >>> for x in range(35): - ... res = b.method(x) - >>> b.calls - 35 - >>> a.method(0) - 0 - >>> a.calls - 75 - - Note that if method had been decorated with ``functools.lru_cache()``, - a.calls would have been 76 (due to the cached value of 0 having been - flushed by the 'b' instance). - - Clear the cache with ``.cache_clear()`` - - >>> a.method.cache_clear() - - Same for a method that hasn't yet been called. - - >>> c = MyClass() - >>> c.method.cache_clear() - - Another cache wrapper may be supplied: - - >>> cache = functools.lru_cache(maxsize=2) - >>> MyClass.method2 = method_cache(lambda self: 3, cache_wrapper=cache) - >>> a = MyClass() - >>> a.method2() - 3 - - Caution - do not subsequently wrap the method with another decorator, such - as ``@property``, which changes the semantics of the function. - - See also - http://code.activestate.com/recipes/577452-a-memoize-decorator-for-instance-methods/ - for another implementation and additional justification. - """ - - def wrapper(self: object, *args: object, **kwargs: object) -> object: - # it's the first call, replace the method with a cached, bound method - bound_method: CallableT = types.MethodType( # type: ignore[assignment] - method, self - ) - cached_method = cache_wrapper(bound_method) - setattr(self, method.__name__, cached_method) - return cached_method(*args, **kwargs) - - # Support cache clear even before cache has been created. - wrapper.cache_clear = lambda: None # type: ignore[attr-defined] - - return ( # type: ignore[return-value] - _special_method_cache(method, cache_wrapper) or wrapper - ) - - -def _special_method_cache(method, cache_wrapper): - """ - Because Python treats special methods differently, it's not - possible to use instance attributes to implement the cached - methods. - - Instead, install the wrapper method under a different name - and return a simple proxy to that wrapper. - - https://github.com/jaraco/jaraco.functools/issues/5 - """ - name = method.__name__ - special_names = '__getattr__', '__getitem__' - if name not in special_names: - return - - wrapper_name = '__cached' + name - - def proxy(self, *args, **kwargs): - if wrapper_name not in vars(self): - bound = types.MethodType(method, self) - cache = cache_wrapper(bound) - setattr(self, wrapper_name, cache) - else: - cache = getattr(self, wrapper_name) - return cache(*args, **kwargs) - - return proxy - - -def apply(transform): - """ - Decorate a function with a transform function that is - invoked on results returned from the decorated function. - - >>> @apply(reversed) - ... def get_numbers(start): - ... "doc for get_numbers" - ... return range(start, start+3) - >>> list(get_numbers(4)) - [6, 5, 4] - >>> get_numbers.__doc__ - 'doc for get_numbers' - """ - - def wrap(func): - return functools.wraps(func)(compose(transform, func)) - - return wrap - - -def result_invoke(action): - r""" - Decorate a function with an action function that is - invoked on the results returned from the decorated - function (for its side-effect), then return the original - result. - - >>> @result_invoke(print) - ... def add_two(a, b): - ... return a + b - >>> x = add_two(2, 3) - 5 - >>> x - 5 - """ - - def wrap(func): - @functools.wraps(func) - def wrapper(*args, **kwargs): - result = func(*args, **kwargs) - action(result) - return result - - return wrapper - - return wrap - - -def call_aside(f, *args, **kwargs): - """ - Call a function for its side effect after initialization. - - >>> @call_aside - ... 
def func(): print("called") - called - >>> func() - called - - Use functools.partial to pass parameters to the initial call - - >>> @functools.partial(call_aside, name='bingo') - ... def func(name): print("called with", name) - called with bingo - """ - f(*args, **kwargs) - return f - - -class Throttler: - """ - Rate-limit a function (or other callable) - """ - - def __init__(self, func, max_rate=float('Inf')): - if isinstance(func, Throttler): - func = func.func - self.func = func - self.max_rate = max_rate - self.reset() - - def reset(self): - self.last_called = 0 - - def __call__(self, *args, **kwargs): - self._wait() - return self.func(*args, **kwargs) - - def _wait(self): - "ensure at least 1/max_rate seconds from last call" - elapsed = time.time() - self.last_called - must_wait = 1 / self.max_rate - elapsed - time.sleep(max(0, must_wait)) - self.last_called = time.time() - - def __get__(self, obj, type=None): - return first_invoke(self._wait, functools.partial(self.func, obj)) - - -def first_invoke(func1, func2): - """ - Return a function that when invoked will invoke func1 without - any parameters (for its side-effect) and then invoke func2 - with whatever parameters were passed, returning its result. - """ - - def wrapper(*args, **kwargs): - func1() - return func2(*args, **kwargs) - - return wrapper - - -def retry_call(func, cleanup=lambda: None, retries=0, trap=()): - """ - Given a callable func, trap the indicated exceptions - for up to 'retries' times, invoking cleanup on the - exception. On the final attempt, allow any exceptions - to propagate. - """ - attempts = itertools.count() if retries == float('inf') else range(retries) - for attempt in attempts: - try: - return func() - except trap: - cleanup() - - return func() - - -def retry(*r_args, **r_kwargs): - """ - Decorator wrapper for retry_call. Accepts arguments to retry_call - except func and then returns a decorator for the decorated function. - - Ex: - - >>> @retry(retries=3) - ... def my_func(a, b): - ... "this is my funk" - ... print(a, b) - >>> my_func.__doc__ - 'this is my funk' - """ - - def decorate(func): - @functools.wraps(func) - def wrapper(*f_args, **f_kwargs): - bound = functools.partial(func, *f_args, **f_kwargs) - return retry_call(bound, *r_args, **r_kwargs) - - return wrapper - - return decorate - - -def print_yielded(func): - """ - Convert a generator into a function that prints all yielded elements - - >>> @print_yielded - ... def x(): - ... yield 3; yield None - >>> x() - 3 - None - """ - print_all = functools.partial(map, print) - print_results = compose(more_itertools.consume, print_all, func) - return functools.wraps(func)(print_results) - - -def pass_none(func): - """ - Wrap func so it's not called if its first param is None - - >>> print_text = pass_none(print) - >>> print_text('text') - text - >>> print_text(None) - """ - - @functools.wraps(func) - def wrapper(param, *args, **kwargs): - if param is not None: - return func(param, *args, **kwargs) - - return wrapper - - -def assign_params(func, namespace): - """ - Assign parameters from namespace where func solicits. - - >>> def func(x, y=3): - ... print(x, y) - >>> assigned = assign_params(func, dict(x=2, z=4)) - >>> assigned() - 2 3 - - The usual errors are raised if a function doesn't receive - its required parameters: - - >>> assigned = assign_params(func, dict(y=3, z=4)) - >>> assigned() - Traceback (most recent call last): - TypeError: func() ...argument... - - It even works on methods: - - >>> class Handler: - ... def meth(self, arg): - ... 
print(arg) - >>> assign_params(Handler().meth, dict(arg='crystal', foo='clear'))() - crystal - """ - sig = inspect.signature(func) - params = sig.parameters.keys() - call_ns = {k: namespace[k] for k in params if k in namespace} - return functools.partial(func, **call_ns) - - -def save_method_args(method): - """ - Wrap a method such that when it is called, the args and kwargs are - saved on the method. - - >>> class MyClass: - ... @save_method_args - ... def method(self, a, b): - ... print(a, b) - >>> my_ob = MyClass() - >>> my_ob.method(1, 2) - 1 2 - >>> my_ob._saved_method.args - (1, 2) - >>> my_ob._saved_method.kwargs - {} - >>> my_ob.method(a=3, b='foo') - 3 foo - >>> my_ob._saved_method.args - () - >>> my_ob._saved_method.kwargs == dict(a=3, b='foo') - True - - The arguments are stored on the instance, allowing for - different instance to save different args. - - >>> your_ob = MyClass() - >>> your_ob.method({str('x'): 3}, b=[4]) - {'x': 3} [4] - >>> your_ob._saved_method.args - ({'x': 3},) - >>> my_ob._saved_method.args - () - """ - args_and_kwargs = collections.namedtuple('args_and_kwargs', 'args kwargs') - - @functools.wraps(method) - def wrapper(self, *args, **kwargs): - attr_name = '_saved_' + method.__name__ - attr = args_and_kwargs(args, kwargs) - setattr(self, attr_name, attr) - return method(self, *args, **kwargs) - - return wrapper - - -def except_(*exceptions, replace=None, use=None): - """ - Replace the indicated exceptions, if raised, with the indicated - literal replacement or evaluated expression (if present). - - >>> safe_int = except_(ValueError)(int) - >>> safe_int('five') - >>> safe_int('5') - 5 - - Specify a literal replacement with ``replace``. - - >>> safe_int_r = except_(ValueError, replace=0)(int) - >>> safe_int_r('five') - 0 - - Provide an expression to ``use`` to pass through particular parameters. - - >>> safe_int_pt = except_(ValueError, use='args[0]')(int) - >>> safe_int_pt('five') - 'five' - - """ - - def decorate(func): - @functools.wraps(func) - def wrapper(*args, **kwargs): - try: - return func(*args, **kwargs) - except exceptions: - try: - return eval(use) - except TypeError: - return replace - - return wrapper - - return decorate diff --git a/venv/lib/python3.10/site-packages/setuptools/_vendor/jaraco/text/__init__.py b/venv/lib/python3.10/site-packages/setuptools/_vendor/jaraco/text/__init__.py deleted file mode 100644 index a0306d5..0000000 --- a/venv/lib/python3.10/site-packages/setuptools/_vendor/jaraco/text/__init__.py +++ /dev/null @@ -1,599 +0,0 @@ -import re -import itertools -import textwrap -import functools - -try: - from importlib.resources import files # type: ignore -except ImportError: # pragma: nocover - from setuptools.extern.importlib_resources import files # type: ignore - -from setuptools.extern.jaraco.functools import compose, method_cache -from setuptools.extern.jaraco.context import ExceptionTrap - - -def substitution(old, new): - """ - Return a function that will perform a substitution on a string - """ - return lambda s: s.replace(old, new) - - -def multi_substitution(*substitutions): - """ - Take a sequence of pairs specifying substitutions, and create - a function that performs those substitutions. - - >>> multi_substitution(('foo', 'bar'), ('bar', 'baz'))('foo') - 'baz' - """ - substitutions = itertools.starmap(substitution, substitutions) - # compose function applies last function first, so reverse the - # substitutions to get the expected order. 
- substitutions = reversed(tuple(substitutions)) - return compose(*substitutions) - - -class FoldedCase(str): - """ - A case insensitive string class; behaves just like str - except compares equal when the only variation is case. - - >>> s = FoldedCase('hello world') - - >>> s == 'Hello World' - True - - >>> 'Hello World' == s - True - - >>> s != 'Hello World' - False - - >>> s.index('O') - 4 - - >>> s.split('O') - ['hell', ' w', 'rld'] - - >>> sorted(map(FoldedCase, ['GAMMA', 'alpha', 'Beta'])) - ['alpha', 'Beta', 'GAMMA'] - - Sequence membership is straightforward. - - >>> "Hello World" in [s] - True - >>> s in ["Hello World"] - True - - You may test for set inclusion, but candidate and elements - must both be folded. - - >>> FoldedCase("Hello World") in {s} - True - >>> s in {FoldedCase("Hello World")} - True - - String inclusion works as long as the FoldedCase object - is on the right. - - >>> "hello" in FoldedCase("Hello World") - True - - But not if the FoldedCase object is on the left: - - >>> FoldedCase('hello') in 'Hello World' - False - - In that case, use ``in_``: - - >>> FoldedCase('hello').in_('Hello World') - True - - >>> FoldedCase('hello') > FoldedCase('Hello') - False - """ - - def __lt__(self, other): - return self.lower() < other.lower() - - def __gt__(self, other): - return self.lower() > other.lower() - - def __eq__(self, other): - return self.lower() == other.lower() - - def __ne__(self, other): - return self.lower() != other.lower() - - def __hash__(self): - return hash(self.lower()) - - def __contains__(self, other): - return super().lower().__contains__(other.lower()) - - def in_(self, other): - "Does self appear in other?" - return self in FoldedCase(other) - - # cache lower since it's likely to be called frequently. - @method_cache - def lower(self): - return super().lower() - - def index(self, sub): - return self.lower().index(sub.lower()) - - def split(self, splitter=' ', maxsplit=0): - pattern = re.compile(re.escape(splitter), re.I) - return pattern.split(self, maxsplit) - - -# Python 3.8 compatibility -_unicode_trap = ExceptionTrap(UnicodeDecodeError) - - -@_unicode_trap.passes -def is_decodable(value): - r""" - Return True if the supplied value is decodable (using the default - encoding). - - >>> is_decodable(b'\xff') - False - >>> is_decodable(b'\x32') - True - """ - value.decode() - - -def is_binary(value): - r""" - Return True if the value appears to be binary (that is, it's a byte - string and isn't decodable). - - >>> is_binary(b'\xff') - True - >>> is_binary('\xff') - False - """ - return isinstance(value, bytes) and not is_decodable(value) - - -def trim(s): - r""" - Trim something like a docstring to remove the whitespace that - is common due to indentation and formatting. - - >>> trim("\n\tfoo = bar\n\t\tbar = baz\n") - 'foo = bar\n\tbar = baz' - """ - return textwrap.dedent(s).strip() - - -def wrap(s): - """ - Wrap lines of text, retaining existing newlines as - paragraph markers. - - >>> print(wrap(lorem_ipsum)) - Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do - eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad - minim veniam, quis nostrud exercitation ullamco laboris nisi ut - aliquip ex ea commodo consequat. Duis aute irure dolor in - reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla - pariatur. Excepteur sint occaecat cupidatat non proident, sunt in - culpa qui officia deserunt mollit anim id est laborum. - - Curabitur pretium tincidunt lacus. Nulla gravida orci a odio. 
Nullam - varius, turpis et commodo pharetra, est eros bibendum elit, nec luctus - magna felis sollicitudin mauris. Integer in mauris eu nibh euismod - gravida. Duis ac tellus et risus vulputate vehicula. Donec lobortis - risus a elit. Etiam tempor. Ut ullamcorper, ligula eu tempor congue, - eros est euismod turpis, id tincidunt sapien risus a quam. Maecenas - fermentum consequat mi. Donec fermentum. Pellentesque malesuada nulla - a mi. Duis sapien sem, aliquet nec, commodo eget, consequat quis, - neque. Aliquam faucibus, elit ut dictum aliquet, felis nisl adipiscing - sapien, sed malesuada diam lacus eget erat. Cras mollis scelerisque - nunc. Nullam arcu. Aliquam consequat. Curabitur augue lorem, dapibus - quis, laoreet et, pretium ac, nisi. Aenean magna nisl, mollis quis, - molestie eu, feugiat in, orci. In hac habitasse platea dictumst. - """ - paragraphs = s.splitlines() - wrapped = ('\n'.join(textwrap.wrap(para)) for para in paragraphs) - return '\n\n'.join(wrapped) - - -def unwrap(s): - r""" - Given a multi-line string, return an unwrapped version. - - >>> wrapped = wrap(lorem_ipsum) - >>> wrapped.count('\n') - 20 - >>> unwrapped = unwrap(wrapped) - >>> unwrapped.count('\n') - 1 - >>> print(unwrapped) - Lorem ipsum dolor sit amet, consectetur adipiscing ... - Curabitur pretium tincidunt lacus. Nulla gravida orci ... - - """ - paragraphs = re.split(r'\n\n+', s) - cleaned = (para.replace('\n', ' ') for para in paragraphs) - return '\n'.join(cleaned) - - - - -class Splitter(object): - """object that will split a string with the given arguments for each call - - >>> s = Splitter(',') - >>> s('hello, world, this is your, master calling') - ['hello', ' world', ' this is your', ' master calling'] - """ - - def __init__(self, *args): - self.args = args - - def __call__(self, s): - return s.split(*self.args) - - -def indent(string, prefix=' ' * 4): - """ - >>> indent('foo') - ' foo' - """ - return prefix + string - - -class WordSet(tuple): - """ - Given an identifier, return the words that identifier represents, - whether in camel case, underscore-separated, etc. - - >>> WordSet.parse("camelCase") - ('camel', 'Case') - - >>> WordSet.parse("under_sep") - ('under', 'sep') - - Acronyms should be retained - - >>> WordSet.parse("firstSNL") - ('first', 'SNL') - - >>> WordSet.parse("you_and_I") - ('you', 'and', 'I') - - >>> WordSet.parse("A simple test") - ('A', 'simple', 'test') - - Multiple caps should not interfere with the first cap of another word. - - >>> WordSet.parse("myABCClass") - ('my', 'ABC', 'Class') - - The result is a WordSet, so you can get the form you need. - - >>> WordSet.parse("myABCClass").underscore_separated() - 'my_ABC_Class' - - >>> WordSet.parse('a-command').camel_case() - 'ACommand' - - >>> WordSet.parse('someIdentifier').lowered().space_separated() - 'some identifier' - - Slices of the result should return another WordSet. 
- - >>> WordSet.parse('taken-out-of-context')[1:].underscore_separated() - 'out_of_context' - - >>> WordSet.from_class_name(WordSet()).lowered().space_separated() - 'word set' - - >>> example = WordSet.parse('figured it out') - >>> example.headless_camel_case() - 'figuredItOut' - >>> example.dash_separated() - 'figured-it-out' - - """ - - _pattern = re.compile('([A-Z]?[a-z]+)|([A-Z]+(?![a-z]))') - - def capitalized(self): - return WordSet(word.capitalize() for word in self) - - def lowered(self): - return WordSet(word.lower() for word in self) - - def camel_case(self): - return ''.join(self.capitalized()) - - def headless_camel_case(self): - words = iter(self) - first = next(words).lower() - new_words = itertools.chain((first,), WordSet(words).camel_case()) - return ''.join(new_words) - - def underscore_separated(self): - return '_'.join(self) - - def dash_separated(self): - return '-'.join(self) - - def space_separated(self): - return ' '.join(self) - - def trim_right(self, item): - """ - Remove the item from the end of the set. - - >>> WordSet.parse('foo bar').trim_right('foo') - ('foo', 'bar') - >>> WordSet.parse('foo bar').trim_right('bar') - ('foo',) - >>> WordSet.parse('').trim_right('bar') - () - """ - return self[:-1] if self and self[-1] == item else self - - def trim_left(self, item): - """ - Remove the item from the beginning of the set. - - >>> WordSet.parse('foo bar').trim_left('foo') - ('bar',) - >>> WordSet.parse('foo bar').trim_left('bar') - ('foo', 'bar') - >>> WordSet.parse('').trim_left('bar') - () - """ - return self[1:] if self and self[0] == item else self - - def trim(self, item): - """ - >>> WordSet.parse('foo bar').trim('foo') - ('bar',) - """ - return self.trim_left(item).trim_right(item) - - def __getitem__(self, item): - result = super(WordSet, self).__getitem__(item) - if isinstance(item, slice): - result = WordSet(result) - return result - - @classmethod - def parse(cls, identifier): - matches = cls._pattern.finditer(identifier) - return WordSet(match.group(0) for match in matches) - - @classmethod - def from_class_name(cls, subject): - return cls.parse(subject.__class__.__name__) - - -# for backward compatibility -words = WordSet.parse - - -def simple_html_strip(s): - r""" - Remove HTML from the string `s`. - - >>> str(simple_html_strip('')) - '' - - >>> print(simple_html_strip('A stormy day in paradise')) - A stormy day in paradise - - >>> print(simple_html_strip('Somebody tell the truth.')) - Somebody tell the truth. - - >>> print(simple_html_strip('What about
\nmultiple lines?')) - What about - multiple lines? - """ - html_stripper = re.compile('()|(<[^>]*>)|([^<]+)', re.DOTALL) - texts = (match.group(3) or '' for match in html_stripper.finditer(s)) - return ''.join(texts) - - -class SeparatedValues(str): - """ - A string separated by a separator. Overrides __iter__ for getting - the values. - - >>> list(SeparatedValues('a,b,c')) - ['a', 'b', 'c'] - - Whitespace is stripped and empty values are discarded. - - >>> list(SeparatedValues(' a, b , c, ')) - ['a', 'b', 'c'] - """ - - separator = ',' - - def __iter__(self): - parts = self.split(self.separator) - return filter(None, (part.strip() for part in parts)) - - -class Stripper: - r""" - Given a series of lines, find the common prefix and strip it from them. - - >>> lines = [ - ... 'abcdefg\n', - ... 'abc\n', - ... 'abcde\n', - ... ] - >>> res = Stripper.strip_prefix(lines) - >>> res.prefix - 'abc' - >>> list(res.lines) - ['defg\n', '\n', 'de\n'] - - If no prefix is common, nothing should be stripped. - - >>> lines = [ - ... 'abcd\n', - ... '1234\n', - ... ] - >>> res = Stripper.strip_prefix(lines) - >>> res.prefix = '' - >>> list(res.lines) - ['abcd\n', '1234\n'] - """ - - def __init__(self, prefix, lines): - self.prefix = prefix - self.lines = map(self, lines) - - @classmethod - def strip_prefix(cls, lines): - prefix_lines, lines = itertools.tee(lines) - prefix = functools.reduce(cls.common_prefix, prefix_lines) - return cls(prefix, lines) - - def __call__(self, line): - if not self.prefix: - return line - null, prefix, rest = line.partition(self.prefix) - return rest - - @staticmethod - def common_prefix(s1, s2): - """ - Return the common prefix of two lines. - """ - index = min(len(s1), len(s2)) - while s1[:index] != s2[:index]: - index -= 1 - return s1[:index] - - -def remove_prefix(text, prefix): - """ - Remove the prefix from the text if it exists. - - >>> remove_prefix('underwhelming performance', 'underwhelming ') - 'performance' - - >>> remove_prefix('something special', 'sample') - 'something special' - """ - null, prefix, rest = text.rpartition(prefix) - return rest - - -def remove_suffix(text, suffix): - """ - Remove the suffix from the text if it exists. - - >>> remove_suffix('name.git', '.git') - 'name' - - >>> remove_suffix('something special', 'sample') - 'something special' - """ - rest, suffix, null = text.partition(suffix) - return rest - - -def normalize_newlines(text): - r""" - Replace alternate newlines with the canonical newline. - - >>> normalize_newlines('Lorem Ipsum\u2029') - 'Lorem Ipsum\n' - >>> normalize_newlines('Lorem Ipsum\r\n') - 'Lorem Ipsum\n' - >>> normalize_newlines('Lorem Ipsum\x85') - 'Lorem Ipsum\n' - """ - newlines = ['\r\n', '\r', '\n', '\u0085', '\u2028', '\u2029'] - pattern = '|'.join(newlines) - return re.sub(pattern, '\n', text) - - -def _nonblank(str): - return str and not str.startswith('#') - - -@functools.singledispatch -def yield_lines(iterable): - r""" - Yield valid lines of a string or iterable. - - >>> list(yield_lines('')) - [] - >>> list(yield_lines(['foo', 'bar'])) - ['foo', 'bar'] - >>> list(yield_lines('foo\nbar')) - ['foo', 'bar'] - >>> list(yield_lines('\nfoo\n#bar\nbaz #comment')) - ['foo', 'baz #comment'] - >>> list(yield_lines(['foo\nbar', 'baz', 'bing\n\n\n'])) - ['foo', 'bar', 'baz', 'bing'] - """ - return itertools.chain.from_iterable(map(yield_lines, iterable)) - - -@yield_lines.register(str) -def _(text): - return filter(_nonblank, map(str.strip, text.splitlines())) - - -def drop_comment(line): - """ - Drop comments. 
- - >>> drop_comment('foo # bar') - 'foo' - - A hash without a space may be in a URL. - - >>> drop_comment('http://example.com/foo#bar') - 'http://example.com/foo#bar' - """ - return line.partition(' #')[0] - - -def join_continuation(lines): - r""" - Join lines continued by a trailing backslash. - - >>> list(join_continuation(['foo \\', 'bar', 'baz'])) - ['foobar', 'baz'] - >>> list(join_continuation(['foo \\', 'bar', 'baz'])) - ['foobar', 'baz'] - >>> list(join_continuation(['foo \\', 'bar \\', 'baz'])) - ['foobarbaz'] - - Not sure why, but... - The character preceeding the backslash is also elided. - - >>> list(join_continuation(['goo\\', 'dly'])) - ['godly'] - - A terrible idea, but... - If no line is available to continue, suppress the lines. - - >>> list(join_continuation(['foo', 'bar\\', 'baz\\'])) - ['foo'] - """ - lines = iter(lines) - for item in lines: - while item.endswith('\\'): - try: - item = item[:-2].strip() + next(lines) - except StopIteration: - return - yield item diff --git a/venv/lib/python3.10/site-packages/setuptools/_vendor/more_itertools/more.py b/venv/lib/python3.10/site-packages/setuptools/_vendor/more_itertools/more.py index e6fca4d..0f7d282 100644 --- a/venv/lib/python3.10/site-packages/setuptools/_vendor/more_itertools/more.py +++ b/venv/lib/python3.10/site-packages/setuptools/_vendor/more_itertools/more.py @@ -2,6 +2,7 @@ from collections import Counter, defaultdict, deque, abc from collections.abc import Sequence +from concurrent.futures import ThreadPoolExecutor from functools import partial, reduce, wraps from heapq import merge, heapify, heapreplace, heappop from itertools import ( @@ -3453,7 +3454,7 @@ def __init__(self, func, callback_kwd='callback', wait_seconds=0.1): self._aborted = False self._future = None self._wait_seconds = wait_seconds - self._executor = __import__("concurrent.futures").futures.ThreadPoolExecutor(max_workers=1) + self._executor = ThreadPoolExecutor(max_workers=1) self._iterator = self._reader() def __enter__(self): diff --git a/venv/lib/python3.10/site-packages/setuptools/_vendor/packaging/__about__.py b/venv/lib/python3.10/site-packages/setuptools/_vendor/packaging/__about__.py index 3551bc2..c359122 100644 --- a/venv/lib/python3.10/site-packages/setuptools/_vendor/packaging/__about__.py +++ b/venv/lib/python3.10/site-packages/setuptools/_vendor/packaging/__about__.py @@ -17,7 +17,7 @@ __summary__ = "Core utilities for Python packages" __uri__ = "https://github.com/pypa/packaging" -__version__ = "21.3" +__version__ = "21.2" __author__ = "Donald Stufft and individual contributors" __email__ = "donald@stufft.io" diff --git a/venv/lib/python3.10/site-packages/setuptools/_vendor/packaging/_musllinux.py b/venv/lib/python3.10/site-packages/setuptools/_vendor/packaging/_musllinux.py index 8ac3059..85450fa 100644 --- a/venv/lib/python3.10/site-packages/setuptools/_vendor/packaging/_musllinux.py +++ b/venv/lib/python3.10/site-packages/setuptools/_vendor/packaging/_musllinux.py @@ -98,7 +98,7 @@ def _get_musl_version(executable: str) -> Optional[_MuslVersion]: with contextlib.ExitStack() as stack: try: f = stack.enter_context(open(executable, "rb")) - except OSError: + except IOError: return None ld = _parse_ld_musl_from_elf(f) if not ld: diff --git a/venv/lib/python3.10/site-packages/setuptools/_vendor/packaging/_structures.py b/venv/lib/python3.10/site-packages/setuptools/_vendor/packaging/_structures.py index 90a6465..9515497 100644 --- a/venv/lib/python3.10/site-packages/setuptools/_vendor/packaging/_structures.py +++ 
b/venv/lib/python3.10/site-packages/setuptools/_vendor/packaging/_structures.py @@ -19,6 +19,9 @@ def __le__(self, other: object) -> bool: def __eq__(self, other: object) -> bool: return isinstance(other, self.__class__) + def __ne__(self, other: object) -> bool: + return not isinstance(other, self.__class__) + def __gt__(self, other: object) -> bool: return True @@ -48,6 +51,9 @@ def __le__(self, other: object) -> bool: def __eq__(self, other: object) -> bool: return isinstance(other, self.__class__) + def __ne__(self, other: object) -> bool: + return not isinstance(other, self.__class__) + def __gt__(self, other: object) -> bool: return False diff --git a/venv/lib/python3.10/site-packages/setuptools/_vendor/packaging/specifiers.py b/venv/lib/python3.10/site-packages/setuptools/_vendor/packaging/specifiers.py index 0e218a6..ce66bd4 100644 --- a/venv/lib/python3.10/site-packages/setuptools/_vendor/packaging/specifiers.py +++ b/venv/lib/python3.10/site-packages/setuptools/_vendor/packaging/specifiers.py @@ -57,6 +57,13 @@ def __eq__(self, other: object) -> bool: objects are equal. """ + @abc.abstractmethod + def __ne__(self, other: object) -> bool: + """ + Returns a boolean representing whether or not the two Specifier like + objects are not equal. + """ + @abc.abstractproperty def prereleases(self) -> Optional[bool]: """ @@ -112,7 +119,7 @@ def __repr__(self) -> str: else "" ) - return f"<{self.__class__.__name__}({str(self)!r}{pre})>" + return "<{}({!r}{})>".format(self.__class__.__name__, str(self), pre) def __str__(self) -> str: return "{}{}".format(*self._spec) @@ -135,6 +142,17 @@ def __eq__(self, other: object) -> bool: return self._canonical_spec == other._canonical_spec + def __ne__(self, other: object) -> bool: + if isinstance(other, str): + try: + other = self.__class__(str(other)) + except InvalidSpecifier: + return NotImplemented + elif not isinstance(other, self.__class__): + return NotImplemented + + return self._spec != other._spec + def _get_operator(self, op: str) -> CallableOperator: operator_callable: CallableOperator = getattr( self, f"_compare_{self._operators[op]}" @@ -649,7 +667,7 @@ def __repr__(self) -> str: else "" ) - return f"" + return "".format(str(self), pre) def __str__(self) -> str: return ",".join(sorted(str(s) for s in self._specs)) @@ -688,6 +706,14 @@ def __eq__(self, other: object) -> bool: return self._specs == other._specs + def __ne__(self, other: object) -> bool: + if isinstance(other, (str, _IndividualSpecifier)): + other = SpecifierSet(str(other)) + elif not isinstance(other, SpecifierSet): + return NotImplemented + + return self._specs != other._specs + def __len__(self) -> int: return len(self._specs) diff --git a/venv/lib/python3.10/site-packages/setuptools/_vendor/packaging/tags.py b/venv/lib/python3.10/site-packages/setuptools/_vendor/packaging/tags.py index 9a3d25a..e65890a 100644 --- a/venv/lib/python3.10/site-packages/setuptools/_vendor/packaging/tags.py +++ b/venv/lib/python3.10/site-packages/setuptools/_vendor/packaging/tags.py @@ -90,7 +90,7 @@ def __str__(self) -> str: return f"{self._interpreter}-{self._abi}-{self._platform}" def __repr__(self) -> str: - return f"<{self} @ {id(self)}>" + return "<{self} @ {self_id}>".format(self=self, self_id=id(self)) def parse_tag(tag: str) -> FrozenSet[Tag]: @@ -192,7 +192,7 @@ def cpython_tags( if not python_version: python_version = sys.version_info[:2] - interpreter = f"cp{_version_nodot(python_version[:2])}" + interpreter = "cp{}".format(_version_nodot(python_version[:2])) if abis is None: if 
len(python_version) > 1: @@ -268,11 +268,11 @@ def _py_interpreter_range(py_version: PythonVersion) -> Iterator[str]: all previous versions of that major version. """ if len(py_version) > 1: - yield f"py{_version_nodot(py_version[:2])}" - yield f"py{py_version[0]}" + yield "py{version}".format(version=_version_nodot(py_version[:2])) + yield "py{major}".format(major=py_version[0]) if len(py_version) > 1: for minor in range(py_version[1] - 1, -1, -1): - yield f"py{_version_nodot((py_version[0], minor))}" + yield "py{version}".format(version=_version_nodot((py_version[0], minor))) def compatible_tags( @@ -481,7 +481,4 @@ def sys_tags(*, warn: bool = False) -> Iterator[Tag]: else: yield from generic_tags() - if interp_name == "pp": - yield from compatible_tags(interpreter="pp3") - else: - yield from compatible_tags() + yield from compatible_tags() diff --git a/venv/lib/python3.10/site-packages/setuptools/_vendor/pyparsing/__init__.py b/venv/lib/python3.10/site-packages/setuptools/_vendor/pyparsing/__init__.py deleted file mode 100644 index 7802ff1..0000000 --- a/venv/lib/python3.10/site-packages/setuptools/_vendor/pyparsing/__init__.py +++ /dev/null @@ -1,331 +0,0 @@ -# module pyparsing.py -# -# Copyright (c) 2003-2022 Paul T. McGuire -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -# - -__doc__ = """ -pyparsing module - Classes and methods to define and execute parsing grammars -============================================================================= - -The pyparsing module is an alternative approach to creating and -executing simple grammars, vs. the traditional lex/yacc approach, or the -use of regular expressions. With pyparsing, you don't need to learn -a new syntax for defining grammars or matching expressions - the parsing -module provides a library of classes that you use to construct the -grammar directly in Python. - -Here is a program to parse "Hello, World!" (or any greeting of the form -``", !"``), built up using :class:`Word`, -:class:`Literal`, and :class:`And` elements -(the :meth:`'+'` operators create :class:`And` expressions, -and the strings are auto-converted to :class:`Literal` expressions):: - - from pyparsing import Word, alphas - - # define grammar of a greeting - greet = Word(alphas) + "," + Word(alphas) + "!" - - hello = "Hello, World!" - print(hello, "->", greet.parse_string(hello)) - -The program outputs the following:: - - Hello, World! 
-> ['Hello', ',', 'World', '!'] - -The Python representation of the grammar is quite readable, owing to the -self-explanatory class names, and the use of :class:`'+'`, -:class:`'|'`, :class:`'^'` and :class:`'&'` operators. - -The :class:`ParseResults` object returned from -:class:`ParserElement.parseString` can be -accessed as a nested list, a dictionary, or an object with named -attributes. - -The pyparsing module handles some of the problems that are typically -vexing when writing text parsers: - - - extra or missing whitespace (the above program will also handle - "Hello,World!", "Hello , World !", etc.) - - quoted strings - - embedded comments - - -Getting Started - ------------------ -Visit the classes :class:`ParserElement` and :class:`ParseResults` to -see the base classes that most other pyparsing -classes inherit from. Use the docstrings for examples of how to: - - - construct literal match expressions from :class:`Literal` and - :class:`CaselessLiteral` classes - - construct character word-group expressions using the :class:`Word` - class - - see how to create repetitive expressions using :class:`ZeroOrMore` - and :class:`OneOrMore` classes - - use :class:`'+'`, :class:`'|'`, :class:`'^'`, - and :class:`'&'` operators to combine simple expressions into - more complex ones - - associate names with your parsed results using - :class:`ParserElement.setResultsName` - - access the parsed data, which is returned as a :class:`ParseResults` - object - - find some helpful expression short-cuts like :class:`delimitedList` - and :class:`oneOf` - - find more useful common expressions in the :class:`pyparsing_common` - namespace class -""" -from typing import NamedTuple - - -class version_info(NamedTuple): - major: int - minor: int - micro: int - releaselevel: str - serial: int - - @property - def __version__(self): - return ( - "{}.{}.{}".format(self.major, self.minor, self.micro) - + ( - "{}{}{}".format( - "r" if self.releaselevel[0] == "c" else "", - self.releaselevel[0], - self.serial, - ), - "", - )[self.releaselevel == "final"] - ) - - def __str__(self): - return "{} {} / {}".format(__name__, self.__version__, __version_time__) - - def __repr__(self): - return "{}.{}({})".format( - __name__, - type(self).__name__, - ", ".join("{}={!r}".format(*nv) for nv in zip(self._fields, self)), - ) - - -__version_info__ = version_info(3, 0, 9, "final", 0) -__version_time__ = "05 May 2022 07:02 UTC" -__version__ = __version_info__.__version__ -__versionTime__ = __version_time__ -__author__ = "Paul McGuire " - -from .util import * -from .exceptions import * -from .actions import * -from .core import __diag__, __compat__ -from .results import * -from .core import * -from .core import _builtin_exprs as core_builtin_exprs -from .helpers import * -from .helpers import _builtin_exprs as helper_builtin_exprs - -from .unicode import unicode_set, UnicodeRangeList, pyparsing_unicode as unicode -from .testing import pyparsing_test as testing -from .common import ( - pyparsing_common as common, - _builtin_exprs as common_builtin_exprs, -) - -# define backward compat synonyms -if "pyparsing_unicode" not in globals(): - pyparsing_unicode = unicode -if "pyparsing_common" not in globals(): - pyparsing_common = common -if "pyparsing_test" not in globals(): - pyparsing_test = testing - -core_builtin_exprs += common_builtin_exprs + helper_builtin_exprs - - -__all__ = [ - "__version__", - "__version_time__", - "__author__", - "__compat__", - "__diag__", - "And", - "AtLineStart", - "AtStringStart", - "CaselessKeyword", 
- "CaselessLiteral", - "CharsNotIn", - "Combine", - "Dict", - "Each", - "Empty", - "FollowedBy", - "Forward", - "GoToColumn", - "Group", - "IndentedBlock", - "Keyword", - "LineEnd", - "LineStart", - "Literal", - "Located", - "PrecededBy", - "MatchFirst", - "NoMatch", - "NotAny", - "OneOrMore", - "OnlyOnce", - "OpAssoc", - "Opt", - "Optional", - "Or", - "ParseBaseException", - "ParseElementEnhance", - "ParseException", - "ParseExpression", - "ParseFatalException", - "ParseResults", - "ParseSyntaxException", - "ParserElement", - "PositionToken", - "QuotedString", - "RecursiveGrammarException", - "Regex", - "SkipTo", - "StringEnd", - "StringStart", - "Suppress", - "Token", - "TokenConverter", - "White", - "Word", - "WordEnd", - "WordStart", - "ZeroOrMore", - "Char", - "alphanums", - "alphas", - "alphas8bit", - "any_close_tag", - "any_open_tag", - "c_style_comment", - "col", - "common_html_entity", - "counted_array", - "cpp_style_comment", - "dbl_quoted_string", - "dbl_slash_comment", - "delimited_list", - "dict_of", - "empty", - "hexnums", - "html_comment", - "identchars", - "identbodychars", - "java_style_comment", - "line", - "line_end", - "line_start", - "lineno", - "make_html_tags", - "make_xml_tags", - "match_only_at_col", - "match_previous_expr", - "match_previous_literal", - "nested_expr", - "null_debug_action", - "nums", - "one_of", - "printables", - "punc8bit", - "python_style_comment", - "quoted_string", - "remove_quotes", - "replace_with", - "replace_html_entity", - "rest_of_line", - "sgl_quoted_string", - "srange", - "string_end", - "string_start", - "trace_parse_action", - "unicode_string", - "with_attribute", - "indentedBlock", - "original_text_for", - "ungroup", - "infix_notation", - "locatedExpr", - "with_class", - "CloseMatch", - "token_map", - "pyparsing_common", - "pyparsing_unicode", - "unicode_set", - "condition_as_parse_action", - "pyparsing_test", - # pre-PEP8 compatibility names - "__versionTime__", - "anyCloseTag", - "anyOpenTag", - "cStyleComment", - "commonHTMLEntity", - "countedArray", - "cppStyleComment", - "dblQuotedString", - "dblSlashComment", - "delimitedList", - "dictOf", - "htmlComment", - "javaStyleComment", - "lineEnd", - "lineStart", - "makeHTMLTags", - "makeXMLTags", - "matchOnlyAtCol", - "matchPreviousExpr", - "matchPreviousLiteral", - "nestedExpr", - "nullDebugAction", - "oneOf", - "opAssoc", - "pythonStyleComment", - "quotedString", - "removeQuotes", - "replaceHTMLEntity", - "replaceWith", - "restOfLine", - "sglQuotedString", - "stringEnd", - "stringStart", - "traceParseAction", - "unicodeString", - "withAttribute", - "indentedBlock", - "originalTextFor", - "infixNotation", - "locatedExpr", - "withClass", - "tokenMap", - "conditionAsParseAction", - "autoname_elements", -] diff --git a/venv/lib/python3.10/site-packages/setuptools/_vendor/pyparsing/actions.py b/venv/lib/python3.10/site-packages/setuptools/_vendor/pyparsing/actions.py deleted file mode 100644 index f72c66e..0000000 --- a/venv/lib/python3.10/site-packages/setuptools/_vendor/pyparsing/actions.py +++ /dev/null @@ -1,207 +0,0 @@ -# actions.py - -from .exceptions import ParseException -from .util import col - - -class OnlyOnce: - """ - Wrapper for parse actions, to ensure they are only called once. 
- """ - - def __init__(self, method_call): - from .core import _trim_arity - - self.callable = _trim_arity(method_call) - self.called = False - - def __call__(self, s, l, t): - if not self.called: - results = self.callable(s, l, t) - self.called = True - return results - raise ParseException(s, l, "OnlyOnce obj called multiple times w/out reset") - - def reset(self): - """ - Allow the associated parse action to be called once more. - """ - - self.called = False - - -def match_only_at_col(n): - """ - Helper method for defining parse actions that require matching at - a specific column in the input text. - """ - - def verify_col(strg, locn, toks): - if col(locn, strg) != n: - raise ParseException(strg, locn, "matched token not at column {}".format(n)) - - return verify_col - - -def replace_with(repl_str): - """ - Helper method for common parse actions that simply return - a literal value. Especially useful when used with - :class:`transform_string` (). - - Example:: - - num = Word(nums).set_parse_action(lambda toks: int(toks[0])) - na = one_of("N/A NA").set_parse_action(replace_with(math.nan)) - term = na | num - - term[1, ...].parse_string("324 234 N/A 234") # -> [324, 234, nan, 234] - """ - return lambda s, l, t: [repl_str] - - -def remove_quotes(s, l, t): - """ - Helper parse action for removing quotation marks from parsed - quoted strings. - - Example:: - - # by default, quotation marks are included in parsed results - quoted_string.parse_string("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"] - - # use remove_quotes to strip quotation marks from parsed results - quoted_string.set_parse_action(remove_quotes) - quoted_string.parse_string("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"] - """ - return t[0][1:-1] - - -def with_attribute(*args, **attr_dict): - """ - Helper to create a validating parse action to be used with start - tags created with :class:`make_xml_tags` or - :class:`make_html_tags`. Use ``with_attribute`` to qualify - a starting tag with a required attribute value, to avoid false - matches on common tags such as ```` or ``
``. - - Call ``with_attribute`` with a series of attribute names and - values. Specify the list of filter attributes names and values as: - - - keyword arguments, as in ``(align="right")``, or - - as an explicit dict with ``**`` operator, when an attribute - name is also a Python reserved word, as in ``**{"class":"Customer", "align":"right"}`` - - a list of name-value tuples, as in ``(("ns1:class", "Customer"), ("ns2:align", "right"))`` - - For attribute names with a namespace prefix, you must use the second - form. Attribute names are matched insensitive to upper/lower case. - - If just testing for ``class`` (with or without a namespace), use - :class:`with_class`. - - To verify that the attribute exists, but without specifying a value, - pass ``with_attribute.ANY_VALUE`` as the value. - - Example:: - - html = ''' -
-            <div>
-            Some text
-            <div type="grid">1 4 0 1 0</div>
-            <div type="graph">1,3 2,3 1,1</div>
-            <div>this has no type</div>
-            </div>
- - ''' - div,div_end = make_html_tags("div") - - # only match div tag having a type attribute with value "grid" - div_grid = div().set_parse_action(with_attribute(type="grid")) - grid_expr = div_grid + SkipTo(div | div_end)("body") - for grid_header in grid_expr.search_string(html): - print(grid_header.body) - - # construct a match with any div tag having a type attribute, regardless of the value - div_any_type = div().set_parse_action(with_attribute(type=with_attribute.ANY_VALUE)) - div_expr = div_any_type + SkipTo(div | div_end)("body") - for div_header in div_expr.search_string(html): - print(div_header.body) - - prints:: - - 1 4 0 1 0 - - 1 4 0 1 0 - 1,3 2,3 1,1 - """ - if args: - attrs = args[:] - else: - attrs = attr_dict.items() - attrs = [(k, v) for k, v in attrs] - - def pa(s, l, tokens): - for attrName, attrValue in attrs: - if attrName not in tokens: - raise ParseException(s, l, "no matching attribute " + attrName) - if attrValue != with_attribute.ANY_VALUE and tokens[attrName] != attrValue: - raise ParseException( - s, - l, - "attribute {!r} has value {!r}, must be {!r}".format( - attrName, tokens[attrName], attrValue - ), - ) - - return pa - - -with_attribute.ANY_VALUE = object() - - -def with_class(classname, namespace=""): - """ - Simplified version of :class:`with_attribute` when - matching on a div class - made difficult because ``class`` is - a reserved word in Python. - - Example:: - - html = ''' -
-            <div>
-            Some text
-            <div class="grid">1 4 0 1 0</div>
-            <div class="graph">1,3 2,3 1,1</div>
-            <div>this &lt;div&gt; has no class</div>
-            </div>
- - ''' - div,div_end = make_html_tags("div") - div_grid = div().set_parse_action(with_class("grid")) - - grid_expr = div_grid + SkipTo(div | div_end)("body") - for grid_header in grid_expr.search_string(html): - print(grid_header.body) - - div_any_type = div().set_parse_action(with_class(withAttribute.ANY_VALUE)) - div_expr = div_any_type + SkipTo(div | div_end)("body") - for div_header in div_expr.search_string(html): - print(div_header.body) - - prints:: - - 1 4 0 1 0 - - 1 4 0 1 0 - 1,3 2,3 1,1 - """ - classattr = "{}:class".format(namespace) if namespace else "class" - return with_attribute(**{classattr: classname}) - - -# pre-PEP8 compatibility symbols -replaceWith = replace_with -removeQuotes = remove_quotes -withAttribute = with_attribute -withClass = with_class -matchOnlyAtCol = match_only_at_col diff --git a/venv/lib/python3.10/site-packages/setuptools/_vendor/pyparsing/common.py b/venv/lib/python3.10/site-packages/setuptools/_vendor/pyparsing/common.py deleted file mode 100644 index 1859fb7..0000000 --- a/venv/lib/python3.10/site-packages/setuptools/_vendor/pyparsing/common.py +++ /dev/null @@ -1,424 +0,0 @@ -# common.py -from .core import * -from .helpers import delimited_list, any_open_tag, any_close_tag -from datetime import datetime - - -# some other useful expressions - using lower-case class name since we are really using this as a namespace -class pyparsing_common: - """Here are some common low-level expressions that may be useful in - jump-starting parser development: - - - numeric forms (:class:`integers`, :class:`reals`, - :class:`scientific notation`) - - common :class:`programming identifiers` - - network addresses (:class:`MAC`, - :class:`IPv4`, :class:`IPv6`) - - ISO8601 :class:`dates` and - :class:`datetime` - - :class:`UUID` - - :class:`comma-separated list` - - :class:`url` - - Parse actions: - - - :class:`convertToInteger` - - :class:`convertToFloat` - - :class:`convertToDate` - - :class:`convertToDatetime` - - :class:`stripHTMLTags` - - :class:`upcaseTokens` - - :class:`downcaseTokens` - - Example:: - - pyparsing_common.number.runTests(''' - # any int or real number, returned as the appropriate type - 100 - -100 - +100 - 3.14159 - 6.02e23 - 1e-12 - ''') - - pyparsing_common.fnumber.runTests(''' - # any int or real number, returned as float - 100 - -100 - +100 - 3.14159 - 6.02e23 - 1e-12 - ''') - - pyparsing_common.hex_integer.runTests(''' - # hex numbers - 100 - FF - ''') - - pyparsing_common.fraction.runTests(''' - # fractions - 1/2 - -3/4 - ''') - - pyparsing_common.mixed_integer.runTests(''' - # mixed fractions - 1 - 1/2 - -3/4 - 1-3/4 - ''') - - import uuid - pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID)) - pyparsing_common.uuid.runTests(''' - # uuid - 12345678-1234-5678-1234-567812345678 - ''') - - prints:: - - # any int or real number, returned as the appropriate type - 100 - [100] - - -100 - [-100] - - +100 - [100] - - 3.14159 - [3.14159] - - 6.02e23 - [6.02e+23] - - 1e-12 - [1e-12] - - # any int or real number, returned as float - 100 - [100.0] - - -100 - [-100.0] - - +100 - [100.0] - - 3.14159 - [3.14159] - - 6.02e23 - [6.02e+23] - - 1e-12 - [1e-12] - - # hex numbers - 100 - [256] - - FF - [255] - - # fractions - 1/2 - [0.5] - - -3/4 - [-0.75] - - # mixed fractions - 1 - [1] - - 1/2 - [0.5] - - -3/4 - [-0.75] - - 1-3/4 - [1.75] - - # uuid - 12345678-1234-5678-1234-567812345678 - [UUID('12345678-1234-5678-1234-567812345678')] - """ - - convert_to_integer = token_map(int) - """ - Parse action for converting parsed integers to Python int - """ 
- - convert_to_float = token_map(float) - """ - Parse action for converting parsed numbers to Python float - """ - - integer = Word(nums).set_name("integer").set_parse_action(convert_to_integer) - """expression that parses an unsigned integer, returns an int""" - - hex_integer = ( - Word(hexnums).set_name("hex integer").set_parse_action(token_map(int, 16)) - ) - """expression that parses a hexadecimal integer, returns an int""" - - signed_integer = ( - Regex(r"[+-]?\d+") - .set_name("signed integer") - .set_parse_action(convert_to_integer) - ) - """expression that parses an integer with optional leading sign, returns an int""" - - fraction = ( - signed_integer().set_parse_action(convert_to_float) - + "/" - + signed_integer().set_parse_action(convert_to_float) - ).set_name("fraction") - """fractional expression of an integer divided by an integer, returns a float""" - fraction.add_parse_action(lambda tt: tt[0] / tt[-1]) - - mixed_integer = ( - fraction | signed_integer + Opt(Opt("-").suppress() + fraction) - ).set_name("fraction or mixed integer-fraction") - """mixed integer of the form 'integer - fraction', with optional leading integer, returns float""" - mixed_integer.add_parse_action(sum) - - real = ( - Regex(r"[+-]?(?:\d+\.\d*|\.\d+)") - .set_name("real number") - .set_parse_action(convert_to_float) - ) - """expression that parses a floating point number and returns a float""" - - sci_real = ( - Regex(r"[+-]?(?:\d+(?:[eE][+-]?\d+)|(?:\d+\.\d*|\.\d+)(?:[eE][+-]?\d+)?)") - .set_name("real number with scientific notation") - .set_parse_action(convert_to_float) - ) - """expression that parses a floating point number with optional - scientific notation and returns a float""" - - # streamlining this expression makes the docs nicer-looking - number = (sci_real | real | signed_integer).setName("number").streamline() - """any numeric expression, returns the corresponding Python type""" - - fnumber = ( - Regex(r"[+-]?\d+\.?\d*([eE][+-]?\d+)?") - .set_name("fnumber") - .set_parse_action(convert_to_float) - ) - """any int or real number, returned as float""" - - identifier = Word(identchars, identbodychars).set_name("identifier") - """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')""" - - ipv4_address = Regex( - r"(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}" - ).set_name("IPv4 address") - "IPv4 address (``0.0.0.0 - 255.255.255.255``)" - - _ipv6_part = Regex(r"[0-9a-fA-F]{1,4}").set_name("hex_integer") - _full_ipv6_address = (_ipv6_part + (":" + _ipv6_part) * 7).set_name( - "full IPv6 address" - ) - _short_ipv6_address = ( - Opt(_ipv6_part + (":" + _ipv6_part) * (0, 6)) - + "::" - + Opt(_ipv6_part + (":" + _ipv6_part) * (0, 6)) - ).set_name("short IPv6 address") - _short_ipv6_address.add_condition( - lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8 - ) - _mixed_ipv6_address = ("::ffff:" + ipv4_address).set_name("mixed IPv6 address") - ipv6_address = Combine( - (_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).set_name( - "IPv6 address" - ) - ).set_name("IPv6 address") - "IPv6 address (long, short, or mixed form)" - - mac_address = Regex( - r"[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}" - ).set_name("MAC address") - "MAC address xx:xx:xx:xx:xx (may also have '-' or '.' 
delimiters)" - - @staticmethod - def convert_to_date(fmt: str = "%Y-%m-%d"): - """ - Helper to create a parse action for converting parsed date string to Python datetime.date - - Params - - - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%d"``) - - Example:: - - date_expr = pyparsing_common.iso8601_date.copy() - date_expr.setParseAction(pyparsing_common.convertToDate()) - print(date_expr.parseString("1999-12-31")) - - prints:: - - [datetime.date(1999, 12, 31)] - """ - - def cvt_fn(ss, ll, tt): - try: - return datetime.strptime(tt[0], fmt).date() - except ValueError as ve: - raise ParseException(ss, ll, str(ve)) - - return cvt_fn - - @staticmethod - def convert_to_datetime(fmt: str = "%Y-%m-%dT%H:%M:%S.%f"): - """Helper to create a parse action for converting parsed - datetime string to Python datetime.datetime - - Params - - - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%dT%H:%M:%S.%f"``) - - Example:: - - dt_expr = pyparsing_common.iso8601_datetime.copy() - dt_expr.setParseAction(pyparsing_common.convertToDatetime()) - print(dt_expr.parseString("1999-12-31T23:59:59.999")) - - prints:: - - [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)] - """ - - def cvt_fn(s, l, t): - try: - return datetime.strptime(t[0], fmt) - except ValueError as ve: - raise ParseException(s, l, str(ve)) - - return cvt_fn - - iso8601_date = Regex( - r"(?P\d{4})(?:-(?P\d\d)(?:-(?P\d\d))?)?" - ).set_name("ISO8601 date") - "ISO8601 date (``yyyy-mm-dd``)" - - iso8601_datetime = Regex( - r"(?P\d{4})-(?P\d\d)-(?P\d\d)[T ](?P\d\d):(?P\d\d)(:(?P\d\d(\.\d*)?)?)?(?PZ|[+-]\d\d:?\d\d)?" - ).set_name("ISO8601 datetime") - "ISO8601 datetime (``yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)``) - trailing seconds, milliseconds, and timezone optional; accepts separating ``'T'`` or ``' '``" - - uuid = Regex(r"[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}").set_name("UUID") - "UUID (``xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx``)" - - _html_stripper = any_open_tag.suppress() | any_close_tag.suppress() - - @staticmethod - def strip_html_tags(s: str, l: int, tokens: ParseResults): - """Parse action to remove HTML tags from web page HTML source - - Example:: - - # strip HTML links from normal text - text = 'More info at the
pyparsing wiki page' - td, td_end = makeHTMLTags("TD") - table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end - print(table_text.parseString(text).body) - - Prints:: - - More info at the pyparsing wiki page - """ - return pyparsing_common._html_stripper.transform_string(tokens[0]) - - _commasepitem = ( - Combine( - OneOrMore( - ~Literal(",") - + ~LineEnd() - + Word(printables, exclude_chars=",") - + Opt(White(" \t") + ~FollowedBy(LineEnd() | ",")) - ) - ) - .streamline() - .set_name("commaItem") - ) - comma_separated_list = delimited_list( - Opt(quoted_string.copy() | _commasepitem, default="") - ).set_name("comma separated list") - """Predefined expression of 1 or more printable words or quoted strings, separated by commas.""" - - upcase_tokens = staticmethod(token_map(lambda t: t.upper())) - """Parse action to convert tokens to upper case.""" - - downcase_tokens = staticmethod(token_map(lambda t: t.lower())) - """Parse action to convert tokens to lower case.""" - - # fmt: off - url = Regex( - # https://mathiasbynens.be/demo/url-regex - # https://gist.github.com/dperini/729294 - r"^" + - # protocol identifier (optional) - # short syntax // still required - r"(?:(?:(?Phttps?|ftp):)?\/\/)" + - # user:pass BasicAuth (optional) - r"(?:(?P\S+(?::\S*)?)@)?" + - r"(?P" + - # IP address exclusion - # private & local networks - r"(?!(?:10|127)(?:\.\d{1,3}){3})" + - r"(?!(?:169\.254|192\.168)(?:\.\d{1,3}){2})" + - r"(?!172\.(?:1[6-9]|2\d|3[0-1])(?:\.\d{1,3}){2})" + - # IP address dotted notation octets - # excludes loopback network 0.0.0.0 - # excludes reserved space >= 224.0.0.0 - # excludes network & broadcast addresses - # (first & last IP address of each class) - r"(?:[1-9]\d?|1\d\d|2[01]\d|22[0-3])" + - r"(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5])){2}" + - r"(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))" + - r"|" + - # host & domain names, may end with dot - # can be replaced by a shortest alternative - # (?![-_])(?:[-\w\u00a1-\uffff]{0,63}[^-_]\.)+ - r"(?:" + - r"(?:" + - r"[a-z0-9\u00a1-\uffff]" + - r"[a-z0-9\u00a1-\uffff_-]{0,62}" + - r")?" + - r"[a-z0-9\u00a1-\uffff]\." + - r")+" + - # TLD identifier name, may end with dot - r"(?:[a-z\u00a1-\uffff]{2,}\.?)" + - r")" + - # port number (optional) - r"(:(?P\d{2,5}))?" + - # resource path (optional) - r"(?P\/[^?# ]*)?" + - # query string (optional) - r"(\?(?P[^#]*))?" + - # fragment (optional) - r"(#(?P\S*))?" 
+ - r"$" - ).set_name("url") - # fmt: on - - # pre-PEP8 compatibility names - convertToInteger = convert_to_integer - convertToFloat = convert_to_float - convertToDate = convert_to_date - convertToDatetime = convert_to_datetime - stripHTMLTags = strip_html_tags - upcaseTokens = upcase_tokens - downcaseTokens = downcase_tokens - - -_builtin_exprs = [ - v for v in vars(pyparsing_common).values() if isinstance(v, ParserElement) -] diff --git a/venv/lib/python3.10/site-packages/setuptools/_vendor/pyparsing/core.py b/venv/lib/python3.10/site-packages/setuptools/_vendor/pyparsing/core.py deleted file mode 100644 index 9acba3f..0000000 --- a/venv/lib/python3.10/site-packages/setuptools/_vendor/pyparsing/core.py +++ /dev/null @@ -1,5814 +0,0 @@ -# -# core.py -# -import os -import typing -from typing import ( - NamedTuple, - Union, - Callable, - Any, - Generator, - Tuple, - List, - TextIO, - Set, - Sequence, -) -from abc import ABC, abstractmethod -from enum import Enum -import string -import copy -import warnings -import re -import sys -from collections.abc import Iterable -import traceback -import types -from operator import itemgetter -from functools import wraps -from threading import RLock -from pathlib import Path - -from .util import ( - _FifoCache, - _UnboundedCache, - __config_flags, - _collapse_string_to_ranges, - _escape_regex_range_chars, - _bslash, - _flatten, - LRUMemo as _LRUMemo, - UnboundedMemo as _UnboundedMemo, -) -from .exceptions import * -from .actions import * -from .results import ParseResults, _ParseResultsWithOffset -from .unicode import pyparsing_unicode - -_MAX_INT = sys.maxsize -str_type: Tuple[type, ...] = (str, bytes) - -# -# Copyright (c) 2003-2022 Paul T. McGuire -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -# - - -if sys.version_info >= (3, 8): - from functools import cached_property -else: - - class cached_property: - def __init__(self, func): - self._func = func - - def __get__(self, instance, owner=None): - ret = instance.__dict__[self._func.__name__] = self._func(instance) - return ret - - -class __compat__(__config_flags): - """ - A cross-version compatibility configuration for pyparsing features that will be - released in a future version. By setting values in this configuration to True, - those features can be enabled in prior versions for compatibility development - and testing. 
- - - ``collect_all_And_tokens`` - flag to enable fix for Issue #63 that fixes erroneous grouping - of results names when an :class:`And` expression is nested within an :class:`Or` or :class:`MatchFirst`; - maintained for compatibility, but setting to ``False`` no longer restores pre-2.3.1 - behavior - """ - - _type_desc = "compatibility" - - collect_all_And_tokens = True - - _all_names = [__ for __ in locals() if not __.startswith("_")] - _fixed_names = """ - collect_all_And_tokens - """.split() - - -class __diag__(__config_flags): - _type_desc = "diagnostic" - - warn_multiple_tokens_in_named_alternation = False - warn_ungrouped_named_tokens_in_collection = False - warn_name_set_on_empty_Forward = False - warn_on_parse_using_empty_Forward = False - warn_on_assignment_to_Forward = False - warn_on_multiple_string_args_to_oneof = False - warn_on_match_first_with_lshift_operator = False - enable_debug_on_named_expressions = False - - _all_names = [__ for __ in locals() if not __.startswith("_")] - _warning_names = [name for name in _all_names if name.startswith("warn")] - _debug_names = [name for name in _all_names if name.startswith("enable_debug")] - - @classmethod - def enable_all_warnings(cls) -> None: - for name in cls._warning_names: - cls.enable(name) - - -class Diagnostics(Enum): - """ - Diagnostic configuration (all default to disabled) - - ``warn_multiple_tokens_in_named_alternation`` - flag to enable warnings when a results - name is defined on a :class:`MatchFirst` or :class:`Or` expression with one or more :class:`And` subexpressions - - ``warn_ungrouped_named_tokens_in_collection`` - flag to enable warnings when a results - name is defined on a containing expression with ungrouped subexpressions that also - have results names - - ``warn_name_set_on_empty_Forward`` - flag to enable warnings when a :class:`Forward` is defined - with a results name, but has no contents defined - - ``warn_on_parse_using_empty_Forward`` - flag to enable warnings when a :class:`Forward` is - defined in a grammar but has never had an expression attached to it - - ``warn_on_assignment_to_Forward`` - flag to enable warnings when a :class:`Forward` is defined - but is overwritten by assigning using ``'='`` instead of ``'<<='`` or ``'<<'`` - - ``warn_on_multiple_string_args_to_oneof`` - flag to enable warnings when :class:`one_of` is - incorrectly called with multiple str arguments - - ``enable_debug_on_named_expressions`` - flag to auto-enable debug on all subsequent - calls to :class:`ParserElement.set_name` - - Diagnostics are enabled/disabled by calling :class:`enable_diag` and :class:`disable_diag`. - All warnings can be enabled by calling :class:`enable_all_warnings`. - """ - - warn_multiple_tokens_in_named_alternation = 0 - warn_ungrouped_named_tokens_in_collection = 1 - warn_name_set_on_empty_Forward = 2 - warn_on_parse_using_empty_Forward = 3 - warn_on_assignment_to_Forward = 4 - warn_on_multiple_string_args_to_oneof = 5 - warn_on_match_first_with_lshift_operator = 6 - enable_debug_on_named_expressions = 7 - - -def enable_diag(diag_enum: Diagnostics) -> None: - """ - Enable a global pyparsing diagnostic flag (see :class:`Diagnostics`). - """ - __diag__.enable(diag_enum.name) - - -def disable_diag(diag_enum: Diagnostics) -> None: - """ - Disable a global pyparsing diagnostic flag (see :class:`Diagnostics`). - """ - __diag__.disable(diag_enum.name) - - -def enable_all_warnings() -> None: - """ - Enable all global pyparsing diagnostic warnings (see :class:`Diagnostics`). 
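In practice the diagnostics above are toggled at module level; a minimal sketch, assuming top-level ``pyparsing`` imports::

    import pyparsing as pp

    # enable one specific diagnostic...
    pp.enable_diag(pp.Diagnostics.warn_on_assignment_to_Forward)
    # ...or every warning-class diagnostic at once
    pp.enable_all_warnings()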
- """ - __diag__.enable_all_warnings() - - -# hide abstract class -del __config_flags - - -def _should_enable_warnings( - cmd_line_warn_options: typing.Iterable[str], warn_env_var: typing.Optional[str] -) -> bool: - enable = bool(warn_env_var) - for warn_opt in cmd_line_warn_options: - w_action, w_message, w_category, w_module, w_line = (warn_opt + "::::").split( - ":" - )[:5] - if not w_action.lower().startswith("i") and ( - not (w_message or w_category or w_module) or w_module == "pyparsing" - ): - enable = True - elif w_action.lower().startswith("i") and w_module in ("pyparsing", ""): - enable = False - return enable - - -if _should_enable_warnings( - sys.warnoptions, os.environ.get("PYPARSINGENABLEALLWARNINGS") -): - enable_all_warnings() - - -# build list of single arg builtins, that can be used as parse actions -_single_arg_builtins = { - sum, - len, - sorted, - reversed, - list, - tuple, - set, - any, - all, - min, - max, -} - -_generatorType = types.GeneratorType -ParseAction = Union[ - Callable[[], Any], - Callable[[ParseResults], Any], - Callable[[int, ParseResults], Any], - Callable[[str, int, ParseResults], Any], -] -ParseCondition = Union[ - Callable[[], bool], - Callable[[ParseResults], bool], - Callable[[int, ParseResults], bool], - Callable[[str, int, ParseResults], bool], -] -ParseFailAction = Callable[[str, int, "ParserElement", Exception], None] -DebugStartAction = Callable[[str, int, "ParserElement", bool], None] -DebugSuccessAction = Callable[ - [str, int, int, "ParserElement", ParseResults, bool], None -] -DebugExceptionAction = Callable[[str, int, "ParserElement", Exception, bool], None] - - -alphas = string.ascii_uppercase + string.ascii_lowercase -identchars = pyparsing_unicode.Latin1.identchars -identbodychars = pyparsing_unicode.Latin1.identbodychars -nums = "0123456789" -hexnums = nums + "ABCDEFabcdef" -alphanums = alphas + nums -printables = "".join([c for c in string.printable if c not in string.whitespace]) - -_trim_arity_call_line: traceback.StackSummary = None - - -def _trim_arity(func, max_limit=3): - """decorator to trim function calls to match the arity of the target""" - global _trim_arity_call_line - - if func in _single_arg_builtins: - return lambda s, l, t: func(t) - - limit = 0 - found_arity = False - - def extract_tb(tb, limit=0): - frames = traceback.extract_tb(tb, limit=limit) - frame_summary = frames[-1] - return [frame_summary[:2]] - - # synthesize what would be returned by traceback.extract_stack at the call to - # user's parse action 'func', so that we don't incur call penalty at parse time - - # fmt: off - LINE_DIFF = 7 - # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND - # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!! 
- _trim_arity_call_line = (_trim_arity_call_line or traceback.extract_stack(limit=2)[-1]) - pa_call_line_synth = (_trim_arity_call_line[0], _trim_arity_call_line[1] + LINE_DIFF) - - def wrapper(*args): - nonlocal found_arity, limit - while 1: - try: - ret = func(*args[limit:]) - found_arity = True - return ret - except TypeError as te: - # re-raise TypeErrors if they did not come from our arity testing - if found_arity: - raise - else: - tb = te.__traceback__ - trim_arity_type_error = ( - extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth - ) - del tb - - if trim_arity_type_error: - if limit < max_limit: - limit += 1 - continue - - raise - # fmt: on - - # copy func name to wrapper for sensible debug output - # (can't use functools.wraps, since that messes with function signature) - func_name = getattr(func, "__name__", getattr(func, "__class__").__name__) - wrapper.__name__ = func_name - wrapper.__doc__ = func.__doc__ - - return wrapper - - -def condition_as_parse_action( - fn: ParseCondition, message: str = None, fatal: bool = False -) -> ParseAction: - """ - Function to convert a simple predicate function that returns ``True`` or ``False`` - into a parse action. Can be used in places when a parse action is required - and :class:`ParserElement.add_condition` cannot be used (such as when adding a condition - to an operator level in :class:`infix_notation`). - - Optional keyword arguments: - - - ``message`` - define a custom message to be used in the raised exception - - ``fatal`` - if True, will raise :class:`ParseFatalException` to stop parsing immediately; - otherwise will raise :class:`ParseException` - - """ - msg = message if message is not None else "failed user-defined condition" - exc_type = ParseFatalException if fatal else ParseException - fn = _trim_arity(fn) - - @wraps(fn) - def pa(s, l, t): - if not bool(fn(s, l, t)): - raise exc_type(s, l, msg) - - return pa - - -def _default_start_debug_action( - instring: str, loc: int, expr: "ParserElement", cache_hit: bool = False -): - cache_hit_str = "*" if cache_hit else "" - print( - ( - "{}Match {} at loc {}({},{})\n {}\n {}^".format( - cache_hit_str, - expr, - loc, - lineno(loc, instring), - col(loc, instring), - line(loc, instring), - " " * (col(loc, instring) - 1), - ) - ) - ) - - -def _default_success_debug_action( - instring: str, - startloc: int, - endloc: int, - expr: "ParserElement", - toks: ParseResults, - cache_hit: bool = False, -): - cache_hit_str = "*" if cache_hit else "" - print("{}Matched {} -> {}".format(cache_hit_str, expr, toks.as_list())) - - -def _default_exception_debug_action( - instring: str, - loc: int, - expr: "ParserElement", - exc: Exception, - cache_hit: bool = False, -): - cache_hit_str = "*" if cache_hit else "" - print( - "{}Match {} failed, {} raised: {}".format( - cache_hit_str, expr, type(exc).__name__, exc - ) - ) - - -def null_debug_action(*args): - """'Do-nothing' debug action, to suppress debugging output during parsing.""" - - -class ParserElement(ABC): - """Abstract base level parser element class.""" - - DEFAULT_WHITE_CHARS: str = " \n\t\r" - verbose_stacktrace: bool = False - _literalStringClass: typing.Optional[type] = None - - @staticmethod - def set_default_whitespace_chars(chars: str) -> None: - r""" - Overrides the default whitespace chars - - Example:: - - # default whitespace chars are space, and newline - Word(alphas)[1, ...].parse_string("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl'] - - # change to just treat newline as significant - 
ParserElement.set_default_whitespace_chars(" \t") - Word(alphas)[1, ...].parse_string("abc def\nghi jkl") # -> ['abc', 'def'] - """ - ParserElement.DEFAULT_WHITE_CHARS = chars - - # update whitespace all parse expressions defined in this module - for expr in _builtin_exprs: - if expr.copyDefaultWhiteChars: - expr.whiteChars = set(chars) - - @staticmethod - def inline_literals_using(cls: type) -> None: - """ - Set class to be used for inclusion of string literals into a parser. - - Example:: - - # default literal class used is Literal - integer = Word(nums) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - date_str.parse_string("1999/12/31") # -> ['1999', '/', '12', '/', '31'] - - - # change to Suppress - ParserElement.inline_literals_using(Suppress) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - date_str.parse_string("1999/12/31") # -> ['1999', '12', '31'] - """ - ParserElement._literalStringClass = cls - - class DebugActions(NamedTuple): - debug_try: typing.Optional[DebugStartAction] - debug_match: typing.Optional[DebugSuccessAction] - debug_fail: typing.Optional[DebugExceptionAction] - - def __init__(self, savelist: bool = False): - self.parseAction: List[ParseAction] = list() - self.failAction: typing.Optional[ParseFailAction] = None - self.customName = None - self._defaultName = None - self.resultsName = None - self.saveAsList = savelist - self.skipWhitespace = True - self.whiteChars = set(ParserElement.DEFAULT_WHITE_CHARS) - self.copyDefaultWhiteChars = True - # used when checking for left-recursion - self.mayReturnEmpty = False - self.keepTabs = False - self.ignoreExprs: List["ParserElement"] = list() - self.debug = False - self.streamlined = False - # optimize exception handling for subclasses that don't advance parse index - self.mayIndexError = True - self.errmsg = "" - # mark results names as modal (report only last) or cumulative (list all) - self.modalResults = True - # custom debug actions - self.debugActions = self.DebugActions(None, None, None) - # avoid redundant calls to preParse - self.callPreparse = True - self.callDuringTry = False - self.suppress_warnings_: List[Diagnostics] = [] - - def suppress_warning(self, warning_type: Diagnostics) -> "ParserElement": - """ - Suppress warnings emitted for a particular diagnostic on this expression. - - Example:: - - base = pp.Forward() - base.suppress_warning(Diagnostics.warn_on_parse_using_empty_Forward) - - # statement would normally raise a warning, but is now suppressed - print(base.parseString("x")) - - """ - self.suppress_warnings_.append(warning_type) - return self - - def copy(self) -> "ParserElement": - """ - Make a copy of this :class:`ParserElement`. Useful for defining - different parse actions for the same parsing pattern, using copies of - the original parse element. 
- - Example:: - - integer = Word(nums).set_parse_action(lambda toks: int(toks[0])) - integerK = integer.copy().add_parse_action(lambda toks: toks[0] * 1024) + Suppress("K") - integerM = integer.copy().add_parse_action(lambda toks: toks[0] * 1024 * 1024) + Suppress("M") - - print((integerK | integerM | integer)[1, ...].parse_string("5K 100 640K 256M")) - - prints:: - - [5120, 100, 655360, 268435456] - - Equivalent form of ``expr.copy()`` is just ``expr()``:: - - integerM = integer().add_parse_action(lambda toks: toks[0] * 1024 * 1024) + Suppress("M") - """ - cpy = copy.copy(self) - cpy.parseAction = self.parseAction[:] - cpy.ignoreExprs = self.ignoreExprs[:] - if self.copyDefaultWhiteChars: - cpy.whiteChars = set(ParserElement.DEFAULT_WHITE_CHARS) - return cpy - - def set_results_name( - self, name: str, list_all_matches: bool = False, *, listAllMatches: bool = False - ) -> "ParserElement": - """ - Define name for referencing matching tokens as a nested attribute - of the returned parse results. - - Normally, results names are assigned as you would assign keys in a dict: - any existing value is overwritten by later values. If it is necessary to - keep all values captured for a particular results name, call ``set_results_name`` - with ``list_all_matches`` = True. - - NOTE: ``set_results_name`` returns a *copy* of the original :class:`ParserElement` object; - this is so that the client can define a basic element, such as an - integer, and reference it in multiple places with different names. - - You can also set results names using the abbreviated syntax, - ``expr("name")`` in place of ``expr.set_results_name("name")`` - - see :class:`__call__`. If ``list_all_matches`` is required, use - ``expr("name*")``. - - Example:: - - date_str = (integer.set_results_name("year") + '/' - + integer.set_results_name("month") + '/' - + integer.set_results_name("day")) - - # equivalent form: - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - """ - listAllMatches = listAllMatches or list_all_matches - return self._setResultsName(name, listAllMatches) - - def _setResultsName(self, name, listAllMatches=False): - if name is None: - return self - newself = self.copy() - if name.endswith("*"): - name = name[:-1] - listAllMatches = True - newself.resultsName = name - newself.modalResults = not listAllMatches - return newself - - def set_break(self, break_flag: bool = True) -> "ParserElement": - """ - Method to invoke the Python pdb debugger when this element is - about to be parsed. Set ``break_flag`` to ``True`` to enable, ``False`` to - disable. - """ - if break_flag: - _parseMethod = self._parse - - def breaker(instring, loc, doActions=True, callPreParse=True): - import pdb - - # this call to pdb.set_trace() is intentional, not a checkin error - pdb.set_trace() - return _parseMethod(instring, loc, doActions, callPreParse) - - breaker._originalParseMethod = _parseMethod - self._parse = breaker - else: - if hasattr(self._parse, "_originalParseMethod"): - self._parse = self._parse._originalParseMethod - return self - - def set_parse_action(self, *fns: ParseAction, **kwargs) -> "ParserElement": - """ - Define one or more actions to perform when successfully matching parse element definition. - - Parse actions can be called to perform data conversions, do extra validation, - update external data structures, or enhance or replace the parsed tokens. 
- Each parse action ``fn`` is a callable method with 0-3 arguments, called as - ``fn(s, loc, toks)`` , ``fn(loc, toks)`` , ``fn(toks)`` , or just ``fn()`` , where: - - - s = the original string being parsed (see note below) - - loc = the location of the matching substring - - toks = a list of the matched tokens, packaged as a :class:`ParseResults` object - - The parsed tokens are passed to the parse action as ParseResults. They can be - modified in place using list-style append, extend, and pop operations to update - the parsed list elements; and with dictionary-style item set and del operations - to add, update, or remove any named results. If the tokens are modified in place, - it is not necessary to return them with a return statement. - - Parse actions can also completely replace the given tokens, with another ``ParseResults`` - object, or with some entirely different object (common for parse actions that perform data - conversions). A convenient way to build a new parse result is to define the values - using a dict, and then create the return value using :class:`ParseResults.from_dict`. - - If None is passed as the ``fn`` parse action, all previously added parse actions for this - expression are cleared. - - Optional keyword arguments: - - - call_during_try = (default= ``False``) indicate if parse action should be run during - lookaheads and alternate testing. For parse actions that have side effects, it is - important to only call the parse action once it is determined that it is being - called as part of a successful parse. For parse actions that perform additional - validation, then call_during_try should be passed as True, so that the validation - code is included in the preliminary "try" parses. - - Note: the default parsing behavior is to expand tabs in the input string - before starting the parsing process. See :class:`parse_string` for more - information on parsing strings containing ```` s, and suggested - methods to maintain a consistent view of the parsed string, the parse - location, and line and column positions within the parsed string. - - Example:: - - # parse dates in the form YYYY/MM/DD - - # use parse action to convert toks from str to int at parse time - def convert_to_int(toks): - return int(toks[0]) - - # use a parse action to verify that the date is a valid date - def is_valid_date(instring, loc, toks): - from datetime import date - year, month, day = toks[::2] - try: - date(year, month, day) - except ValueError: - raise ParseException(instring, loc, "invalid date given") - - integer = Word(nums) - date_str = integer + '/' + integer + '/' + integer - - # add parse actions - integer.set_parse_action(convert_to_int) - date_str.set_parse_action(is_valid_date) - - # note that integer fields are now ints, not strings - date_str.run_tests(''' - # successful parse - note that integer fields were converted to ints - 1999/12/31 - - # fail - invalid date - 1999/13/31 - ''') - """ - if list(fns) == [None]: - self.parseAction = [] - else: - if not all(callable(fn) for fn in fns): - raise TypeError("parse actions must be callable") - self.parseAction = [_trim_arity(fn) for fn in fns] - self.callDuringTry = kwargs.get( - "call_during_try", kwargs.get("callDuringTry", False) - ) - return self - - def add_parse_action(self, *fns: ParseAction, **kwargs) -> "ParserElement": - """ - Add one or more parse actions to expression's list of parse actions. See :class:`set_parse_action`. - - See examples in :class:`copy`. 
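The signature flexibility described under ``set_parse_action``, shown concretely (a sketch; the forms below may be mixed on one expression, since the arity is trimmed automatically)::

    from pyparsing import Word, nums

    integer = Word(nums)
    integer.set_parse_action(lambda toks: int(toks[0]))    # fn(toks)
    integer.add_parse_action(lambda loc, toks: None)       # fn(loc, toks); returning None leaves tokens unchanged
    integer.add_parse_action(lambda s, loc, toks: None)    # fn(s, loc, toks)
    print(integer.parse_string("123"))                     # -> [123]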
- """ - self.parseAction += [_trim_arity(fn) for fn in fns] - self.callDuringTry = self.callDuringTry or kwargs.get( - "call_during_try", kwargs.get("callDuringTry", False) - ) - return self - - def add_condition(self, *fns: ParseCondition, **kwargs) -> "ParserElement": - """Add a boolean predicate function to expression's list of parse actions. See - :class:`set_parse_action` for function call signatures. Unlike ``set_parse_action``, - functions passed to ``add_condition`` need to return boolean success/fail of the condition. - - Optional keyword arguments: - - - message = define a custom message to be used in the raised exception - - fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise - ParseException - - call_during_try = boolean to indicate if this method should be called during internal tryParse calls, - default=False - - Example:: - - integer = Word(nums).set_parse_action(lambda toks: int(toks[0])) - year_int = integer.copy() - year_int.add_condition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later") - date_str = year_int + '/' + integer + '/' + integer - - result = date_str.parse_string("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0), - (line:1, col:1) - """ - for fn in fns: - self.parseAction.append( - condition_as_parse_action( - fn, message=kwargs.get("message"), fatal=kwargs.get("fatal", False) - ) - ) - - self.callDuringTry = self.callDuringTry or kwargs.get( - "call_during_try", kwargs.get("callDuringTry", False) - ) - return self - - def set_fail_action(self, fn: ParseFailAction) -> "ParserElement": - """ - Define action to perform if parsing fails at this expression. - Fail acton fn is a callable function that takes the arguments - ``fn(s, loc, expr, err)`` where: - - - s = string being parsed - - loc = location where expression match was attempted and failed - - expr = the parse expression that failed - - err = the exception thrown - - The function returns no value. 
It may throw :class:`ParseFatalException` - if it is desired to stop parsing immediately.""" - self.failAction = fn - return self - - def _skipIgnorables(self, instring, loc): - exprsFound = True - while exprsFound: - exprsFound = False - for e in self.ignoreExprs: - try: - while 1: - loc, dummy = e._parse(instring, loc) - exprsFound = True - except ParseException: - pass - return loc - - def preParse(self, instring, loc): - if self.ignoreExprs: - loc = self._skipIgnorables(instring, loc) - - if self.skipWhitespace: - instrlen = len(instring) - white_chars = self.whiteChars - while loc < instrlen and instring[loc] in white_chars: - loc += 1 - - return loc - - def parseImpl(self, instring, loc, doActions=True): - return loc, [] - - def postParse(self, instring, loc, tokenlist): - return tokenlist - - # @profile - def _parseNoCache( - self, instring, loc, doActions=True, callPreParse=True - ) -> Tuple[int, ParseResults]: - TRY, MATCH, FAIL = 0, 1, 2 - debugging = self.debug # and doActions) - len_instring = len(instring) - - if debugging or self.failAction: - # print("Match {} at loc {}({}, {})".format(self, loc, lineno(loc, instring), col(loc, instring))) - try: - if callPreParse and self.callPreparse: - pre_loc = self.preParse(instring, loc) - else: - pre_loc = loc - tokens_start = pre_loc - if self.debugActions.debug_try: - self.debugActions.debug_try(instring, tokens_start, self, False) - if self.mayIndexError or pre_loc >= len_instring: - try: - loc, tokens = self.parseImpl(instring, pre_loc, doActions) - except IndexError: - raise ParseException(instring, len_instring, self.errmsg, self) - else: - loc, tokens = self.parseImpl(instring, pre_loc, doActions) - except Exception as err: - # print("Exception raised:", err) - if self.debugActions.debug_fail: - self.debugActions.debug_fail( - instring, tokens_start, self, err, False - ) - if self.failAction: - self.failAction(instring, tokens_start, self, err) - raise - else: - if callPreParse and self.callPreparse: - pre_loc = self.preParse(instring, loc) - else: - pre_loc = loc - tokens_start = pre_loc - if self.mayIndexError or pre_loc >= len_instring: - try: - loc, tokens = self.parseImpl(instring, pre_loc, doActions) - except IndexError: - raise ParseException(instring, len_instring, self.errmsg, self) - else: - loc, tokens = self.parseImpl(instring, pre_loc, doActions) - - tokens = self.postParse(instring, loc, tokens) - - ret_tokens = ParseResults( - tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults - ) - if self.parseAction and (doActions or self.callDuringTry): - if debugging: - try: - for fn in self.parseAction: - try: - tokens = fn(instring, tokens_start, ret_tokens) - except IndexError as parse_action_exc: - exc = ParseException("exception raised in parse action") - raise exc from parse_action_exc - - if tokens is not None and tokens is not ret_tokens: - ret_tokens = ParseResults( - tokens, - self.resultsName, - asList=self.saveAsList - and isinstance(tokens, (ParseResults, list)), - modal=self.modalResults, - ) - except Exception as err: - # print "Exception raised in user parse action:", err - if self.debugActions.debug_fail: - self.debugActions.debug_fail( - instring, tokens_start, self, err, False - ) - raise - else: - for fn in self.parseAction: - try: - tokens = fn(instring, tokens_start, ret_tokens) - except IndexError as parse_action_exc: - exc = ParseException("exception raised in parse action") - raise exc from parse_action_exc - - if tokens is not None and tokens is not ret_tokens: - ret_tokens = 
ParseResults( - tokens, - self.resultsName, - asList=self.saveAsList - and isinstance(tokens, (ParseResults, list)), - modal=self.modalResults, - ) - if debugging: - # print("Matched", self, "->", ret_tokens.as_list()) - if self.debugActions.debug_match: - self.debugActions.debug_match( - instring, tokens_start, loc, self, ret_tokens, False - ) - - return loc, ret_tokens - - def try_parse(self, instring: str, loc: int, raise_fatal: bool = False) -> int: - try: - return self._parse(instring, loc, doActions=False)[0] - except ParseFatalException: - if raise_fatal: - raise - raise ParseException(instring, loc, self.errmsg, self) - - def can_parse_next(self, instring: str, loc: int) -> bool: - try: - self.try_parse(instring, loc) - except (ParseException, IndexError): - return False - else: - return True - - # cache for left-recursion in Forward references - recursion_lock = RLock() - recursion_memos: typing.Dict[ - Tuple[int, "Forward", bool], Tuple[int, Union[ParseResults, Exception]] - ] = {} - - # argument cache for optimizing repeated calls when backtracking through recursive expressions - packrat_cache = ( - {} - ) # this is set later by enabled_packrat(); this is here so that reset_cache() doesn't fail - packrat_cache_lock = RLock() - packrat_cache_stats = [0, 0] - - # this method gets repeatedly called during backtracking with the same arguments - - # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression - def _parseCache( - self, instring, loc, doActions=True, callPreParse=True - ) -> Tuple[int, ParseResults]: - HIT, MISS = 0, 1 - TRY, MATCH, FAIL = 0, 1, 2 - lookup = (self, instring, loc, callPreParse, doActions) - with ParserElement.packrat_cache_lock: - cache = ParserElement.packrat_cache - value = cache.get(lookup) - if value is cache.not_in_cache: - ParserElement.packrat_cache_stats[MISS] += 1 - try: - value = self._parseNoCache(instring, loc, doActions, callPreParse) - except ParseBaseException as pe: - # cache a copy of the exception, without the traceback - cache.set(lookup, pe.__class__(*pe.args)) - raise - else: - cache.set(lookup, (value[0], value[1].copy(), loc)) - return value - else: - ParserElement.packrat_cache_stats[HIT] += 1 - if self.debug and self.debugActions.debug_try: - try: - self.debugActions.debug_try(instring, loc, self, cache_hit=True) - except TypeError: - pass - if isinstance(value, Exception): - if self.debug and self.debugActions.debug_fail: - try: - self.debugActions.debug_fail( - instring, loc, self, value, cache_hit=True - ) - except TypeError: - pass - raise value - - loc_, result, endloc = value[0], value[1].copy(), value[2] - if self.debug and self.debugActions.debug_match: - try: - self.debugActions.debug_match( - instring, loc_, endloc, self, result, cache_hit=True - ) - except TypeError: - pass - - return loc_, result - - _parse = _parseNoCache - - @staticmethod - def reset_cache() -> None: - ParserElement.packrat_cache.clear() - ParserElement.packrat_cache_stats[:] = [0] * len( - ParserElement.packrat_cache_stats - ) - ParserElement.recursion_memos.clear() - - _packratEnabled = False - _left_recursion_enabled = False - - @staticmethod - def disable_memoization() -> None: - """ - Disables active Packrat or Left Recursion parsing and their memoization - - This method also works if neither Packrat nor Left Recursion are enabled. - This makes it safe to call before activating Packrat nor Left Recursion - to clear any previous settings. 
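A sketch of the safe-switching pattern this enables (assuming ``pyparsing`` 3.x)::

    import pyparsing as pp

    pp.ParserElement.enable_packrat()
    # ... later, to change strategies: clear packrat first, then enable recursion
    pp.ParserElement.disable_memoization()
    pp.ParserElement.enable_left_recursion()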
- """ - ParserElement.reset_cache() - ParserElement._left_recursion_enabled = False - ParserElement._packratEnabled = False - ParserElement._parse = ParserElement._parseNoCache - - @staticmethod - def enable_left_recursion( - cache_size_limit: typing.Optional[int] = None, *, force=False - ) -> None: - """ - Enables "bounded recursion" parsing, which allows for both direct and indirect - left-recursion. During parsing, left-recursive :class:`Forward` elements are - repeatedly matched with a fixed recursion depth that is gradually increased - until finding the longest match. - - Example:: - - import pyparsing as pp - pp.ParserElement.enable_left_recursion() - - E = pp.Forward("E") - num = pp.Word(pp.nums) - # match `num`, or `num '+' num`, or `num '+' num '+' num`, ... - E <<= E + '+' - num | num - - print(E.parse_string("1+2+3")) - - Recursion search naturally memoizes matches of ``Forward`` elements and may - thus skip reevaluation of parse actions during backtracking. This may break - programs with parse actions which rely on strict ordering of side-effects. - - Parameters: - - - cache_size_limit - (default=``None``) - memoize at most this many - ``Forward`` elements during matching; if ``None`` (the default), - memoize all ``Forward`` elements. - - Bounded Recursion parsing works similar but not identical to Packrat parsing, - thus the two cannot be used together. Use ``force=True`` to disable any - previous, conflicting settings. - """ - if force: - ParserElement.disable_memoization() - elif ParserElement._packratEnabled: - raise RuntimeError("Packrat and Bounded Recursion are not compatible") - if cache_size_limit is None: - ParserElement.recursion_memos = _UnboundedMemo() - elif cache_size_limit > 0: - ParserElement.recursion_memos = _LRUMemo(capacity=cache_size_limit) - else: - raise NotImplementedError("Memo size of %s" % cache_size_limit) - ParserElement._left_recursion_enabled = True - - @staticmethod - def enable_packrat(cache_size_limit: int = 128, *, force: bool = False) -> None: - """ - Enables "packrat" parsing, which adds memoizing to the parsing logic. - Repeated parse attempts at the same string location (which happens - often in many complex grammars) can immediately return a cached value, - instead of re-executing parsing/validating code. Memoizing is done of - both valid results and parsing exceptions. - - Parameters: - - - cache_size_limit - (default= ``128``) - if an integer value is provided - will limit the size of the packrat cache; if None is passed, then - the cache size will be unbounded; if 0 is passed, the cache will - be effectively disabled. - - This speedup may break existing programs that use parse actions that - have side-effects. For this reason, packrat parsing is disabled when - you first import pyparsing. To activate the packrat feature, your - program must call the class method :class:`ParserElement.enable_packrat`. - For best results, call ``enable_packrat()`` immediately after - importing pyparsing. - - Example:: - - import pyparsing - pyparsing.ParserElement.enable_packrat() - - Packrat parsing works similar but not identical to Bounded Recursion parsing, - thus the two cannot be used together. Use ``force=True`` to disable any - previous, conflicting settings. 
- """ - if force: - ParserElement.disable_memoization() - elif ParserElement._left_recursion_enabled: - raise RuntimeError("Packrat and Bounded Recursion are not compatible") - if not ParserElement._packratEnabled: - ParserElement._packratEnabled = True - if cache_size_limit is None: - ParserElement.packrat_cache = _UnboundedCache() - else: - ParserElement.packrat_cache = _FifoCache(cache_size_limit) - ParserElement._parse = ParserElement._parseCache - - def parse_string( - self, instring: str, parse_all: bool = False, *, parseAll: bool = False - ) -> ParseResults: - """ - Parse a string with respect to the parser definition. This function is intended as the primary interface to the - client code. - - :param instring: The input string to be parsed. - :param parse_all: If set, the entire input string must match the grammar. - :param parseAll: retained for pre-PEP8 compatibility, will be removed in a future release. - :raises ParseException: Raised if ``parse_all`` is set and the input string does not match the whole grammar. - :returns: the parsed data as a :class:`ParseResults` object, which may be accessed as a `list`, a `dict`, or - an object with attributes if the given parser includes results names. - - If the input string is required to match the entire grammar, ``parse_all`` flag must be set to ``True``. This - is also equivalent to ending the grammar with :class:`StringEnd`(). - - To report proper column numbers, ``parse_string`` operates on a copy of the input string where all tabs are - converted to spaces (8 spaces per tab, as per the default in ``string.expandtabs``). If the input string - contains tabs and the grammar uses parse actions that use the ``loc`` argument to index into the string - being parsed, one can ensure a consistent view of the input string by doing one of the following: - - - calling ``parse_with_tabs`` on your grammar before calling ``parse_string`` (see :class:`parse_with_tabs`), - - define your parse action using the full ``(s,loc,toks)`` signature, and reference the input string using the - parse action's ``s`` argument, or - - explicitly expand the tabs in your input string before calling ``parse_string``. - - Examples: - - By default, partial matches are OK. - - >>> res = Word('a').parse_string('aaaaabaaa') - >>> print(res) - ['aaaaa'] - - The parsing behavior varies by the inheriting class of this abstract class. Please refer to the children - directly to see more examples. - - It raises an exception if parse_all flag is set and instring does not match the whole grammar. - - >>> res = Word('a').parse_string('aaaaabaaa', parse_all=True) - Traceback (most recent call last): - ... 
- pyparsing.ParseException: Expected end of text, found 'b' (at char 5), (line:1, col:6) - """ - parseAll = parse_all or parseAll - - ParserElement.reset_cache() - if not self.streamlined: - self.streamline() - for e in self.ignoreExprs: - e.streamline() - if not self.keepTabs: - instring = instring.expandtabs() - try: - loc, tokens = self._parse(instring, 0) - if parseAll: - loc = self.preParse(instring, loc) - se = Empty() + StringEnd() - se._parse(instring, loc) - except ParseBaseException as exc: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clearing out pyparsing internal stack trace - raise exc.with_traceback(None) - else: - return tokens - - def scan_string( - self, - instring: str, - max_matches: int = _MAX_INT, - overlap: bool = False, - *, - debug: bool = False, - maxMatches: int = _MAX_INT, - ) -> Generator[Tuple[ParseResults, int, int], None, None]: - """ - Scan the input string for expression matches. Each match will return the - matching tokens, start location, and end location. May be called with optional - ``max_matches`` argument, to clip scanning after 'n' matches are found. If - ``overlap`` is specified, then overlapping matches will be reported. - - Note that the start and end locations are reported relative to the string - being parsed. See :class:`parse_string` for more information on parsing - strings with embedded tabs. - - Example:: - - source = "sldjf123lsdjjkf345sldkjf879lkjsfd987" - print(source) - for tokens, start, end in Word(alphas).scan_string(source): - print(' '*start + '^'*(end-start)) - print(' '*start + tokens[0]) - - prints:: - - sldjf123lsdjjkf345sldkjf879lkjsfd987 - ^^^^^ - sldjf - ^^^^^^^ - lsdjjkf - ^^^^^^ - sldkjf - ^^^^^^ - lkjsfd - """ - maxMatches = min(maxMatches, max_matches) - if not self.streamlined: - self.streamline() - for e in self.ignoreExprs: - e.streamline() - - if not self.keepTabs: - instring = str(instring).expandtabs() - instrlen = len(instring) - loc = 0 - preparseFn = self.preParse - parseFn = self._parse - ParserElement.resetCache() - matches = 0 - try: - while loc <= instrlen and matches < maxMatches: - try: - preloc = preparseFn(instring, loc) - nextLoc, tokens = parseFn(instring, preloc, callPreParse=False) - except ParseException: - loc = preloc + 1 - else: - if nextLoc > loc: - matches += 1 - if debug: - print( - { - "tokens": tokens.asList(), - "start": preloc, - "end": nextLoc, - } - ) - yield tokens, preloc, nextLoc - if overlap: - nextloc = preparseFn(instring, loc) - if nextloc > loc: - loc = nextLoc - else: - loc += 1 - else: - loc = nextLoc - else: - loc = preloc + 1 - except ParseBaseException as exc: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clears out pyparsing internal stack trace - raise exc.with_traceback(None) - - def transform_string(self, instring: str, *, debug: bool = False) -> str: - """ - Extension to :class:`scan_string`, to modify matching text with modified tokens that may - be returned from a parse action. To use ``transform_string``, define a grammar and - attach a parse action to it that modifies the returned token list. - Invoking ``transform_string()`` on a target string will then scan for matches, - and replace the matched text patterns according to the logic in the parse - action. ``transform_string()`` returns the resulting transformed string. 
- - Example:: - - wd = Word(alphas) - wd.set_parse_action(lambda toks: toks[0].title()) - - print(wd.transform_string("now is the winter of our discontent made glorious summer by this sun of york.")) - - prints:: - - Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York. - """ - out: List[str] = [] - lastE = 0 - # force preservation of s, to minimize unwanted transformation of string, and to - # keep string locs straight between transform_string and scan_string - self.keepTabs = True - try: - for t, s, e in self.scan_string(instring, debug=debug): - out.append(instring[lastE:s]) - if t: - if isinstance(t, ParseResults): - out += t.as_list() - elif isinstance(t, Iterable) and not isinstance(t, str_type): - out.extend(t) - else: - out.append(t) - lastE = e - out.append(instring[lastE:]) - out = [o for o in out if o] - return "".join([str(s) for s in _flatten(out)]) - except ParseBaseException as exc: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clears out pyparsing internal stack trace - raise exc.with_traceback(None) - - def search_string( - self, - instring: str, - max_matches: int = _MAX_INT, - *, - debug: bool = False, - maxMatches: int = _MAX_INT, - ) -> ParseResults: - """ - Another extension to :class:`scan_string`, simplifying the access to the tokens found - to match the given parse expression. May be called with optional - ``max_matches`` argument, to clip searching after 'n' matches are found. - - Example:: - - # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters - cap_word = Word(alphas.upper(), alphas.lower()) - - print(cap_word.search_string("More than Iron, more than Lead, more than Gold I need Electricity")) - - # the sum() builtin can be used to merge results into a single ParseResults object - print(sum(cap_word.search_string("More than Iron, more than Lead, more than Gold I need Electricity"))) - - prints:: - - [['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']] - ['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity'] - """ - maxMatches = min(maxMatches, max_matches) - try: - return ParseResults( - [t for t, s, e in self.scan_string(instring, maxMatches, debug=debug)] - ) - except ParseBaseException as exc: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clears out pyparsing internal stack trace - raise exc.with_traceback(None) - - def split( - self, - instring: str, - maxsplit: int = _MAX_INT, - include_separators: bool = False, - *, - includeSeparators=False, - ) -> Generator[str, None, None]: - """ - Generator method to split a string using the given expression as a separator. - May be called with optional ``maxsplit`` argument, to limit the number of splits; - and the optional ``include_separators`` argument (default= ``False``), if the separating - matching text should be included in the split results. - - Example:: - - punc = one_of(list(".,;:/-!?")) - print(list(punc.split("This, this?, this sentence, is badly punctuated!"))) - - prints:: - - ['This', ' this', '', ' this sentence', ' is badly punctuated', ''] - """ - includeSeparators = includeSeparators or include_separators - last = 0 - for t, s, e in self.scan_string(instring, max_matches=maxsplit): - yield instring[last:s] - if includeSeparators: - yield t[0] - last = e - yield instring[last:] - - def __add__(self, other) -> "ParserElement": - """ - Implementation of ``+`` operator - returns :class:`And`. 
Adding strings to a :class:`ParserElement` - converts them to :class:`Literal`s by default. - - Example:: - - greet = Word(alphas) + "," + Word(alphas) + "!" - hello = "Hello, World!" - print(hello, "->", greet.parse_string(hello)) - - prints:: - - Hello, World! -> ['Hello', ',', 'World', '!'] - - ``...`` may be used as a parse expression as a short form of :class:`SkipTo`. - - Literal('start') + ... + Literal('end') - - is equivalent to: - - Literal('start') + SkipTo('end')("_skipped*") + Literal('end') - - Note that the skipped text is returned with '_skipped' as a results name, - and to support having multiple skips in the same parser, the value returned is - a list of all skipped text. - """ - if other is Ellipsis: - return _PendingSkip(self) - - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - raise TypeError( - "Cannot combine element of type {} with ParserElement".format( - type(other).__name__ - ) - ) - return And([self, other]) - - def __radd__(self, other) -> "ParserElement": - """ - Implementation of ``+`` operator when left operand is not a :class:`ParserElement` - """ - if other is Ellipsis: - return SkipTo(self)("_skipped*") + self - - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - raise TypeError( - "Cannot combine element of type {} with ParserElement".format( - type(other).__name__ - ) - ) - return other + self - - def __sub__(self, other) -> "ParserElement": - """ - Implementation of ``-`` operator, returns :class:`And` with error stop - """ - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - raise TypeError( - "Cannot combine element of type {} with ParserElement".format( - type(other).__name__ - ) - ) - return self + And._ErrorStop() + other - - def __rsub__(self, other) -> "ParserElement": - """ - Implementation of ``-`` operator when left operand is not a :class:`ParserElement` - """ - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - raise TypeError( - "Cannot combine element of type {} with ParserElement".format( - type(other).__name__ - ) - ) - return other - self - - def __mul__(self, other) -> "ParserElement": - """ - Implementation of ``*`` operator, allows use of ``expr * 3`` in place of - ``expr + expr + expr``. Expressions may also be multiplied by a 2-integer - tuple, similar to ``{min, max}`` multipliers in regular expressions. Tuples - may also include ``None`` as in: - - ``expr*(n, None)`` or ``expr*(n, )`` is equivalent - to ``expr*n + ZeroOrMore(expr)`` - (read as "at least n instances of ``expr``") - - ``expr*(None, n)`` is equivalent to ``expr*(0, n)`` - (read as "0 to n instances of ``expr``") - - ``expr*(None, None)`` is equivalent to ``ZeroOrMore(expr)`` - - ``expr*(1, None)`` is equivalent to ``OneOrMore(expr)`` - - Note that ``expr*(None, n)`` does not raise an exception if - more than n exprs exist in the input stream; that is, - ``expr*(None, n)`` does not enforce a maximum number of expr - occurrences. 
If this behavior is desired, then write - ``expr*(None, n) + ~expr`` - """ - if other is Ellipsis: - other = (0, None) - elif isinstance(other, tuple) and other[:1] == (Ellipsis,): - other = ((0,) + other[1:] + (None,))[:2] - - if isinstance(other, int): - minElements, optElements = other, 0 - elif isinstance(other, tuple): - other = tuple(o if o is not Ellipsis else None for o in other) - other = (other + (None, None))[:2] - if other[0] is None: - other = (0, other[1]) - if isinstance(other[0], int) and other[1] is None: - if other[0] == 0: - return ZeroOrMore(self) - if other[0] == 1: - return OneOrMore(self) - else: - return self * other[0] + ZeroOrMore(self) - elif isinstance(other[0], int) and isinstance(other[1], int): - minElements, optElements = other - optElements -= minElements - else: - raise TypeError( - "cannot multiply ParserElement and ({}) objects".format( - ",".join(type(item).__name__ for item in other) - ) - ) - else: - raise TypeError( - "cannot multiply ParserElement and {} objects".format( - type(other).__name__ - ) - ) - - if minElements < 0: - raise ValueError("cannot multiply ParserElement by negative value") - if optElements < 0: - raise ValueError( - "second tuple value must be greater or equal to first tuple value" - ) - if minElements == optElements == 0: - return And([]) - - if optElements: - - def makeOptionalList(n): - if n > 1: - return Opt(self + makeOptionalList(n - 1)) - else: - return Opt(self) - - if minElements: - if minElements == 1: - ret = self + makeOptionalList(optElements) - else: - ret = And([self] * minElements) + makeOptionalList(optElements) - else: - ret = makeOptionalList(optElements) - else: - if minElements == 1: - ret = self - else: - ret = And([self] * minElements) - return ret - - def __rmul__(self, other) -> "ParserElement": - return self.__mul__(other) - - def __or__(self, other) -> "ParserElement": - """ - Implementation of ``|`` operator - returns :class:`MatchFirst` - """ - if other is Ellipsis: - return _PendingSkip(self, must_skip=True) - - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - raise TypeError( - "Cannot combine element of type {} with ParserElement".format( - type(other).__name__ - ) - ) - return MatchFirst([self, other]) - - def __ror__(self, other) -> "ParserElement": - """ - Implementation of ``|`` operator when left operand is not a :class:`ParserElement` - """ - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - raise TypeError( - "Cannot combine element of type {} with ParserElement".format( - type(other).__name__ - ) - ) - return other | self - - def __xor__(self, other) -> "ParserElement": - """ - Implementation of ``^`` operator - returns :class:`Or` - """ - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - raise TypeError( - "Cannot combine element of type {} with ParserElement".format( - type(other).__name__ - ) - ) - return Or([self, other]) - - def __rxor__(self, other) -> "ParserElement": - """ - Implementation of ``^`` operator when left operand is not a :class:`ParserElement` - """ - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - raise TypeError( - "Cannot combine element of type {} with ParserElement".format( - type(other).__name__ - ) - ) - return other ^ self - - def __and__(self, other) -> "ParserElement": - """ - 
Implementation of ``&`` operator - returns :class:`Each` - """ - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - raise TypeError( - "Cannot combine element of type {} with ParserElement".format( - type(other).__name__ - ) - ) - return Each([self, other]) - - def __rand__(self, other) -> "ParserElement": - """ - Implementation of ``&`` operator when left operand is not a :class:`ParserElement` - """ - if isinstance(other, str_type): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - raise TypeError( - "Cannot combine element of type {} with ParserElement".format( - type(other).__name__ - ) - ) - return other & self - - def __invert__(self) -> "ParserElement": - """ - Implementation of ``~`` operator - returns :class:`NotAny` - """ - return NotAny(self) - - # disable __iter__ to override legacy use of sequential access to __getitem__ to - # iterate over a sequence - __iter__ = None - - def __getitem__(self, key): - """ - use ``[]`` indexing notation as a short form for expression repetition: - - - ``expr[n]`` is equivalent to ``expr*n`` - - ``expr[m, n]`` is equivalent to ``expr*(m, n)`` - - ``expr[n, ...]`` or ``expr[n,]`` is equivalent - to ``expr*n + ZeroOrMore(expr)`` - (read as "at least n instances of ``expr``") - - ``expr[..., n]`` is equivalent to ``expr*(0, n)`` - (read as "0 to n instances of ``expr``") - - ``expr[...]`` and ``expr[0, ...]`` are equivalent to ``ZeroOrMore(expr)`` - - ``expr[1, ...]`` is equivalent to ``OneOrMore(expr)`` - - ``None`` may be used in place of ``...``. - - Note that ``expr[..., n]`` and ``expr[m, n]``do not raise an exception - if more than ``n`` ``expr``s exist in the input stream. If this behavior is - desired, then write ``expr[..., n] + ~expr``. - """ - - # convert single arg keys to tuples - try: - if isinstance(key, str_type): - key = (key,) - iter(key) - except TypeError: - key = (key, key) - - if len(key) > 2: - raise TypeError( - "only 1 or 2 index arguments supported ({}{})".format( - key[:5], "... [{}]".format(len(key)) if len(key) > 5 else "" - ) - ) - - # clip to 2 elements - ret = self * tuple(key[:2]) - return ret - - def __call__(self, name: str = None) -> "ParserElement": - """ - Shortcut for :class:`set_results_name`, with ``list_all_matches=False``. - - If ``name`` is given with a trailing ``'*'`` character, then ``list_all_matches`` will be - passed as ``True``. - - If ``name` is omitted, same as calling :class:`copy`. - - Example:: - - # these are equivalent - userdata = Word(alphas).set_results_name("name") + Word(nums + "-").set_results_name("socsecno") - userdata = Word(alphas)("name") + Word(nums + "-")("socsecno") - """ - if name is not None: - return self._setResultsName(name) - else: - return self.copy() - - def suppress(self) -> "ParserElement": - """ - Suppresses the output of this :class:`ParserElement`; useful to keep punctuation from - cluttering up returned output. - """ - return Suppress(self) - - def ignore_whitespace(self, recursive: bool = True) -> "ParserElement": - """ - Enables the skipping of whitespace before matching the characters in the - :class:`ParserElement`'s defined pattern. 
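For contrast, a brief sketch of both whitespace modes (default skipping versus ``leave_whitespace``, described next)::

    import pyparsing as pp

    ab = pp.Literal("a") + pp.Literal("b")
    print(ab.parse_string("a   b"))   # -> ['a', 'b']; whitespace skipped by default

    strict = (pp.Literal("a") + pp.Literal("b")).leave_whitespace()
    print(strict.matches("ab"))       # -> True
    print(strict.matches("a   b"))    # -> False; whitespace is now significant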
- - :param recursive: If ``True`` (the default), also enable whitespace skipping in child elements (if any) - """ - self.skipWhitespace = True - return self - - def leave_whitespace(self, recursive: bool = True) -> "ParserElement": - """ - Disables the skipping of whitespace before matching the characters in the - :class:`ParserElement`'s defined pattern. This is normally only used internally by - the pyparsing module, but may be needed in some whitespace-sensitive grammars. - - :param recursive: If true (the default), also disable whitespace skipping in child elements (if any) - """ - self.skipWhitespace = False - return self - - def set_whitespace_chars( - self, chars: Union[Set[str], str], copy_defaults: bool = False - ) -> "ParserElement": - """ - Overrides the default whitespace chars - """ - self.skipWhitespace = True - self.whiteChars = set(chars) - self.copyDefaultWhiteChars = copy_defaults - return self - - def parse_with_tabs(self) -> "ParserElement": - """ - Overrides default behavior to expand ```` s to spaces before parsing the input string. - Must be called before ``parse_string`` when the input grammar contains elements that - match ```` characters. - """ - self.keepTabs = True - return self - - def ignore(self, other: "ParserElement") -> "ParserElement": - """ - Define expression to be ignored (e.g., comments) while doing pattern - matching; may be called repeatedly, to define multiple comment or other - ignorable patterns. - - Example:: - - patt = Word(alphas)[1, ...] - patt.parse_string('ablaj /* comment */ lskjd') - # -> ['ablaj'] - - patt.ignore(c_style_comment) - patt.parse_string('ablaj /* comment */ lskjd') - # -> ['ablaj', 'lskjd'] - """ - import typing - - if isinstance(other, str_type): - other = Suppress(other) - - if isinstance(other, Suppress): - if other not in self.ignoreExprs: - self.ignoreExprs.append(other) - else: - self.ignoreExprs.append(Suppress(other.copy())) - return self - - def set_debug_actions( - self, - start_action: DebugStartAction, - success_action: DebugSuccessAction, - exception_action: DebugExceptionAction, - ) -> "ParserElement": - """ - Customize display of debugging messages while doing pattern matching: - - - ``start_action`` - method to be called when an expression is about to be parsed; - should have the signature ``fn(input_string: str, location: int, expression: ParserElement, cache_hit: bool)`` - - - ``success_action`` - method to be called when an expression has successfully parsed; - should have the signature ``fn(input_string: str, start_location: int, end_location: int, expression: ParserELement, parsed_tokens: ParseResults, cache_hit: bool)`` - - - ``exception_action`` - method to be called when expression fails to parse; - should have the signature ``fn(input_string: str, location: int, expression: ParserElement, exception: Exception, cache_hit: bool)`` - """ - self.debugActions = self.DebugActions( - start_action or _default_start_debug_action, - success_action or _default_success_debug_action, - exception_action or _default_exception_debug_action, - ) - self.debug = True - return self - - def set_debug(self, flag: bool = True) -> "ParserElement": - """ - Enable display of debugging messages while doing pattern matching. - Set ``flag`` to ``True`` to enable, ``False`` to disable. 
- - Example:: - - wd = Word(alphas).set_name("alphaword") - integer = Word(nums).set_name("numword") - term = wd | integer - - # turn on debugging for wd - wd.set_debug() - - term[1, ...].parse_string("abc 123 xyz 890") - - prints:: - - Match alphaword at loc 0(1,1) - Matched alphaword -> ['abc'] - Match alphaword at loc 3(1,4) - Exception raised:Expected alphaword (at char 4), (line:1, col:5) - Match alphaword at loc 7(1,8) - Matched alphaword -> ['xyz'] - Match alphaword at loc 11(1,12) - Exception raised:Expected alphaword (at char 12), (line:1, col:13) - Match alphaword at loc 15(1,16) - Exception raised:Expected alphaword (at char 15), (line:1, col:16) - - The output shown is that produced by the default debug actions - custom debug actions can be - specified using :class:`set_debug_actions`. Prior to attempting - to match the ``wd`` expression, the debugging message ``"Match at loc (,)"`` - is shown. Then if the parse succeeds, a ``"Matched"`` message is shown, or an ``"Exception raised"`` - message is shown. Also note the use of :class:`set_name` to assign a human-readable name to the expression, - which makes debugging and exception messages easier to understand - for instance, the default - name created for the :class:`Word` expression without calling ``set_name`` is ``"W:(A-Za-z)"``. - """ - if flag: - self.set_debug_actions( - _default_start_debug_action, - _default_success_debug_action, - _default_exception_debug_action, - ) - else: - self.debug = False - return self - - @property - def default_name(self) -> str: - if self._defaultName is None: - self._defaultName = self._generateDefaultName() - return self._defaultName - - @abstractmethod - def _generateDefaultName(self): - """ - Child classes must define this method, which defines how the ``default_name`` is set. - """ - - def set_name(self, name: str) -> "ParserElement": - """ - Define name for this expression, makes debugging and exception messages clearer. - Example:: - Word(nums).parse_string("ABC") # -> Exception: Expected W:(0-9) (at char 0), (line:1, col:1) - Word(nums).set_name("integer").parse_string("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1) - """ - self.customName = name - self.errmsg = "Expected " + self.name - if __diag__.enable_debug_on_named_expressions: - self.set_debug() - return self - - @property - def name(self) -> str: - # This will use a user-defined name if available, but otherwise defaults back to the auto-generated name - return self.customName if self.customName is not None else self.default_name - - def __str__(self) -> str: - return self.name - - def __repr__(self) -> str: - return str(self) - - def streamline(self) -> "ParserElement": - self.streamlined = True - self._defaultName = None - return self - - def recurse(self) -> Sequence["ParserElement"]: - return [] - - def _checkRecursion(self, parseElementList): - subRecCheckList = parseElementList[:] + [self] - for e in self.recurse(): - e._checkRecursion(subRecCheckList) - - def validate(self, validateTrace=None) -> None: - """ - Check defined expressions for valid structure, check for infinite recursive definitions. - """ - self._checkRecursion([]) - - def parse_file( - self, - file_or_filename: Union[str, Path, TextIO], - encoding: str = "utf-8", - parse_all: bool = False, - *, - parseAll: bool = False, - ) -> ParseResults: - """ - Execute the parse expression on the given file or filename. - If a filename is specified (instead of a file object), - the entire file is opened, read, and closed before parsing. 
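A minimal usage sketch (``settings.txt`` is a hypothetical file; open file objects are accepted as well)::

    import pyparsing as pp

    assignment = pp.Word(pp.alphas)("key") + pp.Suppress("=") + pp.Word(pp.nums)("value")
    # hypothetical file containing e.g. 'retries=3'
    result = assignment.parse_file("settings.txt")
    print(result.key, result.value)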
- """ - parseAll = parseAll or parse_all - try: - file_contents = file_or_filename.read() - except AttributeError: - with open(file_or_filename, "r", encoding=encoding) as f: - file_contents = f.read() - try: - return self.parse_string(file_contents, parseAll) - except ParseBaseException as exc: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clears out pyparsing internal stack trace - raise exc.with_traceback(None) - - def __eq__(self, other): - if self is other: - return True - elif isinstance(other, str_type): - return self.matches(other, parse_all=True) - elif isinstance(other, ParserElement): - return vars(self) == vars(other) - return False - - def __hash__(self): - return id(self) - - def matches( - self, test_string: str, parse_all: bool = True, *, parseAll: bool = True - ) -> bool: - """ - Method for quick testing of a parser against a test string. Good for simple - inline microtests of sub expressions while building up larger parser. - - Parameters: - - ``test_string`` - to test against this expression for a match - - ``parse_all`` - (default= ``True``) - flag to pass to :class:`parse_string` when running tests - - Example:: - - expr = Word(nums) - assert expr.matches("100") - """ - parseAll = parseAll and parse_all - try: - self.parse_string(str(test_string), parse_all=parseAll) - return True - except ParseBaseException: - return False - - def run_tests( - self, - tests: Union[str, List[str]], - parse_all: bool = True, - comment: typing.Optional[Union["ParserElement", str]] = "#", - full_dump: bool = True, - print_results: bool = True, - failure_tests: bool = False, - post_parse: Callable[[str, ParseResults], str] = None, - file: typing.Optional[TextIO] = None, - with_line_numbers: bool = False, - *, - parseAll: bool = True, - fullDump: bool = True, - printResults: bool = True, - failureTests: bool = False, - postParse: Callable[[str, ParseResults], str] = None, - ) -> Tuple[bool, List[Tuple[str, Union[ParseResults, Exception]]]]: - """ - Execute the parse expression on a series of test strings, showing each - test, the parsed results or where the parse failed. Quick and easy way to - run a parse expression against a list of sample strings. 
- - Parameters: - - ``tests`` - a list of separate test strings, or a multiline string of test strings - - ``parse_all`` - (default= ``True``) - flag to pass to :class:`parse_string` when running tests - - ``comment`` - (default= ``'#'``) - expression for indicating embedded comments in the test - string; pass None to disable comment filtering - - ``full_dump`` - (default= ``True``) - dump results as list followed by results names in nested outline; - if False, only dump nested list - - ``print_results`` - (default= ``True``) prints test output to stdout - - ``failure_tests`` - (default= ``False``) indicates if these tests are expected to fail parsing - - ``post_parse`` - (default= ``None``) optional callback for successful parse results; called as - `fn(test_string, parse_results)` and returns a string to be added to the test output - - ``file`` - (default= ``None``) optional file-like object to which test output will be written; - if None, will default to ``sys.stdout`` - - ``with_line_numbers`` - default= ``False``) show test strings with line and column numbers - - Returns: a (success, results) tuple, where success indicates that all tests succeeded - (or failed if ``failure_tests`` is True), and the results contain a list of lines of each - test's output - - Example:: - - number_expr = pyparsing_common.number.copy() - - result = number_expr.run_tests(''' - # unsigned integer - 100 - # negative integer - -100 - # float with scientific notation - 6.02e23 - # integer with scientific notation - 1e-12 - ''') - print("Success" if result[0] else "Failed!") - - result = number_expr.run_tests(''' - # stray character - 100Z - # missing leading digit before '.' - -.100 - # too many '.' - 3.14.159 - ''', failure_tests=True) - print("Success" if result[0] else "Failed!") - - prints:: - - # unsigned integer - 100 - [100] - - # negative integer - -100 - [-100] - - # float with scientific notation - 6.02e23 - [6.02e+23] - - # integer with scientific notation - 1e-12 - [1e-12] - - Success - - # stray character - 100Z - ^ - FAIL: Expected end of text (at char 3), (line:1, col:4) - - # missing leading digit before '.' - -.100 - ^ - FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1) - - # too many '.' - 3.14.159 - ^ - FAIL: Expected end of text (at char 4), (line:1, col:5) - - Success - - Each test string must be on a single line. If you want to test a string that spans multiple - lines, create a test like this:: - - expr.run_tests(r"this is a test\\n of strings that spans \\n 3 lines") - - (Note that this is a raw string literal, you must include the leading ``'r'``.) 
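A sketch of the ``post_parse`` hook described above (``show_total`` is a hypothetical helper; its return string is appended to each test's output)::

    import pyparsing as pp

    numbers = pp.Word(pp.nums)[1, ...].add_parse_action(lambda t: [int(x) for x in t])

    def show_total(test_string, result):
        # hypothetical helper - summarize the parsed integers
        return "total = {}".format(sum(result))

    numbers.run_tests("""
        1 2 3
        10 20
        """, post_parse=show_total)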
- """ - from .testing import pyparsing_test - - parseAll = parseAll and parse_all - fullDump = fullDump and full_dump - printResults = printResults and print_results - failureTests = failureTests or failure_tests - postParse = postParse or post_parse - if isinstance(tests, str_type): - line_strip = type(tests).strip - tests = [line_strip(test_line) for test_line in tests.rstrip().splitlines()] - if isinstance(comment, str_type): - comment = Literal(comment) - if file is None: - file = sys.stdout - print_ = file.write - - result: Union[ParseResults, Exception] - allResults = [] - comments = [] - success = True - NL = Literal(r"\n").add_parse_action(replace_with("\n")).ignore(quoted_string) - BOM = "\ufeff" - for t in tests: - if comment is not None and comment.matches(t, False) or comments and not t: - comments.append( - pyparsing_test.with_line_numbers(t) if with_line_numbers else t - ) - continue - if not t: - continue - out = [ - "\n" + "\n".join(comments) if comments else "", - pyparsing_test.with_line_numbers(t) if with_line_numbers else t, - ] - comments = [] - try: - # convert newline marks to actual newlines, and strip leading BOM if present - t = NL.transform_string(t.lstrip(BOM)) - result = self.parse_string(t, parse_all=parseAll) - except ParseBaseException as pe: - fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else "" - out.append(pe.explain()) - out.append("FAIL: " + str(pe)) - if ParserElement.verbose_stacktrace: - out.extend(traceback.format_tb(pe.__traceback__)) - success = success and failureTests - result = pe - except Exception as exc: - out.append("FAIL-EXCEPTION: {}: {}".format(type(exc).__name__, exc)) - if ParserElement.verbose_stacktrace: - out.extend(traceback.format_tb(exc.__traceback__)) - success = success and failureTests - result = exc - else: - success = success and not failureTests - if postParse is not None: - try: - pp_value = postParse(t, result) - if pp_value is not None: - if isinstance(pp_value, ParseResults): - out.append(pp_value.dump()) - else: - out.append(str(pp_value)) - else: - out.append(result.dump()) - except Exception as e: - out.append(result.dump(full=fullDump)) - out.append( - "{} failed: {}: {}".format( - postParse.__name__, type(e).__name__, e - ) - ) - else: - out.append(result.dump(full=fullDump)) - out.append("") - - if printResults: - print_("\n".join(out)) - - allResults.append((t, result)) - - return success, allResults - - def create_diagram( - self, - output_html: Union[TextIO, Path, str], - vertical: int = 3, - show_results_names: bool = False, - show_groups: bool = False, - **kwargs, - ) -> None: - """ - Create a railroad diagram for the parser. - - Parameters: - - output_html (str or file-like object) - output target for generated - diagram HTML - - vertical (int) - threshold for formatting multiple alternatives vertically - instead of horizontally (default=3) - - show_results_names - bool flag whether diagram should show annotations for - defined results names - - show_groups - bool flag whether groups should be highlighted with an unlabeled surrounding box - Additional diagram-formatting keyword arguments can also be included; - see railroad.Diagram class. 
- """ - - try: - from .diagram import to_railroad, railroad_to_html - except ImportError as ie: - raise Exception( - "must ``pip install pyparsing[diagrams]`` to generate parser railroad diagrams" - ) from ie - - self.streamline() - - railroad = to_railroad( - self, - vertical=vertical, - show_results_names=show_results_names, - show_groups=show_groups, - diagram_kwargs=kwargs, - ) - if isinstance(output_html, (str, Path)): - with open(output_html, "w", encoding="utf-8") as diag_file: - diag_file.write(railroad_to_html(railroad)) - else: - # we were passed a file-like object, just write to it - output_html.write(railroad_to_html(railroad)) - - setDefaultWhitespaceChars = set_default_whitespace_chars - inlineLiteralsUsing = inline_literals_using - setResultsName = set_results_name - setBreak = set_break - setParseAction = set_parse_action - addParseAction = add_parse_action - addCondition = add_condition - setFailAction = set_fail_action - tryParse = try_parse - canParseNext = can_parse_next - resetCache = reset_cache - enableLeftRecursion = enable_left_recursion - enablePackrat = enable_packrat - parseString = parse_string - scanString = scan_string - searchString = search_string - transformString = transform_string - setWhitespaceChars = set_whitespace_chars - parseWithTabs = parse_with_tabs - setDebugActions = set_debug_actions - setDebug = set_debug - defaultName = default_name - setName = set_name - parseFile = parse_file - runTests = run_tests - ignoreWhitespace = ignore_whitespace - leaveWhitespace = leave_whitespace - - -class _PendingSkip(ParserElement): - # internal placeholder class to hold a place were '...' is added to a parser element, - # once another ParserElement is added, this placeholder will be replaced with a SkipTo - def __init__(self, expr: ParserElement, must_skip: bool = False): - super().__init__() - self.anchor = expr - self.must_skip = must_skip - - def _generateDefaultName(self): - return str(self.anchor + Empty()).replace("Empty", "...") - - def __add__(self, other) -> "ParserElement": - skipper = SkipTo(other).set_name("...")("_skipped*") - if self.must_skip: - - def must_skip(t): - if not t._skipped or t._skipped.as_list() == [""]: - del t[0] - t.pop("_skipped", None) - - def show_skip(t): - if t._skipped.as_list()[-1:] == [""]: - t.pop("_skipped") - t["_skipped"] = "missing <" + repr(self.anchor) + ">" - - return ( - self.anchor + skipper().add_parse_action(must_skip) - | skipper().add_parse_action(show_skip) - ) + other - - return self.anchor + skipper + other - - def __repr__(self): - return self.defaultName - - def parseImpl(self, *args): - raise Exception( - "use of `...` expression without following SkipTo target expression" - ) - - -class Token(ParserElement): - """Abstract :class:`ParserElement` subclass, for defining atomic - matching patterns. - """ - - def __init__(self): - super().__init__(savelist=False) - - def _generateDefaultName(self): - return type(self).__name__ - - -class Empty(Token): - """ - An empty token, will always match. - """ - - def __init__(self): - super().__init__() - self.mayReturnEmpty = True - self.mayIndexError = False - - -class NoMatch(Token): - """ - A token that will never match. - """ - - def __init__(self): - super().__init__() - self.mayReturnEmpty = True - self.mayIndexError = False - self.errmsg = "Unmatchable token" - - def parseImpl(self, instring, loc, doActions=True): - raise ParseException(instring, loc, self.errmsg, self) - - -class Literal(Token): - """ - Token to exactly match a specified string. 
- - Example:: - - Literal('blah').parse_string('blah') # -> ['blah'] - Literal('blah').parse_string('blahfooblah') # -> ['blah'] - Literal('blah').parse_string('bla') # -> Exception: Expected "blah" - - For case-insensitive matching, use :class:`CaselessLiteral`. - - For keyword matching (force word break before and after the matched string), - use :class:`Keyword` or :class:`CaselessKeyword`. - """ - - def __init__(self, match_string: str = "", *, matchString: str = ""): - super().__init__() - match_string = matchString or match_string - self.match = match_string - self.matchLen = len(match_string) - try: - self.firstMatchChar = match_string[0] - except IndexError: - raise ValueError("null string passed to Literal; use Empty() instead") - self.errmsg = "Expected " + self.name - self.mayReturnEmpty = False - self.mayIndexError = False - - # Performance tuning: modify __class__ to select - # a parseImpl optimized for single-character check - if self.matchLen == 1 and type(self) is Literal: - self.__class__ = _SingleCharLiteral - - def _generateDefaultName(self): - return repr(self.match) - - def parseImpl(self, instring, loc, doActions=True): - if instring[loc] == self.firstMatchChar and instring.startswith( - self.match, loc - ): - return loc + self.matchLen, self.match - raise ParseException(instring, loc, self.errmsg, self) - - -class _SingleCharLiteral(Literal): - def parseImpl(self, instring, loc, doActions=True): - if instring[loc] == self.firstMatchChar: - return loc + 1, self.match - raise ParseException(instring, loc, self.errmsg, self) - - -ParserElement._literalStringClass = Literal - - -class Keyword(Token): - """ - Token to exactly match a specified string as a keyword, that is, - it must be immediately followed by a non-keyword character. Compare - with :class:`Literal`: - - - ``Literal("if")`` will match the leading ``'if'`` in - ``'ifAndOnlyIf'``. - - ``Keyword("if")`` will not; it will only match the leading - ``'if'`` in ``'if x=1'``, or ``'if(y==2)'`` - - Accepts two optional constructor arguments in addition to the - keyword string: - - - ``identChars`` is a string of characters that would be valid - identifier characters, defaulting to all alphanumerics + "_" and - "$" - - ``caseless`` allows case-insensitive matching, default is ``False``. - - Example:: - - Keyword("start").parse_string("start") # -> ['start'] - Keyword("start").parse_string("starting") # -> Exception - - For case-insensitive matching, use :class:`CaselessKeyword`. 
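An illustrative side-by-side of the two behaviors (note ``parse_all=False``, so ``matches`` only tests the leading text)::

    import pyparsing as pp

    print(pp.Literal("if").matches("ifAndOnlyIf", parse_all=False))  # -> True
    print(pp.Keyword("if").matches("ifAndOnlyIf", parse_all=False))  # -> False; 'A' is a keyword char
    print(pp.Keyword("if").matches("if(y == 2)", parse_all=False))   # -> True; '(' is not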
- """ - - DEFAULT_KEYWORD_CHARS = alphanums + "_$" - - def __init__( - self, - match_string: str = "", - ident_chars: typing.Optional[str] = None, - caseless: bool = False, - *, - matchString: str = "", - identChars: typing.Optional[str] = None, - ): - super().__init__() - identChars = identChars or ident_chars - if identChars is None: - identChars = Keyword.DEFAULT_KEYWORD_CHARS - match_string = matchString or match_string - self.match = match_string - self.matchLen = len(match_string) - try: - self.firstMatchChar = match_string[0] - except IndexError: - raise ValueError("null string passed to Keyword; use Empty() instead") - self.errmsg = "Expected {} {}".format(type(self).__name__, self.name) - self.mayReturnEmpty = False - self.mayIndexError = False - self.caseless = caseless - if caseless: - self.caselessmatch = match_string.upper() - identChars = identChars.upper() - self.identChars = set(identChars) - - def _generateDefaultName(self): - return repr(self.match) - - def parseImpl(self, instring, loc, doActions=True): - errmsg = self.errmsg - errloc = loc - if self.caseless: - if instring[loc : loc + self.matchLen].upper() == self.caselessmatch: - if loc == 0 or instring[loc - 1].upper() not in self.identChars: - if ( - loc >= len(instring) - self.matchLen - or instring[loc + self.matchLen].upper() not in self.identChars - ): - return loc + self.matchLen, self.match - else: - # followed by keyword char - errmsg += ", was immediately followed by keyword character" - errloc = loc + self.matchLen - else: - # preceded by keyword char - errmsg += ", keyword was immediately preceded by keyword character" - errloc = loc - 1 - # else no match just raise plain exception - - else: - if ( - instring[loc] == self.firstMatchChar - and self.matchLen == 1 - or instring.startswith(self.match, loc) - ): - if loc == 0 or instring[loc - 1] not in self.identChars: - if ( - loc >= len(instring) - self.matchLen - or instring[loc + self.matchLen] not in self.identChars - ): - return loc + self.matchLen, self.match - else: - # followed by keyword char - errmsg += ( - ", keyword was immediately followed by keyword character" - ) - errloc = loc + self.matchLen - else: - # preceded by keyword char - errmsg += ", keyword was immediately preceded by keyword character" - errloc = loc - 1 - # else no match just raise plain exception - - raise ParseException(instring, errloc, errmsg, self) - - @staticmethod - def set_default_keyword_chars(chars) -> None: - """ - Overrides the default characters used by :class:`Keyword` expressions. - """ - Keyword.DEFAULT_KEYWORD_CHARS = chars - - setDefaultKeywordChars = set_default_keyword_chars - - -class CaselessLiteral(Literal): - """ - Token to match a specified string, ignoring case of letters. - Note: the matched results will always be in the case of the given - match string, NOT the case of the input text. - - Example:: - - CaselessLiteral("CMD")[1, ...].parse_string("cmd CMD Cmd10") - # -> ['CMD', 'CMD', 'CMD'] - - (Contrast with example for :class:`CaselessKeyword`.) - """ - - def __init__(self, match_string: str = "", *, matchString: str = ""): - match_string = matchString or match_string - super().__init__(match_string.upper()) - # Preserve the defining literal. 
- self.returnString = match_string - self.errmsg = "Expected " + self.name - - def parseImpl(self, instring, loc, doActions=True): - if instring[loc : loc + self.matchLen].upper() == self.match: - return loc + self.matchLen, self.returnString - raise ParseException(instring, loc, self.errmsg, self) - - -class CaselessKeyword(Keyword): - """ - Caseless version of :class:`Keyword`. - - Example:: - - CaselessKeyword("CMD")[1, ...].parse_string("cmd CMD Cmd10") - # -> ['CMD', 'CMD'] - - (Contrast with example for :class:`CaselessLiteral`.) - """ - - def __init__( - self, - match_string: str = "", - ident_chars: typing.Optional[str] = None, - *, - matchString: str = "", - identChars: typing.Optional[str] = None, - ): - identChars = identChars or ident_chars - match_string = matchString or match_string - super().__init__(match_string, identChars, caseless=True) - - -class CloseMatch(Token): - """A variation on :class:`Literal` which matches "close" matches, - that is, strings with at most 'n' mismatching characters. - :class:`CloseMatch` takes parameters: - - - ``match_string`` - string to be matched - - ``caseless`` - a boolean indicating whether to ignore casing when comparing characters - - ``max_mismatches`` - (``default=1``) maximum number of - mismatches allowed to count as a match - - The results from a successful parse will contain the matched text - from the input string and the following named results: - - - ``mismatches`` - a list of the positions within the - match_string where mismatches were found - - ``original`` - the original match_string used to compare - against the input string - - If ``mismatches`` is an empty list, then the match was an exact - match. - - Example:: - - patt = CloseMatch("ATCATCGAATGGA") - patt.parse_string("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']}) - patt.parse_string("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1) - - # exact match - patt.parse_string("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']}) - - # close match allowing up to 2 mismatches - patt = CloseMatch("ATCATCGAATGGA", max_mismatches=2) - patt.parse_string("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']}) - """ - - def __init__( - self, - match_string: str, - max_mismatches: int = None, - *, - maxMismatches: int = 1, - caseless=False, - ): - maxMismatches = max_mismatches if max_mismatches is not None else maxMismatches - super().__init__() - self.match_string = match_string - self.maxMismatches = maxMismatches - self.errmsg = "Expected {!r} (with up to {} mismatches)".format( - self.match_string, self.maxMismatches - ) - self.caseless = caseless - self.mayIndexError = False - self.mayReturnEmpty = False - - def _generateDefaultName(self): - return "{}:{!r}".format(type(self).__name__, self.match_string) - - def parseImpl(self, instring, loc, doActions=True): - start = loc - instrlen = len(instring) - maxloc = start + len(self.match_string) - - if maxloc <= instrlen: - match_string = self.match_string - match_stringloc = 0 - mismatches = [] - maxMismatches = self.maxMismatches - - for match_stringloc, s_m in enumerate( - zip(instring[loc:maxloc], match_string) - ): - src, mat = s_m - if self.caseless: - src, mat = src.lower(), mat.lower() - - if src != mat: - mismatches.append(match_stringloc) - if len(mismatches) > maxMismatches: - break - else: - loc = start + match_stringloc + 1 - 
results = ParseResults([instring[start:loc]]) - results["original"] = match_string - results["mismatches"] = mismatches - return loc, results - - raise ParseException(instring, loc, self.errmsg, self) - - -class Word(Token): - """Token for matching words composed of allowed character sets. - Parameters: - - ``init_chars`` - string of all characters that should be used to - match as a word; "ABC" will match "AAA", "ABAB", "CBAC", etc.; - if ``body_chars`` is also specified, then this is the string of - initial characters - - ``body_chars`` - string of characters that - can be used for matching after a matched initial character as - given in ``init_chars``; if omitted, same as the initial characters - (default=``None``) - - ``min`` - minimum number of characters to match (default=1) - - ``max`` - maximum number of characters to match (default=0) - - ``exact`` - exact number of characters to match (default=0) - - ``as_keyword`` - match as a keyword (default=``False``) - - ``exclude_chars`` - characters that might be - found in the input ``body_chars`` string but which should not be - accepted for matching ;useful to define a word of all - printables except for one or two characters, for instance - (default=``None``) - - :class:`srange` is useful for defining custom character set strings - for defining :class:`Word` expressions, using range notation from - regular expression character sets. - - A common mistake is to use :class:`Word` to match a specific literal - string, as in ``Word("Address")``. Remember that :class:`Word` - uses the string argument to define *sets* of matchable characters. - This expression would match "Add", "AAA", "dAred", or any other word - made up of the characters 'A', 'd', 'r', 'e', and 's'. To match an - exact literal string, use :class:`Literal` or :class:`Keyword`. - - pyparsing includes helper strings for building Words: - - - :class:`alphas` - - :class:`nums` - - :class:`alphanums` - - :class:`hexnums` - - :class:`alphas8bit` (alphabetic characters in ASCII range 128-255 - - accented, tilded, umlauted, etc.) - - :class:`punc8bit` (non-alphabetic characters in ASCII range - 128-255 - currency, symbols, superscripts, diacriticals, etc.) - - :class:`printables` (any non-whitespace character) - - ``alphas``, ``nums``, and ``printables`` are also defined in several - Unicode sets - see :class:`pyparsing_unicode``. 
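For instance, a sketch using one of those Unicode ranges::

    import pyparsing as pp

    greek_word = pp.Word(pp.pyparsing_unicode.Greek.alphas)
    print(greek_word.parse_string("αβγ δεζ"))   # -> ['αβγ']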
- - Example:: - - # a word composed of digits - integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9")) - - # a word with a leading capital, and zero or more lowercase - capital_word = Word(alphas.upper(), alphas.lower()) - - # hostnames are alphanumeric, with leading alpha, and '-' - hostname = Word(alphas, alphanums + '-') - - # roman numeral (not a strict parser, accepts invalid mix of characters) - roman = Word("IVXLCDM") - - # any string of non-whitespace characters, except for ',' - csv_value = Word(printables, exclude_chars=",") - """ - - def __init__( - self, - init_chars: str = "", - body_chars: typing.Optional[str] = None, - min: int = 1, - max: int = 0, - exact: int = 0, - as_keyword: bool = False, - exclude_chars: typing.Optional[str] = None, - *, - initChars: typing.Optional[str] = None, - bodyChars: typing.Optional[str] = None, - asKeyword: bool = False, - excludeChars: typing.Optional[str] = None, - ): - initChars = initChars or init_chars - bodyChars = bodyChars or body_chars - asKeyword = asKeyword or as_keyword - excludeChars = excludeChars or exclude_chars - super().__init__() - if not initChars: - raise ValueError( - "invalid {}, initChars cannot be empty string".format( - type(self).__name__ - ) - ) - - initChars = set(initChars) - self.initChars = initChars - if excludeChars: - excludeChars = set(excludeChars) - initChars -= excludeChars - if bodyChars: - bodyChars = set(bodyChars) - excludeChars - self.initCharsOrig = "".join(sorted(initChars)) - - if bodyChars: - self.bodyCharsOrig = "".join(sorted(bodyChars)) - self.bodyChars = set(bodyChars) - else: - self.bodyCharsOrig = "".join(sorted(initChars)) - self.bodyChars = set(initChars) - - self.maxSpecified = max > 0 - - if min < 1: - raise ValueError( - "cannot specify a minimum length < 1; use Opt(Word()) if zero-length word is permitted" - ) - - self.minLen = min - - if max > 0: - self.maxLen = max - else: - self.maxLen = _MAX_INT - - if exact > 0: - self.maxLen = exact - self.minLen = exact - - self.errmsg = "Expected " + self.name - self.mayIndexError = False - self.asKeyword = asKeyword - - # see if we can make a regex for this Word - if " " not in self.initChars | self.bodyChars and (min == 1 and exact == 0): - if self.bodyChars == self.initChars: - if max == 0: - repeat = "+" - elif max == 1: - repeat = "" - else: - repeat = "{{{},{}}}".format( - self.minLen, "" if self.maxLen == _MAX_INT else self.maxLen - ) - self.reString = "[{}]{}".format( - _collapse_string_to_ranges(self.initChars), - repeat, - ) - elif len(self.initChars) == 1: - if max == 0: - repeat = "*" - else: - repeat = "{{0,{}}}".format(max - 1) - self.reString = "{}[{}]{}".format( - re.escape(self.initCharsOrig), - _collapse_string_to_ranges(self.bodyChars), - repeat, - ) - else: - if max == 0: - repeat = "*" - elif max == 2: - repeat = "" - else: - repeat = "{{0,{}}}".format(max - 1) - self.reString = "[{}][{}]{}".format( - _collapse_string_to_ranges(self.initChars), - _collapse_string_to_ranges(self.bodyChars), - repeat, - ) - if self.asKeyword: - self.reString = r"\b" + self.reString + r"\b" - - try: - self.re = re.compile(self.reString) - except re.error: - self.re = None - else: - self.re_match = self.re.match - self.__class__ = _WordRegex - - def _generateDefaultName(self): - def charsAsStr(s): - max_repr_len = 16 - s = _collapse_string_to_ranges(s, re_escape=False) - if len(s) > max_repr_len: - return s[: max_repr_len - 3] + "..." 
- else: - return s - - if self.initChars != self.bodyChars: - base = "W:({}, {})".format( - charsAsStr(self.initChars), charsAsStr(self.bodyChars) - ) - else: - base = "W:({})".format(charsAsStr(self.initChars)) - - # add length specification - if self.minLen > 1 or self.maxLen != _MAX_INT: - if self.minLen == self.maxLen: - if self.minLen == 1: - return base[2:] - else: - return base + "{{{}}}".format(self.minLen) - elif self.maxLen == _MAX_INT: - return base + "{{{},...}}".format(self.minLen) - else: - return base + "{{{},{}}}".format(self.minLen, self.maxLen) - return base - - def parseImpl(self, instring, loc, doActions=True): - if instring[loc] not in self.initChars: - raise ParseException(instring, loc, self.errmsg, self) - - start = loc - loc += 1 - instrlen = len(instring) - bodychars = self.bodyChars - maxloc = start + self.maxLen - maxloc = min(maxloc, instrlen) - while loc < maxloc and instring[loc] in bodychars: - loc += 1 - - throwException = False - if loc - start < self.minLen: - throwException = True - elif self.maxSpecified and loc < instrlen and instring[loc] in bodychars: - throwException = True - elif self.asKeyword: - if ( - start > 0 - and instring[start - 1] in bodychars - or loc < instrlen - and instring[loc] in bodychars - ): - throwException = True - - if throwException: - raise ParseException(instring, loc, self.errmsg, self) - - return loc, instring[start:loc] - - -class _WordRegex(Word): - def parseImpl(self, instring, loc, doActions=True): - result = self.re_match(instring, loc) - if not result: - raise ParseException(instring, loc, self.errmsg, self) - - loc = result.end() - return loc, result.group() - - -class Char(_WordRegex): - """A short-cut class for defining :class:`Word` ``(characters, exact=1)``, - when defining a match of any single character in a string of - characters. - """ - - def __init__( - self, - charset: str, - as_keyword: bool = False, - exclude_chars: typing.Optional[str] = None, - *, - asKeyword: bool = False, - excludeChars: typing.Optional[str] = None, - ): - asKeyword = asKeyword or as_keyword - excludeChars = excludeChars or exclude_chars - super().__init__( - charset, exact=1, asKeyword=asKeyword, excludeChars=excludeChars - ) - self.reString = "[{}]".format(_collapse_string_to_ranges(self.initChars)) - if asKeyword: - self.reString = r"\b{}\b".format(self.reString) - self.re = re.compile(self.reString) - self.re_match = self.re.match - - -class Regex(Token): - r"""Token for matching strings that match a given regular - expression. Defined with string specifying the regular expression in - a form recognized by the stdlib Python `re module `_. - If the given regex contains named groups (defined using ``(?P...)``), - these will be preserved as named :class:`ParseResults`. - - If instead of the Python stdlib ``re`` module you wish to use a different RE module - (such as the ``regex`` module), you can do so by building your ``Regex`` object with - a compiled RE that was compiled using ``regex``. 
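As a quick sketch, a pattern pre-compiled with the stdlib ``re`` module works the same way::

    import re
    import pyparsing as pp

    hex_color = pp.Regex(re.compile(r"#[0-9a-fA-F]{6}"))
    print(hex_color.parse_string("#ff00aa"))   # -> ['#ff00aa']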
- - Example:: - - realnum = Regex(r"[+-]?\d+\.\d*") - # ref: https://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression - roman = Regex(r"M{0,4}(CM|CD|D?{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})") - - # named fields in a regex will be returned as named results - date = Regex(r'(?P\d{4})-(?P\d\d?)-(?P\d\d?)') - - # the Regex class will accept re's compiled using the regex module - import regex - parser = pp.Regex(regex.compile(r'[0-9]')) - """ - - def __init__( - self, - pattern: Any, - flags: Union[re.RegexFlag, int] = 0, - as_group_list: bool = False, - as_match: bool = False, - *, - asGroupList: bool = False, - asMatch: bool = False, - ): - """The parameters ``pattern`` and ``flags`` are passed - to the ``re.compile()`` function as-is. See the Python - `re module `_ module for an - explanation of the acceptable patterns and flags. - """ - super().__init__() - asGroupList = asGroupList or as_group_list - asMatch = asMatch or as_match - - if isinstance(pattern, str_type): - if not pattern: - raise ValueError("null string passed to Regex; use Empty() instead") - - self._re = None - self.reString = self.pattern = pattern - self.flags = flags - - elif hasattr(pattern, "pattern") and hasattr(pattern, "match"): - self._re = pattern - self.pattern = self.reString = pattern.pattern - self.flags = flags - - else: - raise TypeError( - "Regex may only be constructed with a string or a compiled RE object" - ) - - self.errmsg = "Expected " + self.name - self.mayIndexError = False - self.asGroupList = asGroupList - self.asMatch = asMatch - if self.asGroupList: - self.parseImpl = self.parseImplAsGroupList - if self.asMatch: - self.parseImpl = self.parseImplAsMatch - - @cached_property - def re(self): - if self._re: - return self._re - else: - try: - return re.compile(self.pattern, self.flags) - except re.error: - raise ValueError( - "invalid pattern ({!r}) passed to Regex".format(self.pattern) - ) - - @cached_property - def re_match(self): - return self.re.match - - @cached_property - def mayReturnEmpty(self): - return self.re_match("") is not None - - def _generateDefaultName(self): - return "Re:({})".format(repr(self.pattern).replace("\\\\", "\\")) - - def parseImpl(self, instring, loc, doActions=True): - result = self.re_match(instring, loc) - if not result: - raise ParseException(instring, loc, self.errmsg, self) - - loc = result.end() - ret = ParseResults(result.group()) - d = result.groupdict() - if d: - for k, v in d.items(): - ret[k] = v - return loc, ret - - def parseImplAsGroupList(self, instring, loc, doActions=True): - result = self.re_match(instring, loc) - if not result: - raise ParseException(instring, loc, self.errmsg, self) - - loc = result.end() - ret = result.groups() - return loc, ret - - def parseImplAsMatch(self, instring, loc, doActions=True): - result = self.re_match(instring, loc) - if not result: - raise ParseException(instring, loc, self.errmsg, self) - - loc = result.end() - ret = result - return loc, ret - - def sub(self, repl: str) -> ParserElement: - r""" - Return :class:`Regex` with an attached parse action to transform the parsed - result as if called using `re.sub(expr, repl, string) `_. - - Example:: - - make_html = Regex(r"(\w+):(.*?):").sub(r"<\1>\2") - print(make_html.transform_string("h1:main title:")) - # prints "
<h1>main title</h1>
" - """ - if self.asGroupList: - raise TypeError("cannot use sub() with Regex(asGroupList=True)") - - if self.asMatch and callable(repl): - raise TypeError("cannot use sub() with a callable with Regex(asMatch=True)") - - if self.asMatch: - - def pa(tokens): - return tokens[0].expand(repl) - - else: - - def pa(tokens): - return self.re.sub(repl, tokens[0]) - - return self.add_parse_action(pa) - - -class QuotedString(Token): - r""" - Token for matching strings that are delimited by quoting characters. - - Defined with the following parameters: - - - ``quote_char`` - string of one or more characters defining the - quote delimiting string - - ``esc_char`` - character to re_escape quotes, typically backslash - (default= ``None``) - - ``esc_quote`` - special quote sequence to re_escape an embedded quote - string (such as SQL's ``""`` to re_escape an embedded ``"``) - (default= ``None``) - - ``multiline`` - boolean indicating whether quotes can span - multiple lines (default= ``False``) - - ``unquote_results`` - boolean indicating whether the matched text - should be unquoted (default= ``True``) - - ``end_quote_char`` - string of one or more characters defining the - end of the quote delimited string (default= ``None`` => same as - quote_char) - - ``convert_whitespace_escapes`` - convert escaped whitespace - (``'\t'``, ``'\n'``, etc.) to actual whitespace - (default= ``True``) - - Example:: - - qs = QuotedString('"') - print(qs.search_string('lsjdf "This is the quote" sldjf')) - complex_qs = QuotedString('{{', end_quote_char='}}') - print(complex_qs.search_string('lsjdf {{This is the "quote"}} sldjf')) - sql_qs = QuotedString('"', esc_quote='""') - print(sql_qs.search_string('lsjdf "This is the quote with ""embedded"" quotes" sldjf')) - - prints:: - - [['This is the quote']] - [['This is the "quote"']] - [['This is the quote with "embedded" quotes']] - """ - ws_map = ((r"\t", "\t"), (r"\n", "\n"), (r"\f", "\f"), (r"\r", "\r")) - - def __init__( - self, - quote_char: str = "", - esc_char: typing.Optional[str] = None, - esc_quote: typing.Optional[str] = None, - multiline: bool = False, - unquote_results: bool = True, - end_quote_char: typing.Optional[str] = None, - convert_whitespace_escapes: bool = True, - *, - quoteChar: str = "", - escChar: typing.Optional[str] = None, - escQuote: typing.Optional[str] = None, - unquoteResults: bool = True, - endQuoteChar: typing.Optional[str] = None, - convertWhitespaceEscapes: bool = True, - ): - super().__init__() - escChar = escChar or esc_char - escQuote = escQuote or esc_quote - unquoteResults = unquoteResults and unquote_results - endQuoteChar = endQuoteChar or end_quote_char - convertWhitespaceEscapes = ( - convertWhitespaceEscapes and convert_whitespace_escapes - ) - quote_char = quoteChar or quote_char - - # remove white space from quote chars - wont work anyway - quote_char = quote_char.strip() - if not quote_char: - raise ValueError("quote_char cannot be the empty string") - - if endQuoteChar is None: - endQuoteChar = quote_char - else: - endQuoteChar = endQuoteChar.strip() - if not endQuoteChar: - raise ValueError("endQuoteChar cannot be the empty string") - - self.quoteChar = quote_char - self.quoteCharLen = len(quote_char) - self.firstQuoteChar = quote_char[0] - self.endQuoteChar = endQuoteChar - self.endQuoteCharLen = len(endQuoteChar) - self.escChar = escChar - self.escQuote = escQuote - self.unquoteResults = unquoteResults - self.convertWhitespaceEscapes = convertWhitespaceEscapes - - sep = "" - inner_pattern = "" - - if escQuote: - 
inner_pattern += r"{}(?:{})".format(sep, re.escape(escQuote)) - sep = "|" - - if escChar: - inner_pattern += r"{}(?:{}.)".format(sep, re.escape(escChar)) - sep = "|" - self.escCharReplacePattern = re.escape(self.escChar) + "(.)" - - if len(self.endQuoteChar) > 1: - inner_pattern += ( - "{}(?:".format(sep) - + "|".join( - "(?:{}(?!{}))".format( - re.escape(self.endQuoteChar[:i]), - re.escape(self.endQuoteChar[i:]), - ) - for i in range(len(self.endQuoteChar) - 1, 0, -1) - ) - + ")" - ) - sep = "|" - - if multiline: - self.flags = re.MULTILINE | re.DOTALL - inner_pattern += r"{}(?:[^{}{}])".format( - sep, - _escape_regex_range_chars(self.endQuoteChar[0]), - (_escape_regex_range_chars(escChar) if escChar is not None else ""), - ) - else: - self.flags = 0 - inner_pattern += r"{}(?:[^{}\n\r{}])".format( - sep, - _escape_regex_range_chars(self.endQuoteChar[0]), - (_escape_regex_range_chars(escChar) if escChar is not None else ""), - ) - - self.pattern = "".join( - [ - re.escape(self.quoteChar), - "(?:", - inner_pattern, - ")*", - re.escape(self.endQuoteChar), - ] - ) - - try: - self.re = re.compile(self.pattern, self.flags) - self.reString = self.pattern - self.re_match = self.re.match - except re.error: - raise ValueError( - "invalid pattern {!r} passed to Regex".format(self.pattern) - ) - - self.errmsg = "Expected " + self.name - self.mayIndexError = False - self.mayReturnEmpty = True - - def _generateDefaultName(self): - if self.quoteChar == self.endQuoteChar and isinstance(self.quoteChar, str_type): - return "string enclosed in {!r}".format(self.quoteChar) - - return "quoted string, starting with {} ending with {}".format( - self.quoteChar, self.endQuoteChar - ) - - def parseImpl(self, instring, loc, doActions=True): - result = ( - instring[loc] == self.firstQuoteChar - and self.re_match(instring, loc) - or None - ) - if not result: - raise ParseException(instring, loc, self.errmsg, self) - - loc = result.end() - ret = result.group() - - if self.unquoteResults: - - # strip off quotes - ret = ret[self.quoteCharLen : -self.endQuoteCharLen] - - if isinstance(ret, str_type): - # replace escaped whitespace - if "\\" in ret and self.convertWhitespaceEscapes: - for wslit, wschar in self.ws_map: - ret = ret.replace(wslit, wschar) - - # replace escaped characters - if self.escChar: - ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret) - - # replace escaped quotes - if self.escQuote: - ret = ret.replace(self.escQuote, self.endQuoteChar) - - return loc, ret - - -class CharsNotIn(Token): - """Token for matching words composed of characters *not* in a given - set (will include whitespace in matched characters if not listed in - the provided exclusion set - see example). Defined with string - containing all disallowed characters, and an optional minimum, - maximum, and/or exact length. The default value for ``min`` is - 1 (a minimum value < 1 is not valid); the default values for - ``max`` and ``exact`` are 0, meaning no maximum or exact - length restriction. 
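A sketch emphasizing that whitespace is kept unless it is listed in the exclusion set::

    import pyparsing as pp

    cell = pp.CharsNotIn(",")
    print(cell.parse_string("s12 34, rest"))   # -> ['s12 34']; the embedded space is retained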
- - Example:: - - # define a comma-separated-value as anything that is not a ',' - csv_value = CharsNotIn(',') - print(delimited_list(csv_value).parse_string("dkls,lsdkjf,s12 34,@!#,213")) - - prints:: - - ['dkls', 'lsdkjf', 's12 34', '@!#', '213'] - """ - - def __init__( - self, - not_chars: str = "", - min: int = 1, - max: int = 0, - exact: int = 0, - *, - notChars: str = "", - ): - super().__init__() - self.skipWhitespace = False - self.notChars = not_chars or notChars - self.notCharsSet = set(self.notChars) - - if min < 1: - raise ValueError( - "cannot specify a minimum length < 1; use " - "Opt(CharsNotIn()) if zero-length char group is permitted" - ) - - self.minLen = min - - if max > 0: - self.maxLen = max - else: - self.maxLen = _MAX_INT - - if exact > 0: - self.maxLen = exact - self.minLen = exact - - self.errmsg = "Expected " + self.name - self.mayReturnEmpty = self.minLen == 0 - self.mayIndexError = False - - def _generateDefaultName(self): - not_chars_str = _collapse_string_to_ranges(self.notChars) - if len(not_chars_str) > 16: - return "!W:({}...)".format(self.notChars[: 16 - 3]) - else: - return "!W:({})".format(self.notChars) - - def parseImpl(self, instring, loc, doActions=True): - notchars = self.notCharsSet - if instring[loc] in notchars: - raise ParseException(instring, loc, self.errmsg, self) - - start = loc - loc += 1 - maxlen = min(start + self.maxLen, len(instring)) - while loc < maxlen and instring[loc] not in notchars: - loc += 1 - - if loc - start < self.minLen: - raise ParseException(instring, loc, self.errmsg, self) - - return loc, instring[start:loc] - - -class White(Token): - """Special matching class for matching whitespace. Normally, - whitespace is ignored by pyparsing grammars. This class is included - when some whitespace structures are significant. Define with - a string containing the whitespace characters to be matched; default - is ``" \\t\\r\\n"``. Also takes optional ``min``, - ``max``, and ``exact`` arguments, as defined for the - :class:`Word` class. 
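A hedged sketch of treating leading spaces as significant::

    import pyparsing as pp

    indented = pp.White(" ", exact=4) + pp.Word(pp.alphas)
    print(indented.parse_string("    body"))   # -> ['    ', 'body']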
- """ - - whiteStrs = { - " ": "", - "\t": "", - "\n": "", - "\r": "", - "\f": "", - "\u00A0": "", - "\u1680": "", - "\u180E": "", - "\u2000": "", - "\u2001": "", - "\u2002": "", - "\u2003": "", - "\u2004": "", - "\u2005": "", - "\u2006": "", - "\u2007": "", - "\u2008": "", - "\u2009": "", - "\u200A": "", - "\u200B": "", - "\u202F": "", - "\u205F": "", - "\u3000": "", - } - - def __init__(self, ws: str = " \t\r\n", min: int = 1, max: int = 0, exact: int = 0): - super().__init__() - self.matchWhite = ws - self.set_whitespace_chars( - "".join(c for c in self.whiteStrs if c not in self.matchWhite), - copy_defaults=True, - ) - # self.leave_whitespace() - self.mayReturnEmpty = True - self.errmsg = "Expected " + self.name - - self.minLen = min - - if max > 0: - self.maxLen = max - else: - self.maxLen = _MAX_INT - - if exact > 0: - self.maxLen = exact - self.minLen = exact - - def _generateDefaultName(self): - return "".join(White.whiteStrs[c] for c in self.matchWhite) - - def parseImpl(self, instring, loc, doActions=True): - if instring[loc] not in self.matchWhite: - raise ParseException(instring, loc, self.errmsg, self) - start = loc - loc += 1 - maxloc = start + self.maxLen - maxloc = min(maxloc, len(instring)) - while loc < maxloc and instring[loc] in self.matchWhite: - loc += 1 - - if loc - start < self.minLen: - raise ParseException(instring, loc, self.errmsg, self) - - return loc, instring[start:loc] - - -class PositionToken(Token): - def __init__(self): - super().__init__() - self.mayReturnEmpty = True - self.mayIndexError = False - - -class GoToColumn(PositionToken): - """Token to advance to a specific column of input text; useful for - tabular report scraping. - """ - - def __init__(self, colno: int): - super().__init__() - self.col = colno - - def preParse(self, instring, loc): - if col(loc, instring) != self.col: - instrlen = len(instring) - if self.ignoreExprs: - loc = self._skipIgnorables(instring, loc) - while ( - loc < instrlen - and instring[loc].isspace() - and col(loc, instring) != self.col - ): - loc += 1 - return loc - - def parseImpl(self, instring, loc, doActions=True): - thiscol = col(loc, instring) - if thiscol > self.col: - raise ParseException(instring, loc, "Text not in expected column", self) - newloc = loc + self.col - thiscol - ret = instring[loc:newloc] - return newloc, ret - - -class LineStart(PositionToken): - r"""Matches if current position is at the beginning of a line within - the parse string - - Example:: - - test = '''\ - AAA this line - AAA and this line - AAA but not this one - B AAA and definitely not this one - ''' - - for t in (LineStart() + 'AAA' + restOfLine).search_string(test): - print(t) - - prints:: - - ['AAA', ' this line'] - ['AAA', ' and this line'] - - """ - - def __init__(self): - super().__init__() - self.leave_whitespace() - self.orig_whiteChars = set() | self.whiteChars - self.whiteChars.discard("\n") - self.skipper = Empty().set_whitespace_chars(self.whiteChars) - self.errmsg = "Expected start of line" - - def preParse(self, instring, loc): - if loc == 0: - return loc - else: - ret = self.skipper.preParse(instring, loc) - if "\n" in self.orig_whiteChars: - while instring[ret : ret + 1] == "\n": - ret = self.skipper.preParse(instring, ret + 1) - return ret - - def parseImpl(self, instring, loc, doActions=True): - if col(loc, instring) == 1: - return loc, [] - raise ParseException(instring, loc, self.errmsg, self) - - -class LineEnd(PositionToken): - """Matches if current position is at the end of a line within the - parse string - """ - - 
def __init__(self): - super().__init__() - self.whiteChars.discard("\n") - self.set_whitespace_chars(self.whiteChars, copy_defaults=False) - self.errmsg = "Expected end of line" - - def parseImpl(self, instring, loc, doActions=True): - if loc < len(instring): - if instring[loc] == "\n": - return loc + 1, "\n" - else: - raise ParseException(instring, loc, self.errmsg, self) - elif loc == len(instring): - return loc + 1, [] - else: - raise ParseException(instring, loc, self.errmsg, self) - - -class StringStart(PositionToken): - """Matches if current position is at the beginning of the parse - string - """ - - def __init__(self): - super().__init__() - self.errmsg = "Expected start of text" - - def parseImpl(self, instring, loc, doActions=True): - if loc != 0: - # see if entire string up to here is just whitespace and ignoreables - if loc != self.preParse(instring, 0): - raise ParseException(instring, loc, self.errmsg, self) - return loc, [] - - -class StringEnd(PositionToken): - """ - Matches if current position is at the end of the parse string - """ - - def __init__(self): - super().__init__() - self.errmsg = "Expected end of text" - - def parseImpl(self, instring, loc, doActions=True): - if loc < len(instring): - raise ParseException(instring, loc, self.errmsg, self) - elif loc == len(instring): - return loc + 1, [] - elif loc > len(instring): - return loc, [] - else: - raise ParseException(instring, loc, self.errmsg, self) - - -class WordStart(PositionToken): - """Matches if the current position is at the beginning of a - :class:`Word`, and is not preceded by any character in a given - set of ``word_chars`` (default= ``printables``). To emulate the - ``\b`` behavior of regular expressions, use - ``WordStart(alphanums)``. ``WordStart`` will also match at - the beginning of the string being parsed, or at the beginning of - a line. - """ - - def __init__(self, word_chars: str = printables, *, wordChars: str = printables): - wordChars = word_chars if wordChars == printables else wordChars - super().__init__() - self.wordChars = set(wordChars) - self.errmsg = "Not at the start of a word" - - def parseImpl(self, instring, loc, doActions=True): - if loc != 0: - if ( - instring[loc - 1] in self.wordChars - or instring[loc] not in self.wordChars - ): - raise ParseException(instring, loc, self.errmsg, self) - return loc, [] - - -class WordEnd(PositionToken): - """Matches if the current position is at the end of a :class:`Word`, - and is not followed by any character in a given set of ``word_chars`` - (default= ``printables``). To emulate the ``\b`` behavior of - regular expressions, use ``WordEnd(alphanums)``. ``WordEnd`` - will also match at the end of the string being parsed, or at the end - of a line. - """ - - def __init__(self, word_chars: str = printables, *, wordChars: str = printables): - wordChars = word_chars if wordChars == printables else wordChars - super().__init__() - self.wordChars = set(wordChars) - self.skipWhitespace = False - self.errmsg = "Not at the end of a word" - - def parseImpl(self, instring, loc, doActions=True): - instrlen = len(instring) - if instrlen > 0 and loc < instrlen: - if ( - instring[loc] in self.wordChars - or instring[loc - 1] not in self.wordChars - ): - raise ParseException(instring, loc, self.errmsg, self) - return loc, [] - - -class ParseExpression(ParserElement): - """Abstract subclass of ParserElement, for combining and - post-processing parsed tokens. 
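The position tokens above never consume text; for example, ``WordStart``/``WordEnd`` emulate the regex ``\b`` word boundary. A sketch, assuming pyparsing 3.x imported as ``pp``::

    import pyparsing as pp

    kw = pp.WordStart(pp.alphanums) + pp.Literal("cat") + pp.WordEnd(pp.alphanums)
    # matches only the standalone 'cat's, skipping 'catalog' and 'bobcat'
    print(kw.search_string("cat catalog bobcat the cat"))  # -> [['cat'], ['cat']]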
- """ - - def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = False): - super().__init__(savelist) - self.exprs: List[ParserElement] - if isinstance(exprs, _generatorType): - exprs = list(exprs) - - if isinstance(exprs, str_type): - self.exprs = [self._literalStringClass(exprs)] - elif isinstance(exprs, ParserElement): - self.exprs = [exprs] - elif isinstance(exprs, Iterable): - exprs = list(exprs) - # if sequence of strings provided, wrap with Literal - if any(isinstance(expr, str_type) for expr in exprs): - exprs = ( - self._literalStringClass(e) if isinstance(e, str_type) else e - for e in exprs - ) - self.exprs = list(exprs) - else: - try: - self.exprs = list(exprs) - except TypeError: - self.exprs = [exprs] - self.callPreparse = False - - def recurse(self) -> Sequence[ParserElement]: - return self.exprs[:] - - def append(self, other) -> ParserElement: - self.exprs.append(other) - self._defaultName = None - return self - - def leave_whitespace(self, recursive: bool = True) -> ParserElement: - """ - Extends ``leave_whitespace`` defined in base class, and also invokes ``leave_whitespace`` on - all contained expressions. - """ - super().leave_whitespace(recursive) - - if recursive: - self.exprs = [e.copy() for e in self.exprs] - for e in self.exprs: - e.leave_whitespace(recursive) - return self - - def ignore_whitespace(self, recursive: bool = True) -> ParserElement: - """ - Extends ``ignore_whitespace`` defined in base class, and also invokes ``leave_whitespace`` on - all contained expressions. - """ - super().ignore_whitespace(recursive) - if recursive: - self.exprs = [e.copy() for e in self.exprs] - for e in self.exprs: - e.ignore_whitespace(recursive) - return self - - def ignore(self, other) -> ParserElement: - if isinstance(other, Suppress): - if other not in self.ignoreExprs: - super().ignore(other) - for e in self.exprs: - e.ignore(self.ignoreExprs[-1]) - else: - super().ignore(other) - for e in self.exprs: - e.ignore(self.ignoreExprs[-1]) - return self - - def _generateDefaultName(self): - return "{}:({})".format(self.__class__.__name__, str(self.exprs)) - - def streamline(self) -> ParserElement: - if self.streamlined: - return self - - super().streamline() - - for e in self.exprs: - e.streamline() - - # collapse nested :class:`And`'s of the form ``And(And(And(a, b), c), d)`` to ``And(a, b, c, d)`` - # but only if there are no parse actions or resultsNames on the nested And's - # (likewise for :class:`Or`'s and :class:`MatchFirst`'s) - if len(self.exprs) == 2: - other = self.exprs[0] - if ( - isinstance(other, self.__class__) - and not other.parseAction - and other.resultsName is None - and not other.debug - ): - self.exprs = other.exprs[:] + [self.exprs[1]] - self._defaultName = None - self.mayReturnEmpty |= other.mayReturnEmpty - self.mayIndexError |= other.mayIndexError - - other = self.exprs[-1] - if ( - isinstance(other, self.__class__) - and not other.parseAction - and other.resultsName is None - and not other.debug - ): - self.exprs = self.exprs[:-1] + other.exprs[:] - self._defaultName = None - self.mayReturnEmpty |= other.mayReturnEmpty - self.mayIndexError |= other.mayIndexError - - self.errmsg = "Expected " + str(self) - - return self - - def validate(self, validateTrace=None) -> None: - tmp = (validateTrace if validateTrace is not None else [])[:] + [self] - for e in self.exprs: - e.validate(tmp) - self._checkRecursion([]) - - def copy(self) -> ParserElement: - ret = super().copy() - ret.exprs = [e.copy() for e in self.exprs] - return ret - - 
def _setResultsName(self, name, listAllMatches=False): - if ( - __diag__.warn_ungrouped_named_tokens_in_collection - and Diagnostics.warn_ungrouped_named_tokens_in_collection - not in self.suppress_warnings_ - ): - for e in self.exprs: - if ( - isinstance(e, ParserElement) - and e.resultsName - and Diagnostics.warn_ungrouped_named_tokens_in_collection - not in e.suppress_warnings_ - ): - warnings.warn( - "{}: setting results name {!r} on {} expression " - "collides with {!r} on contained expression".format( - "warn_ungrouped_named_tokens_in_collection", - name, - type(self).__name__, - e.resultsName, - ), - stacklevel=3, - ) - - return super()._setResultsName(name, listAllMatches) - - ignoreWhitespace = ignore_whitespace - leaveWhitespace = leave_whitespace - - -class And(ParseExpression): - """ - Requires all given :class:`ParseExpression` s to be found in the given order. - Expressions may be separated by whitespace. - May be constructed using the ``'+'`` operator. - May also be constructed using the ``'-'`` operator, which will - suppress backtracking. - - Example:: - - integer = Word(nums) - name_expr = Word(alphas)[1, ...] - - expr = And([integer("id"), name_expr("name"), integer("age")]) - # more easily written as: - expr = integer("id") + name_expr("name") + integer("age") - """ - - class _ErrorStop(Empty): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.leave_whitespace() - - def _generateDefaultName(self): - return "-" - - def __init__( - self, exprs_arg: typing.Iterable[ParserElement], savelist: bool = True - ): - exprs: List[ParserElement] = list(exprs_arg) - if exprs and Ellipsis in exprs: - tmp = [] - for i, expr in enumerate(exprs): - if expr is Ellipsis: - if i < len(exprs) - 1: - skipto_arg: ParserElement = (Empty() + exprs[i + 1]).exprs[-1] - tmp.append(SkipTo(skipto_arg)("_skipped*")) - else: - raise Exception( - "cannot construct And with sequence ending in ..." 
- ) - else: - tmp.append(expr) - exprs[:] = tmp - super().__init__(exprs, savelist) - if self.exprs: - self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) - if not isinstance(self.exprs[0], White): - self.set_whitespace_chars( - self.exprs[0].whiteChars, - copy_defaults=self.exprs[0].copyDefaultWhiteChars, - ) - self.skipWhitespace = self.exprs[0].skipWhitespace - else: - self.skipWhitespace = False - else: - self.mayReturnEmpty = True - self.callPreparse = True - - def streamline(self) -> ParserElement: - # collapse any _PendingSkip's - if self.exprs: - if any( - isinstance(e, ParseExpression) - and e.exprs - and isinstance(e.exprs[-1], _PendingSkip) - for e in self.exprs[:-1] - ): - for i, e in enumerate(self.exprs[:-1]): - if e is None: - continue - if ( - isinstance(e, ParseExpression) - and e.exprs - and isinstance(e.exprs[-1], _PendingSkip) - ): - e.exprs[-1] = e.exprs[-1] + self.exprs[i + 1] - self.exprs[i + 1] = None - self.exprs = [e for e in self.exprs if e is not None] - - super().streamline() - - # link any IndentedBlocks to the prior expression - for prev, cur in zip(self.exprs, self.exprs[1:]): - # traverse cur or any first embedded expr of cur looking for an IndentedBlock - # (but watch out for recursive grammar) - seen = set() - while cur: - if id(cur) in seen: - break - seen.add(id(cur)) - if isinstance(cur, IndentedBlock): - prev.add_parse_action( - lambda s, l, t, cur_=cur: setattr( - cur_, "parent_anchor", col(l, s) - ) - ) - break - subs = cur.recurse() - cur = next(iter(subs), None) - - self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) - return self - - def parseImpl(self, instring, loc, doActions=True): - # pass False as callPreParse arg to _parse for first element, since we already - # pre-parsed the string as part of our And pre-parsing - loc, resultlist = self.exprs[0]._parse( - instring, loc, doActions, callPreParse=False - ) - errorStop = False - for e in self.exprs[1:]: - # if isinstance(e, And._ErrorStop): - if type(e) is And._ErrorStop: - errorStop = True - continue - if errorStop: - try: - loc, exprtokens = e._parse(instring, loc, doActions) - except ParseSyntaxException: - raise - except ParseBaseException as pe: - pe.__traceback__ = None - raise ParseSyntaxException._from_exception(pe) - except IndexError: - raise ParseSyntaxException( - instring, len(instring), self.errmsg, self - ) - else: - loc, exprtokens = e._parse(instring, loc, doActions) - if exprtokens or exprtokens.haskeys(): - resultlist += exprtokens - return loc, resultlist - - def __iadd__(self, other): - if isinstance(other, str_type): - other = self._literalStringClass(other) - return self.append(other) # And([self, other]) - - def _checkRecursion(self, parseElementList): - subRecCheckList = parseElementList[:] + [self] - for e in self.exprs: - e._checkRecursion(subRecCheckList) - if not e.mayReturnEmpty: - break - - def _generateDefaultName(self): - inner = " ".join(str(e) for e in self.exprs) - # strip off redundant inner {}'s - while len(inner) > 1 and inner[0 :: len(inner) - 1] == "{}": - inner = inner[1:-1] - return "{" + inner + "}" - - -class Or(ParseExpression): - """Requires that at least one :class:`ParseExpression` is found. If - two expressions match, the expression that matches the longest - string will be used. May be constructed using the ``'^'`` - operator. - - Example:: - - # construct Or using '^' operator - - number = Word(nums) ^ Combine(Word(nums) + '.' 
+ Word(nums)) - print(number.search_string("123 3.1416 789")) - - prints:: - - [['123'], ['3.1416'], ['789']] - """ - - def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = False): - super().__init__(exprs, savelist) - if self.exprs: - self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) - self.skipWhitespace = all(e.skipWhitespace for e in self.exprs) - else: - self.mayReturnEmpty = True - - def streamline(self) -> ParserElement: - super().streamline() - if self.exprs: - self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) - self.saveAsList = any(e.saveAsList for e in self.exprs) - self.skipWhitespace = all( - e.skipWhitespace and not isinstance(e, White) for e in self.exprs - ) - else: - self.saveAsList = False - return self - - def parseImpl(self, instring, loc, doActions=True): - maxExcLoc = -1 - maxException = None - matches = [] - fatals = [] - if all(e.callPreparse for e in self.exprs): - loc = self.preParse(instring, loc) - for e in self.exprs: - try: - loc2 = e.try_parse(instring, loc, raise_fatal=True) - except ParseFatalException as pfe: - pfe.__traceback__ = None - pfe.parserElement = e - fatals.append(pfe) - maxException = None - maxExcLoc = -1 - except ParseException as err: - if not fatals: - err.__traceback__ = None - if err.loc > maxExcLoc: - maxException = err - maxExcLoc = err.loc - except IndexError: - if len(instring) > maxExcLoc: - maxException = ParseException( - instring, len(instring), e.errmsg, self - ) - maxExcLoc = len(instring) - else: - # save match among all matches, to retry longest to shortest - matches.append((loc2, e)) - - if matches: - # re-evaluate all matches in descending order of length of match, in case attached actions - # might change whether or how much they match of the input. 
- matches.sort(key=itemgetter(0), reverse=True) - - if not doActions: - # no further conditions or parse actions to change the selection of - # alternative, so the first match will be the best match - best_expr = matches[0][1] - return best_expr._parse(instring, loc, doActions) - - longest = -1, None - for loc1, expr1 in matches: - if loc1 <= longest[0]: - # already have a longer match than this one will deliver, we are done - return longest - - try: - loc2, toks = expr1._parse(instring, loc, doActions) - except ParseException as err: - err.__traceback__ = None - if err.loc > maxExcLoc: - maxException = err - maxExcLoc = err.loc - else: - if loc2 >= loc1: - return loc2, toks - # didn't match as much as before - elif loc2 > longest[0]: - longest = loc2, toks - - if longest != (-1, None): - return longest - - if fatals: - if len(fatals) > 1: - fatals.sort(key=lambda e: -e.loc) - if fatals[0].loc == fatals[1].loc: - fatals.sort(key=lambda e: (-e.loc, -len(str(e.parserElement)))) - max_fatal = fatals[0] - raise max_fatal - - if maxException is not None: - maxException.msg = self.errmsg - raise maxException - else: - raise ParseException( - instring, loc, "no defined alternatives to match", self - ) - - def __ixor__(self, other): - if isinstance(other, str_type): - other = self._literalStringClass(other) - return self.append(other) # Or([self, other]) - - def _generateDefaultName(self): - return "{" + " ^ ".join(str(e) for e in self.exprs) + "}" - - def _setResultsName(self, name, listAllMatches=False): - if ( - __diag__.warn_multiple_tokens_in_named_alternation - and Diagnostics.warn_multiple_tokens_in_named_alternation - not in self.suppress_warnings_ - ): - if any( - isinstance(e, And) - and Diagnostics.warn_multiple_tokens_in_named_alternation - not in e.suppress_warnings_ - for e in self.exprs - ): - warnings.warn( - "{}: setting results name {!r} on {} expression " - "will return a list of all parsed tokens in an And alternative, " - "in prior versions only the first token was returned; enclose " - "contained argument in Group".format( - "warn_multiple_tokens_in_named_alternation", - name, - type(self).__name__, - ), - stacklevel=3, - ) - - return super()._setResultsName(name, listAllMatches) - - -class MatchFirst(ParseExpression): - """Requires that at least one :class:`ParseExpression` is found. If - more than one expression matches, the first one listed is the one that will - match. May be constructed using the ``'|'`` operator. - - Example:: - - # construct MatchFirst using '|' operator - - # watch the order of expressions to match - number = Word(nums) | Combine(Word(nums) + '.' + Word(nums)) - print(number.search_string("123 3.1416 789")) # Fail! -> [['123'], ['3'], ['1416'], ['789']] - - # put more selective expression first - number = Combine(Word(nums) + '.' 
+ Word(nums)) | Word(nums) - print(number.search_string("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']] - """ - - def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = False): - super().__init__(exprs, savelist) - if self.exprs: - self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) - self.skipWhitespace = all(e.skipWhitespace for e in self.exprs) - else: - self.mayReturnEmpty = True - - def streamline(self) -> ParserElement: - if self.streamlined: - return self - - super().streamline() - if self.exprs: - self.saveAsList = any(e.saveAsList for e in self.exprs) - self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) - self.skipWhitespace = all( - e.skipWhitespace and not isinstance(e, White) for e in self.exprs - ) - else: - self.saveAsList = False - self.mayReturnEmpty = True - return self - - def parseImpl(self, instring, loc, doActions=True): - maxExcLoc = -1 - maxException = None - - for e in self.exprs: - try: - return e._parse( - instring, - loc, - doActions, - ) - except ParseFatalException as pfe: - pfe.__traceback__ = None - pfe.parserElement = e - raise - except ParseException as err: - if err.loc > maxExcLoc: - maxException = err - maxExcLoc = err.loc - except IndexError: - if len(instring) > maxExcLoc: - maxException = ParseException( - instring, len(instring), e.errmsg, self - ) - maxExcLoc = len(instring) - - if maxException is not None: - maxException.msg = self.errmsg - raise maxException - else: - raise ParseException( - instring, loc, "no defined alternatives to match", self - ) - - def __ior__(self, other): - if isinstance(other, str_type): - other = self._literalStringClass(other) - return self.append(other) # MatchFirst([self, other]) - - def _generateDefaultName(self): - return "{" + " | ".join(str(e) for e in self.exprs) + "}" - - def _setResultsName(self, name, listAllMatches=False): - if ( - __diag__.warn_multiple_tokens_in_named_alternation - and Diagnostics.warn_multiple_tokens_in_named_alternation - not in self.suppress_warnings_ - ): - if any( - isinstance(e, And) - and Diagnostics.warn_multiple_tokens_in_named_alternation - not in e.suppress_warnings_ - for e in self.exprs - ): - warnings.warn( - "{}: setting results name {!r} on {} expression " - "will return a list of all parsed tokens in an And alternative, " - "in prior versions only the first token was returned; enclose " - "contained argument in Group".format( - "warn_multiple_tokens_in_named_alternation", - name, - type(self).__name__, - ), - stacklevel=3, - ) - - return super()._setResultsName(name, listAllMatches) - - -class Each(ParseExpression): - """Requires all given :class:`ParseExpression` s to be found, but in - any order. Expressions may be separated by whitespace. - - May be constructed using the ``'&'`` operator. 
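The difference between the two alternation classes above in one sketch (assuming pyparsing 3.x imported as ``pp``): ``MatchFirst`` (``|``) takes the first listed match, ``Or`` (``^``) the longest::

    import pyparsing as pp

    num = pp.Word(pp.nums)
    real = pp.Combine(num + "." + num)
    print((num | real).parse_string("3.1416"))  # first alternative wins -> ['3']
    print((num ^ real).parse_string("3.1416"))  # longest match wins -> ['3.1416']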
- - Example:: - - color = one_of("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN") - shape_type = one_of("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON") - integer = Word(nums) - shape_attr = "shape:" + shape_type("shape") - posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn") - color_attr = "color:" + color("color") - size_attr = "size:" + integer("size") - - # use Each (using operator '&') to accept attributes in any order - # (shape and posn are required, color and size are optional) - shape_spec = shape_attr & posn_attr & Opt(color_attr) & Opt(size_attr) - - shape_spec.run_tests(''' - shape: SQUARE color: BLACK posn: 100, 120 - shape: CIRCLE size: 50 color: BLUE posn: 50,80 - color:GREEN size:20 shape:TRIANGLE posn:20,40 - ''' - ) - - prints:: - - shape: SQUARE color: BLACK posn: 100, 120 - ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']] - - color: BLACK - - posn: ['100', ',', '120'] - - x: 100 - - y: 120 - - shape: SQUARE - - - shape: CIRCLE size: 50 color: BLUE posn: 50,80 - ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']] - - color: BLUE - - posn: ['50', ',', '80'] - - x: 50 - - y: 80 - - shape: CIRCLE - - size: 50 - - - color: GREEN size: 20 shape: TRIANGLE posn: 20,40 - ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']] - - color: GREEN - - posn: ['20', ',', '40'] - - x: 20 - - y: 40 - - shape: TRIANGLE - - size: 20 - """ - - def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = True): - super().__init__(exprs, savelist) - if self.exprs: - self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) - else: - self.mayReturnEmpty = True - self.skipWhitespace = True - self.initExprGroups = True - self.saveAsList = True - - def streamline(self) -> ParserElement: - super().streamline() - if self.exprs: - self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) - else: - self.mayReturnEmpty = True - return self - - def parseImpl(self, instring, loc, doActions=True): - if self.initExprGroups: - self.opt1map = dict( - (id(e.expr), e) for e in self.exprs if isinstance(e, Opt) - ) - opt1 = [e.expr for e in self.exprs if isinstance(e, Opt)] - opt2 = [ - e - for e in self.exprs - if e.mayReturnEmpty and not isinstance(e, (Opt, Regex, ZeroOrMore)) - ] - self.optionals = opt1 + opt2 - self.multioptionals = [ - e.expr.set_results_name(e.resultsName, list_all_matches=True) - for e in self.exprs - if isinstance(e, _MultipleMatch) - ] - self.multirequired = [ - e.expr.set_results_name(e.resultsName, list_all_matches=True) - for e in self.exprs - if isinstance(e, OneOrMore) - ] - self.required = [ - e for e in self.exprs if not isinstance(e, (Opt, ZeroOrMore, OneOrMore)) - ] - self.required += self.multirequired - self.initExprGroups = False - - tmpLoc = loc - tmpReqd = self.required[:] - tmpOpt = self.optionals[:] - multis = self.multioptionals[:] - matchOrder = [] - - keepMatching = True - failed = [] - fatals = [] - while keepMatching: - tmpExprs = tmpReqd + tmpOpt + multis - failed.clear() - fatals.clear() - for e in tmpExprs: - try: - tmpLoc = e.try_parse(instring, tmpLoc, raise_fatal=True) - except ParseFatalException as pfe: - pfe.__traceback__ = None - pfe.parserElement = e - fatals.append(pfe) - failed.append(e) - except ParseException: - failed.append(e) - else: - matchOrder.append(self.opt1map.get(id(e), e)) - if e in tmpReqd: - tmpReqd.remove(e) - elif e in tmpOpt: - tmpOpt.remove(e) - if len(failed) == len(tmpExprs): - keepMatching = False - - # 
look for any ParseFatalExceptions - if fatals: - if len(fatals) > 1: - fatals.sort(key=lambda e: -e.loc) - if fatals[0].loc == fatals[1].loc: - fatals.sort(key=lambda e: (-e.loc, -len(str(e.parserElement)))) - max_fatal = fatals[0] - raise max_fatal - - if tmpReqd: - missing = ", ".join([str(e) for e in tmpReqd]) - raise ParseException( - instring, - loc, - "Missing one or more required elements ({})".format(missing), - ) - - # add any unmatched Opts, in case they have default values defined - matchOrder += [e for e in self.exprs if isinstance(e, Opt) and e.expr in tmpOpt] - - total_results = ParseResults([]) - for e in matchOrder: - loc, results = e._parse(instring, loc, doActions) - total_results += results - - return loc, total_results - - def _generateDefaultName(self): - return "{" + " & ".join(str(e) for e in self.exprs) + "}" - - -class ParseElementEnhance(ParserElement): - """Abstract subclass of :class:`ParserElement`, for combining and - post-processing parsed tokens. - """ - - def __init__(self, expr: Union[ParserElement, str], savelist: bool = False): - super().__init__(savelist) - if isinstance(expr, str_type): - if issubclass(self._literalStringClass, Token): - expr = self._literalStringClass(expr) - elif issubclass(type(self), self._literalStringClass): - expr = Literal(expr) - else: - expr = self._literalStringClass(Literal(expr)) - self.expr = expr - if expr is not None: - self.mayIndexError = expr.mayIndexError - self.mayReturnEmpty = expr.mayReturnEmpty - self.set_whitespace_chars( - expr.whiteChars, copy_defaults=expr.copyDefaultWhiteChars - ) - self.skipWhitespace = expr.skipWhitespace - self.saveAsList = expr.saveAsList - self.callPreparse = expr.callPreparse - self.ignoreExprs.extend(expr.ignoreExprs) - - def recurse(self) -> Sequence[ParserElement]: - return [self.expr] if self.expr is not None else [] - - def parseImpl(self, instring, loc, doActions=True): - if self.expr is not None: - return self.expr._parse(instring, loc, doActions, callPreParse=False) - else: - raise ParseException(instring, loc, "No expression defined", self) - - def leave_whitespace(self, recursive: bool = True) -> ParserElement: - super().leave_whitespace(recursive) - - if recursive: - self.expr = self.expr.copy() - if self.expr is not None: - self.expr.leave_whitespace(recursive) - return self - - def ignore_whitespace(self, recursive: bool = True) -> ParserElement: - super().ignore_whitespace(recursive) - - if recursive: - self.expr = self.expr.copy() - if self.expr is not None: - self.expr.ignore_whitespace(recursive) - return self - - def ignore(self, other) -> ParserElement: - if isinstance(other, Suppress): - if other not in self.ignoreExprs: - super().ignore(other) - if self.expr is not None: - self.expr.ignore(self.ignoreExprs[-1]) - else: - super().ignore(other) - if self.expr is not None: - self.expr.ignore(self.ignoreExprs[-1]) - return self - - def streamline(self) -> ParserElement: - super().streamline() - if self.expr is not None: - self.expr.streamline() - return self - - def _checkRecursion(self, parseElementList): - if self in parseElementList: - raise RecursiveGrammarException(parseElementList + [self]) - subRecCheckList = parseElementList[:] + [self] - if self.expr is not None: - self.expr._checkRecursion(subRecCheckList) - - def validate(self, validateTrace=None) -> None: - if validateTrace is None: - validateTrace = [] - tmp = validateTrace[:] + [self] - if self.expr is not None: - self.expr.validate(tmp) - self._checkRecursion([]) - - def _generateDefaultName(self): - 
return "{}:({})".format(self.__class__.__name__, str(self.expr)) - - ignoreWhitespace = ignore_whitespace - leaveWhitespace = leave_whitespace - - -class IndentedBlock(ParseElementEnhance): - """ - Expression to match one or more expressions at a given indentation level. - Useful for parsing text where structure is implied by indentation (like Python source code). - """ - - class _Indent(Empty): - def __init__(self, ref_col: int): - super().__init__() - self.errmsg = "expected indent at column {}".format(ref_col) - self.add_condition(lambda s, l, t: col(l, s) == ref_col) - - class _IndentGreater(Empty): - def __init__(self, ref_col: int): - super().__init__() - self.errmsg = "expected indent at column greater than {}".format(ref_col) - self.add_condition(lambda s, l, t: col(l, s) > ref_col) - - def __init__( - self, expr: ParserElement, *, recursive: bool = False, grouped: bool = True - ): - super().__init__(expr, savelist=True) - # if recursive: - # raise NotImplementedError("IndentedBlock with recursive is not implemented") - self._recursive = recursive - self._grouped = grouped - self.parent_anchor = 1 - - def parseImpl(self, instring, loc, doActions=True): - # advance parse position to non-whitespace by using an Empty() - # this should be the column to be used for all subsequent indented lines - anchor_loc = Empty().preParse(instring, loc) - - # see if self.expr matches at the current location - if not it will raise an exception - # and no further work is necessary - self.expr.try_parse(instring, anchor_loc, doActions) - - indent_col = col(anchor_loc, instring) - peer_detect_expr = self._Indent(indent_col) - - inner_expr = Empty() + peer_detect_expr + self.expr - if self._recursive: - sub_indent = self._IndentGreater(indent_col) - nested_block = IndentedBlock( - self.expr, recursive=self._recursive, grouped=self._grouped - ) - nested_block.set_debug(self.debug) - nested_block.parent_anchor = indent_col - inner_expr += Opt(sub_indent + nested_block) - - inner_expr.set_name(f"inner {hex(id(inner_expr))[-4:].upper()}@{indent_col}") - block = OneOrMore(inner_expr) - - trailing_undent = self._Indent(self.parent_anchor) | StringEnd() - - if self._grouped: - wrapper = Group - else: - wrapper = lambda expr: expr - return (wrapper(block) + Optional(trailing_undent)).parseImpl( - instring, anchor_loc, doActions - ) - - -class AtStringStart(ParseElementEnhance): - """Matches if expression matches at the beginning of the parse - string:: - - AtStringStart(Word(nums)).parse_string("123") - # prints ["123"] - - AtStringStart(Word(nums)).parse_string(" 123") - # raises ParseException - """ - - def __init__(self, expr: Union[ParserElement, str]): - super().__init__(expr) - self.callPreparse = False - - def parseImpl(self, instring, loc, doActions=True): - if loc != 0: - raise ParseException(instring, loc, "not found at string start") - return super().parseImpl(instring, loc, doActions) - - -class AtLineStart(ParseElementEnhance): - r"""Matches if an expression matches at the beginning of a line within - the parse string - - Example:: - - test = '''\ - AAA this line - AAA and this line - AAA but not this one - B AAA and definitely not this one - ''' - - for t in (AtLineStart('AAA') + restOfLine).search_string(test): - print(t) - - prints:: - - ['AAA', ' this line'] - ['AAA', ' and this line'] - - """ - - def __init__(self, expr: Union[ParserElement, str]): - super().__init__(expr) - self.callPreparse = False - - def parseImpl(self, instring, loc, doActions=True): - if col(loc, instring) != 1: - raise 
ParseException(instring, loc, "not found at line start") - return super().parseImpl(instring, loc, doActions) - - -class FollowedBy(ParseElementEnhance): - """Lookahead matching of the given parse expression. - ``FollowedBy`` does *not* advance the parsing position within - the input string, it only verifies that the specified parse - expression matches at the current position. ``FollowedBy`` - always returns a null token list. If any results names are defined - in the lookahead expression, those *will* be returned for access by - name. - - Example:: - - # use FollowedBy to match a label only if it is followed by a ':' - data_word = Word(alphas) - label = data_word + FollowedBy(':') - attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)) - - attr_expr[1, ...].parse_string("shape: SQUARE color: BLACK posn: upper left").pprint() - - prints:: - - [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']] - """ - - def __init__(self, expr: Union[ParserElement, str]): - super().__init__(expr) - self.mayReturnEmpty = True - - def parseImpl(self, instring, loc, doActions=True): - # by using self._expr.parse and deleting the contents of the returned ParseResults list - # we keep any named results that were defined in the FollowedBy expression - _, ret = self.expr._parse(instring, loc, doActions=doActions) - del ret[:] - - return loc, ret - - -class PrecededBy(ParseElementEnhance): - """Lookbehind matching of the given parse expression. - ``PrecededBy`` does not advance the parsing position within the - input string, it only verifies that the specified parse expression - matches prior to the current position. ``PrecededBy`` always - returns a null token list, but if a results name is defined on the - given expression, it is returned. - - Parameters: - - - expr - expression that must match prior to the current parse - location - - retreat - (default= ``None``) - (int) maximum number of characters - to lookbehind prior to the current parse location - - If the lookbehind expression is a string, :class:`Literal`, - :class:`Keyword`, or a :class:`Word` or :class:`CharsNotIn` - with a specified exact or maximum length, then the retreat - parameter is not required. Otherwise, retreat must be specified to - give a maximum number of characters to look back from - the current parse position for a lookbehind match. 
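Ahead of the class's own example below, a one-line sketch of the exact-retreat case, where a string literal fixes the lookbehind width (assuming pyparsing 3.x imported as ``pp``; the input is hypothetical)::

    import pyparsing as pp

    # match an identifier only when immediately preceded by '$'
    dollar_var = pp.PrecededBy("$") + pp.Word(pp.alphas)
    print(dollar_var.search_string("$name = value"))  # -> [['name']]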
-
-    Example::
-
-        # VB-style variable names with type prefixes
-        int_var = PrecededBy("#") + pyparsing_common.identifier
-        str_var = PrecededBy("$") + pyparsing_common.identifier
-
-    """
-
-    def __init__(
-        self, expr: Union[ParserElement, str], retreat: typing.Optional[int] = None
-    ):
-        super().__init__(expr)
-        self.expr = self.expr().leave_whitespace()
-        self.mayReturnEmpty = True
-        self.mayIndexError = False
-        self.exact = False
-        if isinstance(expr, str_type):
-            retreat = len(expr)
-            self.exact = True
-        elif isinstance(expr, (Literal, Keyword)):
-            retreat = expr.matchLen
-            self.exact = True
-        elif isinstance(expr, (Word, CharsNotIn)) and expr.maxLen != _MAX_INT:
-            retreat = expr.maxLen
-            self.exact = True
-        elif isinstance(expr, PositionToken):
-            retreat = 0
-            self.exact = True
-        self.retreat = retreat
-        self.errmsg = "not preceded by " + str(expr)
-        self.skipWhitespace = False
-        self.parseAction.append(lambda s, l, t: t.__delitem__(slice(None, None)))
-
-    def parseImpl(self, instring, loc=0, doActions=True):
-        if self.exact:
-            if loc < self.retreat:
-                raise ParseException(instring, loc, self.errmsg)
-            start = loc - self.retreat
-            _, ret = self.expr._parse(instring, start)
-        else:
-            # retreat specified a maximum lookbehind window, iterate
-            test_expr = self.expr + StringEnd()
-            instring_slice = instring[max(0, loc - self.retreat) : loc]
-            last_expr = ParseException(instring, loc, self.errmsg)
-            for offset in range(1, min(loc, self.retreat + 1) + 1):
-                try:
-                    # print('trying', offset, instring_slice, repr(instring_slice[loc - offset:]))
-                    _, ret = test_expr._parse(
-                        instring_slice, len(instring_slice) - offset
-                    )
-                except ParseBaseException as pbe:
-                    last_expr = pbe
-                else:
-                    break
-            else:
-                raise last_expr
-        return loc, ret
-
-
-class Located(ParseElementEnhance):
-    """
-    Decorates a returned token with its starting and ending
-    locations in the input string.
-
-    This helper adds the following results names:
-
-    - ``locn_start`` - location where matched expression begins
-    - ``locn_end`` - location where matched expression ends
-    - ``value`` - the actual parsed results
-
-    Be careful if the input text contains ``<TAB>`` characters, you
-    may want to call :class:`ParserElement.parse_with_tabs`
-
-    Example::
-
-        wd = Word(alphas)
-        for match in Located(wd).search_string("ljsdf123lksdjjf123lkkjj1222"):
-            print(match)
-
-    prints::
-
-        [0, ['ljsdf'], 5]
-        [8, ['lksdjjf'], 15]
-        [18, ['lkkjj'], 23]
-
-    """
-
-    def parseImpl(self, instring, loc, doActions=True):
-        start = loc
-        loc, tokens = self.expr._parse(instring, start, doActions, callPreParse=False)
-        ret_tokens = ParseResults([start, tokens, loc])
-        ret_tokens["locn_start"] = start
-        ret_tokens["value"] = tokens
-        ret_tokens["locn_end"] = loc
-        if self.resultsName:
-            # must return as a list, so that the name will be attached to the complete group
-            return loc, [ret_tokens]
-        else:
-            return loc, ret_tokens
-
-
-class NotAny(ParseElementEnhance):
-    """
-    Lookahead to disallow matching with the given parse expression.
-    ``NotAny`` does *not* advance the parsing position within the
-    input string, it only verifies that the specified parse expression
-    does *not* match at the current position. Also, ``NotAny`` does
-    *not* skip over leading whitespace. ``NotAny`` always returns
-    a null token list. May be constructed using the ``'~'`` operator.
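A sketch of ``~``/``NotAny`` guarding keywords, complementing the class's own example below (assuming pyparsing 3.x imported as ``pp``; the grammar is hypothetical)::

    import pyparsing as pp

    AND, OR = map(pp.CaselessKeyword, "AND OR".split())
    ident = ~(AND | OR) + pp.Word(pp.alphas)   # any word except the keywords
    term = ident + (AND | OR) + ident
    print(term.parse_string("foo AND bar"))    # -> ['foo', 'AND', 'bar']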
- - Example:: - - AND, OR, NOT = map(CaselessKeyword, "AND OR NOT".split()) - - # take care not to mistake keywords for identifiers - ident = ~(AND | OR | NOT) + Word(alphas) - boolean_term = Opt(NOT) + ident - - # very crude boolean expression - to support parenthesis groups and - # operation hierarchy, use infix_notation - boolean_expr = boolean_term + ((AND | OR) + boolean_term)[...] - - # integers that are followed by "." are actually floats - integer = Word(nums) + ~Char(".") - """ - - def __init__(self, expr: Union[ParserElement, str]): - super().__init__(expr) - # do NOT use self.leave_whitespace(), don't want to propagate to exprs - # self.leave_whitespace() - self.skipWhitespace = False - - self.mayReturnEmpty = True - self.errmsg = "Found unwanted token, " + str(self.expr) - - def parseImpl(self, instring, loc, doActions=True): - if self.expr.can_parse_next(instring, loc): - raise ParseException(instring, loc, self.errmsg, self) - return loc, [] - - def _generateDefaultName(self): - return "~{" + str(self.expr) + "}" - - -class _MultipleMatch(ParseElementEnhance): - def __init__( - self, - expr: ParserElement, - stop_on: typing.Optional[Union[ParserElement, str]] = None, - *, - stopOn: typing.Optional[Union[ParserElement, str]] = None, - ): - super().__init__(expr) - stopOn = stopOn or stop_on - self.saveAsList = True - ender = stopOn - if isinstance(ender, str_type): - ender = self._literalStringClass(ender) - self.stopOn(ender) - - def stopOn(self, ender) -> ParserElement: - if isinstance(ender, str_type): - ender = self._literalStringClass(ender) - self.not_ender = ~ender if ender is not None else None - return self - - def parseImpl(self, instring, loc, doActions=True): - self_expr_parse = self.expr._parse - self_skip_ignorables = self._skipIgnorables - check_ender = self.not_ender is not None - if check_ender: - try_not_ender = self.not_ender.tryParse - - # must be at least one (but first see if we are the stopOn sentinel; - # if so, fail) - if check_ender: - try_not_ender(instring, loc) - loc, tokens = self_expr_parse(instring, loc, doActions) - try: - hasIgnoreExprs = not not self.ignoreExprs - while 1: - if check_ender: - try_not_ender(instring, loc) - if hasIgnoreExprs: - preloc = self_skip_ignorables(instring, loc) - else: - preloc = loc - loc, tmptokens = self_expr_parse(instring, preloc, doActions) - if tmptokens or tmptokens.haskeys(): - tokens += tmptokens - except (ParseException, IndexError): - pass - - return loc, tokens - - def _setResultsName(self, name, listAllMatches=False): - if ( - __diag__.warn_ungrouped_named_tokens_in_collection - and Diagnostics.warn_ungrouped_named_tokens_in_collection - not in self.suppress_warnings_ - ): - for e in [self.expr] + self.expr.recurse(): - if ( - isinstance(e, ParserElement) - and e.resultsName - and Diagnostics.warn_ungrouped_named_tokens_in_collection - not in e.suppress_warnings_ - ): - warnings.warn( - "{}: setting results name {!r} on {} expression " - "collides with {!r} on contained expression".format( - "warn_ungrouped_named_tokens_in_collection", - name, - type(self).__name__, - e.resultsName, - ), - stacklevel=3, - ) - - return super()._setResultsName(name, listAllMatches) - - -class OneOrMore(_MultipleMatch): - """ - Repetition of one or more of the given expression. 
- - Parameters: - - expr - expression that must match one or more times - - stop_on - (default= ``None``) - expression for a terminating sentinel - (only required if the sentinel would ordinarily match the repetition - expression) - - Example:: - - data_word = Word(alphas) - label = data_word + FollowedBy(':') - attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).set_parse_action(' '.join)) - - text = "shape: SQUARE posn: upper left color: BLACK" - attr_expr[1, ...].parse_string(text).pprint() # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']] - - # use stop_on attribute for OneOrMore to avoid reading label string as part of the data - attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)) - OneOrMore(attr_expr).parse_string(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']] - - # could also be written as - (attr_expr * (1,)).parse_string(text).pprint() - """ - - def _generateDefaultName(self): - return "{" + str(self.expr) + "}..." - - -class ZeroOrMore(_MultipleMatch): - """ - Optional repetition of zero or more of the given expression. - - Parameters: - - ``expr`` - expression that must match zero or more times - - ``stop_on`` - expression for a terminating sentinel - (only required if the sentinel would ordinarily match the repetition - expression) - (default= ``None``) - - Example: similar to :class:`OneOrMore` - """ - - def __init__( - self, - expr: ParserElement, - stop_on: typing.Optional[Union[ParserElement, str]] = None, - *, - stopOn: typing.Optional[Union[ParserElement, str]] = None, - ): - super().__init__(expr, stopOn=stopOn or stop_on) - self.mayReturnEmpty = True - - def parseImpl(self, instring, loc, doActions=True): - try: - return super().parseImpl(instring, loc, doActions) - except (ParseException, IndexError): - return loc, ParseResults([], name=self.resultsName) - - def _generateDefaultName(self): - return "[" + str(self.expr) + "]..." - - -class _NullToken: - def __bool__(self): - return False - - def __str__(self): - return "" - - -class Opt(ParseElementEnhance): - """ - Optional matching of the given expression. - - Parameters: - - ``expr`` - expression that must match zero or more times - - ``default`` (optional) - value to be returned if the optional expression is not found. 
- - Example:: - - # US postal code can be a 5-digit zip, plus optional 4-digit qualifier - zip = Combine(Word(nums, exact=5) + Opt('-' + Word(nums, exact=4))) - zip.run_tests(''' - # traditional ZIP code - 12345 - - # ZIP+4 form - 12101-0001 - - # invalid ZIP - 98765- - ''') - - prints:: - - # traditional ZIP code - 12345 - ['12345'] - - # ZIP+4 form - 12101-0001 - ['12101-0001'] - - # invalid ZIP - 98765- - ^ - FAIL: Expected end of text (at char 5), (line:1, col:6) - """ - - __optionalNotMatched = _NullToken() - - def __init__( - self, expr: Union[ParserElement, str], default: Any = __optionalNotMatched - ): - super().__init__(expr, savelist=False) - self.saveAsList = self.expr.saveAsList - self.defaultValue = default - self.mayReturnEmpty = True - - def parseImpl(self, instring, loc, doActions=True): - self_expr = self.expr - try: - loc, tokens = self_expr._parse(instring, loc, doActions, callPreParse=False) - except (ParseException, IndexError): - default_value = self.defaultValue - if default_value is not self.__optionalNotMatched: - if self_expr.resultsName: - tokens = ParseResults([default_value]) - tokens[self_expr.resultsName] = default_value - else: - tokens = [default_value] - else: - tokens = [] - return loc, tokens - - def _generateDefaultName(self): - inner = str(self.expr) - # strip off redundant inner {}'s - while len(inner) > 1 and inner[0 :: len(inner) - 1] == "{}": - inner = inner[1:-1] - return "[" + inner + "]" - - -Optional = Opt - - -class SkipTo(ParseElementEnhance): - """ - Token for skipping over all undefined text until the matched - expression is found. - - Parameters: - - ``expr`` - target expression marking the end of the data to be skipped - - ``include`` - if ``True``, the target expression is also parsed - (the skipped text and target expression are returned as a 2-element - list) (default= ``False``). 
-    - ``ignore`` - (default= ``None``) used to define grammars (typically quoted strings and
-      comments) that might contain false matches to the target expression
-    - ``fail_on`` - (default= ``None``) define expressions that are not allowed to be
-      included in the skipped test; if found before the target expression is found,
-      the :class:`SkipTo` is not a match
-
-    Example::
-
-        report = '''
-        Outstanding Issues Report - 1 Jan 2000
-
-           # | Severity | Description                               |  Days Open
-        -----+----------+-------------------------------------------+-----------
-         101 | Critical | Intermittent system crash                 |          6
-          94 | Cosmetic | Spelling error on Login ('log|n')         |         14
-          79 | Minor    | System slow when running too many reports |         47
-        '''
-        integer = Word(nums)
-        SEP = Suppress('|')
-        # use SkipTo to simply match everything up until the next SEP
-        # - ignore quoted strings, so that a '|' character inside a quoted string does not match
-        # - parse action will call token.strip() for each matched token, i.e., the description body
-        string_data = SkipTo(SEP, ignore=quoted_string)
-        string_data.set_parse_action(token_map(str.strip))
-        ticket_expr = (integer("issue_num") + SEP
-                       + string_data("sev") + SEP
-                       + string_data("desc") + SEP
-                       + integer("days_open"))
-
-        for tkt in ticket_expr.search_string(report):
-            print(tkt.dump())
-
-    prints::
-
-        ['101', 'Critical', 'Intermittent system crash', '6']
-        - days_open: '6'
-        - desc: 'Intermittent system crash'
-        - issue_num: '101'
-        - sev: 'Critical'
-        ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14']
-        - days_open: '14'
-        - desc: "Spelling error on Login ('log|n')"
-        - issue_num: '94'
-        - sev: 'Cosmetic'
-        ['79', 'Minor', 'System slow when running too many reports', '47']
-        - days_open: '47'
-        - desc: 'System slow when running too many reports'
-        - issue_num: '79'
-        - sev: 'Minor'
-    """
-
-    def __init__(
-        self,
-        other: Union[ParserElement, str],
-        include: bool = False,
-        ignore: bool = None,
-        fail_on: typing.Optional[Union[ParserElement, str]] = None,
-        *,
-        failOn: Union[ParserElement, str] = None,
-    ):
-        super().__init__(other)
-        failOn = failOn or fail_on
-        self.ignoreExpr = ignore
-        self.mayReturnEmpty = True
-        self.mayIndexError = False
-        self.includeMatch = include
-        self.saveAsList = False
-        if isinstance(failOn, str_type):
-            self.failOn = self._literalStringClass(failOn)
-        else:
-            self.failOn = failOn
-        self.errmsg = "No match found for " + str(self.expr)
-
-    def parseImpl(self, instring, loc, doActions=True):
-        startloc = loc
-        instrlen = len(instring)
-        self_expr_parse = self.expr._parse
-        self_failOn_canParseNext = (
-            self.failOn.canParseNext if self.failOn is not None else None
-        )
-        self_ignoreExpr_tryParse = (
-            self.ignoreExpr.tryParse if self.ignoreExpr is not None else None
-        )
-
-        tmploc = loc
-        while tmploc <= instrlen:
-            if self_failOn_canParseNext is not None:
-                # break if failOn expression matches
-                if self_failOn_canParseNext(instring, tmploc):
-                    break
-
-            if self_ignoreExpr_tryParse is not None:
-                # advance past ignore expressions
-                while 1:
-                    try:
-                        tmploc = self_ignoreExpr_tryParse(instring, tmploc)
-                    except ParseBaseException:
-                        break
-
-            try:
-                self_expr_parse(instring, tmploc, doActions=False, callPreParse=False)
-            except (ParseException, IndexError):
-                # no match, advance loc in string
-                tmploc += 1
-            else:
-                # matched skipto expr, done
-                break
-
-        else:
-            # ran off the end of the input string without matching skipto expr, fail
-            raise ParseException(instring, loc, self.errmsg, self)
-
-        # build up return values
-        loc = tmploc
- skiptext = instring[startloc:loc] - skipresult = ParseResults(skiptext) - - if self.includeMatch: - loc, mat = self_expr_parse(instring, loc, doActions, callPreParse=False) - skipresult += mat - - return loc, skipresult - - -class Forward(ParseElementEnhance): - """ - Forward declaration of an expression to be defined later - - used for recursive grammars, such as algebraic infix notation. - When the expression is known, it is assigned to the ``Forward`` - variable using the ``'<<'`` operator. - - Note: take care when assigning to ``Forward`` not to overlook - precedence of operators. - - Specifically, ``'|'`` has a lower precedence than ``'<<'``, so that:: - - fwd_expr << a | b | c - - will actually be evaluated as:: - - (fwd_expr << a) | b | c - - thereby leaving b and c out as parseable alternatives. It is recommended that you - explicitly group the values inserted into the ``Forward``:: - - fwd_expr << (a | b | c) - - Converting to use the ``'<<='`` operator instead will avoid this problem. - - See :class:`ParseResults.pprint` for an example of a recursive - parser created using ``Forward``. - """ - - def __init__(self, other: typing.Optional[Union[ParserElement, str]] = None): - self.caller_frame = traceback.extract_stack(limit=2)[0] - super().__init__(other, savelist=False) - self.lshift_line = None - - def __lshift__(self, other): - if hasattr(self, "caller_frame"): - del self.caller_frame - if isinstance(other, str_type): - other = self._literalStringClass(other) - self.expr = other - self.mayIndexError = self.expr.mayIndexError - self.mayReturnEmpty = self.expr.mayReturnEmpty - self.set_whitespace_chars( - self.expr.whiteChars, copy_defaults=self.expr.copyDefaultWhiteChars - ) - self.skipWhitespace = self.expr.skipWhitespace - self.saveAsList = self.expr.saveAsList - self.ignoreExprs.extend(self.expr.ignoreExprs) - self.lshift_line = traceback.extract_stack(limit=2)[-2] - return self - - def __ilshift__(self, other): - return self << other - - def __or__(self, other): - caller_line = traceback.extract_stack(limit=2)[-2] - if ( - __diag__.warn_on_match_first_with_lshift_operator - and caller_line == self.lshift_line - and Diagnostics.warn_on_match_first_with_lshift_operator - not in self.suppress_warnings_ - ): - warnings.warn( - "using '<<' operator with '|' is probably an error, use '<<='", - stacklevel=2, - ) - ret = super().__or__(other) - return ret - - def __del__(self): - # see if we are getting dropped because of '=' reassignment of var instead of '<<=' or '<<' - if ( - self.expr is None - and __diag__.warn_on_assignment_to_Forward - and Diagnostics.warn_on_assignment_to_Forward not in self.suppress_warnings_ - ): - warnings.warn_explicit( - "Forward defined here but no expression attached later using '<<=' or '<<'", - UserWarning, - filename=self.caller_frame.filename, - lineno=self.caller_frame.lineno, - ) - - def parseImpl(self, instring, loc, doActions=True): - if ( - self.expr is None - and __diag__.warn_on_parse_using_empty_Forward - and Diagnostics.warn_on_parse_using_empty_Forward - not in self.suppress_warnings_ - ): - # walk stack until parse_string, scan_string, search_string, or transform_string is found - parse_fns = [ - "parse_string", - "scan_string", - "search_string", - "transform_string", - ] - tb = traceback.extract_stack(limit=200) - for i, frm in enumerate(reversed(tb), start=1): - if frm.name in parse_fns: - stacklevel = i + 1 - break - else: - stacklevel = 2 - warnings.warn( - "Forward expression was never assigned a value, will not parse any 
input", - stacklevel=stacklevel, - ) - if not ParserElement._left_recursion_enabled: - return super().parseImpl(instring, loc, doActions) - # ## Bounded Recursion algorithm ## - # Recursion only needs to be processed at ``Forward`` elements, since they are - # the only ones that can actually refer to themselves. The general idea is - # to handle recursion stepwise: We start at no recursion, then recurse once, - # recurse twice, ..., until more recursion offers no benefit (we hit the bound). - # - # The "trick" here is that each ``Forward`` gets evaluated in two contexts - # - to *match* a specific recursion level, and - # - to *search* the bounded recursion level - # and the two run concurrently. The *search* must *match* each recursion level - # to find the best possible match. This is handled by a memo table, which - # provides the previous match to the next level match attempt. - # - # See also "Left Recursion in Parsing Expression Grammars", Medeiros et al. - # - # There is a complication since we not only *parse* but also *transform* via - # actions: We do not want to run the actions too often while expanding. Thus, - # we expand using `doActions=False` and only run `doActions=True` if the next - # recursion level is acceptable. - with ParserElement.recursion_lock: - memo = ParserElement.recursion_memos - try: - # we are parsing at a specific recursion expansion - use it as-is - prev_loc, prev_result = memo[loc, self, doActions] - if isinstance(prev_result, Exception): - raise prev_result - return prev_loc, prev_result.copy() - except KeyError: - act_key = (loc, self, True) - peek_key = (loc, self, False) - # we are searching for the best recursion expansion - keep on improving - # both `doActions` cases must be tracked separately here! - prev_loc, prev_peek = memo[peek_key] = ( - loc - 1, - ParseException( - instring, loc, "Forward recursion without base case", self - ), - ) - if doActions: - memo[act_key] = memo[peek_key] - while True: - try: - new_loc, new_peek = super().parseImpl(instring, loc, False) - except ParseException: - # we failed before getting any match – do not hide the error - if isinstance(prev_peek, Exception): - raise - new_loc, new_peek = prev_loc, prev_peek - # the match did not get better: we are done - if new_loc <= prev_loc: - if doActions: - # replace the match for doActions=False as well, - # in case the action did backtrack - prev_loc, prev_result = memo[peek_key] = memo[act_key] - del memo[peek_key], memo[act_key] - return prev_loc, prev_result.copy() - del memo[peek_key] - return prev_loc, prev_peek.copy() - # the match did get better: see if we can improve further - else: - if doActions: - try: - memo[act_key] = super().parseImpl(instring, loc, True) - except ParseException as e: - memo[peek_key] = memo[act_key] = (new_loc, e) - raise - prev_loc, prev_peek = memo[peek_key] = new_loc, new_peek - - def leave_whitespace(self, recursive: bool = True) -> ParserElement: - self.skipWhitespace = False - return self - - def ignore_whitespace(self, recursive: bool = True) -> ParserElement: - self.skipWhitespace = True - return self - - def streamline(self) -> ParserElement: - if not self.streamlined: - self.streamlined = True - if self.expr is not None: - self.expr.streamline() - return self - - def validate(self, validateTrace=None) -> None: - if validateTrace is None: - validateTrace = [] - - if self not in validateTrace: - tmp = validateTrace[:] + [self] - if self.expr is not None: - self.expr.validate(tmp) - self._checkRecursion([]) - - def 
_generateDefaultName(self): - # Avoid infinite recursion by setting a temporary _defaultName - self._defaultName = ": ..." - - # Use the string representation of main expression. - retString = "..." - try: - if self.expr is not None: - retString = str(self.expr)[:1000] - else: - retString = "None" - finally: - return self.__class__.__name__ + ": " + retString - - def copy(self) -> ParserElement: - if self.expr is not None: - return super().copy() - else: - ret = Forward() - ret <<= self - return ret - - def _setResultsName(self, name, list_all_matches=False): - if ( - __diag__.warn_name_set_on_empty_Forward - and Diagnostics.warn_name_set_on_empty_Forward - not in self.suppress_warnings_ - ): - if self.expr is None: - warnings.warn( - "{}: setting results name {!r} on {} expression " - "that has no contained expression".format( - "warn_name_set_on_empty_Forward", name, type(self).__name__ - ), - stacklevel=3, - ) - - return super()._setResultsName(name, list_all_matches) - - ignoreWhitespace = ignore_whitespace - leaveWhitespace = leave_whitespace - - -class TokenConverter(ParseElementEnhance): - """ - Abstract subclass of :class:`ParseExpression`, for converting parsed results. - """ - - def __init__(self, expr: Union[ParserElement, str], savelist=False): - super().__init__(expr) # , savelist) - self.saveAsList = False - - -class Combine(TokenConverter): - """Converter to concatenate all matching tokens to a single string. - By default, the matching patterns must also be contiguous in the - input string; this can be disabled by specifying - ``'adjacent=False'`` in the constructor. - - Example:: - - real = Word(nums) + '.' + Word(nums) - print(real.parse_string('3.1416')) # -> ['3', '.', '1416'] - # will also erroneously match the following - print(real.parse_string('3. 1416')) # -> ['3', '.', '1416'] - - real = Combine(Word(nums) + '.' + Word(nums)) - print(real.parse_string('3.1416')) # -> ['3.1416'] - # no match when there are internal spaces - print(real.parse_string('3. 1416')) # -> Exception: Expected W:(0123...) - """ - - def __init__( - self, - expr: ParserElement, - join_string: str = "", - adjacent: bool = True, - *, - joinString: typing.Optional[str] = None, - ): - super().__init__(expr) - joinString = joinString if joinString is not None else join_string - # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself - if adjacent: - self.leave_whitespace() - self.adjacent = adjacent - self.skipWhitespace = True - self.joinString = joinString - self.callPreparse = True - - def ignore(self, other) -> ParserElement: - if self.adjacent: - ParserElement.ignore(self, other) - else: - super().ignore(other) - return self - - def postParse(self, instring, loc, tokenlist): - retToks = tokenlist.copy() - del retToks[:] - retToks += ParseResults( - ["".join(tokenlist._asStringList(self.joinString))], modal=self.modalResults - ) - - if self.resultsName and retToks.haskeys(): - return [retToks] - else: - return retToks - - -class Group(TokenConverter): - """Converter to return the matched tokens as a list - useful for - returning tokens of :class:`ZeroOrMore` and :class:`OneOrMore` expressions. - - The optional ``aslist`` argument when set to True will return the - parsed tokens as a Python list instead of a pyparsing ParseResults. 
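[Note: the ``aslist`` flag mentioned just above has no example in the docstring that follows; a minimal sketch of its effect, assuming pyparsing 3.x is installed::

    import pyparsing as pp

    term = pp.Word(pp.alphas) | pp.Word(pp.nums)

    # aslist=True yields a plain Python list instead of a nested ParseResults
    func = pp.Word(pp.alphas) + pp.Group(pp.Opt(pp.delimited_list(term)), aslist=True)
    print(func.parse_string("fn a, b, 100"))  # -> ['fn', ['a', 'b', '100']]
]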
- - Example:: - - ident = Word(alphas) - num = Word(nums) - term = ident | num - func = ident + Opt(delimited_list(term)) - print(func.parse_string("fn a, b, 100")) - # -> ['fn', 'a', 'b', '100'] - - func = ident + Group(Opt(delimited_list(term))) - print(func.parse_string("fn a, b, 100")) - # -> ['fn', ['a', 'b', '100']] - """ - - def __init__(self, expr: ParserElement, aslist: bool = False): - super().__init__(expr) - self.saveAsList = True - self._asPythonList = aslist - - def postParse(self, instring, loc, tokenlist): - if self._asPythonList: - return ParseResults.List( - tokenlist.asList() - if isinstance(tokenlist, ParseResults) - else list(tokenlist) - ) - else: - return [tokenlist] - - -class Dict(TokenConverter): - """Converter to return a repetitive expression as a list, but also - as a dictionary. Each element can also be referenced using the first - token in the expression as its key. Useful for tabular report - scraping when the first column can be used as a item key. - - The optional ``asdict`` argument when set to True will return the - parsed tokens as a Python dict instead of a pyparsing ParseResults. - - Example:: - - data_word = Word(alphas) - label = data_word + FollowedBy(':') - - text = "shape: SQUARE posn: upper left color: light blue texture: burlap" - attr_expr = (label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)) - - # print attributes as plain groups - print(attr_expr[1, ...].parse_string(text).dump()) - - # instead of OneOrMore(expr), parse using Dict(Group(expr)[1, ...]) - Dict will auto-assign names - result = Dict(Group(attr_expr)[1, ...]).parse_string(text) - print(result.dump()) - - # access named fields as dict entries, or output as dict - print(result['shape']) - print(result.as_dict()) - - prints:: - - ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap'] - [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']] - - color: 'light blue' - - posn: 'upper left' - - shape: 'SQUARE' - - texture: 'burlap' - SQUARE - {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'} - - See more examples at :class:`ParseResults` of accessing fields by results name. - """ - - def __init__(self, expr: ParserElement, asdict: bool = False): - super().__init__(expr) - self.saveAsList = True - self._asPythonDict = asdict - - def postParse(self, instring, loc, tokenlist): - for i, tok in enumerate(tokenlist): - if len(tok) == 0: - continue - - ikey = tok[0] - if isinstance(ikey, int): - ikey = str(ikey).strip() - - if len(tok) == 1: - tokenlist[ikey] = _ParseResultsWithOffset("", i) - - elif len(tok) == 2 and not isinstance(tok[1], ParseResults): - tokenlist[ikey] = _ParseResultsWithOffset(tok[1], i) - - else: - try: - dictvalue = tok.copy() # ParseResults(i) - except Exception: - exc = TypeError( - "could not extract dict values from parsed results" - " - Dict expression must contain Grouped expressions" - ) - raise exc from None - - del dictvalue[0] - - if len(dictvalue) != 1 or ( - isinstance(dictvalue, ParseResults) and dictvalue.haskeys() - ): - tokenlist[ikey] = _ParseResultsWithOffset(dictvalue, i) - else: - tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0], i) - - if self._asPythonDict: - return [tokenlist.as_dict()] if self.resultsName else tokenlist.as_dict() - else: - return [tokenlist] if self.resultsName else tokenlist - - -class Suppress(TokenConverter): - """Converter for ignoring the results of a parsed expression. 
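[Note: looking back at the bounded-recursion algorithm in ``Forward.parseImpl`` above, a minimal usage sketch; the grammar is illustrative and assumes pyparsing 3.x::

    import pyparsing as pp

    # Left recursion must be enabled explicitly; this switches on the
    # memo-table search described in Forward.parseImpl (and disables packrat).
    pp.ParserElement.enable_left_recursion()

    expr = pp.Forward()
    num = pp.Word(pp.nums)
    # expr <- expr '+' num | num  -- note the explicit grouping around '|'
    expr <<= (expr + '+' + num) | num

    print(expr.parse_string("1+2+3"))  # -> ['1', '+', '2', '+', '3']
]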
-
-    Example::
-
-        source = "a, b, c,d"
-        wd = Word(alphas)
-        wd_list1 = wd + (',' + wd)[...]
-        print(wd_list1.parse_string(source))
-
-        # often, delimiters that are useful during parsing are just in the
-        # way afterward - use Suppress to keep them out of the parsed output
-        wd_list2 = wd + (Suppress(',') + wd)[...]
-        print(wd_list2.parse_string(source))
-
-        # Skipped text (using '...') can be suppressed as well
-        source = "lead in START relevant text END trailing text"
-        start_marker = Keyword("START")
-        end_marker = Keyword("END")
-        find_body = Suppress(...) + start_marker + ... + end_marker
-        print(find_body.parse_string(source))
-
-    prints::
-
-        ['a', ',', 'b', ',', 'c', ',', 'd']
-        ['a', 'b', 'c', 'd']
-        ['START', 'relevant text ', 'END']
-
-    (See also :class:`delimited_list`.)
-    """
-
-    def __init__(self, expr: Union[ParserElement, str], savelist: bool = False):
-        if expr is ...:
-            expr = _PendingSkip(NoMatch())
-        super().__init__(expr)
-
-    def __add__(self, other) -> "ParserElement":
-        if isinstance(self.expr, _PendingSkip):
-            return Suppress(SkipTo(other)) + other
-        else:
-            return super().__add__(other)
-
-    def __sub__(self, other) -> "ParserElement":
-        if isinstance(self.expr, _PendingSkip):
-            return Suppress(SkipTo(other)) - other
-        else:
-            return super().__sub__(other)
-
-    def postParse(self, instring, loc, tokenlist):
-        return []
-
-    def suppress(self) -> ParserElement:
-        return self
-
-
-def trace_parse_action(f: ParseAction) -> ParseAction:
-    """Decorator for debugging parse actions.
-
-    When the parse action is called, this decorator will print
-    ``">> entering method-name(line:<current_source_line>, <parse_location>, <matched_tokens>)"``.
-    When the parse action completes, the decorator will print
-    ``"<<"`` followed by the returned value, or any exception that the parse action raised.
-
-    Example::
-
-        wd = Word(alphas)
-
-        @trace_parse_action
-        def remove_duplicate_chars(tokens):
-            return ''.join(sorted(set(''.join(tokens))))
-
-        wds = wd[1, ...].set_parse_action(remove_duplicate_chars)
-        print(wds.parse_string("slkdjs sld sldd sdlf sdljf"))
-
-    prints::
-
-        >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))
-        <<leaving remove_duplicate_chars (ret: 'dfjkls')
-        ['dfjkls']
-    """
-    f = _trim_arity(f)
-
-    def z(*paArgs):
-        thisFunc = f.__name__
-        s, l, t = paArgs[-3:]
-        if len(paArgs) > 3:
-            thisFunc = paArgs[0].__class__.__name__ + "." + thisFunc
-        sys.stderr.write(
-            ">>entering {}(line: {!r}, {}, {!r})\n".format(thisFunc, line(l, s), l, t)
-        )
-        try:
-            ret = f(*paArgs)
-        except Exception as exc:
-            sys.stderr.write("<<leaving {} (exception: {})\n".format(thisFunc, exc))
-            raise
-        sys.stderr.write("<<leaving {} (ret: {!r})\n".format(thisFunc, ret))
-        return ret

-    z.__name__ = f.__name__
-
-    return z
-
-
-def srange(s: str) -> str:
-    r"""Helper to easily define string ranges for use in :class:`Word`
-    construction. Borrows syntax from regexp ``'[]'`` string range
-    definitions::
-
-        srange("[0-9]")   -> "0123456789"
-        srange("[a-z]")   -> "abcdefghijklmnopqrstuvwxyz"
-        srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
-
-    The input string must be enclosed in []'s, and the returned string
-    is the expanded character set joined into a single string. The
-    values enclosed in the []'s may be:
-
-    - a single character
-    - an escaped character with a leading backslash (such as ``\-``
-      or ``\]``)
-    - an escaped hex character with a leading ``'\x'``
-      (``\x21``, which is a ``'!'`` character) (``\0x##``
-      is also supported for backwards compatibility)
-    - an escaped octal character with a leading ``'\0'``
-      (``\041``, which is a ``'!'`` character)
-    - a range of any of the above, separated by a dash (``'a-z'``,
-      etc.)
-    - any combination of the above (``'aeiouy'``,
-      ``'a-zA-Z0-9_$'``, etc.)
- """ - _expanded = ( - lambda p: p - if not isinstance(p, ParseResults) - else "".join(chr(c) for c in range(ord(p[0]), ord(p[1]) + 1)) - ) - try: - return "".join(_expanded(part) for part in _reBracketExpr.parse_string(s).body) - except Exception: - return "" - - -def token_map(func, *args) -> ParseAction: - """Helper to define a parse action by mapping a function to all - elements of a :class:`ParseResults` list. If any additional args are passed, - they are forwarded to the given function as additional arguments - after the token, as in - ``hex_integer = Word(hexnums).set_parse_action(token_map(int, 16))``, - which will convert the parsed data to an integer using base 16. - - Example (compare the last to example in :class:`ParserElement.transform_string`:: - - hex_ints = Word(hexnums)[1, ...].set_parse_action(token_map(int, 16)) - hex_ints.run_tests(''' - 00 11 22 aa FF 0a 0d 1a - ''') - - upperword = Word(alphas).set_parse_action(token_map(str.upper)) - upperword[1, ...].run_tests(''' - my kingdom for a horse - ''') - - wd = Word(alphas).set_parse_action(token_map(str.title)) - wd[1, ...].set_parse_action(' '.join).run_tests(''' - now is the winter of our discontent made glorious summer by this sun of york - ''') - - prints:: - - 00 11 22 aa FF 0a 0d 1a - [0, 17, 34, 170, 255, 10, 13, 26] - - my kingdom for a horse - ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE'] - - now is the winter of our discontent made glorious summer by this sun of york - ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York'] - """ - - def pa(s, l, t): - return [func(tokn, *args) for tokn in t] - - func_name = getattr(func, "__name__", getattr(func, "__class__").__name__) - pa.__name__ = func_name - - return pa - - -def autoname_elements() -> None: - """ - Utility to simplify mass-naming of parser elements, for - generating railroad diagram with named subdiagrams. 
- """ - for name, var in sys._getframe().f_back.f_locals.items(): - if isinstance(var, ParserElement) and not var.customName: - var.set_name(name) - - -dbl_quoted_string = Combine( - Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"' -).set_name("string enclosed in double quotes") - -sgl_quoted_string = Combine( - Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'" -).set_name("string enclosed in single quotes") - -quoted_string = Combine( - Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"' - | Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'" -).set_name("quotedString using single or double quotes") - -unicode_string = Combine("u" + quoted_string.copy()).set_name("unicode string literal") - - -alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]") -punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]") - -# build list of built-in expressions, for future reference if a global default value -# gets updated -_builtin_exprs: List[ParserElement] = [ - v for v in vars().values() if isinstance(v, ParserElement) -] - -# backward compatibility names -tokenMap = token_map -conditionAsParseAction = condition_as_parse_action -nullDebugAction = null_debug_action -sglQuotedString = sgl_quoted_string -dblQuotedString = dbl_quoted_string -quotedString = quoted_string -unicodeString = unicode_string -lineStart = line_start -lineEnd = line_end -stringStart = string_start -stringEnd = string_end -traceParseAction = trace_parse_action diff --git a/venv/lib/python3.10/site-packages/setuptools/_vendor/pyparsing/diagram/__init__.py b/venv/lib/python3.10/site-packages/setuptools/_vendor/pyparsing/diagram/__init__.py deleted file mode 100644 index 8986447..0000000 --- a/venv/lib/python3.10/site-packages/setuptools/_vendor/pyparsing/diagram/__init__.py +++ /dev/null @@ -1,642 +0,0 @@ -import railroad -import pyparsing -import typing -from typing import ( - List, - NamedTuple, - Generic, - TypeVar, - Dict, - Callable, - Set, - Iterable, -) -from jinja2 import Template -from io import StringIO -import inspect - - -jinja2_template_source = """\ - - - - {% if not head %} - - {% else %} - {{ head | safe }} - {% endif %} - - -{{ body | safe }} -{% for diagram in diagrams %} -
-    <div class="railroad-group">
-        <h1 class="railroad-heading">{{ diagram.title }}</h1>
-        <div>{{ diagram.text }}</div>
-        <div class="railroad-svg">
-            {{ diagram.svg }}
-        </div>
-    </div>
-{% endfor %} - - -""" - -template = Template(jinja2_template_source) - -# Note: ideally this would be a dataclass, but we're supporting Python 3.5+ so we can't do this yet -NamedDiagram = NamedTuple( - "NamedDiagram", - [("name", str), ("diagram", typing.Optional[railroad.DiagramItem]), ("index", int)], -) -""" -A simple structure for associating a name with a railroad diagram -""" - -T = TypeVar("T") - - -class EachItem(railroad.Group): - """ - Custom railroad item to compose a: - - Group containing a - - OneOrMore containing a - - Choice of the elements in the Each - with the group label indicating that all must be matched - """ - - all_label = "[ALL]" - - def __init__(self, *items): - choice_item = railroad.Choice(len(items) - 1, *items) - one_or_more_item = railroad.OneOrMore(item=choice_item) - super().__init__(one_or_more_item, label=self.all_label) - - -class AnnotatedItem(railroad.Group): - """ - Simple subclass of Group that creates an annotation label - """ - - def __init__(self, label: str, item): - super().__init__(item=item, label="[{}]".format(label) if label else label) - - -class EditablePartial(Generic[T]): - """ - Acts like a functools.partial, but can be edited. In other words, it represents a type that hasn't yet been - constructed. - """ - - # We need this here because the railroad constructors actually transform the data, so can't be called until the - # entire tree is assembled - - def __init__(self, func: Callable[..., T], args: list, kwargs: dict): - self.func = func - self.args = args - self.kwargs = kwargs - - @classmethod - def from_call(cls, func: Callable[..., T], *args, **kwargs) -> "EditablePartial[T]": - """ - If you call this function in the same way that you would call the constructor, it will store the arguments - as you expect. For example EditablePartial.from_call(Fraction, 1, 3)() == Fraction(1, 3) - """ - return EditablePartial(func=func, args=list(args), kwargs=kwargs) - - @property - def name(self): - return self.kwargs["name"] - - def __call__(self) -> T: - """ - Evaluate the partial and return the result - """ - args = self.args.copy() - kwargs = self.kwargs.copy() - - # This is a helpful hack to allow you to specify varargs parameters (e.g. *args) as keyword args (e.g. 
- # args=['list', 'of', 'things']) - arg_spec = inspect.getfullargspec(self.func) - if arg_spec.varargs in self.kwargs: - args += kwargs.pop(arg_spec.varargs) - - return self.func(*args, **kwargs) - - -def railroad_to_html(diagrams: List[NamedDiagram], **kwargs) -> str: - """ - Given a list of NamedDiagram, produce a single HTML string that visualises those diagrams - :params kwargs: kwargs to be passed in to the template - """ - data = [] - for diagram in diagrams: - if diagram.diagram is None: - continue - io = StringIO() - diagram.diagram.writeSvg(io.write) - title = diagram.name - if diagram.index == 0: - title += " (root)" - data.append({"title": title, "text": "", "svg": io.getvalue()}) - - return template.render(diagrams=data, **kwargs) - - -def resolve_partial(partial: "EditablePartial[T]") -> T: - """ - Recursively resolves a collection of Partials into whatever type they are - """ - if isinstance(partial, EditablePartial): - partial.args = resolve_partial(partial.args) - partial.kwargs = resolve_partial(partial.kwargs) - return partial() - elif isinstance(partial, list): - return [resolve_partial(x) for x in partial] - elif isinstance(partial, dict): - return {key: resolve_partial(x) for key, x in partial.items()} - else: - return partial - - -def to_railroad( - element: pyparsing.ParserElement, - diagram_kwargs: typing.Optional[dict] = None, - vertical: int = 3, - show_results_names: bool = False, - show_groups: bool = False, -) -> List[NamedDiagram]: - """ - Convert a pyparsing element tree into a list of diagrams. This is the recommended entrypoint to diagram - creation if you want to access the Railroad tree before it is converted to HTML - :param element: base element of the parser being diagrammed - :param diagram_kwargs: kwargs to pass to the Diagram() constructor - :param vertical: (optional) - int - limit at which number of alternatives should be - shown vertically instead of horizontally - :param show_results_names - bool to indicate whether results name annotations should be - included in the diagram - :param show_groups - bool to indicate whether groups should be highlighted with an unlabeled - surrounding box - """ - # Convert the whole tree underneath the root - lookup = ConverterState(diagram_kwargs=diagram_kwargs or {}) - _to_diagram_element( - element, - lookup=lookup, - parent=None, - vertical=vertical, - show_results_names=show_results_names, - show_groups=show_groups, - ) - - root_id = id(element) - # Convert the root if it hasn't been already - if root_id in lookup: - if not element.customName: - lookup[root_id].name = "" - lookup[root_id].mark_for_extraction(root_id, lookup, force=True) - - # Now that we're finished, we can convert from intermediate structures into Railroad elements - diags = list(lookup.diagrams.values()) - if len(diags) > 1: - # collapse out duplicate diags with the same name - seen = set() - deduped_diags = [] - for d in diags: - # don't extract SkipTo elements, they are uninformative as subdiagrams - if d.name == "...": - continue - if d.name is not None and d.name not in seen: - seen.add(d.name) - deduped_diags.append(d) - resolved = [resolve_partial(partial) for partial in deduped_diags] - else: - # special case - if just one diagram, always display it, even if - # it has no name - resolved = [resolve_partial(partial) for partial in diags] - return sorted(resolved, key=lambda diag: diag.index) - - -def _should_vertical( - specification: int, exprs: Iterable[pyparsing.ParserElement] -) -> bool: - """ - Returns true if we should return a 
vertical list of elements - """ - if specification is None: - return False - else: - return len(_visible_exprs(exprs)) >= specification - - -class ElementState: - """ - State recorded for an individual pyparsing Element - """ - - # Note: this should be a dataclass, but we have to support Python 3.5 - def __init__( - self, - element: pyparsing.ParserElement, - converted: EditablePartial, - parent: EditablePartial, - number: int, - name: str = None, - parent_index: typing.Optional[int] = None, - ): - #: The pyparsing element that this represents - self.element: pyparsing.ParserElement = element - #: The name of the element - self.name: typing.Optional[str] = name - #: The output Railroad element in an unconverted state - self.converted: EditablePartial = converted - #: The parent Railroad element, which we store so that we can extract this if it's duplicated - self.parent: EditablePartial = parent - #: The order in which we found this element, used for sorting diagrams if this is extracted into a diagram - self.number: int = number - #: The index of this inside its parent - self.parent_index: typing.Optional[int] = parent_index - #: If true, we should extract this out into a subdiagram - self.extract: bool = False - #: If true, all of this element's children have been filled out - self.complete: bool = False - - def mark_for_extraction( - self, el_id: int, state: "ConverterState", name: str = None, force: bool = False - ): - """ - Called when this instance has been seen twice, and thus should eventually be extracted into a sub-diagram - :param el_id: id of the element - :param state: element/diagram state tracker - :param name: name to use for this element's text - :param force: If true, force extraction now, regardless of the state of this. Only useful for extracting the - root element when we know we're finished - """ - self.extract = True - - # Set the name - if not self.name: - if name: - # Allow forcing a custom name - self.name = name - elif self.element.customName: - self.name = self.element.customName - else: - self.name = "" - - # Just because this is marked for extraction doesn't mean we can do it yet. We may have to wait for children - # to be added - # Also, if this is just a string literal etc, don't bother extracting it - if force or (self.complete and _worth_extracting(self.element)): - state.extract_into_diagram(el_id) - - -class ConverterState: - """ - Stores some state that persists between recursions into the element tree - """ - - def __init__(self, diagram_kwargs: typing.Optional[dict] = None): - #: A dictionary mapping ParserElements to state relating to them - self._element_diagram_states: Dict[int, ElementState] = {} - #: A dictionary mapping ParserElement IDs to subdiagrams generated from them - self.diagrams: Dict[int, EditablePartial[NamedDiagram]] = {} - #: The index of the next unnamed element - self.unnamed_index: int = 1 - #: The index of the next element. 
This is used for sorting - self.index: int = 0 - #: Shared kwargs that are used to customize the construction of diagrams - self.diagram_kwargs: dict = diagram_kwargs or {} - self.extracted_diagram_names: Set[str] = set() - - def __setitem__(self, key: int, value: ElementState): - self._element_diagram_states[key] = value - - def __getitem__(self, key: int) -> ElementState: - return self._element_diagram_states[key] - - def __delitem__(self, key: int): - del self._element_diagram_states[key] - - def __contains__(self, key: int): - return key in self._element_diagram_states - - def generate_unnamed(self) -> int: - """ - Generate a number used in the name of an otherwise unnamed diagram - """ - self.unnamed_index += 1 - return self.unnamed_index - - def generate_index(self) -> int: - """ - Generate a number used to index a diagram - """ - self.index += 1 - return self.index - - def extract_into_diagram(self, el_id: int): - """ - Used when we encounter the same token twice in the same tree. When this - happens, we replace all instances of that token with a terminal, and - create a new subdiagram for the token - """ - position = self[el_id] - - # Replace the original definition of this element with a regular block - if position.parent: - ret = EditablePartial.from_call(railroad.NonTerminal, text=position.name) - if "item" in position.parent.kwargs: - position.parent.kwargs["item"] = ret - elif "items" in position.parent.kwargs: - position.parent.kwargs["items"][position.parent_index] = ret - - # If the element we're extracting is a group, skip to its content but keep the title - if position.converted.func == railroad.Group: - content = position.converted.kwargs["item"] - else: - content = position.converted - - self.diagrams[el_id] = EditablePartial.from_call( - NamedDiagram, - name=position.name, - diagram=EditablePartial.from_call( - railroad.Diagram, content, **self.diagram_kwargs - ), - index=position.number, - ) - - del self[el_id] - - -def _worth_extracting(element: pyparsing.ParserElement) -> bool: - """ - Returns true if this element is worth having its own sub-diagram. 
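[Note: in normal use this converter state machinery is driven through ``ParserElement.create_diagram``; a minimal sketch, with an illustrative grammar and output filename, assuming pyparsing 3.x plus the ``railroad-diagrams`` and ``jinja2`` packages::

    import pyparsing as pp

    number = pp.Word(pp.nums).set_name("number")
    ident = pp.Word(pp.alphas).set_name("identifier")
    assignment = (ident + '=' + (number | ident)).set_name("assignment")

    # internally drives to_railroad() and railroad_to_html() as defined above
    assignment.create_diagram("assignment_diagram.html", show_results_names=True)
]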
Simply, if any of its children - themselves have children, then its complex enough to extract - """ - children = element.recurse() - return any(child.recurse() for child in children) - - -def _apply_diagram_item_enhancements(fn): - """ - decorator to ensure enhancements to a diagram item (such as results name annotations) - get applied on return from _to_diagram_element (we do this since there are several - returns in _to_diagram_element) - """ - - def _inner( - element: pyparsing.ParserElement, - parent: typing.Optional[EditablePartial], - lookup: ConverterState = None, - vertical: int = None, - index: int = 0, - name_hint: str = None, - show_results_names: bool = False, - show_groups: bool = False, - ) -> typing.Optional[EditablePartial]: - - ret = fn( - element, - parent, - lookup, - vertical, - index, - name_hint, - show_results_names, - show_groups, - ) - - # apply annotation for results name, if present - if show_results_names and ret is not None: - element_results_name = element.resultsName - if element_results_name: - # add "*" to indicate if this is a "list all results" name - element_results_name += "" if element.modalResults else "*" - ret = EditablePartial.from_call( - railroad.Group, item=ret, label=element_results_name - ) - - return ret - - return _inner - - -def _visible_exprs(exprs: Iterable[pyparsing.ParserElement]): - non_diagramming_exprs = ( - pyparsing.ParseElementEnhance, - pyparsing.PositionToken, - pyparsing.And._ErrorStop, - ) - return [ - e - for e in exprs - if not (e.customName or e.resultsName or isinstance(e, non_diagramming_exprs)) - ] - - -@_apply_diagram_item_enhancements -def _to_diagram_element( - element: pyparsing.ParserElement, - parent: typing.Optional[EditablePartial], - lookup: ConverterState = None, - vertical: int = None, - index: int = 0, - name_hint: str = None, - show_results_names: bool = False, - show_groups: bool = False, -) -> typing.Optional[EditablePartial]: - """ - Recursively converts a PyParsing Element to a railroad Element - :param lookup: The shared converter state that keeps track of useful things - :param index: The index of this element within the parent - :param parent: The parent of this element in the output tree - :param vertical: Controls at what point we make a list of elements vertical. If this is an integer (the default), - it sets the threshold of the number of items before we go vertical. 
If True, always go vertical, if False, never - do so - :param name_hint: If provided, this will override the generated name - :param show_results_names: bool flag indicating whether to add annotations for results names - :returns: The converted version of the input element, but as a Partial that hasn't yet been constructed - :param show_groups: bool flag indicating whether to show groups using bounding box - """ - exprs = element.recurse() - name = name_hint or element.customName or element.__class__.__name__ - - # Python's id() is used to provide a unique identifier for elements - el_id = id(element) - - element_results_name = element.resultsName - - # Here we basically bypass processing certain wrapper elements if they contribute nothing to the diagram - if not element.customName: - if isinstance( - element, - ( - # pyparsing.TokenConverter, - # pyparsing.Forward, - pyparsing.Located, - ), - ): - # However, if this element has a useful custom name, and its child does not, we can pass it on to the child - if exprs: - if not exprs[0].customName: - propagated_name = name - else: - propagated_name = None - - return _to_diagram_element( - element.expr, - parent=parent, - lookup=lookup, - vertical=vertical, - index=index, - name_hint=propagated_name, - show_results_names=show_results_names, - show_groups=show_groups, - ) - - # If the element isn't worth extracting, we always treat it as the first time we say it - if _worth_extracting(element): - if el_id in lookup: - # If we've seen this element exactly once before, we are only just now finding out that it's a duplicate, - # so we have to extract it into a new diagram. - looked_up = lookup[el_id] - looked_up.mark_for_extraction(el_id, lookup, name=name_hint) - ret = EditablePartial.from_call(railroad.NonTerminal, text=looked_up.name) - return ret - - elif el_id in lookup.diagrams: - # If we have seen the element at least twice before, and have already extracted it into a subdiagram, we - # just put in a marker element that refers to the sub-diagram - ret = EditablePartial.from_call( - railroad.NonTerminal, text=lookup.diagrams[el_id].kwargs["name"] - ) - return ret - - # Recursively convert child elements - # Here we find the most relevant Railroad element for matching pyparsing Element - # We use ``items=[]`` here to hold the place for where the child elements will go once created - if isinstance(element, pyparsing.And): - # detect And's created with ``expr*N`` notation - for these use a OneOrMore with a repeat - # (all will have the same name, and resultsName) - if not exprs: - return None - if len(set((e.name, e.resultsName) for e in exprs)) == 1: - ret = EditablePartial.from_call( - railroad.OneOrMore, item="", repeat=str(len(exprs)) - ) - elif _should_vertical(vertical, exprs): - ret = EditablePartial.from_call(railroad.Stack, items=[]) - else: - ret = EditablePartial.from_call(railroad.Sequence, items=[]) - elif isinstance(element, (pyparsing.Or, pyparsing.MatchFirst)): - if not exprs: - return None - if _should_vertical(vertical, exprs): - ret = EditablePartial.from_call(railroad.Choice, 0, items=[]) - else: - ret = EditablePartial.from_call(railroad.HorizontalChoice, items=[]) - elif isinstance(element, pyparsing.Each): - if not exprs: - return None - ret = EditablePartial.from_call(EachItem, items=[]) - elif isinstance(element, pyparsing.NotAny): - ret = EditablePartial.from_call(AnnotatedItem, label="NOT", item="") - elif isinstance(element, pyparsing.FollowedBy): - ret = EditablePartial.from_call(AnnotatedItem, label="LOOKAHEAD", 
item="") - elif isinstance(element, pyparsing.PrecededBy): - ret = EditablePartial.from_call(AnnotatedItem, label="LOOKBEHIND", item="") - elif isinstance(element, pyparsing.Group): - if show_groups: - ret = EditablePartial.from_call(AnnotatedItem, label="", item="") - else: - ret = EditablePartial.from_call(railroad.Group, label="", item="") - elif isinstance(element, pyparsing.TokenConverter): - ret = EditablePartial.from_call( - AnnotatedItem, label=type(element).__name__.lower(), item="" - ) - elif isinstance(element, pyparsing.Opt): - ret = EditablePartial.from_call(railroad.Optional, item="") - elif isinstance(element, pyparsing.OneOrMore): - ret = EditablePartial.from_call(railroad.OneOrMore, item="") - elif isinstance(element, pyparsing.ZeroOrMore): - ret = EditablePartial.from_call(railroad.ZeroOrMore, item="") - elif isinstance(element, pyparsing.Group): - ret = EditablePartial.from_call( - railroad.Group, item=None, label=element_results_name - ) - elif isinstance(element, pyparsing.Empty) and not element.customName: - # Skip unnamed "Empty" elements - ret = None - elif len(exprs) > 1: - ret = EditablePartial.from_call(railroad.Sequence, items=[]) - elif len(exprs) > 0 and not element_results_name: - ret = EditablePartial.from_call(railroad.Group, item="", label=name) - else: - terminal = EditablePartial.from_call(railroad.Terminal, element.defaultName) - ret = terminal - - if ret is None: - return - - # Indicate this element's position in the tree so we can extract it if necessary - lookup[el_id] = ElementState( - element=element, - converted=ret, - parent=parent, - parent_index=index, - number=lookup.generate_index(), - ) - if element.customName: - lookup[el_id].mark_for_extraction(el_id, lookup, element.customName) - - i = 0 - for expr in exprs: - # Add a placeholder index in case we have to extract the child before we even add it to the parent - if "items" in ret.kwargs: - ret.kwargs["items"].insert(i, None) - - item = _to_diagram_element( - expr, - parent=ret, - lookup=lookup, - vertical=vertical, - index=i, - show_results_names=show_results_names, - show_groups=show_groups, - ) - - # Some elements don't need to be shown in the diagram - if item is not None: - if "item" in ret.kwargs: - ret.kwargs["item"] = item - elif "items" in ret.kwargs: - # If we've already extracted the child, don't touch this index, since it's occupied by a nonterminal - ret.kwargs["items"][i] = item - i += 1 - elif "items" in ret.kwargs: - # If we're supposed to skip this element, remove it from the parent - del ret.kwargs["items"][i] - - # If all this items children are none, skip this item - if ret and ( - ("items" in ret.kwargs and len(ret.kwargs["items"]) == 0) - or ("item" in ret.kwargs and ret.kwargs["item"] is None) - ): - ret = EditablePartial.from_call(railroad.Terminal, name) - - # Mark this element as "complete", ie it has all of its children - if el_id in lookup: - lookup[el_id].complete = True - - if el_id in lookup and lookup[el_id].extract and lookup[el_id].complete: - lookup.extract_into_diagram(el_id) - if ret is not None: - ret = EditablePartial.from_call( - railroad.NonTerminal, text=lookup.diagrams[el_id].kwargs["name"] - ) - - return ret diff --git a/venv/lib/python3.10/site-packages/setuptools/_vendor/pyparsing/exceptions.py b/venv/lib/python3.10/site-packages/setuptools/_vendor/pyparsing/exceptions.py deleted file mode 100644 index a38447b..0000000 --- a/venv/lib/python3.10/site-packages/setuptools/_vendor/pyparsing/exceptions.py +++ /dev/null @@ -1,267 +0,0 @@ -# 
exceptions.py - -import re -import sys -import typing - -from .util import col, line, lineno, _collapse_string_to_ranges -from .unicode import pyparsing_unicode as ppu - - -class ExceptionWordUnicode(ppu.Latin1, ppu.LatinA, ppu.LatinB, ppu.Greek, ppu.Cyrillic): - pass - - -_extract_alphanums = _collapse_string_to_ranges(ExceptionWordUnicode.alphanums) -_exception_word_extractor = re.compile("([" + _extract_alphanums + "]{1,16})|.") - - -class ParseBaseException(Exception): - """base exception class for all parsing runtime exceptions""" - - # Performance tuning: we construct a *lot* of these, so keep this - # constructor as small and fast as possible - def __init__( - self, - pstr: str, - loc: int = 0, - msg: typing.Optional[str] = None, - elem=None, - ): - self.loc = loc - if msg is None: - self.msg = pstr - self.pstr = "" - else: - self.msg = msg - self.pstr = pstr - self.parser_element = self.parserElement = elem - self.args = (pstr, loc, msg) - - @staticmethod - def explain_exception(exc, depth=16): - """ - Method to take an exception and translate the Python internal traceback into a list - of the pyparsing expressions that caused the exception to be raised. - - Parameters: - - - exc - exception raised during parsing (need not be a ParseException, in support - of Python exceptions that might be raised in a parse action) - - depth (default=16) - number of levels back in the stack trace to list expression - and function names; if None, the full stack trace names will be listed; if 0, only - the failing input line, marker, and exception string will be shown - - Returns a multi-line string listing the ParserElements and/or function names in the - exception's stack trace. - """ - import inspect - from .core import ParserElement - - if depth is None: - depth = sys.getrecursionlimit() - ret = [] - if isinstance(exc, ParseBaseException): - ret.append(exc.line) - ret.append(" " * (exc.column - 1) + "^") - ret.append("{}: {}".format(type(exc).__name__, exc)) - - if depth > 0: - callers = inspect.getinnerframes(exc.__traceback__, context=depth) - seen = set() - for i, ff in enumerate(callers[-depth:]): - frm = ff[0] - - f_self = frm.f_locals.get("self", None) - if isinstance(f_self, ParserElement): - if frm.f_code.co_name not in ("parseImpl", "_parseNoCache"): - continue - if id(f_self) in seen: - continue - seen.add(id(f_self)) - - self_type = type(f_self) - ret.append( - "{}.{} - {}".format( - self_type.__module__, self_type.__name__, f_self - ) - ) - - elif f_self is not None: - self_type = type(f_self) - ret.append("{}.{}".format(self_type.__module__, self_type.__name__)) - - else: - code = frm.f_code - if code.co_name in ("wrapper", ""): - continue - - ret.append("{}".format(code.co_name)) - - depth -= 1 - if not depth: - break - - return "\n".join(ret) - - @classmethod - def _from_exception(cls, pe): - """ - internal factory method to simplify creating one type of ParseException - from another - avoids having __init__ signature conflicts among subclasses - """ - return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement) - - @property - def line(self) -> str: - """ - Return the line of text where the exception occurred. - """ - return line(self.loc, self.pstr) - - @property - def lineno(self) -> int: - """ - Return the 1-based line number of text where the exception occurred. - """ - return lineno(self.loc, self.pstr) - - @property - def col(self) -> int: - """ - Return the 1-based column on the line of text where the exception occurred. 
- """ - return col(self.loc, self.pstr) - - @property - def column(self) -> int: - """ - Return the 1-based column on the line of text where the exception occurred. - """ - return col(self.loc, self.pstr) - - def __str__(self) -> str: - if self.pstr: - if self.loc >= len(self.pstr): - foundstr = ", found end of text" - else: - # pull out next word at error location - found_match = _exception_word_extractor.match(self.pstr, self.loc) - if found_match is not None: - found = found_match.group(0) - else: - found = self.pstr[self.loc : self.loc + 1] - foundstr = (", found %r" % found).replace(r"\\", "\\") - else: - foundstr = "" - return "{}{} (at char {}), (line:{}, col:{})".format( - self.msg, foundstr, self.loc, self.lineno, self.column - ) - - def __repr__(self): - return str(self) - - def mark_input_line(self, marker_string: str = None, *, markerString=">!<") -> str: - """ - Extracts the exception line from the input string, and marks - the location of the exception with a special symbol. - """ - markerString = marker_string if marker_string is not None else markerString - line_str = self.line - line_column = self.column - 1 - if markerString: - line_str = "".join( - (line_str[:line_column], markerString, line_str[line_column:]) - ) - return line_str.strip() - - def explain(self, depth=16) -> str: - """ - Method to translate the Python internal traceback into a list - of the pyparsing expressions that caused the exception to be raised. - - Parameters: - - - depth (default=16) - number of levels back in the stack trace to list expression - and function names; if None, the full stack trace names will be listed; if 0, only - the failing input line, marker, and exception string will be shown - - Returns a multi-line string listing the ParserElements and/or function names in the - exception's stack trace. - - Example:: - - expr = pp.Word(pp.nums) * 3 - try: - expr.parse_string("123 456 A789") - except pp.ParseException as pe: - print(pe.explain(depth=0)) - - prints:: - - 123 456 A789 - ^ - ParseException: Expected W:(0-9), found 'A' (at char 8), (line:1, col:9) - - Note: the diagnostic output will include string representations of the expressions - that failed to parse. These representations will be more helpful if you use `set_name` to - give identifiable names to your expressions. Otherwise they will use the default string - forms, which may be cryptic to read. - - Note: pyparsing's default truncation of exception tracebacks may also truncate the - stack of expressions that are displayed in the ``explain`` output. 
To get the full listing - of parser expressions, you may have to set ``ParserElement.verbose_stacktrace = True`` - """ - return self.explain_exception(self, depth) - - markInputline = mark_input_line - - -class ParseException(ParseBaseException): - """ - Exception thrown when a parse expression doesn't match the input string - - Example:: - - try: - Word(nums).set_name("integer").parse_string("ABC") - except ParseException as pe: - print(pe) - print("column: {}".format(pe.column)) - - prints:: - - Expected integer (at char 0), (line:1, col:1) - column: 1 - - """ - - -class ParseFatalException(ParseBaseException): - """ - User-throwable exception thrown when inconsistent parse content - is found; stops all parsing immediately - """ - - -class ParseSyntaxException(ParseFatalException): - """ - Just like :class:`ParseFatalException`, but thrown internally - when an :class:`ErrorStop` ('-' operator) indicates - that parsing is to stop immediately because an unbacktrackable - syntax error has been found. - """ - - -class RecursiveGrammarException(Exception): - """ - Exception thrown by :class:`ParserElement.validate` if the - grammar could be left-recursive; parser may need to enable - left recursion using :class:`ParserElement.enable_left_recursion` - """ - - def __init__(self, parseElementList): - self.parseElementTrace = parseElementList - - def __str__(self) -> str: - return "RecursiveGrammarException: {}".format(self.parseElementTrace) diff --git a/venv/lib/python3.10/site-packages/setuptools/_vendor/pyparsing/helpers.py b/venv/lib/python3.10/site-packages/setuptools/_vendor/pyparsing/helpers.py deleted file mode 100644 index 9588b3b..0000000 --- a/venv/lib/python3.10/site-packages/setuptools/_vendor/pyparsing/helpers.py +++ /dev/null @@ -1,1088 +0,0 @@ -# helpers.py -import html.entities -import re -import typing - -from . import __diag__ -from .core import * -from .util import _bslash, _flatten, _escape_regex_range_chars - - -# -# global helpers -# -def delimited_list( - expr: Union[str, ParserElement], - delim: Union[str, ParserElement] = ",", - combine: bool = False, - min: typing.Optional[int] = None, - max: typing.Optional[int] = None, - *, - allow_trailing_delim: bool = False, -) -> ParserElement: - """Helper to define a delimited list of expressions - the delimiter - defaults to ','. By default, the list elements and delimiters can - have intervening whitespace, and comments, but this can be - overridden by passing ``combine=True`` in the constructor. If - ``combine`` is set to ``True``, the matching tokens are - returned as a single token string, with the delimiters included; - otherwise, the matching tokens are returned as a list of tokens, - with the delimiters suppressed. - - If ``allow_trailing_delim`` is set to True, then the list may end with - a delimiter. 
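[Note: the docstring examples below do not show ``allow_trailing_delim``; a small sketch, assuming pyparsing 3.x::

    import pyparsing as pp

    # with allow_trailing_delim=True the dangling ',' is consumed
    # instead of being left behind in the input
    items = pp.delimited_list(pp.Word(pp.alphas), allow_trailing_delim=True)
    print(items.parse_string("aa, bb, cc,"))  # -> ['aa', 'bb', 'cc']
]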
- - Example:: - - delimited_list(Word(alphas)).parse_string("aa,bb,cc") # -> ['aa', 'bb', 'cc'] - delimited_list(Word(hexnums), delim=':', combine=True).parse_string("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE'] - """ - if isinstance(expr, str_type): - expr = ParserElement._literalStringClass(expr) - - dlName = "{expr} [{delim} {expr}]...{end}".format( - expr=str(expr.copy().streamline()), - delim=str(delim), - end=" [{}]".format(str(delim)) if allow_trailing_delim else "", - ) - - if not combine: - delim = Suppress(delim) - - if min is not None: - if min < 1: - raise ValueError("min must be greater than 0") - min -= 1 - if max is not None: - if min is not None and max <= min: - raise ValueError("max must be greater than, or equal to min") - max -= 1 - delimited_list_expr = expr + (delim + expr)[min, max] - - if allow_trailing_delim: - delimited_list_expr += Opt(delim) - - if combine: - return Combine(delimited_list_expr).set_name(dlName) - else: - return delimited_list_expr.set_name(dlName) - - -def counted_array( - expr: ParserElement, - int_expr: typing.Optional[ParserElement] = None, - *, - intExpr: typing.Optional[ParserElement] = None, -) -> ParserElement: - """Helper to define a counted list of expressions. - - This helper defines a pattern of the form:: - - integer expr expr expr... - - where the leading integer tells how many expr expressions follow. - The matched tokens returns the array of expr tokens as a list - the - leading count token is suppressed. - - If ``int_expr`` is specified, it should be a pyparsing expression - that produces an integer value. - - Example:: - - counted_array(Word(alphas)).parse_string('2 ab cd ef') # -> ['ab', 'cd'] - - # in this parser, the leading integer value is given in binary, - # '10' indicating that 2 values are in the array - binary_constant = Word('01').set_parse_action(lambda t: int(t[0], 2)) - counted_array(Word(alphas), int_expr=binary_constant).parse_string('10 ab cd ef') # -> ['ab', 'cd'] - - # if other fields must be parsed after the count but before the - # list items, give the fields results names and they will - # be preserved in the returned ParseResults: - count_with_metadata = integer + Word(alphas)("type") - typed_array = counted_array(Word(alphanums), int_expr=count_with_metadata)("items") - result = typed_array.parse_string("3 bool True True False") - print(result.dump()) - - # prints - # ['True', 'True', 'False'] - # - items: ['True', 'True', 'False'] - # - type: 'bool' - """ - intExpr = intExpr or int_expr - array_expr = Forward() - - def count_field_parse_action(s, l, t): - nonlocal array_expr - n = t[0] - array_expr <<= (expr * n) if n else Empty() - # clear list contents, but keep any named results - del t[:] - - if intExpr is None: - intExpr = Word(nums).set_parse_action(lambda t: int(t[0])) - else: - intExpr = intExpr.copy() - intExpr.set_name("arrayLen") - intExpr.add_parse_action(count_field_parse_action, call_during_try=True) - return (intExpr + array_expr).set_name("(len) " + str(expr) + "...") - - -def match_previous_literal(expr: ParserElement) -> ParserElement: - """Helper to define an expression that is indirectly defined from - the tokens matched in a previous expression, that is, it looks for - a 'repeat' of a previous expression. For example:: - - first = Word(nums) - second = match_previous_literal(first) - match_expr = first + ":" + second - - will match ``"1:1"``, but not ``"1:2"``. Because this - matches a previous literal, will also match the leading - ``"1:1"`` in ``"1:10"``. 
If this is not desired, use - :class:`match_previous_expr`. Do *not* use with packrat parsing - enabled. - """ - rep = Forward() - - def copy_token_to_repeater(s, l, t): - if t: - if len(t) == 1: - rep << t[0] - else: - # flatten t tokens - tflat = _flatten(t.as_list()) - rep << And(Literal(tt) for tt in tflat) - else: - rep << Empty() - - expr.add_parse_action(copy_token_to_repeater, callDuringTry=True) - rep.set_name("(prev) " + str(expr)) - return rep - - -def match_previous_expr(expr: ParserElement) -> ParserElement: - """Helper to define an expression that is indirectly defined from - the tokens matched in a previous expression, that is, it looks for - a 'repeat' of a previous expression. For example:: - - first = Word(nums) - second = match_previous_expr(first) - match_expr = first + ":" + second - - will match ``"1:1"``, but not ``"1:2"``. Because this - matches by expressions, will *not* match the leading ``"1:1"`` - in ``"1:10"``; the expressions are evaluated first, and then - compared, so ``"1"`` is compared with ``"10"``. Do *not* use - with packrat parsing enabled. - """ - rep = Forward() - e2 = expr.copy() - rep <<= e2 - - def copy_token_to_repeater(s, l, t): - matchTokens = _flatten(t.as_list()) - - def must_match_these_tokens(s, l, t): - theseTokens = _flatten(t.as_list()) - if theseTokens != matchTokens: - raise ParseException( - s, l, "Expected {}, found{}".format(matchTokens, theseTokens) - ) - - rep.set_parse_action(must_match_these_tokens, callDuringTry=True) - - expr.add_parse_action(copy_token_to_repeater, callDuringTry=True) - rep.set_name("(prev) " + str(expr)) - return rep - - -def one_of( - strs: Union[typing.Iterable[str], str], - caseless: bool = False, - use_regex: bool = True, - as_keyword: bool = False, - *, - useRegex: bool = True, - asKeyword: bool = False, -) -> ParserElement: - """Helper to quickly define a set of alternative :class:`Literal` s, - and makes sure to do longest-first testing when there is a conflict, - regardless of the input order, but returns - a :class:`MatchFirst` for best performance. 
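[Note: the longest-first reordering loop below is what makes multi-character alternatives win; a quick sketch, assuming pyparsing 3.x::

    import pyparsing as pp

    # '<=' is tried before '<' regardless of the order given
    op = pp.one_of("< > <= >= = !=")
    print(op.parse_string("<="))  # -> ['<=']
]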
- - Parameters: - - - ``strs`` - a string of space-delimited literals, or a collection of - string literals - - ``caseless`` - treat all literals as caseless - (default= ``False``) - - ``use_regex`` - as an optimization, will - generate a :class:`Regex` object; otherwise, will generate - a :class:`MatchFirst` object (if ``caseless=True`` or ``asKeyword=True``, or if - creating a :class:`Regex` raises an exception) - (default= ``True``) - - ``as_keyword`` - enforce :class:`Keyword`-style matching on the - generated expressions - (default= ``False``) - - ``asKeyword`` and ``useRegex`` are retained for pre-PEP8 compatibility, - but will be removed in a future release - - Example:: - - comp_oper = one_of("< = > <= >= !=") - var = Word(alphas) - number = Word(nums) - term = var | number - comparison_expr = term + comp_oper + term - print(comparison_expr.search_string("B = 12 AA=23 B<=AA AA>12")) - - prints:: - - [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']] - """ - asKeyword = asKeyword or as_keyword - useRegex = useRegex and use_regex - - if ( - isinstance(caseless, str_type) - and __diag__.warn_on_multiple_string_args_to_oneof - ): - warnings.warn( - "More than one string argument passed to one_of, pass" - " choices as a list or space-delimited string", - stacklevel=2, - ) - - if caseless: - isequal = lambda a, b: a.upper() == b.upper() - masks = lambda a, b: b.upper().startswith(a.upper()) - parseElementClass = CaselessKeyword if asKeyword else CaselessLiteral - else: - isequal = lambda a, b: a == b - masks = lambda a, b: b.startswith(a) - parseElementClass = Keyword if asKeyword else Literal - - symbols: List[str] = [] - if isinstance(strs, str_type): - symbols = strs.split() - elif isinstance(strs, Iterable): - symbols = list(strs) - else: - raise TypeError("Invalid argument to one_of, expected string or iterable") - if not symbols: - return NoMatch() - - # reorder given symbols to take care to avoid masking longer choices with shorter ones - # (but only if the given symbols are not just single characters) - if any(len(sym) > 1 for sym in symbols): - i = 0 - while i < len(symbols) - 1: - cur = symbols[i] - for j, other in enumerate(symbols[i + 1 :]): - if isequal(other, cur): - del symbols[i + j + 1] - break - elif masks(cur, other): - del symbols[i + j + 1] - symbols.insert(i, other) - break - else: - i += 1 - - if useRegex: - re_flags: int = re.IGNORECASE if caseless else 0 - - try: - if all(len(sym) == 1 for sym in symbols): - # symbols are just single characters, create range regex pattern - patt = "[{}]".format( - "".join(_escape_regex_range_chars(sym) for sym in symbols) - ) - else: - patt = "|".join(re.escape(sym) for sym in symbols) - - # wrap with \b word break markers if defining as keywords - if asKeyword: - patt = r"\b(?:{})\b".format(patt) - - ret = Regex(patt, flags=re_flags).set_name(" | ".join(symbols)) - - if caseless: - # add parse action to return symbols as specified, not in random - # casing as found in input string - symbol_map = {sym.lower(): sym for sym in symbols} - ret.add_parse_action(lambda s, l, t: symbol_map[t[0].lower()]) - - return ret - - except re.error: - warnings.warn( - "Exception creating Regex for one_of, building MatchFirst", stacklevel=2 - ) - - # last resort, just use MatchFirst - return MatchFirst(parseElementClass(sym) for sym in symbols).set_name( - " | ".join(symbols) - ) - - -def dict_of(key: ParserElement, value: ParserElement) -> ParserElement: - """Helper to easily and clearly define a dictionary by specifying 
- the respective patterns for the key and value. Takes care of - defining the :class:`Dict`, :class:`ZeroOrMore`, and - :class:`Group` tokens in the proper order. The key pattern - can include delimiting markers or punctuation, as long as they are - suppressed, thereby leaving the significant key text. The value - pattern can include named results, so that the :class:`Dict` results - can include named token fields. - - Example:: - - text = "shape: SQUARE posn: upper left color: light blue texture: burlap" - attr_expr = (label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)) - print(attr_expr[1, ...].parse_string(text).dump()) - - attr_label = label - attr_value = Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join) - - # similar to Dict, but simpler call format - result = dict_of(attr_label, attr_value).parse_string(text) - print(result.dump()) - print(result['shape']) - print(result.shape) # object attribute access works too - print(result.as_dict()) - - prints:: - - [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']] - - color: 'light blue' - - posn: 'upper left' - - shape: 'SQUARE' - - texture: 'burlap' - SQUARE - SQUARE - {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'} - """ - return Dict(OneOrMore(Group(key + value))) - - -def original_text_for( - expr: ParserElement, as_string: bool = True, *, asString: bool = True -) -> ParserElement: - """Helper to return the original, untokenized text for a given - expression. Useful to restore the parsed fields of an HTML start - tag into the raw tag text itself, or to revert separate tokens with - intervening whitespace back to the original matching input text. By - default, returns astring containing the original parsed text. - - If the optional ``as_string`` argument is passed as - ``False``, then the return value is - a :class:`ParseResults` containing any results names that - were originally matched, and a single token containing the original - matched text from the input string. So if the expression passed to - :class:`original_text_for` contains expressions with defined - results names, you must set ``as_string`` to ``False`` if you - want to preserve those results name values. - - The ``asString`` pre-PEP8 argument is retained for compatibility, - but will be removed in a future release. - - Example:: - - src = "this is test bold text normal text " - for tag in ("b", "i"): - opener, closer = make_html_tags(tag) - patt = original_text_for(opener + SkipTo(closer) + closer) - print(patt.search_string(src)[0]) - - prints:: - - [' bold text '] - ['text'] - """ - asString = asString and as_string - - locMarker = Empty().set_parse_action(lambda s, loc, t: loc) - endlocMarker = locMarker.copy() - endlocMarker.callPreparse = False - matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end") - if asString: - extractText = lambda s, l, t: s[t._original_start : t._original_end] - else: - - def extractText(s, l, t): - t[:] = [s[t.pop("_original_start") : t.pop("_original_end")]] - - matchExpr.set_parse_action(extractText) - matchExpr.ignoreExprs = expr.ignoreExprs - matchExpr.suppress_warning(Diagnostics.warn_ungrouped_named_tokens_in_collection) - return matchExpr - - -def ungroup(expr: ParserElement) -> ParserElement: - """Helper to undo pyparsing's default grouping of And expressions, - even if all but one are non-empty. 
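[Note: ``ungroup`` carries no example in its docstring; a minimal sketch, assuming pyparsing 3.x::

    import pyparsing as pp

    grouped = pp.Group(pp.Word(pp.alphas) + pp.Word(pp.nums))
    print(grouped.parse_string("ab 12"))              # -> [['ab', '12']]
    print(pp.ungroup(grouped).parse_string("ab 12"))  # -> ['ab', '12']
]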
- """ - return TokenConverter(expr).add_parse_action(lambda t: t[0]) - - -def locatedExpr(expr: ParserElement) -> ParserElement: - """ - (DEPRECATED - future code should use the Located class) - Helper to decorate a returned token with its starting and ending - locations in the input string. - - This helper adds the following results names: - - - ``locn_start`` - location where matched expression begins - - ``locn_end`` - location where matched expression ends - - ``value`` - the actual parsed results - - Be careful if the input text contains ```` characters, you - may want to call :class:`ParserElement.parseWithTabs` - - Example:: - - wd = Word(alphas) - for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"): - print(match) - - prints:: - - [[0, 'ljsdf', 5]] - [[8, 'lksdjjf', 15]] - [[18, 'lkkjj', 23]] - """ - locator = Empty().set_parse_action(lambda ss, ll, tt: ll) - return Group( - locator("locn_start") - + expr("value") - + locator.copy().leaveWhitespace()("locn_end") - ) - - -def nested_expr( - opener: Union[str, ParserElement] = "(", - closer: Union[str, ParserElement] = ")", - content: typing.Optional[ParserElement] = None, - ignore_expr: ParserElement = quoted_string(), - *, - ignoreExpr: ParserElement = quoted_string(), -) -> ParserElement: - """Helper method for defining nested lists enclosed in opening and - closing delimiters (``"("`` and ``")"`` are the default). - - Parameters: - - ``opener`` - opening character for a nested list - (default= ``"("``); can also be a pyparsing expression - - ``closer`` - closing character for a nested list - (default= ``")"``); can also be a pyparsing expression - - ``content`` - expression for items within the nested lists - (default= ``None``) - - ``ignore_expr`` - expression for ignoring opening and closing delimiters - (default= :class:`quoted_string`) - - ``ignoreExpr`` - this pre-PEP8 argument is retained for compatibility - but will be removed in a future release - - If an expression is not provided for the content argument, the - nested expression will capture all whitespace-delimited content - between delimiters as a list of separate values. - - Use the ``ignore_expr`` argument to define expressions that may - contain opening or closing characters that should not be treated as - opening or closing characters for nesting, such as quoted_string or - a comment expression. Specify multiple expressions using an - :class:`Or` or :class:`MatchFirst`. The default is - :class:`quoted_string`, but if no expressions are to be ignored, then - pass ``None`` for this argument. 
- - Example:: - - data_type = one_of("void int short long char float double") - decl_data_type = Combine(data_type + Opt(Word('*'))) - ident = Word(alphas+'_', alphanums+'_') - number = pyparsing_common.number - arg = Group(decl_data_type + ident) - LPAR, RPAR = map(Suppress, "()") - - code_body = nested_expr('{', '}', ignore_expr=(quoted_string | c_style_comment)) - - c_function = (decl_data_type("type") - + ident("name") - + LPAR + Opt(delimited_list(arg), [])("args") + RPAR - + code_body("body")) - c_function.ignore(c_style_comment) - - source_code = ''' - int is_odd(int x) { - return (x%2); - } - - int dec_to_hex(char hchar) { - if (hchar >= '0' && hchar <= '9') { - return (ord(hchar)-ord('0')); - } else { - return (10+ord(hchar)-ord('A')); - } - } - ''' - for func in c_function.search_string(source_code): - print("%(name)s (%(type)s) args: %(args)s" % func) - - - prints:: - - is_odd (int) args: [['int', 'x']] - dec_to_hex (int) args: [['char', 'hchar']] - """ - if ignoreExpr != ignore_expr: - ignoreExpr = ignore_expr if ignoreExpr == quoted_string() else ignoreExpr - if opener == closer: - raise ValueError("opening and closing strings cannot be the same") - if content is None: - if isinstance(opener, str_type) and isinstance(closer, str_type): - if len(opener) == 1 and len(closer) == 1: - if ignoreExpr is not None: - content = Combine( - OneOrMore( - ~ignoreExpr - + CharsNotIn( - opener + closer + ParserElement.DEFAULT_WHITE_CHARS, - exact=1, - ) - ) - ).set_parse_action(lambda t: t[0].strip()) - else: - content = empty.copy() + CharsNotIn( - opener + closer + ParserElement.DEFAULT_WHITE_CHARS - ).set_parse_action(lambda t: t[0].strip()) - else: - if ignoreExpr is not None: - content = Combine( - OneOrMore( - ~ignoreExpr - + ~Literal(opener) - + ~Literal(closer) - + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1) - ) - ).set_parse_action(lambda t: t[0].strip()) - else: - content = Combine( - OneOrMore( - ~Literal(opener) - + ~Literal(closer) - + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1) - ) - ).set_parse_action(lambda t: t[0].strip()) - else: - raise ValueError( - "opening and closing arguments must be strings if no content expression is given" - ) - ret = Forward() - if ignoreExpr is not None: - ret <<= Group( - Suppress(opener) + ZeroOrMore(ignoreExpr | ret | content) + Suppress(closer) - ) - else: - ret <<= Group(Suppress(opener) + ZeroOrMore(ret | content) + Suppress(closer)) - ret.set_name("nested %s%s expression" % (opener, closer)) - return ret - - -def _makeTags(tagStr, xml, suppress_LT=Suppress("<"), suppress_GT=Suppress(">")): - """Internal helper to construct opening and closing tag expressions, given a tag name""" - if isinstance(tagStr, str_type): - resname = tagStr - tagStr = Keyword(tagStr, caseless=not xml) - else: - resname = tagStr.name - - tagAttrName = Word(alphas, alphanums + "_-:") - if xml: - tagAttrValue = dbl_quoted_string.copy().set_parse_action(remove_quotes) - openTag = ( - suppress_LT - + tagStr("tag") - + Dict(ZeroOrMore(Group(tagAttrName + Suppress("=") + tagAttrValue))) - + Opt("/", default=[False])("empty").set_parse_action( - lambda s, l, t: t[0] == "/" - ) - + suppress_GT - ) - else: - tagAttrValue = quoted_string.copy().set_parse_action(remove_quotes) | Word( - printables, exclude_chars=">" - ) - openTag = ( - suppress_LT - + tagStr("tag") - + Dict( - ZeroOrMore( - Group( - tagAttrName.set_parse_action(lambda t: t[0].lower()) - + Opt(Suppress("=") + tagAttrValue) - ) - ) - ) - + Opt("/", 
default=[False])("empty").set_parse_action( - lambda s, l, t: t[0] == "/" - ) - + suppress_GT - ) - closeTag = Combine(Literal("</") + tagStr + ">", adjacent=False) - - openTag.set_name("<%s>" % resname) - # add start<tagname> results name in parse action now that ungrouped names are not reported at two levels - openTag.add_parse_action( - lambda t: t.__setitem__( - "start" + "".join(resname.replace(":", " ").title().split()), t.copy() - ) - ) - closeTag = closeTag( - "end" + "".join(resname.replace(":", " ").title().split()) - ).set_name("</%s>" % resname) - openTag.tag = resname - closeTag.tag = resname - openTag.tag_body = SkipTo(closeTag()) - return openTag, closeTag - - -def make_html_tags( - tag_str: Union[str, ParserElement] -) -> Tuple[ParserElement, ParserElement]: - """Helper to construct opening and closing tag expressions for HTML, - given a tag name. Matches tags in either upper or lower case, - attributes with namespaces and with quoted or unquoted values. - - Example:: - - text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>' - # make_html_tags returns pyparsing expressions for the opening and - # closing tags as a 2-tuple - a, a_end = make_html_tags("A") - link_expr = a + SkipTo(a_end)("link_text") + a_end - - for link in link_expr.search_string(text): - # attributes in the <A> tag (like "href" shown here) are - # also accessible as named results - print(link.link_text, '->', link.href) - - prints:: - - pyparsing -> https://github.com/pyparsing/pyparsing/wiki - """ - return _makeTags(tag_str, False) - - -def make_xml_tags( - tag_str: Union[str, ParserElement] -) -> Tuple[ParserElement, ParserElement]: - """Helper to construct opening and closing tag expressions for XML, - given a tag name. Matches tags only in the given upper/lower case. - - Example: similar to :class:`make_html_tags` - """ - return _makeTags(tag_str, True) - - -any_open_tag: ParserElement -any_close_tag: ParserElement -any_open_tag, any_close_tag = make_html_tags( - Word(alphas, alphanums + "_:").set_name("any tag") -) - -_htmlEntityMap = {k.rstrip(";"): v for k, v in html.entities.html5.items()} -common_html_entity = Regex("&(?P<entity>" + "|".join(_htmlEntityMap) + ");").set_name( - "common HTML entity" -) - - -def replace_html_entity(t): - """Helper parser action to replace common HTML entities with their special characters""" - return _htmlEntityMap.get(t.entity) - - -class OpAssoc(Enum): - LEFT = 1 - RIGHT = 2 - - -InfixNotationOperatorArgType = Union[ - ParserElement, str, Tuple[Union[ParserElement, str], Union[ParserElement, str]] -] -InfixNotationOperatorSpec = Union[ - Tuple[ - InfixNotationOperatorArgType, - int, - OpAssoc, - typing.Optional[ParseAction], - ], - Tuple[ - InfixNotationOperatorArgType, - int, - OpAssoc, - ], -] - - -def infix_notation( - base_expr: ParserElement, - op_list: List[InfixNotationOperatorSpec], - lpar: Union[str, ParserElement] = Suppress("("), - rpar: Union[str, ParserElement] = Suppress(")"), -) -> ParserElement: - """Helper method for constructing grammars of expressions made up of - operators working in a precedence hierarchy. Operators may be unary - or binary, left- or right-associative. Parse actions can also be - attached to operator expressions. The generated parser will also - recognize the use of parentheses to override operator precedences - (see example below). - - Note: if you define a deep operator list, you may see performance - issues when using infix_notation. See - :class:`ParserElement.enable_packrat` for a mechanism to potentially - improve your parser performance.
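A short sketch of that packrat suggestion, assuming pyparsing 3.x::

    import pyparsing as pp

    pp.ParserElement.enable_packrat()  # memoize intermediate parse results

    num = pp.pyparsing_common.number
    expr = pp.infix_notation(num, [
        ("**", 2, pp.OpAssoc.RIGHT),
        (pp.one_of("* /"), 2, pp.OpAssoc.LEFT),
        (pp.one_of("+ -"), 2, pp.OpAssoc.LEFT),
    ])
    print(expr.parse_string("2**3*4+1").as_list())
    # -> [[[[2, '**', 3], '*', 4], '+', 1]]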
- - Parameters: - - ``base_expr`` - expression representing the most basic operand to - be used in the expression - - ``op_list`` - list of tuples, one for each operator precedence level - in the expression grammar; each tuple is of the form ``(op_expr, - num_operands, right_left_assoc, (optional)parse_action)``, where: - - - ``op_expr`` is the pyparsing expression for the operator; may also - be a string, which will be converted to a Literal; if ``num_operands`` - is 3, ``op_expr`` is a tuple of two expressions, for the two - operators separating the 3 terms - - ``num_operands`` is the number of terms for this operator (must be 1, - 2, or 3) - - ``right_left_assoc`` is the indicator whether the operator is right - or left associative, using the pyparsing-defined constants - ``OpAssoc.RIGHT`` and ``OpAssoc.LEFT``. - - ``parse_action`` is the parse action to be associated with - expressions matching this operator expression (the parse action - tuple member may be omitted); if the parse action is passed - a tuple or list of functions, this is equivalent to calling - ``set_parse_action(*fn)`` - (:class:`ParserElement.set_parse_action`) - - ``lpar`` - expression for matching left-parentheses; if passed as a - str, then will be parsed as Suppress(lpar). If lpar is passed as - an expression (such as ``Literal('(')``), then it will be kept in - the parsed results, and grouped with them. (default= ``Suppress('(')``) - - ``rpar`` - expression for matching right-parentheses; if passed as a - str, then will be parsed as Suppress(rpar). If rpar is passed as - an expression (such as ``Literal(')')``), then it will be kept in - the parsed results, and grouped with them. (default= ``Suppress(')')``) - - Example:: - - # simple example of four-function arithmetic with ints and - # variable names - integer = pyparsing_common.signed_integer - varname = pyparsing_common.identifier - - arith_expr = infix_notation(integer | varname, - [ - ('-', 1, OpAssoc.RIGHT), - (one_of('* /'), 2, OpAssoc.LEFT), - (one_of('+ -'), 2, OpAssoc.LEFT), - ]) - - arith_expr.run_tests(''' - 5+3*6 - (5+3)*6 - -2--11 - ''', full_dump=False) - - prints:: - - 5+3*6 - [[5, '+', [3, '*', 6]]] - - (5+3)*6 - [[[5, '+', 3], '*', 6]] - - -2--11 - [[['-', 2], '-', ['-', 11]]] - """ - # captive version of FollowedBy that does not do parse actions or capture results names - class _FB(FollowedBy): - def parseImpl(self, instring, loc, doActions=True): - self.expr.try_parse(instring, loc) - return loc, [] - - _FB.__name__ = "FollowedBy>" - - ret = Forward() - if isinstance(lpar, str): - lpar = Suppress(lpar) - if isinstance(rpar, str): - rpar = Suppress(rpar) - - # if lpar and rpar are not suppressed, wrap in group - if not (isinstance(rpar, Suppress) and isinstance(rpar, Suppress)): - lastExpr = base_expr | Group(lpar + ret + rpar) - else: - lastExpr = base_expr | (lpar + ret + rpar) - - for i, operDef in enumerate(op_list): - opExpr, arity, rightLeftAssoc, pa = (operDef + (None,))[:4] - if isinstance(opExpr, str_type): - opExpr = ParserElement._literalStringClass(opExpr) - if arity == 3: - if not isinstance(opExpr, (tuple, list)) or len(opExpr) != 2: - raise ValueError( - "if numterms=3, opExpr must be a tuple or list of two expressions" - ) - opExpr1, opExpr2 = opExpr - term_name = "{}{} term".format(opExpr1, opExpr2) - else: - term_name = "{} term".format(opExpr) - - if not 1 <= arity <= 3: - raise ValueError("operator must be unary (1), binary (2), or ternary (3)") - - if rightLeftAssoc not in (OpAssoc.LEFT, OpAssoc.RIGHT): - raise 
ValueError("operator must indicate right or left associativity") - - thisExpr: Forward = Forward().set_name(term_name) - if rightLeftAssoc is OpAssoc.LEFT: - if arity == 1: - matchExpr = _FB(lastExpr + opExpr) + Group(lastExpr + opExpr[1, ...]) - elif arity == 2: - if opExpr is not None: - matchExpr = _FB(lastExpr + opExpr + lastExpr) + Group( - lastExpr + (opExpr + lastExpr)[1, ...] - ) - else: - matchExpr = _FB(lastExpr + lastExpr) + Group(lastExpr[2, ...]) - elif arity == 3: - matchExpr = _FB( - lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr - ) + Group(lastExpr + OneOrMore(opExpr1 + lastExpr + opExpr2 + lastExpr)) - elif rightLeftAssoc is OpAssoc.RIGHT: - if arity == 1: - # try to avoid LR with this extra test - if not isinstance(opExpr, Opt): - opExpr = Opt(opExpr) - matchExpr = _FB(opExpr.expr + thisExpr) + Group(opExpr + thisExpr) - elif arity == 2: - if opExpr is not None: - matchExpr = _FB(lastExpr + opExpr + thisExpr) + Group( - lastExpr + (opExpr + thisExpr)[1, ...] - ) - else: - matchExpr = _FB(lastExpr + thisExpr) + Group( - lastExpr + thisExpr[1, ...] - ) - elif arity == 3: - matchExpr = _FB( - lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr - ) + Group(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) - if pa: - if isinstance(pa, (tuple, list)): - matchExpr.set_parse_action(*pa) - else: - matchExpr.set_parse_action(pa) - thisExpr <<= (matchExpr | lastExpr).setName(term_name) - lastExpr = thisExpr - ret <<= lastExpr - return ret - - -def indentedBlock(blockStatementExpr, indentStack, indent=True, backup_stacks=[]): - """ - (DEPRECATED - use IndentedBlock class instead) - Helper method for defining space-delimited indentation blocks, - such as those used to define block statements in Python source code. - - Parameters: - - - ``blockStatementExpr`` - expression defining syntax of statement that - is repeated within the indented block - - ``indentStack`` - list created by caller to manage indentation stack - (multiple ``statementWithIndentedBlock`` expressions within a single - grammar should share a common ``indentStack``) - - ``indent`` - boolean indicating whether block must be indented beyond - the current level; set to ``False`` for block of left-most statements - (default= ``True``) - - A valid block must contain at least one ``blockStatement``. - - (Note that indentedBlock uses internal parse actions which make it - incompatible with packrat parsing.) - - Example:: - - data = ''' - def A(z): - A1 - B = 100 - G = A2 - A2 - A3 - B - def BB(a,b,c): - BB1 - def BBA(): - bba1 - bba2 - bba3 - C - D - def spam(x,y): - def eggs(z): - pass - ''' - - - indentStack = [1] - stmt = Forward() - - identifier = Word(alphas, alphanums) - funcDecl = ("def" + identifier + Group("(" + Opt(delimitedList(identifier)) + ")") + ":") - func_body = indentedBlock(stmt, indentStack) - funcDef = Group(funcDecl + func_body) - - rvalue = Forward() - funcCall = Group(identifier + "(" + Opt(delimitedList(rvalue)) + ")") - rvalue << (funcCall | identifier | Word(nums)) - assignment = Group(identifier + "=" + rvalue) - stmt << (funcDef | assignment | identifier) - - module_body = stmt[1, ...] 
- - parseTree = module_body.parseString(data) - parseTree.pprint() - - prints:: - - [['def', - 'A', - ['(', 'z', ')'], - ':', - [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]], - 'B', - ['def', - 'BB', - ['(', 'a', 'b', 'c', ')'], - ':', - [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]], - 'C', - 'D', - ['def', - 'spam', - ['(', 'x', 'y', ')'], - ':', - [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]] - """ - backup_stacks.append(indentStack[:]) - - def reset_stack(): - indentStack[:] = backup_stacks[-1] - - def checkPeerIndent(s, l, t): - if l >= len(s): - return - curCol = col(l, s) - if curCol != indentStack[-1]: - if curCol > indentStack[-1]: - raise ParseException(s, l, "illegal nesting") - raise ParseException(s, l, "not a peer entry") - - def checkSubIndent(s, l, t): - curCol = col(l, s) - if curCol > indentStack[-1]: - indentStack.append(curCol) - else: - raise ParseException(s, l, "not a subentry") - - def checkUnindent(s, l, t): - if l >= len(s): - return - curCol = col(l, s) - if not (indentStack and curCol in indentStack): - raise ParseException(s, l, "not an unindent") - if curCol < indentStack[-1]: - indentStack.pop() - - NL = OneOrMore(LineEnd().set_whitespace_chars("\t ").suppress()) - INDENT = (Empty() + Empty().set_parse_action(checkSubIndent)).set_name("INDENT") - PEER = Empty().set_parse_action(checkPeerIndent).set_name("") - UNDENT = Empty().set_parse_action(checkUnindent).set_name("UNINDENT") - if indent: - smExpr = Group( - Opt(NL) - + INDENT - + OneOrMore(PEER + Group(blockStatementExpr) + Opt(NL)) - + UNDENT - ) - else: - smExpr = Group( - Opt(NL) - + OneOrMore(PEER + Group(blockStatementExpr) + Opt(NL)) - + Opt(UNDENT) - ) - - # add a parse action to remove backup_stack from list of backups - smExpr.add_parse_action( - lambda: backup_stacks.pop(-1) and None if backup_stacks else None - ) - smExpr.set_fail_action(lambda a, b, c, d: reset_stack()) - blockStatementExpr.ignore(_bslash + LineEnd()) - return smExpr.set_name("indented block") - - -# it's easy to get these comment structures wrong - they're very common, so may as well make them available -c_style_comment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + "*/").set_name( - "C style comment" -) -"Comment of the form ``/* ... */``" - -html_comment = Regex(r"<!--[\s\S]*?-->").set_name("HTML comment") -"Comment of the form ``<!-- ... -->``" - -rest_of_line = Regex(r".*").leave_whitespace().set_name("rest of line") -dbl_slash_comment = Regex(r"//(?:\\\n|[^\n])*").set_name("// comment") -"Comment of the form ``// ... (to end of line)``" - -cpp_style_comment = Combine( - Regex(r"/\*(?:[^*]|\*(?!/))*") + "*/" | dbl_slash_comment -).set_name("C++ style comment") -"Comment of either form :class:`c_style_comment` or :class:`dbl_slash_comment`" - -java_style_comment = cpp_style_comment -"Same as :class:`cpp_style_comment`" - -python_style_comment = Regex(r"#.*").set_name("Python style comment") -"Comment of the form ``# ...
(to end of line)``" - - -# build list of built-in expressions, for future reference if a global default value -# gets updated -_builtin_exprs: List[ParserElement] = [ - v for v in vars().values() if isinstance(v, ParserElement) -] - - -# pre-PEP8 compatible names -delimitedList = delimited_list -countedArray = counted_array -matchPreviousLiteral = match_previous_literal -matchPreviousExpr = match_previous_expr -oneOf = one_of -dictOf = dict_of -originalTextFor = original_text_for -nestedExpr = nested_expr -makeHTMLTags = make_html_tags -makeXMLTags = make_xml_tags -anyOpenTag, anyCloseTag = any_open_tag, any_close_tag -commonHTMLEntity = common_html_entity -replaceHTMLEntity = replace_html_entity -opAssoc = OpAssoc -infixNotation = infix_notation -cStyleComment = c_style_comment -htmlComment = html_comment -restOfLine = rest_of_line -dblSlashComment = dbl_slash_comment -cppStyleComment = cpp_style_comment -javaStyleComment = java_style_comment -pythonStyleComment = python_style_comment diff --git a/venv/lib/python3.10/site-packages/setuptools/_vendor/pyparsing/results.py b/venv/lib/python3.10/site-packages/setuptools/_vendor/pyparsing/results.py deleted file mode 100644 index 00c9421..0000000 --- a/venv/lib/python3.10/site-packages/setuptools/_vendor/pyparsing/results.py +++ /dev/null @@ -1,760 +0,0 @@ -# results.py -from collections.abc import MutableMapping, Mapping, MutableSequence, Iterator -import pprint -from weakref import ref as wkref -from typing import Tuple, Any - -str_type: Tuple[type, ...] = (str, bytes) -_generator_type = type((_ for _ in ())) - - -class _ParseResultsWithOffset: - __slots__ = ["tup"] - - def __init__(self, p1, p2): - self.tup = (p1, p2) - - def __getitem__(self, i): - return self.tup[i] - - def __getstate__(self): - return self.tup - - def __setstate__(self, *args): - self.tup = args[0] - - -class ParseResults: - """Structured parse results, to provide multiple means of access to - the parsed data: - - - as a list (``len(results)``) - - by list index (``results[0], results[1]``, etc.) - - by attribute (``results.<results_name>`` - see :class:`ParserElement.set_results_name`) - - Example:: - - integer = Word(nums) - date_str = (integer.set_results_name("year") + '/' - + integer.set_results_name("month") + '/' - + integer.set_results_name("day")) - # equivalent form: - # date_str = (integer("year") + '/' - # + integer("month") + '/' - # + integer("day")) - - # parse_string returns a ParseResults object - result = date_str.parse_string("1999/12/31") - - def test(s, fn=repr): - print("{} -> {}".format(s, fn(eval(s)))) - test("list(result)") - test("result[0]") - test("result['month']") - test("result.day") - test("'month' in result") - test("'minutes' in result") - test("result.dump()", str) - - prints:: - - list(result) -> ['1999', '/', '12', '/', '31'] - result[0] -> '1999' - result['month'] -> '12' - result.day -> '31' - 'month' in result -> True - 'minutes' in result -> False - result.dump() -> ['1999', '/', '12', '/', '31'] - - day: '31' - - month: '12' - - year: '1999' - """ - - _null_values: Tuple[Any, ...]
= (None, [], "", ()) - - __slots__ = [ - "_name", - "_parent", - "_all_names", - "_modal", - "_toklist", - "_tokdict", - "__weakref__", - ] - - class List(list): - """ - Simple wrapper class to distinguish parsed list results that should be preserved - as actual Python lists, instead of being converted to :class:`ParseResults`: - - LBRACK, RBRACK = map(pp.Suppress, "[]") - element = pp.Forward() - item = ppc.integer - element_list = LBRACK + pp.delimited_list(element) + RBRACK - - # add parse actions to convert from ParseResults to actual Python collection types - def as_python_list(t): - return pp.ParseResults.List(t.as_list()) - element_list.add_parse_action(as_python_list) - - element <<= item | element_list - - element.run_tests(''' - 100 - [2,3,4] - [[2, 1],3,4] - [(2, 1),3,4] - (2,3,4) - ''', post_parse=lambda s, r: (r[0], type(r[0]))) - - prints: - - 100 - (100, <class 'int'>) - - [2,3,4] - ([2, 3, 4], <class 'pyparsing.ParseResults.List'>) - - [[2, 1],3,4] - ([[2, 1], 3, 4], <class 'pyparsing.ParseResults.List'>) - - (Used internally by :class:`Group` when `aslist=True`.) - """ - - def __new__(cls, contained=None): - if contained is None: - contained = [] - - if not isinstance(contained, list): - raise TypeError( - "{} may only be constructed with a list," - " not {}".format(cls.__name__, type(contained).__name__) - ) - - return list.__new__(cls) - - def __new__(cls, toklist=None, name=None, **kwargs): - if isinstance(toklist, ParseResults): - return toklist - self = object.__new__(cls) - self._name = None - self._parent = None - self._all_names = set() - - if toklist is None: - self._toklist = [] - elif isinstance(toklist, (list, _generator_type)): - self._toklist = ( - [toklist[:]] - if isinstance(toklist, ParseResults.List) - else list(toklist) - ) - else: - self._toklist = [toklist] - self._tokdict = dict() - return self - - # Performance tuning: we construct a *lot* of these, so keep this - # constructor as small and fast as possible - def __init__( - self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance - ): - self._modal = modal - if name is not None and name != "": - if isinstance(name, int): - name = str(name) - if not modal: - self._all_names = {name} - self._name = name - if toklist not in self._null_values: - if isinstance(toklist, (str_type, type)): - toklist = [toklist] - if asList: - if isinstance(toklist, ParseResults): - self[name] = _ParseResultsWithOffset( - ParseResults(toklist._toklist), 0 - ) - else: - self[name] = _ParseResultsWithOffset( - ParseResults(toklist[0]), 0 - ) - self[name]._name = name - else: - try: - self[name] = toklist[0] - except (KeyError, TypeError, IndexError): - if toklist is not self: - self[name] = toklist - else: - self._name = name - - def __getitem__(self, i): - if isinstance(i, (int, slice)): - return self._toklist[i] - else: - if i not in self._all_names: - return self._tokdict[i][-1][0] - else: - return ParseResults([v[0] for v in self._tokdict[i]]) - - def __setitem__(self, k, v, isinstance=isinstance): - if isinstance(v, _ParseResultsWithOffset): - self._tokdict[k] = self._tokdict.get(k, list()) + [v] - sub = v[0] - elif isinstance(k, (int, slice)): - self._toklist[k] = v - sub = v - else: - self._tokdict[k] = self._tokdict.get(k, list()) + [ - _ParseResultsWithOffset(v, 0) - ] - sub = v - if isinstance(sub, ParseResults): - sub._parent = wkref(self) - - def __delitem__(self, i): - if isinstance(i, (int, slice)): - mylen = len(self._toklist) - del self._toklist[i] - - # convert int to slice - if isinstance(i, int): - if i < 0: - i += mylen - i = slice(i, i + 1) - # get removed indices -
removed = list(range(*i.indices(mylen))) - removed.reverse() - # fixup indices in token dictionary - for name, occurrences in self._tokdict.items(): - for j in removed: - for k, (value, position) in enumerate(occurrences): - occurrences[k] = _ParseResultsWithOffset( - value, position - (position > j) - ) - else: - del self._tokdict[i] - - def __contains__(self, k) -> bool: - return k in self._tokdict - - def __len__(self) -> int: - return len(self._toklist) - - def __bool__(self) -> bool: - return not not (self._toklist or self._tokdict) - - def __iter__(self) -> Iterator: - return iter(self._toklist) - - def __reversed__(self) -> Iterator: - return iter(self._toklist[::-1]) - - def keys(self): - return iter(self._tokdict) - - def values(self): - return (self[k] for k in self.keys()) - - def items(self): - return ((k, self[k]) for k in self.keys()) - - def haskeys(self) -> bool: - """ - Since ``keys()`` returns an iterator, this method is helpful in bypassing - code that looks for the existence of any defined results names.""" - return bool(self._tokdict) - - def pop(self, *args, **kwargs): - """ - Removes and returns item at specified index (default= ``last``). - Supports both ``list`` and ``dict`` semantics for ``pop()``. If - passed no argument or an integer argument, it will use ``list`` - semantics and pop tokens from the list of parsed tokens. If passed - a non-integer argument (most likely a string), it will use ``dict`` - semantics and pop the corresponding value from any defined results - names. A second default return value argument is supported, just as in - ``dict.pop()``. - - Example:: - - numlist = Word(nums)[...] - print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321'] - - def remove_first(tokens): - tokens.pop(0) - numlist.add_parse_action(remove_first) - print(numlist.parse_string("0 123 321")) # -> ['123', '321'] - - label = Word(alphas) - patt = label("LABEL") + Word(nums)[1, ...] - print(patt.parse_string("AAB 123 321").dump()) - - # Use pop() in a parse action to remove named result (note that corresponding value is not - # removed from list form of results) - def remove_LABEL(tokens): - tokens.pop("LABEL") - return tokens - patt.add_parse_action(remove_LABEL) - print(patt.parse_string("AAB 123 321").dump()) - - prints:: - - ['AAB', '123', '321'] - - LABEL: 'AAB' - - ['AAB', '123', '321'] - """ - if not args: - args = [-1] - for k, v in kwargs.items(): - if k == "default": - args = (args[0], v) - else: - raise TypeError( - "pop() got an unexpected keyword argument {!r}".format(k) - ) - if isinstance(args[0], int) or len(args) == 1 or args[0] in self: - index = args[0] - ret = self[index] - del self[index] - return ret - else: - defaultvalue = args[1] - return defaultvalue - - def get(self, key, default_value=None): - """ - Returns named result matching the given key, or if there is no - such name, then returns the given ``default_value`` or ``None`` if no - ``default_value`` is specified. - - Similar to ``dict.get()``. - - Example:: - - integer = Word(nums) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - result = date_str.parse_string("1999/12/31") - print(result.get("year")) # -> '1999' - print(result.get("hour", "not specified")) # -> 'not specified' - print(result.get("hour")) # -> None - """ - if key in self: - return self[key] - else: - return default_value - - def insert(self, index, ins_string): - """ - Inserts new element at location index in the list of parsed tokens. - - Similar to ``list.insert()``. 
- - Example:: - - numlist = Word(nums)[...] - print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321'] - - # use a parse action to insert the parse location in the front of the parsed results - def insert_locn(locn, tokens): - tokens.insert(0, locn) - numlist.add_parse_action(insert_locn) - print(numlist.parse_string("0 123 321")) # -> [0, '0', '123', '321'] - """ - self._toklist.insert(index, ins_string) - # fixup indices in token dictionary - for name, occurrences in self._tokdict.items(): - for k, (value, position) in enumerate(occurrences): - occurrences[k] = _ParseResultsWithOffset( - value, position + (position > index) - ) - - def append(self, item): - """ - Add single element to end of ``ParseResults`` list of elements. - - Example:: - - numlist = Word(nums)[...] - print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321'] - - # use a parse action to compute the sum of the parsed integers, and add it to the end - def append_sum(tokens): - tokens.append(sum(map(int, tokens))) - numlist.add_parse_action(append_sum) - print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321', 444] - """ - self._toklist.append(item) - - def extend(self, itemseq): - """ - Add sequence of elements to end of ``ParseResults`` list of elements. - - Example:: - - patt = Word(alphas)[1, ...] - - # use a parse action to append the reverse of the matched strings, to make a palindrome - def make_palindrome(tokens): - tokens.extend(reversed([t[::-1] for t in tokens])) - return ''.join(tokens) - patt.add_parse_action(make_palindrome) - print(patt.parse_string("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl' - """ - if isinstance(itemseq, ParseResults): - self.__iadd__(itemseq) - else: - self._toklist.extend(itemseq) - - def clear(self): - """ - Clear all elements and results names. - """ - del self._toklist[:] - self._tokdict.clear() - - def __getattr__(self, name): - try: - return self[name] - except KeyError: - if name.startswith("__"): - raise AttributeError(name) - return "" - - def __add__(self, other) -> "ParseResults": - ret = self.copy() - ret += other - return ret - - def __iadd__(self, other) -> "ParseResults": - if other._tokdict: - offset = len(self._toklist) - addoffset = lambda a: offset if a < 0 else a + offset - otheritems = other._tokdict.items() - otherdictitems = [ - (k, _ParseResultsWithOffset(v[0], addoffset(v[1]))) - for k, vlist in otheritems - for v in vlist - ] - for k, v in otherdictitems: - self[k] = v - if isinstance(v[0], ParseResults): - v[0]._parent = wkref(self) - - self._toklist += other._toklist - self._all_names |= other._all_names - return self - - def __radd__(self, other) -> "ParseResults": - if isinstance(other, int) and other == 0: - # useful for merging many ParseResults using sum() builtin - return self.copy() - else: - # this may raise a TypeError - so be it - return other + self - - def __repr__(self) -> str: - return "{}({!r}, {})".format(type(self).__name__, self._toklist, self.as_dict()) - - def __str__(self) -> str: - return ( - "[" - + ", ".join( - [ - str(i) if isinstance(i, ParseResults) else repr(i) - for i in self._toklist - ] - ) - + "]" - ) - - def _asStringList(self, sep=""): - out = [] - for item in self._toklist: - if out and sep: - out.append(sep) - if isinstance(item, ParseResults): - out += item._asStringList() - else: - out.append(str(item)) - return out - - def as_list(self) -> list: - """ - Returns the parse results as a nested list of matching tokens, all converted to strings. 
- - Example:: - - patt = Word(alphas)[1, ...] - result = patt.parse_string("sldkj lsdkj sldkj") - # even though the result prints in string-like form, it is actually a pyparsing ParseResults - print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj'] - - # Use as_list() to create an actual list - result_list = result.as_list() - print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj'] - """ - return [ - res.as_list() if isinstance(res, ParseResults) else res - for res in self._toklist - ] - - def as_dict(self) -> dict: - """ - Returns the named parse results as a nested dictionary. - - Example:: - - integer = Word(nums) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - result = date_str.parse_string('12/31/1999') - print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]}) - - result_dict = result.as_dict() - print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'} - - # even though a ParseResults supports dict-like access, sometime you just need to have a dict - import json - print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable - print(json.dumps(result.as_dict())) # -> {"month": "31", "day": "1999", "year": "12"} - """ - - def to_item(obj): - if isinstance(obj, ParseResults): - return obj.as_dict() if obj.haskeys() else [to_item(v) for v in obj] - else: - return obj - - return dict((k, to_item(v)) for k, v in self.items()) - - def copy(self) -> "ParseResults": - """ - Returns a new copy of a :class:`ParseResults` object. - """ - ret = ParseResults(self._toklist) - ret._tokdict = self._tokdict.copy() - ret._parent = self._parent - ret._all_names |= self._all_names - ret._name = self._name - return ret - - def get_name(self): - r""" - Returns the results name for this token expression. Useful when several - different expressions might match at a particular location. - - Example:: - - integer = Word(nums) - ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d") - house_number_expr = Suppress('#') + Word(nums, alphanums) - user_data = (Group(house_number_expr)("house_number") - | Group(ssn_expr)("ssn") - | Group(integer)("age")) - user_info = user_data[1, ...] - - result = user_info.parse_string("22 111-22-3333 #221B") - for item in result: - print(item.get_name(), ':', item[0]) - - prints:: - - age : 22 - ssn : 111-22-3333 - house_number : 221B - """ - if self._name: - return self._name - elif self._parent: - par = self._parent() - - def find_in_parent(sub): - return next( - ( - k - for k, vlist in par._tokdict.items() - for v, loc in vlist - if sub is v - ), - None, - ) - - return find_in_parent(self) if par else None - elif ( - len(self) == 1 - and len(self._tokdict) == 1 - and next(iter(self._tokdict.values()))[0][1] in (0, -1) - ): - return next(iter(self._tokdict.keys())) - else: - return None - - def dump(self, indent="", full=True, include_list=True, _depth=0) -> str: - """ - Diagnostic method for listing out the contents of - a :class:`ParseResults`. Accepts an optional ``indent`` argument so - that this string can be embedded in a nested display of other data.
- - Example:: - - integer = Word(nums) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - result = date_str.parse_string('1999/12/31') - print(result.dump()) - - prints:: - - ['1999', '/', '12', '/', '31'] - - day: '31' - - month: '12' - - year: '1999' - """ - out = [] - NL = "\n" - out.append(indent + str(self.as_list()) if include_list else "") - - if full: - if self.haskeys(): - items = sorted((str(k), v) for k, v in self.items()) - for k, v in items: - if out: - out.append(NL) - out.append("{}{}- {}: ".format(indent, (" " * _depth), k)) - if isinstance(v, ParseResults): - if v: - out.append( - v.dump( - indent=indent, - full=full, - include_list=include_list, - _depth=_depth + 1, - ) - ) - else: - out.append(str(v)) - else: - out.append(repr(v)) - if any(isinstance(vv, ParseResults) for vv in self): - v = self - for i, vv in enumerate(v): - if isinstance(vv, ParseResults): - out.append( - "\n{}{}[{}]:\n{}{}{}".format( - indent, - (" " * (_depth)), - i, - indent, - (" " * (_depth + 1)), - vv.dump( - indent=indent, - full=full, - include_list=include_list, - _depth=_depth + 1, - ), - ) - ) - else: - out.append( - "\n%s%s[%d]:\n%s%s%s" - % ( - indent, - (" " * (_depth)), - i, - indent, - (" " * (_depth + 1)), - str(vv), - ) - ) - - return "".join(out) - - def pprint(self, *args, **kwargs): - """ - Pretty-printer for parsed results as a list, using the - `pprint <https://docs.python.org/3/library/pprint.html>`_ module. - Accepts additional positional or keyword args as defined for - `pprint.pprint <https://docs.python.org/3/library/pprint.html#pprint.pprint>`_ . - - Example:: - - ident = Word(alphas, alphanums) - num = Word(nums) - func = Forward() - term = ident | num | Group('(' + func + ')') - func <<= ident + Group(Optional(delimited_list(term))) - result = func.parse_string("fna a,b,(fnb c,d,200),100") - result.pprint(width=40) - - prints:: - - ['fna', - ['a', - 'b', - ['(', 'fnb', ['c', 'd', '200'], ')'], - '100']] - """ - pprint.pprint(self.as_list(), *args, **kwargs) - - # add support for pickle protocol - def __getstate__(self): - return ( - self._toklist, - ( - self._tokdict.copy(), - self._parent is not None and self._parent() or None, - self._all_names, - self._name, - ), - ) - - def __setstate__(self, state): - self._toklist, (self._tokdict, par, inAccumNames, self._name) = state - self._all_names = set(inAccumNames) - if par is not None: - self._parent = wkref(par) - else: - self._parent = None - - def __getnewargs__(self): - return self._toklist, self._name - - def __dir__(self): - return dir(type(self)) + list(self.keys()) - - @classmethod - def from_dict(cls, other, name=None) -> "ParseResults": - """ - Helper classmethod to construct a ``ParseResults`` from a ``dict``, preserving the - name-value relations as results names. If an optional ``name`` argument is - given, a nested ``ParseResults`` will be returned.
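A small usage sketch, assuming pyparsing 3.x is importable as ``pp``::

    import pyparsing as pp

    cfg = pp.ParseResults.from_dict({"host": "localhost", "port": 8080})
    print(cfg["host"], cfg.port)  # -> localhost 8080
    print(cfg.as_dict())          # -> {'host': 'localhost', 'port': 8080}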
- """ - - def is_iterable(obj): - try: - iter(obj) - except Exception: - return False - else: - return not isinstance(obj, str_type) - - ret = cls([]) - for k, v in other.items(): - if isinstance(v, Mapping): - ret += cls.from_dict(v, name=k) - else: - ret += cls([v], name=k, asList=is_iterable(v)) - if name is not None: - ret = cls([ret], name=name) - return ret - - asList = as_list - asDict = as_dict - getName = get_name - - -MutableMapping.register(ParseResults) -MutableSequence.register(ParseResults) diff --git a/venv/lib/python3.10/site-packages/setuptools/_vendor/pyparsing/testing.py b/venv/lib/python3.10/site-packages/setuptools/_vendor/pyparsing/testing.py deleted file mode 100644 index 84a0ef1..0000000 --- a/venv/lib/python3.10/site-packages/setuptools/_vendor/pyparsing/testing.py +++ /dev/null @@ -1,331 +0,0 @@ -# testing.py - -from contextlib import contextmanager -import typing - -from .core import ( - ParserElement, - ParseException, - Keyword, - __diag__, - __compat__, -) - - -class pyparsing_test: - """ - namespace class for classes useful in writing unit tests - """ - - class reset_pyparsing_context: - """ - Context manager to be used when writing unit tests that modify pyparsing config values: - - packrat parsing - - bounded recursion parsing - - default whitespace characters. - - default keyword characters - - literal string auto-conversion class - - __diag__ settings - - Example:: - - with reset_pyparsing_context(): - # test that literals used to construct a grammar are automatically suppressed - ParserElement.inlineLiteralsUsing(Suppress) - - term = Word(alphas) | Word(nums) - group = Group('(' + term[...] + ')') - - # assert that the '()' characters are not included in the parsed tokens - self.assertParseAndCheckList(group, "(abc 123 def)", ['abc', '123', 'def']) - - # after exiting context manager, literals are converted to Literal expressions again - """ - - def __init__(self): - self._save_context = {} - - def save(self): - self._save_context["default_whitespace"] = ParserElement.DEFAULT_WHITE_CHARS - self._save_context["default_keyword_chars"] = Keyword.DEFAULT_KEYWORD_CHARS - - self._save_context[ - "literal_string_class" - ] = ParserElement._literalStringClass - - self._save_context["verbose_stacktrace"] = ParserElement.verbose_stacktrace - - self._save_context["packrat_enabled"] = ParserElement._packratEnabled - if ParserElement._packratEnabled: - self._save_context[ - "packrat_cache_size" - ] = ParserElement.packrat_cache.size - else: - self._save_context["packrat_cache_size"] = None - self._save_context["packrat_parse"] = ParserElement._parse - self._save_context[ - "recursion_enabled" - ] = ParserElement._left_recursion_enabled - - self._save_context["__diag__"] = { - name: getattr(__diag__, name) for name in __diag__._all_names - } - - self._save_context["__compat__"] = { - "collect_all_And_tokens": __compat__.collect_all_And_tokens - } - - return self - - def restore(self): - # reset pyparsing global state - if ( - ParserElement.DEFAULT_WHITE_CHARS - != self._save_context["default_whitespace"] - ): - ParserElement.set_default_whitespace_chars( - self._save_context["default_whitespace"] - ) - - ParserElement.verbose_stacktrace = self._save_context["verbose_stacktrace"] - - Keyword.DEFAULT_KEYWORD_CHARS = self._save_context["default_keyword_chars"] - ParserElement.inlineLiteralsUsing( - self._save_context["literal_string_class"] - ) - - for name, value in self._save_context["__diag__"].items(): - (__diag__.enable if value else __diag__.disable)(name) - - 
ParserElement._packratEnabled = False - if self._save_context["packrat_enabled"]: - ParserElement.enable_packrat(self._save_context["packrat_cache_size"]) - else: - ParserElement._parse = self._save_context["packrat_parse"] - ParserElement._left_recursion_enabled = self._save_context[ - "recursion_enabled" - ] - - __compat__.collect_all_And_tokens = self._save_context["__compat__"] - - return self - - def copy(self): - ret = type(self)() - ret._save_context.update(self._save_context) - return ret - - def __enter__(self): - return self.save() - - def __exit__(self, *args): - self.restore() - - class TestParseResultsAsserts: - """ - A mixin class to add parse results assertion methods to normal unittest.TestCase classes. - """ - - def assertParseResultsEquals( - self, result, expected_list=None, expected_dict=None, msg=None - ): - """ - Unit test assertion to compare a :class:`ParseResults` object with an optional ``expected_list``, - and compare any defined results names with an optional ``expected_dict``. - """ - if expected_list is not None: - self.assertEqual(expected_list, result.as_list(), msg=msg) - if expected_dict is not None: - self.assertEqual(expected_dict, result.as_dict(), msg=msg) - - def assertParseAndCheckList( - self, expr, test_string, expected_list, msg=None, verbose=True - ): - """ - Convenience wrapper assert to test a parser element and input string, and assert that - the resulting ``ParseResults.asList()`` is equal to the ``expected_list``. - """ - result = expr.parse_string(test_string, parse_all=True) - if verbose: - print(result.dump()) - else: - print(result.as_list()) - self.assertParseResultsEquals(result, expected_list=expected_list, msg=msg) - - def assertParseAndCheckDict( - self, expr, test_string, expected_dict, msg=None, verbose=True - ): - """ - Convenience wrapper assert to test a parser element and input string, and assert that - the resulting ``ParseResults.asDict()`` is equal to the ``expected_dict``. - """ - result = expr.parse_string(test_string, parseAll=True) - if verbose: - print(result.dump()) - else: - print(result.as_list()) - self.assertParseResultsEquals(result, expected_dict=expected_dict, msg=msg) - - def assertRunTestResults( - self, run_tests_report, expected_parse_results=None, msg=None - ): - """ - Unit test assertion to evaluate output of ``ParserElement.runTests()``. If a list of - list-dict tuples is given as the ``expected_parse_results`` argument, then these are zipped - with the report tuples returned by ``runTests`` and evaluated using ``assertParseResultsEquals``. - Finally, asserts that the overall ``runTests()`` success value is ``True``. 
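A condensed sketch, assuming pyparsing 3.x imported as ``pp`` and a ``unittest.TestCase`` that mixes in this class::

    # run_tests returns (overall_success, [(test_string, result), ...])
    report = pp.Word(pp.nums)[1, ...].run_tests("1 2 3", print_results=False)
    self.assertRunTestResults(report, [(["1", "2", "3"],)])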
- - :param run_tests_report: tuple(bool, [tuple(str, ParseResults or Exception)]) returned from runTests - :param expected_parse_results (optional): [tuple(str, list, dict, Exception)] - """ - run_test_success, run_test_results = run_tests_report - - if expected_parse_results is not None: - merged = [ - (*rpt, expected) - for rpt, expected in zip(run_test_results, expected_parse_results) - ] - for test_string, result, expected in merged: - # expected should be a tuple containing a list and/or a dict or an exception, - # and optional failure message string - # an empty tuple will skip any result validation - fail_msg = next( - (exp for exp in expected if isinstance(exp, str)), None - ) - expected_exception = next( - ( - exp - for exp in expected - if isinstance(exp, type) and issubclass(exp, Exception) - ), - None, - ) - if expected_exception is not None: - with self.assertRaises( - expected_exception=expected_exception, msg=fail_msg or msg - ): - if isinstance(result, Exception): - raise result - else: - expected_list = next( - (exp for exp in expected if isinstance(exp, list)), None - ) - expected_dict = next( - (exp for exp in expected if isinstance(exp, dict)), None - ) - if (expected_list, expected_dict) != (None, None): - self.assertParseResultsEquals( - result, - expected_list=expected_list, - expected_dict=expected_dict, - msg=fail_msg or msg, - ) - else: - # warning here maybe? - print("no validation for {!r}".format(test_string)) - - # do this last, in case some specific test results can be reported instead - self.assertTrue( - run_test_success, msg=msg if msg is not None else "failed runTests" - ) - - @contextmanager - def assertRaisesParseException(self, exc_type=ParseException, msg=None): - with self.assertRaises(exc_type, msg=msg): - yield - - @staticmethod - def with_line_numbers( - s: str, - start_line: typing.Optional[int] = None, - end_line: typing.Optional[int] = None, - expand_tabs: bool = True, - eol_mark: str = "|", - mark_spaces: typing.Optional[str] = None, - mark_control: typing.Optional[str] = None, - ) -> str: - """ - Helpful method for debugging a parser - prints a string with line and column numbers. - (Line and column numbers are 1-based.) 
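For example, assuming the standalone pyparsing package, where ``pyparsing_test`` is importable from ``pyparsing.testing``::

    from pyparsing.testing import pyparsing_test as ppt

    # prints "abc" and "def" with 1-based line numbers down the left edge,
    # a column-number ruler on top, and "|" marking each end of line
    print(ppt.with_line_numbers("abc\ndef"))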
- - :param s: tuple(bool, str - string to be printed with line and column numbers - :param start_line: int - (optional) starting line number in s to print (default=1) - :param end_line: int - (optional) ending line number in s to print (default=len(s)) - :param expand_tabs: bool - (optional) expand tabs to spaces, to match the pyparsing default - :param eol_mark: str - (optional) string to mark the end of lines, helps visualize trailing spaces (default="|") - :param mark_spaces: str - (optional) special character to display in place of spaces - :param mark_control: str - (optional) convert non-printing control characters to a placeholding - character; valid values: - - "unicode" - replaces control chars with Unicode symbols, such as "␍" and "␊" - - any single character string - replace control characters with given string - - None (default) - string is displayed as-is - - :return: str - input string with leading line numbers and column number headers - """ - if expand_tabs: - s = s.expandtabs() - if mark_control is not None: - if mark_control == "unicode": - tbl = str.maketrans( - {c: u for c, u in zip(range(0, 33), range(0x2400, 0x2433))} - | {127: 0x2421} - ) - eol_mark = "" - else: - tbl = str.maketrans( - {c: mark_control for c in list(range(0, 32)) + [127]} - ) - s = s.translate(tbl) - if mark_spaces is not None and mark_spaces != " ": - if mark_spaces == "unicode": - tbl = str.maketrans({9: 0x2409, 32: 0x2423}) - s = s.translate(tbl) - else: - s = s.replace(" ", mark_spaces) - if start_line is None: - start_line = 1 - if end_line is None: - end_line = len(s) - end_line = min(end_line, len(s)) - start_line = min(max(1, start_line), end_line) - - if mark_control != "unicode": - s_lines = s.splitlines()[start_line - 1 : end_line] - else: - s_lines = [line + "␊" for line in s.split("␊")[start_line - 1 : end_line]] - if not s_lines: - return "" - - lineno_width = len(str(end_line)) - max_line_len = max(len(line) for line in s_lines) - lead = " " * (lineno_width + 1) - if max_line_len >= 99: - header0 = ( - lead - + "".join( - "{}{}".format(" " * 99, (i + 1) % 100) - for i in range(max(max_line_len // 100, 1)) - ) - + "\n" - ) - else: - header0 = "" - header1 = ( - header0 - + lead - + "".join( - " {}".format((i + 1) % 10) - for i in range(-(-max_line_len // 10)) - ) - + "\n" - ) - header2 = lead + "1234567890" * (-(-max_line_len // 10)) + "\n" - return ( - header1 - + header2 - + "\n".join( - "{:{}d}:{}{}".format(i, lineno_width, line, eol_mark) - for i, line in enumerate(s_lines, start=start_line) - ) - + "\n" - ) diff --git a/venv/lib/python3.10/site-packages/setuptools/_vendor/pyparsing/unicode.py b/venv/lib/python3.10/site-packages/setuptools/_vendor/pyparsing/unicode.py deleted file mode 100644 index 0652620..0000000 --- a/venv/lib/python3.10/site-packages/setuptools/_vendor/pyparsing/unicode.py +++ /dev/null @@ -1,352 +0,0 @@ -# unicode.py - -import sys -from itertools import filterfalse -from typing import List, Tuple, Union - - -class _lazyclassproperty: - def __init__(self, fn): - self.fn = fn - self.__doc__ = fn.__doc__ - self.__name__ = fn.__name__ - - def __get__(self, obj, cls): - if cls is None: - cls = type(obj) - if not hasattr(cls, "_intern") or any( - cls._intern is getattr(superclass, "_intern", []) - for superclass in cls.__mro__[1:] - ): - cls._intern = {} - attrname = self.fn.__name__ - if attrname not in cls._intern: - cls._intern[attrname] = self.fn(cls) - return cls._intern[attrname] - - -UnicodeRangeList = List[Union[Tuple[int, int], Tuple[int]]] - - -class 
unicode_set: - """ - A set of Unicode characters, for language-specific strings for - ``alphas``, ``nums``, ``alphanums``, and ``printables``. - A unicode_set is defined by a list of ranges in the Unicode character - set, in a class attribute ``_ranges``. Ranges can be specified using - 2-tuples or a 1-tuple, such as:: - - _ranges = [ - (0x0020, 0x007e), - (0x00a0, 0x00ff), - (0x0100,), - ] - - Ranges are left- and right-inclusive. A 1-tuple of (x,) is treated as (x, x). - - A unicode set can also be defined using multiple inheritance of other unicode sets:: - - class CJK(Chinese, Japanese, Korean): - pass - """ - - _ranges: UnicodeRangeList = [] - - @_lazyclassproperty - def _chars_for_ranges(cls): - ret = [] - for cc in cls.__mro__: - if cc is unicode_set: - break - for rr in getattr(cc, "_ranges", ()): - ret.extend(range(rr[0], rr[-1] + 1)) - return [chr(c) for c in sorted(set(ret))] - - @_lazyclassproperty - def printables(cls): - "all non-whitespace characters in this range" - return "".join(filterfalse(str.isspace, cls._chars_for_ranges)) - - @_lazyclassproperty - def alphas(cls): - "all alphabetic characters in this range" - return "".join(filter(str.isalpha, cls._chars_for_ranges)) - - @_lazyclassproperty - def nums(cls): - "all numeric digit characters in this range" - return "".join(filter(str.isdigit, cls._chars_for_ranges)) - - @_lazyclassproperty - def alphanums(cls): - "all alphanumeric characters in this range" - return cls.alphas + cls.nums - - @_lazyclassproperty - def identchars(cls): - "all characters in this range that are valid identifier characters, plus underscore '_'" - return "".join( - sorted( - set( - "".join(filter(str.isidentifier, cls._chars_for_ranges)) - + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzªµº" - + "ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ" - + "_" - ) - ) - ) - - @_lazyclassproperty - def identbodychars(cls): - """ - all characters in this range that are valid identifier body characters, - plus the digits 0-9 - """ - return "".join( - sorted( - set( - cls.identchars - + "0123456789" - + "".join( - [c for c in cls._chars_for_ranges if ("_" + c).isidentifier()] - ) - ) - ) - ) - - -class pyparsing_unicode(unicode_set): - """ - A namespace class for defining common language unicode_sets. 
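For example, assuming pyparsing 3.x, where these sets are exposed as ``pyparsing.pyparsing_unicode``::

    import pyparsing as pp

    greek_word = pp.Word(pp.pyparsing_unicode.Greek.alphas)
    print(greek_word.parse_string("αβγ δεζ").as_list())  # -> ['αβγ']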
- """ - - # fmt: off - - # define ranges in language character sets - _ranges: UnicodeRangeList = [ - (0x0020, sys.maxunicode), - ] - - class BasicMultilingualPlane(unicode_set): - "Unicode set for the Basic Multilingual Plane" - _ranges: UnicodeRangeList = [ - (0x0020, 0xFFFF), - ] - - class Latin1(unicode_set): - "Unicode set for Latin-1 Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x0020, 0x007E), - (0x00A0, 0x00FF), - ] - - class LatinA(unicode_set): - "Unicode set for Latin-A Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x0100, 0x017F), - ] - - class LatinB(unicode_set): - "Unicode set for Latin-B Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x0180, 0x024F), - ] - - class Greek(unicode_set): - "Unicode set for Greek Unicode Character Ranges" - _ranges: UnicodeRangeList = [ - (0x0342, 0x0345), - (0x0370, 0x0377), - (0x037A, 0x037F), - (0x0384, 0x038A), - (0x038C,), - (0x038E, 0x03A1), - (0x03A3, 0x03E1), - (0x03F0, 0x03FF), - (0x1D26, 0x1D2A), - (0x1D5E,), - (0x1D60,), - (0x1D66, 0x1D6A), - (0x1F00, 0x1F15), - (0x1F18, 0x1F1D), - (0x1F20, 0x1F45), - (0x1F48, 0x1F4D), - (0x1F50, 0x1F57), - (0x1F59,), - (0x1F5B,), - (0x1F5D,), - (0x1F5F, 0x1F7D), - (0x1F80, 0x1FB4), - (0x1FB6, 0x1FC4), - (0x1FC6, 0x1FD3), - (0x1FD6, 0x1FDB), - (0x1FDD, 0x1FEF), - (0x1FF2, 0x1FF4), - (0x1FF6, 0x1FFE), - (0x2129,), - (0x2719, 0x271A), - (0xAB65,), - (0x10140, 0x1018D), - (0x101A0,), - (0x1D200, 0x1D245), - (0x1F7A1, 0x1F7A7), - ] - - class Cyrillic(unicode_set): - "Unicode set for Cyrillic Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x0400, 0x052F), - (0x1C80, 0x1C88), - (0x1D2B,), - (0x1D78,), - (0x2DE0, 0x2DFF), - (0xA640, 0xA672), - (0xA674, 0xA69F), - (0xFE2E, 0xFE2F), - ] - - class Chinese(unicode_set): - "Unicode set for Chinese Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x2E80, 0x2E99), - (0x2E9B, 0x2EF3), - (0x31C0, 0x31E3), - (0x3400, 0x4DB5), - (0x4E00, 0x9FEF), - (0xA700, 0xA707), - (0xF900, 0xFA6D), - (0xFA70, 0xFAD9), - (0x16FE2, 0x16FE3), - (0x1F210, 0x1F212), - (0x1F214, 0x1F23B), - (0x1F240, 0x1F248), - (0x20000, 0x2A6D6), - (0x2A700, 0x2B734), - (0x2B740, 0x2B81D), - (0x2B820, 0x2CEA1), - (0x2CEB0, 0x2EBE0), - (0x2F800, 0x2FA1D), - ] - - class Japanese(unicode_set): - "Unicode set for Japanese Unicode Character Range, combining Kanji, Hiragana, and Katakana ranges" - _ranges: UnicodeRangeList = [] - - class Kanji(unicode_set): - "Unicode set for Kanji Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x4E00, 0x9FBF), - (0x3000, 0x303F), - ] - - class Hiragana(unicode_set): - "Unicode set for Hiragana Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x3041, 0x3096), - (0x3099, 0x30A0), - (0x30FC,), - (0xFF70,), - (0x1B001,), - (0x1B150, 0x1B152), - (0x1F200,), - ] - - class Katakana(unicode_set): - "Unicode set for Katakana Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x3099, 0x309C), - (0x30A0, 0x30FF), - (0x31F0, 0x31FF), - (0x32D0, 0x32FE), - (0xFF65, 0xFF9F), - (0x1B000,), - (0x1B164, 0x1B167), - (0x1F201, 0x1F202), - (0x1F213,), - ] - - class Hangul(unicode_set): - "Unicode set for Hangul (Korean) Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x1100, 0x11FF), - (0x302E, 0x302F), - (0x3131, 0x318E), - (0x3200, 0x321C), - (0x3260, 0x327B), - (0x327E,), - (0xA960, 0xA97C), - (0xAC00, 0xD7A3), - (0xD7B0, 0xD7C6), - (0xD7CB, 0xD7FB), - (0xFFA0, 0xFFBE), - (0xFFC2, 0xFFC7), - (0xFFCA, 0xFFCF), - (0xFFD2, 0xFFD7), - (0xFFDA, 0xFFDC), - ] - - Korean = Hangul - - class 
CJK(Chinese, Japanese, Hangul): - "Unicode set for combined Chinese, Japanese, and Korean (CJK) Unicode Character Range" - - class Thai(unicode_set): - "Unicode set for Thai Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x0E01, 0x0E3A), - (0x0E3F, 0x0E5B) - ] - - class Arabic(unicode_set): - "Unicode set for Arabic Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x0600, 0x061B), - (0x061E, 0x06FF), - (0x0700, 0x077F), - ] - - class Hebrew(unicode_set): - "Unicode set for Hebrew Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x0591, 0x05C7), - (0x05D0, 0x05EA), - (0x05EF, 0x05F4), - (0xFB1D, 0xFB36), - (0xFB38, 0xFB3C), - (0xFB3E,), - (0xFB40, 0xFB41), - (0xFB43, 0xFB44), - (0xFB46, 0xFB4F), - ] - - class Devanagari(unicode_set): - "Unicode set for Devanagari Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x0900, 0x097F), - (0xA8E0, 0xA8FF) - ] - - # fmt: on - - -pyparsing_unicode.Japanese._ranges = ( - pyparsing_unicode.Japanese.Kanji._ranges - + pyparsing_unicode.Japanese.Hiragana._ranges - + pyparsing_unicode.Japanese.Katakana._ranges -) - -pyparsing_unicode.BMP = pyparsing_unicode.BasicMultilingualPlane - -# add language identifiers using language Unicode -pyparsing_unicode.العربية = pyparsing_unicode.Arabic -pyparsing_unicode.中文 = pyparsing_unicode.Chinese -pyparsing_unicode.кириллица = pyparsing_unicode.Cyrillic -pyparsing_unicode.Ελληνικά = pyparsing_unicode.Greek -pyparsing_unicode.עִברִית = pyparsing_unicode.Hebrew -pyparsing_unicode.日本語 = pyparsing_unicode.Japanese -pyparsing_unicode.Japanese.漢字 = pyparsing_unicode.Japanese.Kanji -pyparsing_unicode.Japanese.カタカナ = pyparsing_unicode.Japanese.Katakana -pyparsing_unicode.Japanese.ひらがな = pyparsing_unicode.Japanese.Hiragana -pyparsing_unicode.한국어 = pyparsing_unicode.Korean -pyparsing_unicode.ไทย = pyparsing_unicode.Thai -pyparsing_unicode.देवनागरी = pyparsing_unicode.Devanagari diff --git a/venv/lib/python3.10/site-packages/setuptools/_vendor/pyparsing/util.py b/venv/lib/python3.10/site-packages/setuptools/_vendor/pyparsing/util.py deleted file mode 100644 index 34ce092..0000000 --- a/venv/lib/python3.10/site-packages/setuptools/_vendor/pyparsing/util.py +++ /dev/null @@ -1,235 +0,0 @@ -# util.py -import warnings -import types -import collections -import itertools -from functools import lru_cache -from typing import List, Union, Iterable - -_bslash = chr(92) - - -class __config_flags: - """Internal class for defining compatibility and debugging flags""" - - _all_names: List[str] = [] - _fixed_names: List[str] = [] - _type_desc = "configuration" - - @classmethod - def _set(cls, dname, value): - if dname in cls._fixed_names: - warnings.warn( - "{}.{} {} is {} and cannot be overridden".format( - cls.__name__, - dname, - cls._type_desc, - str(getattr(cls, dname)).upper(), - ) - ) - return - if dname in cls._all_names: - setattr(cls, dname, value) - else: - raise ValueError("no such {} {!r}".format(cls._type_desc, dname)) - - enable = classmethod(lambda cls, name: cls._set(name, True)) - disable = classmethod(lambda cls, name: cls._set(name, False)) - - -@lru_cache(maxsize=128) -def col(loc: int, strg: str) -> int: - """ - Returns current column within a string, counting newlines as line separators. - The first column is number 1. - - Note: the default parsing behavior is to expand tabs in the input string - before starting the parsing process. 
See - :class:`ParserElement.parseString` for more - information on parsing strings containing ```` s, and suggested - methods to maintain a consistent view of the parsed string, the parse - location, and line and column positions within the parsed string. - """ - s = strg - return 1 if 0 < loc < len(s) and s[loc - 1] == "\n" else loc - s.rfind("\n", 0, loc) - - -@lru_cache(maxsize=128) -def lineno(loc: int, strg: str) -> int: - """Returns current line number within a string, counting newlines as line separators. - The first line is number 1. - - Note - the default parsing behavior is to expand tabs in the input string - before starting the parsing process. See :class:`ParserElement.parseString` - for more information on parsing strings containing ```` s, and - suggested methods to maintain a consistent view of the parsed string, the - parse location, and line and column positions within the parsed string. - """ - return strg.count("\n", 0, loc) + 1 - - -@lru_cache(maxsize=128) -def line(loc: int, strg: str) -> str: - """ - Returns the line of text containing loc within a string, counting newlines as line separators. - """ - last_cr = strg.rfind("\n", 0, loc) - next_cr = strg.find("\n", loc) - return strg[last_cr + 1 : next_cr] if next_cr >= 0 else strg[last_cr + 1 :] - - -class _UnboundedCache: - def __init__(self): - cache = {} - cache_get = cache.get - self.not_in_cache = not_in_cache = object() - - def get(_, key): - return cache_get(key, not_in_cache) - - def set_(_, key, value): - cache[key] = value - - def clear(_): - cache.clear() - - self.size = None - self.get = types.MethodType(get, self) - self.set = types.MethodType(set_, self) - self.clear = types.MethodType(clear, self) - - -class _FifoCache: - def __init__(self, size): - self.not_in_cache = not_in_cache = object() - cache = collections.OrderedDict() - cache_get = cache.get - - def get(_, key): - return cache_get(key, not_in_cache) - - def set_(_, key, value): - cache[key] = value - while len(cache) > size: - cache.popitem(last=False) - - def clear(_): - cache.clear() - - self.size = size - self.get = types.MethodType(get, self) - self.set = types.MethodType(set_, self) - self.clear = types.MethodType(clear, self) - - -class LRUMemo: - """ - A memoizing mapping that retains `capacity` deleted items - - The memo tracks retained items by their access order; once `capacity` items - are retained, the least recently used item is discarded. 
- """ - - def __init__(self, capacity): - self._capacity = capacity - self._active = {} - self._memory = collections.OrderedDict() - - def __getitem__(self, key): - try: - return self._active[key] - except KeyError: - self._memory.move_to_end(key) - return self._memory[key] - - def __setitem__(self, key, value): - self._memory.pop(key, None) - self._active[key] = value - - def __delitem__(self, key): - try: - value = self._active.pop(key) - except KeyError: - pass - else: - while len(self._memory) >= self._capacity: - self._memory.popitem(last=False) - self._memory[key] = value - - def clear(self): - self._active.clear() - self._memory.clear() - - -class UnboundedMemo(dict): - """ - A memoizing mapping that retains all deleted items - """ - - def __delitem__(self, key): - pass - - -def _escape_regex_range_chars(s: str) -> str: - # escape these chars: ^-[] - for c in r"\^-[]": - s = s.replace(c, _bslash + c) - s = s.replace("\n", r"\n") - s = s.replace("\t", r"\t") - return str(s) - - -def _collapse_string_to_ranges( - s: Union[str, Iterable[str]], re_escape: bool = True -) -> str: - def is_consecutive(c): - c_int = ord(c) - is_consecutive.prev, prev = c_int, is_consecutive.prev - if c_int - prev > 1: - is_consecutive.value = next(is_consecutive.counter) - return is_consecutive.value - - is_consecutive.prev = 0 - is_consecutive.counter = itertools.count() - is_consecutive.value = -1 - - def escape_re_range_char(c): - return "\\" + c if c in r"\^-][" else c - - def no_escape_re_range_char(c): - return c - - if not re_escape: - escape_re_range_char = no_escape_re_range_char - - ret = [] - s = "".join(sorted(set(s))) - if len(s) > 3: - for _, chars in itertools.groupby(s, key=is_consecutive): - first = last = next(chars) - last = collections.deque( - itertools.chain(iter([last]), chars), maxlen=1 - ).pop() - if first == last: - ret.append(escape_re_range_char(first)) - else: - sep = "" if ord(last) == ord(first) + 1 else "-" - ret.append( - "{}{}{}".format( - escape_re_range_char(first), sep, escape_re_range_char(last) - ) - ) - else: - ret = [escape_re_range_char(c) for c in s] - - return "".join(ret) - - -def _flatten(ll: list) -> list: - ret = [] - for i in ll: - if isinstance(i, list): - ret.extend(_flatten(i)) - else: - ret.append(i) - return ret diff --git a/venv/lib/python3.10/site-packages/setuptools/_vendor/tomli/_types.py b/venv/lib/python3.10/site-packages/setuptools/_vendor/tomli/_types.py deleted file mode 100644 index d949412..0000000 --- a/venv/lib/python3.10/site-packages/setuptools/_vendor/tomli/_types.py +++ /dev/null @@ -1,10 +0,0 @@ -# SPDX-License-Identifier: MIT -# SPDX-FileCopyrightText: 2021 Taneli Hukkinen -# Licensed to PSF under a Contributor Agreement. - -from typing import Any, Callable, Tuple - -# Type annotations -ParseFloat = Callable[[str], Any] -Key = Tuple[str, ...] -Pos = int diff --git a/venv/lib/python3.10/site-packages/setuptools/_vendor/typing_extensions.py b/venv/lib/python3.10/site-packages/setuptools/_vendor/typing_extensions.py deleted file mode 100644 index 9f1c7aa..0000000 --- a/venv/lib/python3.10/site-packages/setuptools/_vendor/typing_extensions.py +++ /dev/null @@ -1,2296 +0,0 @@ -import abc -import collections -import collections.abc -import operator -import sys -import typing - -# After PEP 560, internal typing API was substantially reworked. -# This is especially important for Protocol class which uses internal APIs -# quite extensively. 
-PEP_560 = sys.version_info[:3] >= (3, 7, 0) - -if PEP_560: - GenericMeta = type -else: - # 3.6 - from typing import GenericMeta, _type_vars # noqa - -# The two functions below are copies of typing internal helpers. -# They are needed by _ProtocolMeta - - -def _no_slots_copy(dct): - dict_copy = dict(dct) - if '__slots__' in dict_copy: - for slot in dict_copy['__slots__']: - dict_copy.pop(slot, None) - return dict_copy - - -def _check_generic(cls, parameters): - if not cls.__parameters__: - raise TypeError(f"{cls} is not a generic class") - alen = len(parameters) - elen = len(cls.__parameters__) - if alen != elen: - raise TypeError(f"Too {'many' if alen > elen else 'few'} arguments for {cls};" - f" actual {alen}, expected {elen}") - - -# Please keep __all__ alphabetized within each category. -__all__ = [ - # Super-special typing primitives. - 'ClassVar', - 'Concatenate', - 'Final', - 'ParamSpec', - 'Self', - 'Type', - - # ABCs (from collections.abc). - 'Awaitable', - 'AsyncIterator', - 'AsyncIterable', - 'Coroutine', - 'AsyncGenerator', - 'AsyncContextManager', - 'ChainMap', - - # Concrete collection types. - 'ContextManager', - 'Counter', - 'Deque', - 'DefaultDict', - 'OrderedDict', - 'TypedDict', - - # Structural checks, a.k.a. protocols. - 'SupportsIndex', - - # One-off things. - 'Annotated', - 'final', - 'IntVar', - 'Literal', - 'NewType', - 'overload', - 'Protocol', - 'runtime', - 'runtime_checkable', - 'Text', - 'TypeAlias', - 'TypeGuard', - 'TYPE_CHECKING', -] - -if PEP_560: - __all__.extend(["get_args", "get_origin", "get_type_hints"]) - -# 3.6.2+ -if hasattr(typing, 'NoReturn'): - NoReturn = typing.NoReturn -# 3.6.0-3.6.1 -else: - class _NoReturn(typing._FinalTypingBase, _root=True): - """Special type indicating functions that never return. - Example:: - - from typing import NoReturn - - def stop() -> NoReturn: - raise Exception('no way') - - This type is invalid in other positions, e.g., ``List[NoReturn]`` - will fail in static type checkers. - """ - __slots__ = () - - def __instancecheck__(self, obj): - raise TypeError("NoReturn cannot be used with isinstance().") - - def __subclasscheck__(self, cls): - raise TypeError("NoReturn cannot be used with issubclass().") - - NoReturn = _NoReturn(_root=True) - -# Some unconstrained type variables. These are used by the container types. -# (These are not for export.) -T = typing.TypeVar('T') # Any type. -KT = typing.TypeVar('KT') # Key type. -VT = typing.TypeVar('VT') # Value type. -T_co = typing.TypeVar('T_co', covariant=True) # Any type covariant containers. -T_contra = typing.TypeVar('T_contra', contravariant=True) # Ditto contravariant. - -ClassVar = typing.ClassVar - -# On older versions of typing there is an internal class named "Final". -# 3.8+ -if hasattr(typing, 'Final') and sys.version_info[:2] >= (3, 7): - Final = typing.Final -# 3.7 -elif sys.version_info[:2] >= (3, 7): - class _FinalForm(typing._SpecialForm, _root=True): - - def __repr__(self): - return 'typing_extensions.' + self._name - - def __getitem__(self, parameters): - item = typing._type_check(parameters, - f'{self._name} accepts only single type') - return typing._GenericAlias(self, (item,)) - - Final = _FinalForm('Final', - doc="""A special typing construct to indicate that a name - cannot be re-assigned or overridden in a subclass. 
- For example: - - MAX_SIZE: Final = 9000 - MAX_SIZE += 1 # Error reported by type checker - - class Connection: - TIMEOUT: Final[int] = 10 - class FastConnector(Connection): - TIMEOUT = 1 # Error reported by type checker - - There is no runtime checking of these properties.""") -# 3.6 -else: - class _Final(typing._FinalTypingBase, _root=True): - """A special typing construct to indicate that a name - cannot be re-assigned or overridden in a subclass. - For example: - - MAX_SIZE: Final = 9000 - MAX_SIZE += 1 # Error reported by type checker - - class Connection: - TIMEOUT: Final[int] = 10 - class FastConnector(Connection): - TIMEOUT = 1 # Error reported by type checker - - There is no runtime checking of these properties. - """ - - __slots__ = ('__type__',) - - def __init__(self, tp=None, **kwds): - self.__type__ = tp - - def __getitem__(self, item): - cls = type(self) - if self.__type__ is None: - return cls(typing._type_check(item, - f'{cls.__name__[1:]} accepts only single type.'), - _root=True) - raise TypeError(f'{cls.__name__[1:]} cannot be further subscripted') - - def _eval_type(self, globalns, localns): - new_tp = typing._eval_type(self.__type__, globalns, localns) - if new_tp == self.__type__: - return self - return type(self)(new_tp, _root=True) - - def __repr__(self): - r = super().__repr__() - if self.__type__ is not None: - r += f'[{typing._type_repr(self.__type__)}]' - return r - - def __hash__(self): - return hash((type(self).__name__, self.__type__)) - - def __eq__(self, other): - if not isinstance(other, _Final): - return NotImplemented - if self.__type__ is not None: - return self.__type__ == other.__type__ - return self is other - - Final = _Final(_root=True) - - -# 3.8+ -if hasattr(typing, 'final'): - final = typing.final -# 3.6-3.7 -else: - def final(f): - """This decorator can be used to indicate to type checkers that - the decorated method cannot be overridden, and decorated class - cannot be subclassed. For example: - - class Base: - @final - def done(self) -> None: - ... - class Sub(Base): - def done(self) -> None: # Error reported by type checker - ... - @final - class Leaf: - ... - class Other(Leaf): # Error reported by type checker - ... - - There is no runtime checking of these properties. - """ - return f - - -def IntVar(name): - return typing.TypeVar(name) - - -# 3.8+: -if hasattr(typing, 'Literal'): - Literal = typing.Literal -# 3.7: -elif sys.version_info[:2] >= (3, 7): - class _LiteralForm(typing._SpecialForm, _root=True): - - def __repr__(self): - return 'typing_extensions.' + self._name - - def __getitem__(self, parameters): - return typing._GenericAlias(self, parameters) - - Literal = _LiteralForm('Literal', - doc="""A type that can be used to indicate to type checkers - that the corresponding value has a value literally equivalent - to the provided parameter. For example: - - var: Literal[4] = 4 - - The type checker understands that 'var' is literally equal to - the value 4 and no other value. - - Literal[...] cannot be subclassed. There is no runtime - checking verifying that the parameter is actually a value - instead of a type.""") -# 3.6: -else: - class _Literal(typing._FinalTypingBase, _root=True): - """A type that can be used to indicate to type checkers that the - corresponding value has a value literally equivalent to the - provided parameter. For example: - - var: Literal[4] = 4 - - The type checker understands that 'var' is literally equal to the - value 4 and no other value. - - Literal[...] cannot be subclassed. 
There is no runtime checking - verifying that the parameter is actually a value instead of a type. - """ - - __slots__ = ('__values__',) - - def __init__(self, values=None, **kwds): - self.__values__ = values - - def __getitem__(self, values): - cls = type(self) - if self.__values__ is None: - if not isinstance(values, tuple): - values = (values,) - return cls(values, _root=True) - raise TypeError(f'{cls.__name__[1:]} cannot be further subscripted') - - def _eval_type(self, globalns, localns): - return self - - def __repr__(self): - r = super().__repr__() - if self.__values__ is not None: - r += f'[{", ".join(map(typing._type_repr, self.__values__))}]' - return r - - def __hash__(self): - return hash((type(self).__name__, self.__values__)) - - def __eq__(self, other): - if not isinstance(other, _Literal): - return NotImplemented - if self.__values__ is not None: - return self.__values__ == other.__values__ - return self is other - - Literal = _Literal(_root=True) - - -_overload_dummy = typing._overload_dummy # noqa -overload = typing.overload - - -# This is not a real generic class. Don't use outside annotations. -Type = typing.Type - -# Various ABCs mimicking those in collections.abc. -# A few are simply re-exported for completeness. - - -class _ExtensionsGenericMeta(GenericMeta): - def __subclasscheck__(self, subclass): - """This mimics a more modern GenericMeta.__subclasscheck__() logic - (that does not have problems with recursion) to work around interactions - between collections, typing, and typing_extensions on older - versions of Python, see https://github.com/python/typing/issues/501. - """ - if self.__origin__ is not None: - if sys._getframe(1).f_globals['__name__'] not in ['abc', 'functools']: - raise TypeError("Parameterized generics cannot be used with class " - "or instance checks") - return False - if not self.__extra__: - return super().__subclasscheck__(subclass) - res = self.__extra__.__subclasshook__(subclass) - if res is not NotImplemented: - return res - if self.__extra__ in subclass.__mro__: - return True - for scls in self.__extra__.__subclasses__(): - if isinstance(scls, GenericMeta): - continue - if issubclass(subclass, scls): - return True - return False - - -Awaitable = typing.Awaitable -Coroutine = typing.Coroutine -AsyncIterable = typing.AsyncIterable -AsyncIterator = typing.AsyncIterator - -# 3.6.1+ -if hasattr(typing, 'Deque'): - Deque = typing.Deque -# 3.6.0 -else: - class Deque(collections.deque, typing.MutableSequence[T], - metaclass=_ExtensionsGenericMeta, - extra=collections.deque): - __slots__ = () - - def __new__(cls, *args, **kwds): - if cls._gorg is Deque: - return collections.deque(*args, **kwds) - return typing._generic_new(collections.deque, cls, *args, **kwds) - -ContextManager = typing.ContextManager -# 3.6.2+ -if hasattr(typing, 'AsyncContextManager'): - AsyncContextManager = typing.AsyncContextManager -# 3.6.0-3.6.1 -else: - from _collections_abc import _check_methods as _check_methods_in_mro # noqa - - class AsyncContextManager(typing.Generic[T_co]): - __slots__ = () - - async def __aenter__(self): - return self - - @abc.abstractmethod - async def __aexit__(self, exc_type, exc_value, traceback): - return None - - @classmethod - def __subclasshook__(cls, C): - if cls is AsyncContextManager: - return _check_methods_in_mro(C, "__aenter__", "__aexit__") - return NotImplemented - -DefaultDict = typing.DefaultDict - -# 3.7.2+ -if hasattr(typing, 'OrderedDict'): - OrderedDict = typing.OrderedDict -# 3.7.0-3.7.2 -elif (3, 7, 0) <= 
sys.version_info[:3] < (3, 7, 2): - OrderedDict = typing._alias(collections.OrderedDict, (KT, VT)) -# 3.6 -else: - class OrderedDict(collections.OrderedDict, typing.MutableMapping[KT, VT], - metaclass=_ExtensionsGenericMeta, - extra=collections.OrderedDict): - - __slots__ = () - - def __new__(cls, *args, **kwds): - if cls._gorg is OrderedDict: - return collections.OrderedDict(*args, **kwds) - return typing._generic_new(collections.OrderedDict, cls, *args, **kwds) - -# 3.6.2+ -if hasattr(typing, 'Counter'): - Counter = typing.Counter -# 3.6.0-3.6.1 -else: - class Counter(collections.Counter, - typing.Dict[T, int], - metaclass=_ExtensionsGenericMeta, extra=collections.Counter): - - __slots__ = () - - def __new__(cls, *args, **kwds): - if cls._gorg is Counter: - return collections.Counter(*args, **kwds) - return typing._generic_new(collections.Counter, cls, *args, **kwds) - -# 3.6.1+ -if hasattr(typing, 'ChainMap'): - ChainMap = typing.ChainMap -elif hasattr(collections, 'ChainMap'): - class ChainMap(collections.ChainMap, typing.MutableMapping[KT, VT], - metaclass=_ExtensionsGenericMeta, - extra=collections.ChainMap): - - __slots__ = () - - def __new__(cls, *args, **kwds): - if cls._gorg is ChainMap: - return collections.ChainMap(*args, **kwds) - return typing._generic_new(collections.ChainMap, cls, *args, **kwds) - -# 3.6.1+ -if hasattr(typing, 'AsyncGenerator'): - AsyncGenerator = typing.AsyncGenerator -# 3.6.0 -else: - class AsyncGenerator(AsyncIterator[T_co], typing.Generic[T_co, T_contra], - metaclass=_ExtensionsGenericMeta, - extra=collections.abc.AsyncGenerator): - __slots__ = () - -NewType = typing.NewType -Text = typing.Text -TYPE_CHECKING = typing.TYPE_CHECKING - - -def _gorg(cls): - """This function exists for compatibility with old typing versions.""" - assert isinstance(cls, GenericMeta) - if hasattr(cls, '_gorg'): - return cls._gorg - while cls.__origin__ is not None: - cls = cls.__origin__ - return cls - - -_PROTO_WHITELIST = ['Callable', 'Awaitable', - 'Iterable', 'Iterator', 'AsyncIterable', 'AsyncIterator', - 'Hashable', 'Sized', 'Container', 'Collection', 'Reversible', - 'ContextManager', 'AsyncContextManager'] - - -def _get_protocol_attrs(cls): - attrs = set() - for base in cls.__mro__[:-1]: # without object - if base.__name__ in ('Protocol', 'Generic'): - continue - annotations = getattr(base, '__annotations__', {}) - for attr in list(base.__dict__.keys()) + list(annotations.keys()): - if (not attr.startswith('_abc_') and attr not in ( - '__abstractmethods__', '__annotations__', '__weakref__', - '_is_protocol', '_is_runtime_protocol', '__dict__', - '__args__', '__slots__', - '__next_in_mro__', '__parameters__', '__origin__', - '__orig_bases__', '__extra__', '__tree_hash__', - '__doc__', '__subclasshook__', '__init__', '__new__', - '__module__', '_MutableMapping__marker', '_gorg')): - attrs.add(attr) - return attrs - - -def _is_callable_members_only(cls): - return all(callable(getattr(cls, attr, None)) for attr in _get_protocol_attrs(cls)) - - -# 3.8+ -if hasattr(typing, 'Protocol'): - Protocol = typing.Protocol -# 3.7 -elif PEP_560: - from typing import _collect_type_vars # noqa - - def _no_init(self, *args, **kwargs): - if type(self)._is_protocol: - raise TypeError('Protocols cannot be instantiated') - - class _ProtocolMeta(abc.ABCMeta): - # This metaclass is a bit unfortunate and exists only because of the lack - # of __instancehook__. - def __instancecheck__(cls, instance): - # We need this method for situations where attributes are - # assigned in __init__. 
- if ((not getattr(cls, '_is_protocol', False) or - _is_callable_members_only(cls)) and - issubclass(instance.__class__, cls)): - return True - if cls._is_protocol: - if all(hasattr(instance, attr) and - (not callable(getattr(cls, attr, None)) or - getattr(instance, attr) is not None) - for attr in _get_protocol_attrs(cls)): - return True - return super().__instancecheck__(instance) - - class Protocol(metaclass=_ProtocolMeta): - # There is quite a lot of overlapping code with typing.Generic. - # Unfortunately it is hard to avoid this while these live in two different - # modules. The duplicated code will be removed when Protocol is moved to typing. - """Base class for protocol classes. Protocol classes are defined as:: - - class Proto(Protocol): - def meth(self) -> int: - ... - - Such classes are primarily used with static type checkers that recognize - structural subtyping (static duck-typing), for example:: - - class C: - def meth(self) -> int: - return 0 - - def func(x: Proto) -> int: - return x.meth() - - func(C()) # Passes static type check - - See PEP 544 for details. Protocol classes decorated with - @typing_extensions.runtime act as simple-minded runtime protocol that checks - only the presence of given attributes, ignoring their type signatures. - - Protocol classes can be generic, they are defined as:: - - class GenProto(Protocol[T]): - def meth(self) -> T: - ... - """ - __slots__ = () - _is_protocol = True - - def __new__(cls, *args, **kwds): - if cls is Protocol: - raise TypeError("Type Protocol cannot be instantiated; " - "it can only be used as a base class") - return super().__new__(cls) - - @typing._tp_cache - def __class_getitem__(cls, params): - if not isinstance(params, tuple): - params = (params,) - if not params and cls is not typing.Tuple: - raise TypeError( - f"Parameter list to {cls.__qualname__}[...] cannot be empty") - msg = "Parameters to generic types must be types." - params = tuple(typing._type_check(p, msg) for p in params) # noqa - if cls is Protocol: - # Generic can only be subscripted with unique type variables. - if not all(isinstance(p, typing.TypeVar) for p in params): - i = 0 - while isinstance(params[i], typing.TypeVar): - i += 1 - raise TypeError( - "Parameters to Protocol[...] must all be type variables." - f" Parameter {i + 1} is {params[i]}") - if len(set(params)) != len(params): - raise TypeError( - "Parameters to Protocol[...] must all be unique") - else: - # Subscripting a regular Generic subclass. - _check_generic(cls, params) - return typing._GenericAlias(cls, params) - - def __init_subclass__(cls, *args, **kwargs): - tvars = [] - if '__orig_bases__' in cls.__dict__: - error = typing.Generic in cls.__orig_bases__ - else: - error = typing.Generic in cls.__bases__ - if error: - raise TypeError("Cannot inherit from plain Generic") - if '__orig_bases__' in cls.__dict__: - tvars = _collect_type_vars(cls.__orig_bases__) - # Look for Generic[T1, ..., Tn] or Protocol[T1, ..., Tn]. - # If found, tvars must be a subset of it. - # If not found, tvars is it. - # Also check for and reject plain Generic, - # and reject multiple Generic[...] and/or Protocol[...]. - gvars = None - for base in cls.__orig_bases__: - if (isinstance(base, typing._GenericAlias) and - base.__origin__ in (typing.Generic, Protocol)): - # for error messages - the_base = base.__origin__.__name__ - if gvars is not None: - raise TypeError( - "Cannot inherit from Generic[...]" - " and/or Protocol[...] 
multiple types.") - gvars = base.__parameters__ - if gvars is None: - gvars = tvars - else: - tvarset = set(tvars) - gvarset = set(gvars) - if not tvarset <= gvarset: - s_vars = ', '.join(str(t) for t in tvars if t not in gvarset) - s_args = ', '.join(str(g) for g in gvars) - raise TypeError(f"Some type variables ({s_vars}) are" - f" not listed in {the_base}[{s_args}]") - tvars = gvars - cls.__parameters__ = tuple(tvars) - - # Determine if this is a protocol or a concrete subclass. - if not cls.__dict__.get('_is_protocol', None): - cls._is_protocol = any(b is Protocol for b in cls.__bases__) - - # Set (or override) the protocol subclass hook. - def _proto_hook(other): - if not cls.__dict__.get('_is_protocol', None): - return NotImplemented - if not getattr(cls, '_is_runtime_protocol', False): - if sys._getframe(2).f_globals['__name__'] in ['abc', 'functools']: - return NotImplemented - raise TypeError("Instance and class checks can only be used with" - " @runtime protocols") - if not _is_callable_members_only(cls): - if sys._getframe(2).f_globals['__name__'] in ['abc', 'functools']: - return NotImplemented - raise TypeError("Protocols with non-method members" - " don't support issubclass()") - if not isinstance(other, type): - # Same error as for issubclass(1, int) - raise TypeError('issubclass() arg 1 must be a class') - for attr in _get_protocol_attrs(cls): - for base in other.__mro__: - if attr in base.__dict__: - if base.__dict__[attr] is None: - return NotImplemented - break - annotations = getattr(base, '__annotations__', {}) - if (isinstance(annotations, typing.Mapping) and - attr in annotations and - isinstance(other, _ProtocolMeta) and - other._is_protocol): - break - else: - return NotImplemented - return True - if '__subclasshook__' not in cls.__dict__: - cls.__subclasshook__ = _proto_hook - - # We have nothing more to do for non-protocols. - if not cls._is_protocol: - return - - # Check consistency of bases. - for base in cls.__bases__: - if not (base in (object, typing.Generic) or - base.__module__ == 'collections.abc' and - base.__name__ in _PROTO_WHITELIST or - isinstance(base, _ProtocolMeta) and base._is_protocol): - raise TypeError('Protocols can only inherit from other' - f' protocols, got {repr(base)}') - cls.__init__ = _no_init -# 3.6 -else: - from typing import _next_in_mro, _type_check # noqa - - def _no_init(self, *args, **kwargs): - if type(self)._is_protocol: - raise TypeError('Protocols cannot be instantiated') - - class _ProtocolMeta(GenericMeta): - """Internal metaclass for Protocol. - - This exists so Protocol classes can be generic without deriving - from Generic. - """ - def __new__(cls, name, bases, namespace, - tvars=None, args=None, origin=None, extra=None, orig_bases=None): - # This is just a version copied from GenericMeta.__new__ that - # includes "Protocol" special treatment. (Comments removed for brevity.) - assert extra is None # Protocols should not have extra - if tvars is not None: - assert origin is not None - assert all(isinstance(t, typing.TypeVar) for t in tvars), tvars - else: - tvars = _type_vars(bases) - gvars = None - for base in bases: - if base is typing.Generic: - raise TypeError("Cannot inherit from plain Generic") - if (isinstance(base, GenericMeta) and - base.__origin__ in (typing.Generic, Protocol)): - if gvars is not None: - raise TypeError( - "Cannot inherit from Generic[...] or" - " Protocol[...] 
multiple times.") - gvars = base.__parameters__ - if gvars is None: - gvars = tvars - else: - tvarset = set(tvars) - gvarset = set(gvars) - if not tvarset <= gvarset: - s_vars = ", ".join(str(t) for t in tvars if t not in gvarset) - s_args = ", ".join(str(g) for g in gvars) - cls_name = "Generic" if any(b.__origin__ is typing.Generic - for b in bases) else "Protocol" - raise TypeError(f"Some type variables ({s_vars}) are" - f" not listed in {cls_name}[{s_args}]") - tvars = gvars - - initial_bases = bases - if (extra is not None and type(extra) is abc.ABCMeta and - extra not in bases): - bases = (extra,) + bases - bases = tuple(_gorg(b) if isinstance(b, GenericMeta) else b - for b in bases) - if any(isinstance(b, GenericMeta) and b is not typing.Generic for b in bases): - bases = tuple(b for b in bases if b is not typing.Generic) - namespace.update({'__origin__': origin, '__extra__': extra}) - self = super(GenericMeta, cls).__new__(cls, name, bases, namespace, - _root=True) - super(GenericMeta, self).__setattr__('_gorg', - self if not origin else - _gorg(origin)) - self.__parameters__ = tvars - self.__args__ = tuple(... if a is typing._TypingEllipsis else - () if a is typing._TypingEmpty else - a for a in args) if args else None - self.__next_in_mro__ = _next_in_mro(self) - if orig_bases is None: - self.__orig_bases__ = initial_bases - elif origin is not None: - self._abc_registry = origin._abc_registry - self._abc_cache = origin._abc_cache - if hasattr(self, '_subs_tree'): - self.__tree_hash__ = (hash(self._subs_tree()) if origin else - super(GenericMeta, self).__hash__()) - return self - - def __init__(cls, *args, **kwargs): - super().__init__(*args, **kwargs) - if not cls.__dict__.get('_is_protocol', None): - cls._is_protocol = any(b is Protocol or - isinstance(b, _ProtocolMeta) and - b.__origin__ is Protocol - for b in cls.__bases__) - if cls._is_protocol: - for base in cls.__mro__[1:]: - if not (base in (object, typing.Generic) or - base.__module__ == 'collections.abc' and - base.__name__ in _PROTO_WHITELIST or - isinstance(base, typing.TypingMeta) and base._is_protocol or - isinstance(base, GenericMeta) and - base.__origin__ is typing.Generic): - raise TypeError(f'Protocols can only inherit from other' - f' protocols, got {repr(base)}') - - cls.__init__ = _no_init - - def _proto_hook(other): - if not cls.__dict__.get('_is_protocol', None): - return NotImplemented - if not isinstance(other, type): - # Same error as for issubclass(1, int) - raise TypeError('issubclass() arg 1 must be a class') - for attr in _get_protocol_attrs(cls): - for base in other.__mro__: - if attr in base.__dict__: - if base.__dict__[attr] is None: - return NotImplemented - break - annotations = getattr(base, '__annotations__', {}) - if (isinstance(annotations, typing.Mapping) and - attr in annotations and - isinstance(other, _ProtocolMeta) and - other._is_protocol): - break - else: - return NotImplemented - return True - if '__subclasshook__' not in cls.__dict__: - cls.__subclasshook__ = _proto_hook - - def __instancecheck__(self, instance): - # We need this method for situations where attributes are - # assigned in __init__. 
- if ((not getattr(self, '_is_protocol', False) or - _is_callable_members_only(self)) and - issubclass(instance.__class__, self)): - return True - if self._is_protocol: - if all(hasattr(instance, attr) and - (not callable(getattr(self, attr, None)) or - getattr(instance, attr) is not None) - for attr in _get_protocol_attrs(self)): - return True - return super(GenericMeta, self).__instancecheck__(instance) - - def __subclasscheck__(self, cls): - if self.__origin__ is not None: - if sys._getframe(1).f_globals['__name__'] not in ['abc', 'functools']: - raise TypeError("Parameterized generics cannot be used with class " - "or instance checks") - return False - if (self.__dict__.get('_is_protocol', None) and - not self.__dict__.get('_is_runtime_protocol', None)): - if sys._getframe(1).f_globals['__name__'] in ['abc', - 'functools', - 'typing']: - return False - raise TypeError("Instance and class checks can only be used with" - " @runtime protocols") - if (self.__dict__.get('_is_runtime_protocol', None) and - not _is_callable_members_only(self)): - if sys._getframe(1).f_globals['__name__'] in ['abc', - 'functools', - 'typing']: - return super(GenericMeta, self).__subclasscheck__(cls) - raise TypeError("Protocols with non-method members" - " don't support issubclass()") - return super(GenericMeta, self).__subclasscheck__(cls) - - @typing._tp_cache - def __getitem__(self, params): - # We also need to copy this from GenericMeta.__getitem__ to get - # special treatment of "Protocol". (Comments removed for brevity.) - if not isinstance(params, tuple): - params = (params,) - if not params and _gorg(self) is not typing.Tuple: - raise TypeError( - f"Parameter list to {self.__qualname__}[...] cannot be empty") - msg = "Parameters to generic types must be types." - params = tuple(_type_check(p, msg) for p in params) - if self in (typing.Generic, Protocol): - if not all(isinstance(p, typing.TypeVar) for p in params): - raise TypeError( - f"Parameters to {repr(self)}[...] must all be type variables") - if len(set(params)) != len(params): - raise TypeError( - f"Parameters to {repr(self)}[...] must all be unique") - tvars = params - args = params - elif self in (typing.Tuple, typing.Callable): - tvars = _type_vars(params) - args = params - elif self.__origin__ in (typing.Generic, Protocol): - raise TypeError(f"Cannot subscript already-subscripted {repr(self)}") - else: - _check_generic(self, params) - tvars = _type_vars(params) - args = params - - prepend = (self,) if self.__origin__ is None else () - return self.__class__(self.__name__, - prepend + self.__bases__, - _no_slots_copy(self.__dict__), - tvars=tvars, - args=args, - origin=self, - extra=self.__extra__, - orig_bases=self.__orig_bases__) - - class Protocol(metaclass=_ProtocolMeta): - """Base class for protocol classes. Protocol classes are defined as:: - - class Proto(Protocol): - def meth(self) -> int: - ... - - Such classes are primarily used with static type checkers that recognize - structural subtyping (static duck-typing), for example:: - - class C: - def meth(self) -> int: - return 0 - - def func(x: Proto) -> int: - return x.meth() - - func(C()) # Passes static type check - - See PEP 544 for details. Protocol classes decorated with - @typing_extensions.runtime act as simple-minded runtime protocol that checks - only the presence of given attributes, ignoring their type signatures. - - Protocol classes can be generic, they are defined as:: - - class GenProto(Protocol[T]): - def meth(self) -> T: - ... 
- """ - __slots__ = () - _is_protocol = True - - def __new__(cls, *args, **kwds): - if _gorg(cls) is Protocol: - raise TypeError("Type Protocol cannot be instantiated; " - "it can be used only as a base class") - return typing._generic_new(cls.__next_in_mro__, cls, *args, **kwds) - - -# 3.8+ -if hasattr(typing, 'runtime_checkable'): - runtime_checkable = typing.runtime_checkable -# 3.6-3.7 -else: - def runtime_checkable(cls): - """Mark a protocol class as a runtime protocol, so that it - can be used with isinstance() and issubclass(). Raise TypeError - if applied to a non-protocol class. - - This allows a simple-minded structural check very similar to the - one-offs in collections.abc such as Hashable. - """ - if not isinstance(cls, _ProtocolMeta) or not cls._is_protocol: - raise TypeError('@runtime_checkable can be only applied to protocol classes,' - f' got {cls!r}') - cls._is_runtime_protocol = True - return cls - - -# Exists for backwards compatibility. -runtime = runtime_checkable - - -# 3.8+ -if hasattr(typing, 'SupportsIndex'): - SupportsIndex = typing.SupportsIndex -# 3.6-3.7 -else: - @runtime_checkable - class SupportsIndex(Protocol): - __slots__ = () - - @abc.abstractmethod - def __index__(self) -> int: - pass - - -if sys.version_info >= (3, 9, 2): - # The standard library TypedDict in Python 3.8 does not store runtime information - # about which (if any) keys are optional. See https://bugs.python.org/issue38834 - # The standard library TypedDict in Python 3.9.0/1 does not honour the "total" - # keyword with old-style TypedDict(). See https://bugs.python.org/issue42059 - TypedDict = typing.TypedDict -else: - def _check_fails(cls, other): - try: - if sys._getframe(1).f_globals['__name__'] not in ['abc', - 'functools', - 'typing']: - # Typed dicts are only for static structural subtyping. 
- raise TypeError('TypedDict does not support instance and class checks') - except (AttributeError, ValueError): - pass - return False - - def _dict_new(*args, **kwargs): - if not args: - raise TypeError('TypedDict.__new__(): not enough arguments') - _, args = args[0], args[1:] # allow the "cls" keyword be passed - return dict(*args, **kwargs) - - _dict_new.__text_signature__ = '($cls, _typename, _fields=None, /, **kwargs)' - - def _typeddict_new(*args, total=True, **kwargs): - if not args: - raise TypeError('TypedDict.__new__(): not enough arguments') - _, args = args[0], args[1:] # allow the "cls" keyword be passed - if args: - typename, args = args[0], args[1:] # allow the "_typename" keyword be passed - elif '_typename' in kwargs: - typename = kwargs.pop('_typename') - import warnings - warnings.warn("Passing '_typename' as keyword argument is deprecated", - DeprecationWarning, stacklevel=2) - else: - raise TypeError("TypedDict.__new__() missing 1 required positional " - "argument: '_typename'") - if args: - try: - fields, = args # allow the "_fields" keyword be passed - except ValueError: - raise TypeError('TypedDict.__new__() takes from 2 to 3 ' - f'positional arguments but {len(args) + 2} ' - 'were given') - elif '_fields' in kwargs and len(kwargs) == 1: - fields = kwargs.pop('_fields') - import warnings - warnings.warn("Passing '_fields' as keyword argument is deprecated", - DeprecationWarning, stacklevel=2) - else: - fields = None - - if fields is None: - fields = kwargs - elif kwargs: - raise TypeError("TypedDict takes either a dict or keyword arguments," - " but not both") - - ns = {'__annotations__': dict(fields)} - try: - # Setting correct module is necessary to make typed dict classes pickleable. - ns['__module__'] = sys._getframe(1).f_globals.get('__name__', '__main__') - except (AttributeError, ValueError): - pass - - return _TypedDictMeta(typename, (), ns, total=total) - - _typeddict_new.__text_signature__ = ('($cls, _typename, _fields=None,' - ' /, *, total=True, **kwargs)') - - class _TypedDictMeta(type): - def __init__(cls, name, bases, ns, total=True): - super().__init__(name, bases, ns) - - def __new__(cls, name, bases, ns, total=True): - # Create new typed dict class object. - # This method is called directly when TypedDict is subclassed, - # or via _typeddict_new when TypedDict is instantiated. This way - # TypedDict supports all three syntaxes described in its docstring. - # Subclasses and instances of TypedDict return actual dictionaries - # via _dict_new. 
- ns['__new__'] = _typeddict_new if name == 'TypedDict' else _dict_new - tp_dict = super().__new__(cls, name, (dict,), ns) - - annotations = {} - own_annotations = ns.get('__annotations__', {}) - own_annotation_keys = set(own_annotations.keys()) - msg = "TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type" - own_annotations = { - n: typing._type_check(tp, msg) for n, tp in own_annotations.items() - } - required_keys = set() - optional_keys = set() - - for base in bases: - annotations.update(base.__dict__.get('__annotations__', {})) - required_keys.update(base.__dict__.get('__required_keys__', ())) - optional_keys.update(base.__dict__.get('__optional_keys__', ())) - - annotations.update(own_annotations) - if total: - required_keys.update(own_annotation_keys) - else: - optional_keys.update(own_annotation_keys) - - tp_dict.__annotations__ = annotations - tp_dict.__required_keys__ = frozenset(required_keys) - tp_dict.__optional_keys__ = frozenset(optional_keys) - if not hasattr(tp_dict, '__total__'): - tp_dict.__total__ = total - return tp_dict - - __instancecheck__ = __subclasscheck__ = _check_fails - - TypedDict = _TypedDictMeta('TypedDict', (dict,), {}) - TypedDict.__module__ = __name__ - TypedDict.__doc__ = \ - """A simple typed name space. At runtime it is equivalent to a plain dict. - - TypedDict creates a dictionary type that expects all of its - instances to have a certain set of keys, with each key - associated with a value of a consistent type. This expectation - is not checked at runtime but is only enforced by type checkers. - Usage:: - - class Point2D(TypedDict): - x: int - y: int - label: str - - a: Point2D = {'x': 1, 'y': 2, 'label': 'good'} # OK - b: Point2D = {'z': 3, 'label': 'bad'} # Fails type check - - assert Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first') - - The type info can be accessed via the Point2D.__annotations__ dict, and - the Point2D.__required_keys__ and Point2D.__optional_keys__ frozensets. - TypedDict supports two additional equivalent forms:: - - Point2D = TypedDict('Point2D', x=int, y=int, label=str) - Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str}) - - The class syntax is only supported in Python 3.6+, while two other - syntax forms work for Python 2.7 and 3.2+ - """ - - -# Python 3.9+ has PEP 593 (Annotated and modified get_type_hints) -if hasattr(typing, 'Annotated'): - Annotated = typing.Annotated - get_type_hints = typing.get_type_hints - # Not exported and not a public API, but needed for get_origin() and get_args() - # to work. - _AnnotatedAlias = typing._AnnotatedAlias -# 3.7-3.8 -elif PEP_560: - class _AnnotatedAlias(typing._GenericAlias, _root=True): - """Runtime representation of an annotated type. - - At its core 'Annotated[t, dec1, dec2, ...]' is an alias for the type 't' - with extra annotations. The alias behaves like a normal typing alias, - instantiating is the same as instantiating the underlying type, binding - it to types is also the same. 
- """ - def __init__(self, origin, metadata): - if isinstance(origin, _AnnotatedAlias): - metadata = origin.__metadata__ + metadata - origin = origin.__origin__ - super().__init__(origin, origin) - self.__metadata__ = metadata - - def copy_with(self, params): - assert len(params) == 1 - new_type = params[0] - return _AnnotatedAlias(new_type, self.__metadata__) - - def __repr__(self): - return (f"typing_extensions.Annotated[{typing._type_repr(self.__origin__)}, " - f"{', '.join(repr(a) for a in self.__metadata__)}]") - - def __reduce__(self): - return operator.getitem, ( - Annotated, (self.__origin__,) + self.__metadata__ - ) - - def __eq__(self, other): - if not isinstance(other, _AnnotatedAlias): - return NotImplemented - if self.__origin__ != other.__origin__: - return False - return self.__metadata__ == other.__metadata__ - - def __hash__(self): - return hash((self.__origin__, self.__metadata__)) - - class Annotated: - """Add context specific metadata to a type. - - Example: Annotated[int, runtime_check.Unsigned] indicates to the - hypothetical runtime_check module that this type is an unsigned int. - Every other consumer of this type can ignore this metadata and treat - this type as int. - - The first argument to Annotated must be a valid type (and will be in - the __origin__ field), the remaining arguments are kept as a tuple in - the __extra__ field. - - Details: - - - It's an error to call `Annotated` with less than two arguments. - - Nested Annotated are flattened:: - - Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3] - - - Instantiating an annotated type is equivalent to instantiating the - underlying type:: - - Annotated[C, Ann1](5) == C(5) - - - Annotated can be used as a generic type alias:: - - Optimized = Annotated[T, runtime.Optimize()] - Optimized[int] == Annotated[int, runtime.Optimize()] - - OptimizedList = Annotated[List[T], runtime.Optimize()] - OptimizedList[int] == Annotated[List[int], runtime.Optimize()] - """ - - __slots__ = () - - def __new__(cls, *args, **kwargs): - raise TypeError("Type Annotated cannot be instantiated.") - - @typing._tp_cache - def __class_getitem__(cls, params): - if not isinstance(params, tuple) or len(params) < 2: - raise TypeError("Annotated[...] should be used " - "with at least two arguments (a type and an " - "annotation).") - msg = "Annotated[t, ...]: t must be a type." - origin = typing._type_check(params[0], msg) - metadata = tuple(params[1:]) - return _AnnotatedAlias(origin, metadata) - - def __init_subclass__(cls, *args, **kwargs): - raise TypeError( - f"Cannot subclass {cls.__module__}.Annotated" - ) - - def _strip_annotations(t): - """Strips the annotations from a given type. - """ - if isinstance(t, _AnnotatedAlias): - return _strip_annotations(t.__origin__) - if isinstance(t, typing._GenericAlias): - stripped_args = tuple(_strip_annotations(a) for a in t.__args__) - if stripped_args == t.__args__: - return t - res = t.copy_with(stripped_args) - res._special = t._special - return res - return t - - def get_type_hints(obj, globalns=None, localns=None, include_extras=False): - """Return type hints for an object. - - This is often the same as obj.__annotations__, but it handles - forward references encoded as string literals, adds Optional[t] if a - default value equal to None is set and recursively replaces all - 'Annotated[T, ...]' with 'T' (unless 'include_extras=True'). - - The argument may be a module, class, method, or function. The annotations - are returned as a dictionary. 
For classes, annotations include also - inherited members. - - TypeError is raised if the argument is not of a type that can contain - annotations, and an empty dictionary is returned if no annotations are - present. - - BEWARE -- the behavior of globalns and localns is counterintuitive - (unless you are familiar with how eval() and exec() work). The - search order is locals first, then globals. - - - If no dict arguments are passed, an attempt is made to use the - globals from obj (or the respective module's globals for classes), - and these are also used as the locals. If the object does not appear - to have globals, an empty dictionary is used. - - - If one dict argument is passed, it is used for both globals and - locals. - - - If two dict arguments are passed, they specify globals and - locals, respectively. - """ - hint = typing.get_type_hints(obj, globalns=globalns, localns=localns) - if include_extras: - return hint - return {k: _strip_annotations(t) for k, t in hint.items()} -# 3.6 -else: - - def _is_dunder(name): - """Returns True if name is a __dunder_variable_name__.""" - return len(name) > 4 and name.startswith('__') and name.endswith('__') - - # Prior to Python 3.7 types did not have `copy_with`. A lot of the equality - # checks, argument expansion etc. are done on the _subs_tre. As a result we - # can't provide a get_type_hints function that strips out annotations. - - class AnnotatedMeta(typing.GenericMeta): - """Metaclass for Annotated""" - - def __new__(cls, name, bases, namespace, **kwargs): - if any(b is not object for b in bases): - raise TypeError("Cannot subclass " + str(Annotated)) - return super().__new__(cls, name, bases, namespace, **kwargs) - - @property - def __metadata__(self): - return self._subs_tree()[2] - - def _tree_repr(self, tree): - cls, origin, metadata = tree - if not isinstance(origin, tuple): - tp_repr = typing._type_repr(origin) - else: - tp_repr = origin[0]._tree_repr(origin) - metadata_reprs = ", ".join(repr(arg) for arg in metadata) - return f'{cls}[{tp_repr}, {metadata_reprs}]' - - def _subs_tree(self, tvars=None, args=None): # noqa - if self is Annotated: - return Annotated - res = super()._subs_tree(tvars=tvars, args=args) - # Flatten nested Annotated - if isinstance(res[1], tuple) and res[1][0] is Annotated: - sub_tp = res[1][1] - sub_annot = res[1][2] - return (Annotated, sub_tp, sub_annot + res[2]) - return res - - def _get_cons(self): - """Return the class used to create instance of this type.""" - if self.__origin__ is None: - raise TypeError("Cannot get the underlying type of a " - "non-specialized Annotated type.") - tree = self._subs_tree() - while isinstance(tree, tuple) and tree[0] is Annotated: - tree = tree[1] - if isinstance(tree, tuple): - return tree[0] - else: - return tree - - @typing._tp_cache - def __getitem__(self, params): - if not isinstance(params, tuple): - params = (params,) - if self.__origin__ is not None: # specializing an instantiated type - return super().__getitem__(params) - elif not isinstance(params, tuple) or len(params) < 2: - raise TypeError("Annotated[...] should be instantiated " - "with at least two arguments (a type and an " - "annotation).") - else: - msg = "Annotated[t, ...]: t must be a type." - tp = typing._type_check(params[0], msg) - metadata = tuple(params[1:]) - return self.__class__( - self.__name__, - self.__bases__, - _no_slots_copy(self.__dict__), - tvars=_type_vars((tp,)), - # Metadata is a tuple so it won't be touched by _replace_args et al. 
- args=(tp, metadata), - origin=self, - ) - - def __call__(self, *args, **kwargs): - cons = self._get_cons() - result = cons(*args, **kwargs) - try: - result.__orig_class__ = self - except AttributeError: - pass - return result - - def __getattr__(self, attr): - # For simplicity we just don't relay all dunder names - if self.__origin__ is not None and not _is_dunder(attr): - return getattr(self._get_cons(), attr) - raise AttributeError(attr) - - def __setattr__(self, attr, value): - if _is_dunder(attr) or attr.startswith('_abc_'): - super().__setattr__(attr, value) - elif self.__origin__ is None: - raise AttributeError(attr) - else: - setattr(self._get_cons(), attr, value) - - def __instancecheck__(self, obj): - raise TypeError("Annotated cannot be used with isinstance().") - - def __subclasscheck__(self, cls): - raise TypeError("Annotated cannot be used with issubclass().") - - class Annotated(metaclass=AnnotatedMeta): - """Add context specific metadata to a type. - - Example: Annotated[int, runtime_check.Unsigned] indicates to the - hypothetical runtime_check module that this type is an unsigned int. - Every other consumer of this type can ignore this metadata and treat - this type as int. - - The first argument to Annotated must be a valid type, the remaining - arguments are kept as a tuple in the __metadata__ field. - - Details: - - - It's an error to call `Annotated` with less than two arguments. - - Nested Annotated are flattened:: - - Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3] - - - Instantiating an annotated type is equivalent to instantiating the - underlying type:: - - Annotated[C, Ann1](5) == C(5) - - - Annotated can be used as a generic type alias:: - - Optimized = Annotated[T, runtime.Optimize()] - Optimized[int] == Annotated[int, runtime.Optimize()] - - OptimizedList = Annotated[List[T], runtime.Optimize()] - OptimizedList[int] == Annotated[List[int], runtime.Optimize()] - """ - -# Python 3.8 has get_origin() and get_args() but those implementations aren't -# Annotated-aware, so we can't use those. Python 3.9's versions don't support -# ParamSpecArgs and ParamSpecKwargs, so only Python 3.10's versions will do. -if sys.version_info[:2] >= (3, 10): - get_origin = typing.get_origin - get_args = typing.get_args -# 3.7-3.9 -elif PEP_560: - try: - # 3.9+ - from typing import _BaseGenericAlias - except ImportError: - _BaseGenericAlias = typing._GenericAlias - try: - # 3.9+ - from typing import GenericAlias - except ImportError: - GenericAlias = typing._GenericAlias - - def get_origin(tp): - """Get the unsubscripted version of a type. - - This supports generic types, Callable, Tuple, Union, Literal, Final, ClassVar - and Annotated. Return None for unsupported types. Examples:: - - get_origin(Literal[42]) is Literal - get_origin(int) is None - get_origin(ClassVar[int]) is ClassVar - get_origin(Generic) is Generic - get_origin(Generic[T]) is Generic - get_origin(Union[T, int]) is Union - get_origin(List[Tuple[T, T]][int]) == list - get_origin(P.args) is P - """ - if isinstance(tp, _AnnotatedAlias): - return Annotated - if isinstance(tp, (typing._GenericAlias, GenericAlias, _BaseGenericAlias, - ParamSpecArgs, ParamSpecKwargs)): - return tp.__origin__ - if tp is typing.Generic: - return typing.Generic - return None - - def get_args(tp): - """Get type arguments with all substitutions performed. - - For unions, basic simplifications used by Union constructor are performed. 
- Examples:: - get_args(Dict[str, int]) == (str, int) - get_args(int) == () - get_args(Union[int, Union[T, int], str][int]) == (int, str) - get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int]) - get_args(Callable[[], T][int]) == ([], int) - """ - if isinstance(tp, _AnnotatedAlias): - return (tp.__origin__,) + tp.__metadata__ - if isinstance(tp, (typing._GenericAlias, GenericAlias)): - if getattr(tp, "_special", False): - return () - res = tp.__args__ - if get_origin(tp) is collections.abc.Callable and res[0] is not Ellipsis: - res = (list(res[:-1]), res[-1]) - return res - return () - - -# 3.10+ -if hasattr(typing, 'TypeAlias'): - TypeAlias = typing.TypeAlias -# 3.9 -elif sys.version_info[:2] >= (3, 9): - class _TypeAliasForm(typing._SpecialForm, _root=True): - def __repr__(self): - return 'typing_extensions.' + self._name - - @_TypeAliasForm - def TypeAlias(self, parameters): - """Special marker indicating that an assignment should - be recognized as a proper type alias definition by type - checkers. - - For example:: - - Predicate: TypeAlias = Callable[..., bool] - - It's invalid when used anywhere except as in the example above. - """ - raise TypeError(f"{self} is not subscriptable") -# 3.7-3.8 -elif sys.version_info[:2] >= (3, 7): - class _TypeAliasForm(typing._SpecialForm, _root=True): - def __repr__(self): - return 'typing_extensions.' + self._name - - TypeAlias = _TypeAliasForm('TypeAlias', - doc="""Special marker indicating that an assignment should - be recognized as a proper type alias definition by type - checkers. - - For example:: - - Predicate: TypeAlias = Callable[..., bool] - - It's invalid when used anywhere except as in the example - above.""") -# 3.6 -else: - class _TypeAliasMeta(typing.TypingMeta): - """Metaclass for TypeAlias""" - - def __repr__(self): - return 'typing_extensions.TypeAlias' - - class _TypeAliasBase(typing._FinalTypingBase, metaclass=_TypeAliasMeta, _root=True): - """Special marker indicating that an assignment should - be recognized as a proper type alias definition by type - checkers. - - For example:: - - Predicate: TypeAlias = Callable[..., bool] - - It's invalid when used anywhere except as in the example above. - """ - __slots__ = () - - def __instancecheck__(self, obj): - raise TypeError("TypeAlias cannot be used with isinstance().") - - def __subclasscheck__(self, cls): - raise TypeError("TypeAlias cannot be used with issubclass().") - - def __repr__(self): - return 'typing_extensions.TypeAlias' - - TypeAlias = _TypeAliasBase(_root=True) - - -# Python 3.10+ has PEP 612 -if hasattr(typing, 'ParamSpecArgs'): - ParamSpecArgs = typing.ParamSpecArgs - ParamSpecKwargs = typing.ParamSpecKwargs -# 3.6-3.9 -else: - class _Immutable: - """Mixin to indicate that object should not be copied.""" - __slots__ = () - - def __copy__(self): - return self - - def __deepcopy__(self, memo): - return self - - class ParamSpecArgs(_Immutable): - """The args for a ParamSpec object. - - Given a ParamSpec object P, P.args is an instance of ParamSpecArgs. - - ParamSpecArgs objects have a reference back to their ParamSpec: - - P.args.__origin__ is P - - This type is meant for runtime introspection and has no special meaning to - static type checkers. - """ - def __init__(self, origin): - self.__origin__ = origin - - def __repr__(self): - return f"{self.__origin__.__name__}.args" - - class ParamSpecKwargs(_Immutable): - """The kwargs for a ParamSpec object. - - Given a ParamSpec object P, P.kwargs is an instance of ParamSpecKwargs. 
- - ParamSpecKwargs objects have a reference back to their ParamSpec: - - P.kwargs.__origin__ is P - - This type is meant for runtime introspection and has no special meaning to - static type checkers. - """ - def __init__(self, origin): - self.__origin__ = origin - - def __repr__(self): - return f"{self.__origin__.__name__}.kwargs" - -# 3.10+ -if hasattr(typing, 'ParamSpec'): - ParamSpec = typing.ParamSpec -# 3.6-3.9 -else: - - # Inherits from list as a workaround for Callable checks in Python < 3.9.2. - class ParamSpec(list): - """Parameter specification variable. - - Usage:: - - P = ParamSpec('P') - - Parameter specification variables exist primarily for the benefit of static - type checkers. They are used to forward the parameter types of one - callable to another callable, a pattern commonly found in higher order - functions and decorators. They are only valid when used in ``Concatenate``, - or s the first argument to ``Callable``. In Python 3.10 and higher, - they are also supported in user-defined Generics at runtime. - See class Generic for more information on generic types. An - example for annotating a decorator:: - - T = TypeVar('T') - P = ParamSpec('P') - - def add_logging(f: Callable[P, T]) -> Callable[P, T]: - '''A type-safe decorator to add logging to a function.''' - def inner(*args: P.args, **kwargs: P.kwargs) -> T: - logging.info(f'{f.__name__} was called') - return f(*args, **kwargs) - return inner - - @add_logging - def add_two(x: float, y: float) -> float: - '''Add two numbers together.''' - return x + y - - Parameter specification variables defined with covariant=True or - contravariant=True can be used to declare covariant or contravariant - generic types. These keyword arguments are valid, but their actual semantics - are yet to be decided. See PEP 612 for details. - - Parameter specification variables can be introspected. e.g.: - - P.__name__ == 'T' - P.__bound__ == None - P.__covariant__ == False - P.__contravariant__ == False - - Note that only parameter specification variables defined in global scope can - be pickled. - """ - - # Trick Generic __parameters__. - __class__ = typing.TypeVar - - @property - def args(self): - return ParamSpecArgs(self) - - @property - def kwargs(self): - return ParamSpecKwargs(self) - - def __init__(self, name, *, bound=None, covariant=False, contravariant=False): - super().__init__([self]) - self.__name__ = name - self.__covariant__ = bool(covariant) - self.__contravariant__ = bool(contravariant) - if bound: - self.__bound__ = typing._type_check(bound, 'Bound must be a type.') - else: - self.__bound__ = None - - # for pickling: - try: - def_mod = sys._getframe(1).f_globals.get('__name__', '__main__') - except (AttributeError, ValueError): - def_mod = None - if def_mod != 'typing_extensions': - self.__module__ = def_mod - - def __repr__(self): - if self.__covariant__: - prefix = '+' - elif self.__contravariant__: - prefix = '-' - else: - prefix = '~' - return prefix + self.__name__ - - def __hash__(self): - return object.__hash__(self) - - def __eq__(self, other): - return self is other - - def __reduce__(self): - return self.__name__ - - # Hack to get typing._type_check to pass. - def __call__(self, *args, **kwargs): - pass - - if not PEP_560: - # Only needed in 3.6. - def _get_type_vars(self, tvars): - if self not in tvars: - tvars.append(self) - - -# 3.6-3.9 -if not hasattr(typing, 'Concatenate'): - # Inherits from list as a workaround for Callable checks in Python < 3.9.2. 
- class _ConcatenateGenericAlias(list): - - # Trick Generic into looking into this for __parameters__. - if PEP_560: - __class__ = typing._GenericAlias - else: - __class__ = typing._TypingBase - - # Flag in 3.8. - _special = False - # Attribute in 3.6 and earlier. - _gorg = typing.Generic - - def __init__(self, origin, args): - super().__init__(args) - self.__origin__ = origin - self.__args__ = args - - def __repr__(self): - _type_repr = typing._type_repr - return (f'{_type_repr(self.__origin__)}' - f'[{", ".join(_type_repr(arg) for arg in self.__args__)}]') - - def __hash__(self): - return hash((self.__origin__, self.__args__)) - - # Hack to get typing._type_check to pass in Generic. - def __call__(self, *args, **kwargs): - pass - - @property - def __parameters__(self): - return tuple( - tp for tp in self.__args__ if isinstance(tp, (typing.TypeVar, ParamSpec)) - ) - - if not PEP_560: - # Only required in 3.6. - def _get_type_vars(self, tvars): - if self.__origin__ and self.__parameters__: - typing._get_type_vars(self.__parameters__, tvars) - - -# 3.6-3.9 -@typing._tp_cache -def _concatenate_getitem(self, parameters): - if parameters == (): - raise TypeError("Cannot take a Concatenate of no types.") - if not isinstance(parameters, tuple): - parameters = (parameters,) - if not isinstance(parameters[-1], ParamSpec): - raise TypeError("The last parameter to Concatenate should be a " - "ParamSpec variable.") - msg = "Concatenate[arg, ...]: each arg must be a type." - parameters = tuple(typing._type_check(p, msg) for p in parameters) - return _ConcatenateGenericAlias(self, parameters) - - -# 3.10+ -if hasattr(typing, 'Concatenate'): - Concatenate = typing.Concatenate - _ConcatenateGenericAlias = typing._ConcatenateGenericAlias # noqa -# 3.9 -elif sys.version_info[:2] >= (3, 9): - @_TypeAliasForm - def Concatenate(self, parameters): - """Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a - higher order function which adds, removes or transforms parameters of a - callable. - - For example:: - - Callable[Concatenate[int, P], int] - - See PEP 612 for detailed information. - """ - return _concatenate_getitem(self, parameters) -# 3.7-8 -elif sys.version_info[:2] >= (3, 7): - class _ConcatenateForm(typing._SpecialForm, _root=True): - def __repr__(self): - return 'typing_extensions.' + self._name - - def __getitem__(self, parameters): - return _concatenate_getitem(self, parameters) - - Concatenate = _ConcatenateForm( - 'Concatenate', - doc="""Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a - higher order function which adds, removes or transforms parameters of a - callable. - - For example:: - - Callable[Concatenate[int, P], int] - - See PEP 612 for detailed information. - """) -# 3.6 -else: - class _ConcatenateAliasMeta(typing.TypingMeta): - """Metaclass for Concatenate.""" - - def __repr__(self): - return 'typing_extensions.Concatenate' - - class _ConcatenateAliasBase(typing._FinalTypingBase, - metaclass=_ConcatenateAliasMeta, - _root=True): - """Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a - higher order function which adds, removes or transforms parameters of a - callable. - - For example:: - - Callable[Concatenate[int, P], int] - - See PEP 612 for detailed information. 
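The ``Concatenate`` docstrings above only show the bare form ``Callable[Concatenate[int, P], int]``; a fuller sketch of the PEP 612 pattern follows. The ``Connection`` class and ``with_connection`` decorator are illustrative names, not part of this codebase (requires Python 3.10+ or ``typing_extensions``):

    from typing import Callable, TypeVar
    from typing_extensions import Concatenate, ParamSpec

    P = ParamSpec('P')
    R = TypeVar('R')

    class Connection:
        """Stand-in resource, purely illustrative."""

    def with_connection(f: Callable[Concatenate[Connection, P], R]) -> Callable[P, R]:
        # Supplies the leading Connection argument and forwards the rest unchanged.
        def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
            return f(Connection(), *args, **kwargs)
        return wrapper

    @with_connection
    def query(conn: Connection, sql: str) -> str:
        return f'ran {sql!r}'

    print(query('SELECT 1'))  # callers no longer pass a Connection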
- """ - __slots__ = () - - def __instancecheck__(self, obj): - raise TypeError("Concatenate cannot be used with isinstance().") - - def __subclasscheck__(self, cls): - raise TypeError("Concatenate cannot be used with issubclass().") - - def __repr__(self): - return 'typing_extensions.Concatenate' - - def __getitem__(self, parameters): - return _concatenate_getitem(self, parameters) - - Concatenate = _ConcatenateAliasBase(_root=True) - -# 3.10+ -if hasattr(typing, 'TypeGuard'): - TypeGuard = typing.TypeGuard -# 3.9 -elif sys.version_info[:2] >= (3, 9): - class _TypeGuardForm(typing._SpecialForm, _root=True): - def __repr__(self): - return 'typing_extensions.' + self._name - - @_TypeGuardForm - def TypeGuard(self, parameters): - """Special typing form used to annotate the return type of a user-defined - type guard function. ``TypeGuard`` only accepts a single type argument. - At runtime, functions marked this way should return a boolean. - - ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static - type checkers to determine a more precise type of an expression within a - program's code flow. Usually type narrowing is done by analyzing - conditional code flow and applying the narrowing to a block of code. The - conditional expression here is sometimes referred to as a "type guard". - - Sometimes it would be convenient to use a user-defined boolean function - as a type guard. Such a function should use ``TypeGuard[...]`` as its - return type to alert static type checkers to this intention. - - Using ``-> TypeGuard`` tells the static type checker that for a given - function: - - 1. The return value is a boolean. - 2. If the return value is ``True``, the type of its argument - is the type inside ``TypeGuard``. - - For example:: - - def is_str(val: Union[str, float]): - # "isinstance" type guard - if isinstance(val, str): - # Type of ``val`` is narrowed to ``str`` - ... - else: - # Else, type of ``val`` is narrowed to ``float``. - ... - - Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower - form of ``TypeA`` (it can even be a wider form) and this may lead to - type-unsafe results. The main reason is to allow for things like - narrowing ``List[object]`` to ``List[str]`` even though the latter is not - a subtype of the former, since ``List`` is invariant. The responsibility of - writing type-safe type guards is left to the user. - - ``TypeGuard`` also works with type variables. For more information, see - PEP 647 (User-Defined Type Guards). - """ - item = typing._type_check(parameters, f'{self} accepts only single type.') - return typing._GenericAlias(self, (item,)) -# 3.7-3.8 -elif sys.version_info[:2] >= (3, 7): - class _TypeGuardForm(typing._SpecialForm, _root=True): - - def __repr__(self): - return 'typing_extensions.' + self._name - - def __getitem__(self, parameters): - item = typing._type_check(parameters, - f'{self._name} accepts only a single type') - return typing._GenericAlias(self, (item,)) - - TypeGuard = _TypeGuardForm( - 'TypeGuard', - doc="""Special typing form used to annotate the return type of a user-defined - type guard function. ``TypeGuard`` only accepts a single type argument. - At runtime, functions marked this way should return a boolean. - - ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static - type checkers to determine a more precise type of an expression within a - program's code flow. Usually type narrowing is done by analyzing - conditional code flow and applying the narrowing to a block of code. 
The - conditional expression here is sometimes referred to as a "type guard". - - Sometimes it would be convenient to use a user-defined boolean function - as a type guard. Such a function should use ``TypeGuard[...]`` as its - return type to alert static type checkers to this intention. - - Using ``-> TypeGuard`` tells the static type checker that for a given - function: - - 1. The return value is a boolean. - 2. If the return value is ``True``, the type of its argument - is the type inside ``TypeGuard``. - - For example:: - - def is_str(val: Union[str, float]): - # "isinstance" type guard - if isinstance(val, str): - # Type of ``val`` is narrowed to ``str`` - ... - else: - # Else, type of ``val`` is narrowed to ``float``. - ... - - Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower - form of ``TypeA`` (it can even be a wider form) and this may lead to - type-unsafe results. The main reason is to allow for things like - narrowing ``List[object]`` to ``List[str]`` even though the latter is not - a subtype of the former, since ``List`` is invariant. The responsibility of - writing type-safe type guards is left to the user. - - ``TypeGuard`` also works with type variables. For more information, see - PEP 647 (User-Defined Type Guards). - """) -# 3.6 -else: - class _TypeGuard(typing._FinalTypingBase, _root=True): - """Special typing form used to annotate the return type of a user-defined - type guard function. ``TypeGuard`` only accepts a single type argument. - At runtime, functions marked this way should return a boolean. - - ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static - type checkers to determine a more precise type of an expression within a - program's code flow. Usually type narrowing is done by analyzing - conditional code flow and applying the narrowing to a block of code. The - conditional expression here is sometimes referred to as a "type guard". - - Sometimes it would be convenient to use a user-defined boolean function - as a type guard. Such a function should use ``TypeGuard[...]`` as its - return type to alert static type checkers to this intention. - - Using ``-> TypeGuard`` tells the static type checker that for a given - function: - - 1. The return value is a boolean. - 2. If the return value is ``True``, the type of its argument - is the type inside ``TypeGuard``. - - For example:: - - def is_str(val: Union[str, float]): - # "isinstance" type guard - if isinstance(val, str): - # Type of ``val`` is narrowed to ``str`` - ... - else: - # Else, type of ``val`` is narrowed to ``float``. - ... - - Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower - form of ``TypeA`` (it can even be a wider form) and this may lead to - type-unsafe results. The main reason is to allow for things like - narrowing ``List[object]`` to ``List[str]`` even though the latter is not - a subtype of the former, since ``List`` is invariant. The responsibility of - writing type-safe type guards is left to the user. - - ``TypeGuard`` also works with type variables. For more information, see - PEP 647 (User-Defined Type Guards). 
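The ``is_str`` example above shows plain ``isinstance`` narrowing; a user-defined guard that actually returns ``TypeGuard[...]`` looks like this (a minimal sketch, function names illustrative):

    from typing import List
    from typing_extensions import TypeGuard

    def is_str_list(val: List[object]) -> TypeGuard[List[str]]:
        # True only when every element is a str; checkers narrow on this.
        return all(isinstance(x, str) for x in val)

    def upper_all(val: List[object]) -> List[str]:
        if is_str_list(val):
            # A static checker treats val as List[str] inside this branch.
            return [x.upper() for x in val]
        return []

    print(upper_all(['a', 'b']))  # ['A', 'B']
    print(upper_all(['a', 1]))    # []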
- """ - - __slots__ = ('__type__',) - - def __init__(self, tp=None, **kwds): - self.__type__ = tp - - def __getitem__(self, item): - cls = type(self) - if self.__type__ is None: - return cls(typing._type_check(item, - f'{cls.__name__[1:]} accepts only a single type.'), - _root=True) - raise TypeError(f'{cls.__name__[1:]} cannot be further subscripted') - - def _eval_type(self, globalns, localns): - new_tp = typing._eval_type(self.__type__, globalns, localns) - if new_tp == self.__type__: - return self - return type(self)(new_tp, _root=True) - - def __repr__(self): - r = super().__repr__() - if self.__type__ is not None: - r += f'[{typing._type_repr(self.__type__)}]' - return r - - def __hash__(self): - return hash((type(self).__name__, self.__type__)) - - def __eq__(self, other): - if not isinstance(other, _TypeGuard): - return NotImplemented - if self.__type__ is not None: - return self.__type__ == other.__type__ - return self is other - - TypeGuard = _TypeGuard(_root=True) - -if hasattr(typing, "Self"): - Self = typing.Self -elif sys.version_info[:2] >= (3, 7): - # Vendored from cpython typing._SpecialFrom - class _SpecialForm(typing._Final, _root=True): - __slots__ = ('_name', '__doc__', '_getitem') - - def __init__(self, getitem): - self._getitem = getitem - self._name = getitem.__name__ - self.__doc__ = getitem.__doc__ - - def __getattr__(self, item): - if item in {'__name__', '__qualname__'}: - return self._name - - raise AttributeError(item) - - def __mro_entries__(self, bases): - raise TypeError(f"Cannot subclass {self!r}") - - def __repr__(self): - return f'typing_extensions.{self._name}' - - def __reduce__(self): - return self._name - - def __call__(self, *args, **kwds): - raise TypeError(f"Cannot instantiate {self!r}") - - def __or__(self, other): - return typing.Union[self, other] - - def __ror__(self, other): - return typing.Union[other, self] - - def __instancecheck__(self, obj): - raise TypeError(f"{self} cannot be used with isinstance()") - - def __subclasscheck__(self, cls): - raise TypeError(f"{self} cannot be used with issubclass()") - - @typing._tp_cache - def __getitem__(self, parameters): - return self._getitem(self, parameters) - - @_SpecialForm - def Self(self, params): - """Used to spell the type of "self" in classes. - - Example:: - - from typing import Self - - class ReturnsSelf: - def parse(self, data: bytes) -> Self: - ... - return self - - """ - - raise TypeError(f"{self} is not subscriptable") -else: - class _Self(typing._FinalTypingBase, _root=True): - """Used to spell the type of "self" in classes. - - Example:: - - from typing import Self - - class ReturnsSelf: - def parse(self, data: bytes) -> Self: - ... - return self - - """ - - __slots__ = () - - def __instancecheck__(self, obj): - raise TypeError(f"{self} cannot be used with isinstance().") - - def __subclasscheck__(self, cls): - raise TypeError(f"{self} cannot be used with issubclass().") - - Self = _Self(_root=True) - - -if hasattr(typing, 'Required'): - Required = typing.Required - NotRequired = typing.NotRequired -elif sys.version_info[:2] >= (3, 9): - class _ExtensionsSpecialForm(typing._SpecialForm, _root=True): - def __repr__(self): - return 'typing_extensions.' + self._name - - @_ExtensionsSpecialForm - def Required(self, parameters): - """A special typing construct to mark a key of a total=False TypedDict - as required. 
For example: - - class Movie(TypedDict, total=False): - title: Required[str] - year: int - - m = Movie( - title='The Matrix', # typechecker error if key is omitted - year=1999, - ) - - There is no runtime checking that a required key is actually provided - when instantiating a related TypedDict. - """ - item = typing._type_check(parameters, f'{self._name} accepts only single type') - return typing._GenericAlias(self, (item,)) - - @_ExtensionsSpecialForm - def NotRequired(self, parameters): - """A special typing construct to mark a key of a TypedDict as - potentially missing. For example: - - class Movie(TypedDict): - title: str - year: NotRequired[int] - - m = Movie( - title='The Matrix', # typechecker error if key is omitted - year=1999, - ) - """ - item = typing._type_check(parameters, f'{self._name} accepts only single type') - return typing._GenericAlias(self, (item,)) - -elif sys.version_info[:2] >= (3, 7): - class _RequiredForm(typing._SpecialForm, _root=True): - def __repr__(self): - return 'typing_extensions.' + self._name - - def __getitem__(self, parameters): - item = typing._type_check(parameters, - '{} accepts only single type'.format(self._name)) - return typing._GenericAlias(self, (item,)) - - Required = _RequiredForm( - 'Required', - doc="""A special typing construct to mark a key of a total=False TypedDict - as required. For example: - - class Movie(TypedDict, total=False): - title: Required[str] - year: int - - m = Movie( - title='The Matrix', # typechecker error if key is omitted - year=1999, - ) - - There is no runtime checking that a required key is actually provided - when instantiating a related TypedDict. - """) - NotRequired = _RequiredForm( - 'NotRequired', - doc="""A special typing construct to mark a key of a TypedDict as - potentially missing. For example: - - class Movie(TypedDict): - title: str - year: NotRequired[int] - - m = Movie( - title='The Matrix', # typechecker error if key is omitted - year=1999, - ) - """) -else: - # NOTE: Modeled after _Final's implementation when _FinalTypingBase available - class _MaybeRequired(typing._FinalTypingBase, _root=True): - __slots__ = ('__type__',) - - def __init__(self, tp=None, **kwds): - self.__type__ = tp - - def __getitem__(self, item): - cls = type(self) - if self.__type__ is None: - return cls(typing._type_check(item, - '{} accepts only single type.'.format(cls.__name__[1:])), - _root=True) - raise TypeError('{} cannot be further subscripted' - .format(cls.__name__[1:])) - - def _eval_type(self, globalns, localns): - new_tp = typing._eval_type(self.__type__, globalns, localns) - if new_tp == self.__type__: - return self - return type(self)(new_tp, _root=True) - - def __repr__(self): - r = super().__repr__() - if self.__type__ is not None: - r += '[{}]'.format(typing._type_repr(self.__type__)) - return r - - def __hash__(self): - return hash((type(self).__name__, self.__type__)) - - def __eq__(self, other): - if not isinstance(other, type(self)): - return NotImplemented - if self.__type__ is not None: - return self.__type__ == other.__type__ - return self is other - - class _Required(_MaybeRequired, _root=True): - """A special typing construct to mark a key of a total=False TypedDict - as required. For example: - - class Movie(TypedDict, total=False): - title: Required[str] - year: int - - m = Movie( - title='The Matrix', # typechecker error if key is omitted - year=1999, - ) - - There is no runtime checking that a required key is actually provided - when instantiating a related TypedDict. 
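Putting both markers together, a short sketch (the ``Album`` class is an illustrative counterpart to the ``Movie`` example above; assumes ``typing_extensions`` 4.x or Python 3.11+, where these markers are available):

    from typing_extensions import TypedDict, Required, NotRequired

    class Movie(TypedDict, total=False):
        title: Required[str]        # must be present despite total=False
        year: int                   # may be omitted

    class Album(TypedDict):         # illustrative: total=True by default
        name: str
        released: NotRequired[int]  # may be omitted despite total=True

    m: Movie = {'title': 'The Matrix'}   # OK: year omitted
    a: Album = {'name': 'Kind of Blue'}  # OK: released omitted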
- """ - - class _NotRequired(_MaybeRequired, _root=True): - """A special typing construct to mark a key of a TypedDict as - potentially missing. For example: - - class Movie(TypedDict): - title: str - year: NotRequired[int] - - m = Movie( - title='The Matrix', # typechecker error if key is omitted - year=1999, - ) - """ - - Required = _Required(_root=True) - NotRequired = _NotRequired(_root=True) diff --git a/venv/lib/python3.10/site-packages/setuptools/_vendor/zipp.py b/venv/lib/python3.10/site-packages/setuptools/_vendor/zipp.py deleted file mode 100644 index 26b723c..0000000 --- a/venv/lib/python3.10/site-packages/setuptools/_vendor/zipp.py +++ /dev/null @@ -1,329 +0,0 @@ -import io -import posixpath -import zipfile -import itertools -import contextlib -import sys -import pathlib - -if sys.version_info < (3, 7): - from collections import OrderedDict -else: - OrderedDict = dict - - -__all__ = ['Path'] - - -def _parents(path): - """ - Given a path with elements separated by - posixpath.sep, generate all parents of that path. - - >>> list(_parents('b/d')) - ['b'] - >>> list(_parents('/b/d/')) - ['/b'] - >>> list(_parents('b/d/f/')) - ['b/d', 'b'] - >>> list(_parents('b')) - [] - >>> list(_parents('')) - [] - """ - return itertools.islice(_ancestry(path), 1, None) - - -def _ancestry(path): - """ - Given a path with elements separated by - posixpath.sep, generate all elements of that path - - >>> list(_ancestry('b/d')) - ['b/d', 'b'] - >>> list(_ancestry('/b/d/')) - ['/b/d', '/b'] - >>> list(_ancestry('b/d/f/')) - ['b/d/f', 'b/d', 'b'] - >>> list(_ancestry('b')) - ['b'] - >>> list(_ancestry('')) - [] - """ - path = path.rstrip(posixpath.sep) - while path and path != posixpath.sep: - yield path - path, tail = posixpath.split(path) - - -_dedupe = OrderedDict.fromkeys -"""Deduplicate an iterable in original order""" - - -def _difference(minuend, subtrahend): - """ - Return items in minuend not in subtrahend, retaining order - with O(1) lookup. - """ - return itertools.filterfalse(set(subtrahend).__contains__, minuend) - - -class CompleteDirs(zipfile.ZipFile): - """ - A ZipFile subclass that ensures that implied directories - are always included in the namelist. - """ - - @staticmethod - def _implied_dirs(names): - parents = itertools.chain.from_iterable(map(_parents, names)) - as_dirs = (p + posixpath.sep for p in parents) - return _dedupe(_difference(as_dirs, names)) - - def namelist(self): - names = super(CompleteDirs, self).namelist() - return names + list(self._implied_dirs(names)) - - def _name_set(self): - return set(self.namelist()) - - def resolve_dir(self, name): - """ - If the name represents a directory, return that name - as a directory (with the trailing slash). - """ - names = self._name_set() - dirname = name + '/' - dir_match = name not in names and dirname in names - return dirname if dir_match else name - - @classmethod - def make(cls, source): - """ - Given a source (filename or zipfile), return an - appropriate CompleteDirs subclass. - """ - if isinstance(source, CompleteDirs): - return source - - if not isinstance(source, zipfile.ZipFile): - return cls(_pathlib_compat(source)) - - # Only allow for FastLookup when supplied zipfile is read-only - if 'r' not in source.mode: - cls = CompleteDirs - - source.__class__ = cls - return source - - -class FastLookup(CompleteDirs): - """ - ZipFile subclass to ensure implicit - dirs exist and are resolved rapidly. 
- """ - - def namelist(self): - with contextlib.suppress(AttributeError): - return self.__names - self.__names = super(FastLookup, self).namelist() - return self.__names - - def _name_set(self): - with contextlib.suppress(AttributeError): - return self.__lookup - self.__lookup = super(FastLookup, self)._name_set() - return self.__lookup - - -def _pathlib_compat(path): - """ - For path-like objects, convert to a filename for compatibility - on Python 3.6.1 and earlier. - """ - try: - return path.__fspath__() - except AttributeError: - return str(path) - - -class Path: - """ - A pathlib-compatible interface for zip files. - - Consider a zip file with this structure:: - - . - ├── a.txt - └── b - ├── c.txt - └── d - └── e.txt - - >>> data = io.BytesIO() - >>> zf = zipfile.ZipFile(data, 'w') - >>> zf.writestr('a.txt', 'content of a') - >>> zf.writestr('b/c.txt', 'content of c') - >>> zf.writestr('b/d/e.txt', 'content of e') - >>> zf.filename = 'mem/abcde.zip' - - Path accepts the zipfile object itself or a filename - - >>> root = Path(zf) - - From there, several path operations are available. - - Directory iteration (including the zip file itself): - - >>> a, b = root.iterdir() - >>> a - Path('mem/abcde.zip', 'a.txt') - >>> b - Path('mem/abcde.zip', 'b/') - - name property: - - >>> b.name - 'b' - - join with divide operator: - - >>> c = b / 'c.txt' - >>> c - Path('mem/abcde.zip', 'b/c.txt') - >>> c.name - 'c.txt' - - Read text: - - >>> c.read_text() - 'content of c' - - existence: - - >>> c.exists() - True - >>> (b / 'missing.txt').exists() - False - - Coercion to string: - - >>> import os - >>> str(c).replace(os.sep, posixpath.sep) - 'mem/abcde.zip/b/c.txt' - - At the root, ``name``, ``filename``, and ``parent`` - resolve to the zipfile. Note these attributes are not - valid and will raise a ``ValueError`` if the zipfile - has no filename. - - >>> root.name - 'abcde.zip' - >>> str(root.filename).replace(os.sep, posixpath.sep) - 'mem/abcde.zip' - >>> str(root.parent) - 'mem' - """ - - __repr = "{self.__class__.__name__}({self.root.filename!r}, {self.at!r})" - - def __init__(self, root, at=""): - """ - Construct a Path from a ZipFile or filename. - - Note: When the source is an existing ZipFile object, - its type (__class__) will be mutated to a - specialized type. If the caller wishes to retain the - original type, the caller should either create a - separate ZipFile object or pass a filename. - """ - self.root = FastLookup.make(root) - self.at = at - - def open(self, mode='r', *args, pwd=None, **kwargs): - """ - Open this entry as text or binary following the semantics - of ``pathlib.Path.open()`` by passing arguments through - to io.TextIOWrapper(). 
- """ - if self.is_dir(): - raise IsADirectoryError(self) - zip_mode = mode[0] - if not self.exists() and zip_mode == 'r': - raise FileNotFoundError(self) - stream = self.root.open(self.at, zip_mode, pwd=pwd) - if 'b' in mode: - if args or kwargs: - raise ValueError("encoding args invalid for binary operation") - return stream - return io.TextIOWrapper(stream, *args, **kwargs) - - @property - def name(self): - return pathlib.Path(self.at).name or self.filename.name - - @property - def suffix(self): - return pathlib.Path(self.at).suffix or self.filename.suffix - - @property - def suffixes(self): - return pathlib.Path(self.at).suffixes or self.filename.suffixes - - @property - def stem(self): - return pathlib.Path(self.at).stem or self.filename.stem - - @property - def filename(self): - return pathlib.Path(self.root.filename).joinpath(self.at) - - def read_text(self, *args, **kwargs): - with self.open('r', *args, **kwargs) as strm: - return strm.read() - - def read_bytes(self): - with self.open('rb') as strm: - return strm.read() - - def _is_child(self, path): - return posixpath.dirname(path.at.rstrip("/")) == self.at.rstrip("/") - - def _next(self, at): - return self.__class__(self.root, at) - - def is_dir(self): - return not self.at or self.at.endswith("/") - - def is_file(self): - return self.exists() and not self.is_dir() - - def exists(self): - return self.at in self.root._name_set() - - def iterdir(self): - if not self.is_dir(): - raise ValueError("Can't listdir a file") - subs = map(self._next, self.root.namelist()) - return filter(self._is_child, subs) - - def __str__(self): - return posixpath.join(self.root.filename, self.at) - - def __repr__(self): - return self.__repr.format(self=self) - - def joinpath(self, *other): - next = posixpath.join(self.at, *map(_pathlib_compat, other)) - return self._next(self.root.resolve_dir(next)) - - __truediv__ = joinpath - - @property - def parent(self): - if not self.at: - return self.filename.parent - parent_at = posixpath.dirname(self.at.rstrip('/')) - if parent_at: - parent_at += '/' - return self._next(parent_at) diff --git a/venv/lib/python3.10/site-packages/setuptools/archive_util.py b/venv/lib/python3.10/site-packages/setuptools/archive_util.py index d8e10c1..0f70284 100644 --- a/venv/lib/python3.10/site-packages/setuptools/archive_util.py +++ b/venv/lib/python3.10/site-packages/setuptools/archive_util.py @@ -8,7 +8,7 @@ import contextlib from distutils.errors import DistutilsError -from ._path import ensure_directory +from pkg_resources import ensure_directory __all__ = [ "unpack_archive", "unpack_zipfile", "unpack_tarfile", "default_filter", @@ -100,37 +100,29 @@ def unpack_zipfile(filename, extract_dir, progress_filter=default_filter): raise UnrecognizedFormat("%s is not a zip file" % (filename,)) with zipfile.ZipFile(filename) as z: - _unpack_zipfile_obj(z, extract_dir, progress_filter) + for info in z.infolist(): + name = info.filename + # don't extract absolute paths or ones with .. in them + if name.startswith('/') or '..' in name.split('/'): + continue -def _unpack_zipfile_obj(zipfile_obj, extract_dir, progress_filter=default_filter): - """Internal/private API used by other parts of setuptools. - Similar to ``unpack_zipfile``, but receives an already opened :obj:`zipfile.ZipFile` - object instead of a filename. - """ - for info in zipfile_obj.infolist(): - name = info.filename - - # don't extract absolute paths or ones with .. in them - if name.startswith('/') or '..' 
in name.split('/'): - continue - - target = os.path.join(extract_dir, *name.split('/')) - target = progress_filter(name, target) - if not target: - continue - if name.endswith('/'): - # directory - ensure_directory(target) - else: - # file - ensure_directory(target) - data = zipfile_obj.read(info.filename) - with open(target, 'wb') as f: - f.write(data) - unix_attributes = info.external_attr >> 16 - if unix_attributes: - os.chmod(target, unix_attributes) + target = os.path.join(extract_dir, *name.split('/')) + target = progress_filter(name, target) + if not target: + continue + if name.endswith('/'): + # directory + ensure_directory(target) + else: + # file + ensure_directory(target) + data = z.read(info.filename) + with open(target, 'wb') as f: + f.write(data) + unix_attributes = info.external_attr >> 16 + if unix_attributes: + os.chmod(target, unix_attributes) def _resolve_tar_file_or_dir(tar_obj, tar_member_obj): diff --git a/venv/lib/python3.10/site-packages/setuptools/build_meta.py b/venv/lib/python3.10/site-packages/setuptools/build_meta.py index e8f1c72..d0ac613 100644 --- a/venv/lib/python3.10/site-packages/setuptools/build_meta.py +++ b/venv/lib/python3.10/site-packages/setuptools/build_meta.py @@ -28,39 +28,26 @@ import io import os -import shlex import sys import tokenize import shutil import contextlib import tempfile import warnings -from pathlib import Path -from typing import Dict, Iterator, List, Optional, Union import setuptools import distutils -from . import errors -from ._path import same_path -from ._reqs import parse_strings -from ._deprecation_warning import SetuptoolsDeprecationWarning -from distutils.util import strtobool +from pkg_resources import parse_requirements __all__ = ['get_requires_for_build_sdist', 'get_requires_for_build_wheel', 'prepare_metadata_for_build_wheel', 'build_wheel', 'build_sdist', - 'get_requires_for_build_editable', - 'prepare_metadata_for_build_editable', - 'build_editable', '__legacy__', 'SetupRequirementsError'] -SETUPTOOLS_ENABLE_FEATURES = os.getenv("SETUPTOOLS_ENABLE_FEATURES", "").lower() -LEGACY_EDITABLE = "legacy-editable" in SETUPTOOLS_ENABLE_FEATURES.replace("_", "-") - class SetupRequirementsError(BaseException): def __init__(self, specifiers): @@ -69,7 +56,7 @@ def __init__(self, specifiers): class Distribution(setuptools.dist.Distribution): def fetch_build_eggs(self, specifiers): - specifier_list = list(parse_strings(specifiers)) + specifier_list = list(map(str, parse_requirements(specifiers))) raise SetupRequirementsError(specifier_list) @@ -139,182 +126,18 @@ def suppress_known_deprecation(): yield -_ConfigSettings = Optional[Dict[str, Union[str, List[str], None]]] -""" -Currently the user can run:: - - pip install -e . --config-settings key=value - python -m build -C--key=value -C key=value - -- pip will pass both key and value as strings and overwriting repeated keys - (pypa/pip#11059). -- build will accumulate values associated with repeated keys in a list. - It will also accept keys with no associated value. - This means that an option passed by build can be ``str | list[str] | None``. -- PEP 517 specifies that ``config_settings`` is an optional dict. -""" - - -class _ConfigSettingsTranslator: - """Translate ``config_settings`` into distutils-style command arguments. - Only a limited number of options is currently supported. 
- """ - # See pypa/setuptools#1928 pypa/setuptools#2491 - - def _get_config(self, key: str, config_settings: _ConfigSettings) -> List[str]: - """ - Get the value of a specific key in ``config_settings`` as a list of strings. - - >>> fn = _ConfigSettingsTranslator()._get_config - >>> fn("--global-option", None) - [] - >>> fn("--global-option", {}) - [] - >>> fn("--global-option", {'--global-option': 'foo'}) - ['foo'] - >>> fn("--global-option", {'--global-option': ['foo']}) - ['foo'] - >>> fn("--global-option", {'--global-option': 'foo'}) - ['foo'] - >>> fn("--global-option", {'--global-option': 'foo bar'}) - ['foo', 'bar'] - """ - cfg = config_settings or {} - opts = cfg.get(key) or [] - return shlex.split(opts) if isinstance(opts, str) else opts - - def _valid_global_options(self): - """Global options accepted by setuptools (e.g. quiet or verbose).""" - options = (opt[:2] for opt in setuptools.dist.Distribution.global_options) - return {flag for long_and_short in options for flag in long_and_short if flag} - - def _global_args(self, config_settings: _ConfigSettings) -> Iterator[str]: - """ - Let the user specify ``verbose`` or ``quiet`` + escape hatch via - ``--global-option``. - Note: ``-v``, ``-vv``, ``-vvv`` have similar effects in setuptools, - so we just have to cover the basic scenario ``-v``. - - >>> fn = _ConfigSettingsTranslator()._global_args - >>> list(fn(None)) - [] - >>> list(fn({"verbose": "False"})) - ['-q'] - >>> list(fn({"verbose": "1"})) - ['-v'] - >>> list(fn({"--verbose": None})) - ['-v'] - >>> list(fn({"verbose": "true", "--global-option": "-q --no-user-cfg"})) - ['-v', '-q', '--no-user-cfg'] - >>> list(fn({"--quiet": None})) - ['-q'] - """ - cfg = config_settings or {} - falsey = {"false", "no", "0", "off"} - if "verbose" in cfg or "--verbose" in cfg: - level = str(cfg.get("verbose") or cfg.get("--verbose") or "1") - yield ("-q" if level.lower() in falsey else "-v") - if "quiet" in cfg or "--quiet" in cfg: - level = str(cfg.get("quiet") or cfg.get("--quiet") or "1") - yield ("-v" if level.lower() in falsey else "-q") - - valid = self._valid_global_options() - args = self._get_config("--global-option", config_settings) - yield from (arg for arg in args if arg.strip("-") in valid) - - def __dist_info_args(self, config_settings: _ConfigSettings) -> Iterator[str]: - """ - The ``dist_info`` command accepts ``tag-date`` and ``tag-build``. - - .. warning:: - We cannot use this yet as it requires the ``sdist`` and ``bdist_wheel`` - commands run in ``build_sdist`` and ``build_wheel`` to re-use the egg-info - directory created in ``prepare_metadata_for_build_wheel``. - - >>> fn = _ConfigSettingsTranslator()._ConfigSettingsTranslator__dist_info_args - >>> list(fn(None)) - [] - >>> list(fn({"tag-date": "False"})) - ['--no-date'] - >>> list(fn({"tag-date": None})) - ['--no-date'] - >>> list(fn({"tag-date": "true", "tag-build": ".a"})) - ['--tag-date', '--tag-build', '.a'] - """ - cfg = config_settings or {} - if "tag-date" in cfg: - val = strtobool(str(cfg["tag-date"] or "false")) - yield ("--tag-date" if val else "--no-date") - if "tag-build" in cfg: - yield from ["--tag-build", str(cfg["tag-build"])] - - def _editable_args(self, config_settings: _ConfigSettings) -> Iterator[str]: - """ - The ``editable_wheel`` command accepts ``editable-mode=strict``. 
+class _BuildMetaBackend(object): - >>> fn = _ConfigSettingsTranslator()._editable_args - >>> list(fn(None)) - [] - >>> list(fn({"editable-mode": "strict"})) - ['--mode', 'strict'] - """ - cfg = config_settings or {} - mode = cfg.get("editable-mode") or cfg.get("editable_mode") - if not mode: - return - yield from ["--mode", str(mode)] + def _fix_config(self, config_settings): + config_settings = config_settings or {} + config_settings.setdefault('--global-option', []) + return config_settings - def _arbitrary_args(self, config_settings: _ConfigSettings) -> Iterator[str]: - """ - Users may expect to pass arbitrary lists of arguments to a command - via "--global-option" (example provided in PEP 517 of a "escape hatch"). - - >>> fn = _ConfigSettingsTranslator()._arbitrary_args - >>> list(fn(None)) - [] - >>> list(fn({})) - [] - >>> list(fn({'--build-option': 'foo'})) - ['foo'] - >>> list(fn({'--build-option': ['foo']})) - ['foo'] - >>> list(fn({'--build-option': 'foo'})) - ['foo'] - >>> list(fn({'--build-option': 'foo bar'})) - ['foo', 'bar'] - >>> warnings.simplefilter('error', SetuptoolsDeprecationWarning) - >>> list(fn({'--global-option': 'foo'})) # doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - SetuptoolsDeprecationWarning: ...arguments given via `--global-option`... - """ - args = self._get_config("--global-option", config_settings) - global_opts = self._valid_global_options() - bad_args = [] - - for arg in args: - if arg.strip("-") not in global_opts: - bad_args.append(arg) - yield arg - - yield from self._get_config("--build-option", config_settings) - - if bad_args: - msg = f""" - The arguments {bad_args!r} were given via `--global-option`. - Please use `--build-option` instead, - `--global-option` is reserved to flags like `--verbose` or `--quiet`. - """ - warnings.warn(msg, SetuptoolsDeprecationWarning) - - -class _BuildMetaBackend(_ConfigSettingsTranslator): def _get_build_requires(self, config_settings, requirements): - sys.argv = [ - *sys.argv[:1], - *self._global_args(config_settings), - "egg_info", - *self._arbitrary_args(config_settings), - ] + config_settings = self._fix_config(config_settings) + + sys.argv = sys.argv[:1] + ['egg_info'] + \ + config_settings["--global-option"] try: with Distribution.patch(): self.run_setup() @@ -332,67 +155,62 @@ def run_setup(self, setup_script='setup.py'): with _open_setup_script(__file__) as f: code = f.read().replace(r'\r\n', r'\n') - exec(code, locals()) + exec(compile(code, __file__, 'exec'), locals()) def get_requires_for_build_wheel(self, config_settings=None): - return self._get_build_requires(config_settings, requirements=['wheel']) + config_settings = self._fix_config(config_settings) + return self._get_build_requires( + config_settings, requirements=['wheel']) def get_requires_for_build_sdist(self, config_settings=None): + config_settings = self._fix_config(config_settings) return self._get_build_requires(config_settings, requirements=[]) - def _bubble_up_info_directory(self, metadata_directory: str, suffix: str) -> str: - """ - PEP 517 requires that the .dist-info directory be placed in the - metadata_directory. To comply, we MUST copy the directory to the root. + def prepare_metadata_for_build_wheel(self, metadata_directory, + config_settings=None): + sys.argv = sys.argv[:1] + [ + 'dist_info', '--egg-base', metadata_directory] + with no_install_setup_requires(): + self.run_setup() - Returns the basename of the info directory, e.g. `proj-0.0.0.dist-info`. 
- """ - info_dir = self._find_info_directory(metadata_directory, suffix) - if not same_path(info_dir.parent, metadata_directory): - shutil.move(str(info_dir), metadata_directory) - # PEP 517 allow other files and dirs to exist in metadata_directory - return info_dir.name + dist_info_directory = metadata_directory + while True: + dist_infos = [f for f in os.listdir(dist_info_directory) + if f.endswith('.dist-info')] - def _find_info_directory(self, metadata_directory: str, suffix: str) -> Path: - for parent, dirs, _ in os.walk(metadata_directory): - candidates = [f for f in dirs if f.endswith(suffix)] + if ( + len(dist_infos) == 0 and + len(_get_immediate_subdirectories(dist_info_directory)) == 1 + ): - if len(candidates) != 0 or len(dirs) != 1: - assert len(candidates) == 1, f"Multiple {suffix} directories found" - return Path(parent, candidates[0]) + dist_info_directory = os.path.join( + dist_info_directory, os.listdir(dist_info_directory)[0]) + continue - msg = f"No {suffix} directory found in {metadata_directory}" - raise errors.InternalError(msg) + assert len(dist_infos) == 1 + break - def prepare_metadata_for_build_wheel(self, metadata_directory, - config_settings=None): - sys.argv = [ - *sys.argv[:1], - *self._global_args(config_settings), - "dist_info", - "--output-dir", metadata_directory, - "--keep-egg-info", - ] - with no_install_setup_requires(): - self.run_setup() + # PEP 517 requires that the .dist-info directory be placed in the + # metadata_directory. To comply, we MUST copy the directory to the root + if dist_info_directory != metadata_directory: + shutil.move( + os.path.join(dist_info_directory, dist_infos[0]), + metadata_directory) + shutil.rmtree(dist_info_directory, ignore_errors=True) - self._bubble_up_info_directory(metadata_directory, ".egg-info") - return self._bubble_up_info_directory(metadata_directory, ".dist-info") + return dist_infos[0] def _build_with_temp_dir(self, setup_command, result_extension, result_directory, config_settings): + config_settings = self._fix_config(config_settings) result_directory = os.path.abspath(result_directory) # Build in a temporary directory, then copy to the target. os.makedirs(result_directory, exist_ok=True) with tempfile.TemporaryDirectory(dir=result_directory) as tmp_dist_dir: - sys.argv = [ - *sys.argv[:1], - *self._global_args(config_settings), - *setup_command, - "--dist-dir", tmp_dist_dir, - *self._arbitrary_args(config_settings), - ] + sys.argv = (sys.argv[:1] + setup_command + + ['--dist-dir', tmp_dist_dir] + + config_settings["--global-option"]) with no_install_setup_requires(): self.run_setup() @@ -417,40 +235,6 @@ def build_sdist(self, sdist_directory, config_settings=None): '.tar.gz', sdist_directory, config_settings) - def _get_dist_info_dir(self, metadata_directory: Optional[str]) -> Optional[str]: - if not metadata_directory: - return None - dist_info_candidates = list(Path(metadata_directory).glob("*.dist-info")) - assert len(dist_info_candidates) <= 1 - return str(dist_info_candidates[0]) if dist_info_candidates else None - - if not LEGACY_EDITABLE: - - # PEP660 hooks: - # build_editable - # get_requires_for_build_editable - # prepare_metadata_for_build_editable - def build_editable( - self, wheel_directory, config_settings=None, metadata_directory=None - ): - # XXX can or should we hide our editable_wheel command normally? 
- info_dir = self._get_dist_info_dir(metadata_directory) - opts = ["--dist-info-dir", info_dir] if info_dir else [] - cmd = ["editable_wheel", *opts, *self._editable_args(config_settings)] - with suppress_known_deprecation(): - return self._build_with_temp_dir( - cmd, ".whl", wheel_directory, config_settings - ) - - def get_requires_for_build_editable(self, config_settings=None): - return self.get_requires_for_build_wheel(config_settings) - - def prepare_metadata_for_build_editable(self, metadata_directory, - config_settings=None): - return self.prepare_metadata_for_build_wheel( - metadata_directory, config_settings - ) - class _BuildMetaLegacyBackend(_BuildMetaBackend): """Compatibility backend for setuptools @@ -501,11 +285,6 @@ def run_setup(self, setup_script='setup.py'): build_wheel = _BACKEND.build_wheel build_sdist = _BACKEND.build_sdist -if not LEGACY_EDITABLE: - get_requires_for_build_editable = _BACKEND.get_requires_for_build_editable - prepare_metadata_for_build_editable = _BACKEND.prepare_metadata_for_build_editable - build_editable = _BACKEND.build_editable - # The legacy backend __legacy__ = _BuildMetaLegacyBackend() diff --git a/venv/lib/python3.10/site-packages/setuptools/command/__init__.py b/venv/lib/python3.10/site-packages/setuptools/command/__init__.py index 5acd768..b966dce 100644 --- a/venv/lib/python3.10/site-packages/setuptools/command/__init__.py +++ b/venv/lib/python3.10/site-packages/setuptools/command/__init__.py @@ -2,11 +2,7 @@ import sys if 'egg' not in bdist.format_commands: - try: - bdist.format_commands['egg'] = ('bdist_egg', "Python .egg file") - except TypeError: - # For backward compatibility with older distutils (stdlib) - bdist.format_command['egg'] = ('bdist_egg', "Python .egg file") - bdist.format_commands.append('egg') + bdist.format_command['egg'] = ('bdist_egg', "Python .egg file") + bdist.format_commands.append('egg') del bdist, sys diff --git a/venv/lib/python3.10/site-packages/setuptools/command/bdist_egg.py b/venv/lib/python3.10/site-packages/setuptools/command/bdist_egg.py index 11a1c6b..e6b1609 100644 --- a/venv/lib/python3.10/site-packages/setuptools/command/bdist_egg.py +++ b/venv/lib/python3.10/site-packages/setuptools/command/bdist_egg.py @@ -11,10 +11,9 @@ import textwrap import marshal -from pkg_resources import get_build_platform, Distribution +from pkg_resources import get_build_platform, Distribution, ensure_directory from setuptools.extension import Library from setuptools import Command -from .._path import ensure_directory from sysconfig import get_path, get_python_version diff --git a/venv/lib/python3.10/site-packages/setuptools/command/build.py b/venv/lib/python3.10/site-packages/setuptools/command/build.py deleted file mode 100644 index fa3c99e..0000000 --- a/venv/lib/python3.10/site-packages/setuptools/command/build.py +++ /dev/null @@ -1,146 +0,0 @@ -import sys -import warnings -from typing import TYPE_CHECKING, List, Dict -from distutils.command.build import build as _build - -from setuptools import SetuptoolsDeprecationWarning - -if sys.version_info >= (3, 8): - from typing import Protocol -elif TYPE_CHECKING: - from typing_extensions import Protocol -else: - from abc import ABC as Protocol - - -_ORIGINAL_SUBCOMMANDS = {"build_py", "build_clib", "build_ext", "build_scripts"} - - -class build(_build): - # copy to avoid sharing the object with parent class - sub_commands = _build.sub_commands[:] - - def get_sub_commands(self): - subcommands = {cmd[0] for cmd in _build.sub_commands} - if subcommands - _ORIGINAL_SUBCOMMANDS: 
- msg = """ - It seems that you are using `distutils.command.build` to add - new subcommands. Using `distutils` directly is considered deprecated, - please use `setuptools.command.build`. - """ - warnings.warn(msg, SetuptoolsDeprecationWarning) - self.sub_commands = _build.sub_commands - return super().get_sub_commands() - - -class SubCommand(Protocol): - """In order to support editable installations (see :pep:`660`) all - build subcommands **SHOULD** implement this protocol. They also **MUST** inherit - from ``setuptools.Command``. - - When creating an :pep:`editable wheel <660>`, ``setuptools`` will try to evaluate - custom ``build`` subcommands using the following procedure: - - 1. ``setuptools`` will set the ``editable_mode`` attribute to ``True`` - 2. ``setuptools`` will execute the ``run()`` command. - - .. important:: - Subcommands **SHOULD** take advantage of ``editable_mode=True`` to adequate - its behaviour or perform optimisations. - - For example, if a subcommand doesn't need to generate an extra file and - all it does is to copy a source file into the build directory, - ``run()`` **SHOULD** simply "early return". - - Similarly, if the subcommand creates files that would be placed alongside - Python files in the final distribution, during an editable install - the command **SHOULD** generate these files "in place" (i.e. write them to - the original source directory, instead of using the build directory). - Note that ``get_output_mapping()`` should reflect that and include mappings - for "in place" builds accordingly. - - 3. ``setuptools`` use any knowledge it can derive from the return values of - ``get_outputs()`` and ``get_output_mapping()`` to create an editable wheel. - When relevant ``setuptools`` **MAY** attempt to use file links based on the value - of ``get_output_mapping()``. Alternatively, ``setuptools`` **MAY** attempt to use - :doc:`import hooks ` to redirect any attempt to import - to the directory with the original source code and other files built in place. - - Please note that custom sub-commands **SHOULD NOT** rely on ``run()`` being - executed (or not) to provide correct return values for ``get_outputs()``, - ``get_output_mapping()`` or ``get_source_files()``. The ``get_*`` methods should - work independently of ``run()``. - """ - - editable_mode: bool = False - """Boolean flag that will be set to ``True`` when setuptools is used for an - editable installation (see :pep:`660`). - Implementations **SHOULD** explicitly set the default value of this attribute to - ``False``. - When subcommands run, they can use this flag to perform optimizations or change - their behaviour accordingly. - """ - - build_lib: str - """String representing the directory where the build artifacts should be stored, - e.g. ``build/lib``. - For example, if a distribution wants to provide a Python module named ``pkg.mod``, - then a corresponding file should be written to ``{build_lib}/package/module.py``. - A way of thinking about this is that the files saved under ``build_lib`` - would be eventually copied to one of the directories in :obj:`site.PREFIXES` - upon installation. - - A command that produces platform-independent files (e.g. compiling text templates - into Python functions), **CAN** initialize ``build_lib`` by copying its value from - the ``build_py`` command. On the other hand, a command that produces - platform-specific files **CAN** initialize ``build_lib`` by copying its value from - the ``build_ext`` command. 
In general this is done inside the ``finalize_options`` - method with the help of the ``set_undefined_options`` command:: - - def finalize_options(self): - self.set_undefined_options("build_py", ("build_lib", "build_lib")) - ... - """ - - def initialize_options(self): - """(Required by the original :class:`setuptools.Command` interface)""" - - def finalize_options(self): - """(Required by the original :class:`setuptools.Command` interface)""" - - def run(self): - """(Required by the original :class:`setuptools.Command` interface)""" - - def get_source_files(self) -> List[str]: - """ - Return a list of all files that are used by the command to create the expected - outputs. - For example, if your build command transpiles Java files into Python, you should - list here all the Java files. - The primary purpose of this function is to help populating the ``sdist`` - with all the files necessary to build the distribution. - All files should be strings relative to the project root directory. - """ - - def get_outputs(self) -> List[str]: - """ - Return a list of files intended for distribution as they would have been - produced by the build. - These files should be strings in the form of - ``"{build_lib}/destination/file/path"``. - - .. note:: - The return value of ``get_output()`` should include all files used as keys - in ``get_output_mapping()`` plus files that are generated during the build - and don't correspond to any source file already present in the project. - """ - - def get_output_mapping(self) -> Dict[str, str]: - """ - Return a mapping between destination files as they would be produced by the - build (dict keys) into the respective existing (source) files (dict values). - Existing (source) files should be represented as strings relative to the project - root directory. - Destination files should be strings in the form of - ``"{build_lib}/destination/file/path"``. 
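A hedged sketch of what a build subcommand satisfying the protocol documented above could look like; the command name, file paths, and copy logic are hypothetical, not taken from setuptools:

    import os
    import shutil
    from typing import Dict, List
    from setuptools import Command

    class build_greeting(Command):  # hypothetical subcommand
        editable_mode: bool = False

        def initialize_options(self):
            self.build_lib = None

        def finalize_options(self):
            # Platform-independent output, so borrow build_lib from build_py.
            self.set_undefined_options('build_py', ('build_lib', 'build_lib'))

        def run(self):
            if self.editable_mode:
                return  # the source file is usable in place; early return
            dest = os.path.join(self.build_lib, 'pkg', 'greeting.txt')
            os.makedirs(os.path.dirname(dest), exist_ok=True)
            shutil.copy('templates/greeting.txt', dest)

        def get_source_files(self) -> List[str]:
            return ['templates/greeting.txt']

        def get_outputs(self) -> List[str]:
            return [f'{self.build_lib}/pkg/greeting.txt']

        def get_output_mapping(self) -> Dict[str, str]:
            return {f'{self.build_lib}/pkg/greeting.txt': 'templates/greeting.txt'}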
- """ diff --git a/venv/lib/python3.10/site-packages/setuptools/command/build_ext.py b/venv/lib/python3.10/site-packages/setuptools/command/build_ext.py index cbfe3ec..c59eff8 100644 --- a/venv/lib/python3.10/site-packages/setuptools/command/build_ext.py +++ b/venv/lib/python3.10/site-packages/setuptools/command/build_ext.py @@ -2,16 +2,14 @@ import sys import itertools from importlib.machinery import EXTENSION_SUFFIXES -from importlib.util import cache_from_source as _compiled_file_name -from typing import Dict, Iterator, List, Tuple - from distutils.command.build_ext import build_ext as _du_build_ext +from distutils.file_util import copy_file from distutils.ccompiler import new_compiler from distutils.sysconfig import customize_compiler, get_config_var +from distutils.errors import DistutilsError from distutils import log -from setuptools.errors import BaseError -from setuptools.extension import Extension, Library +from setuptools.extension import Library try: # Attempt to use Cython for building extensions, if available @@ -75,9 +73,6 @@ def get_abi3_suffix(): class build_ext(_build_ext): - editable_mode: bool = False - inplace: bool = False - def run(self): """Build extensions in build directory, then copy if --inplace""" old_inplace, self.inplace = self.inplace, 0 @@ -86,62 +81,27 @@ def run(self): if old_inplace: self.copy_extensions_to_source() - def _get_inplace_equivalent(self, build_py, ext: Extension) -> Tuple[str, str]: - fullname = self.get_ext_fullname(ext.name) - filename = self.get_ext_filename(fullname) - modpath = fullname.split('.') - package = '.'.join(modpath[:-1]) - package_dir = build_py.get_package_dir(package) - inplace_file = os.path.join(package_dir, os.path.basename(filename)) - regular_file = os.path.join(self.build_lib, filename) - return (inplace_file, regular_file) - def copy_extensions_to_source(self): build_py = self.get_finalized_command('build_py') for ext in self.extensions: - inplace_file, regular_file = self._get_inplace_equivalent(build_py, ext) + fullname = self.get_ext_fullname(ext.name) + filename = self.get_ext_filename(fullname) + modpath = fullname.split('.') + package = '.'.join(modpath[:-1]) + package_dir = build_py.get_package_dir(package) + dest_filename = os.path.join(package_dir, + os.path.basename(filename)) + src_filename = os.path.join(self.build_lib, filename) # Always copy, even if source is older than destination, to ensure # that the right extensions for the current Python/platform are # used. 
- if os.path.exists(regular_file) or not ext.optional: - self.copy_file(regular_file, inplace_file, level=self.verbose) - - if ext._needs_stub: - inplace_stub = self._get_equivalent_stub(ext, inplace_file) - self._write_stub_file(inplace_stub, ext, compile=True) - # Always compile stub and remove the original (leave the cache behind) - # (this behaviour was observed in previous iterations of the code) - - def _get_equivalent_stub(self, ext: Extension, output_file: str) -> str: - dir_ = os.path.dirname(output_file) - _, _, name = ext.name.rpartition(".") - return f"{os.path.join(dir_, name)}.py" - - def _get_output_mapping(self) -> Iterator[Tuple[str, str]]: - if not self.inplace: - return - - build_py = self.get_finalized_command('build_py') - opt = self.get_finalized_command('install_lib').optimize or "" - - for ext in self.extensions: - inplace_file, regular_file = self._get_inplace_equivalent(build_py, ext) - yield (regular_file, inplace_file) - + copy_file( + src_filename, dest_filename, verbose=self.verbose, + dry_run=self.dry_run + ) if ext._needs_stub: - # This version of `build_ext` always builds artifacts in another dir, - # when "inplace=True" is given it just copies them back. - # This is done in the `copy_extensions_to_source` function, which - # always compile stub files via `_compile_and_remove_stub`. - # At the end of the process, a `.pyc` stub file is created without the - # corresponding `.py`. - - inplace_stub = self._get_equivalent_stub(ext, inplace_file) - regular_stub = self._get_equivalent_stub(ext, regular_file) - inplace_cache = _compiled_file_name(inplace_stub, optimization=opt) - output_cache = _compiled_file_name(regular_stub, optimization=opt) - yield (output_cache, inplace_cache) + self.write_stub(package_dir or os.curdir, ext, True) def get_ext_filename(self, fullname): so_ext = os.getenv('SETUPTOOLS_EXT_SUFFIX') @@ -171,7 +131,6 @@ def initialize_options(self): self.shlib_compiler = None self.shlibs = [] self.ext_map = {} - self.editable_mode = False def finalize_options(self): _build_ext.finalize_options(self) @@ -202,9 +161,6 @@ def finalize_options(self): if ltd and use_stubs and os.curdir not in ext.runtime_library_dirs: ext.runtime_library_dirs.append(os.curdir) - if self.editable_mode: - self.inplace = True - def setup_shlib_compiler(self): compiler = self.shlib_compiler = new_compiler( compiler=self.compiler, dry_run=self.dry_run, force=self.force @@ -245,8 +201,8 @@ def build_extension(self, ext): self.compiler = self.shlib_compiler _build_ext.build_extension(self, ext) if ext._needs_stub: - build_lib = self.get_finalized_command('build_py').build_lib - self.write_stub(build_lib, ext) + cmd = self.get_finalized_command('build_py').build_lib + self.write_stub(cmd, ext) finally: self.compiler = _compiler @@ -259,15 +215,8 @@ def links_to_dynamic(self, ext): pkg = '.'.join(ext._full_name.split('.')[:-1] + ['']) return any(pkg + libname in libnames for libname in ext.libraries) - def get_outputs(self) -> List[str]: - if self.inplace: - return list(self.get_output_mapping().keys()) - return sorted(_build_ext.get_outputs(self) + self.__get_stubs_outputs()) - - def get_output_mapping(self) -> Dict[str, str]: - """See :class:`setuptools.commands.build.SubCommand`""" - mapping = self._get_output_mapping() - return dict(sorted(mapping, key=lambda x: x[0])) + def get_outputs(self): + return _build_ext.get_outputs(self) + self.__get_stubs_outputs() def __get_stubs_outputs(self): # assemble the base name for each extension that needs a stub @@ -287,13 +236,12 @@ 
def __get_output_extensions(self): yield '.pyo' def write_stub(self, output_dir, ext, compile=False): - stub_file = os.path.join(output_dir, *ext._full_name.split('.')) + '.py' - self._write_stub_file(stub_file, ext, compile) - - def _write_stub_file(self, stub_file: str, ext: Extension, compile=False): - log.info("writing stub loader for %s to %s", ext._full_name, stub_file) + log.info("writing stub loader for %s to %s", ext._full_name, + output_dir) + stub_file = (os.path.join(output_dir, *ext._full_name.split('.')) + + '.py') if compile and os.path.exists(stub_file): - raise BaseError(stub_file + " already exists! Please delete.") + raise DistutilsError(stub_file + " already exists! Please delete.") if not self.dry_run: f = open(stub_file, 'w') f.write( @@ -326,19 +274,16 @@ def _write_stub_file(self, stub_file: str, ext: Extension, compile=False): ) f.close() if compile: - self._compile_and_remove_stub(stub_file) - - def _compile_and_remove_stub(self, stub_file: str): - from distutils.util import byte_compile + from distutils.util import byte_compile - byte_compile([stub_file], optimize=0, - force=True, dry_run=self.dry_run) - optimize = self.get_finalized_command('install_lib').optimize - if optimize > 0: - byte_compile([stub_file], optimize=optimize, + byte_compile([stub_file], optimize=0, force=True, dry_run=self.dry_run) - if os.path.exists(stub_file) and not self.dry_run: - os.unlink(stub_file) + optimize = self.get_finalized_command('install_lib').optimize + if optimize > 0: + byte_compile([stub_file], optimize=optimize, + force=True, dry_run=self.dry_run) + if os.path.exists(stub_file) and not self.dry_run: + os.unlink(stub_file) if use_stubs or os.name == 'nt': diff --git a/venv/lib/python3.10/site-packages/setuptools/command/build_py.py b/venv/lib/python3.10/site-packages/setuptools/command/build_py.py index ec06274..c3fdc09 100644 --- a/venv/lib/python3.10/site-packages/setuptools/command/build_py.py +++ b/venv/lib/python3.10/site-packages/setuptools/command/build_py.py @@ -1,4 +1,3 @@ -from functools import partial from glob import glob from distutils.util import convert_path import distutils.command.build_py as orig @@ -9,11 +8,6 @@ import distutils.errors import itertools import stat -import warnings -from pathlib import Path -from typing import Dict, Iterable, Iterator, List, Optional, Tuple - -from setuptools._deprecation_warning import SetuptoolsDeprecationWarning from setuptools.extern.more_itertools import unique_everseen @@ -30,8 +24,6 @@ class build_py(orig.build_py): Also, this version of the 'build_py' command allows you to specify both 'py_modules' and 'packages' in the same setup operation. """ - editable_mode: bool = False - existing_egg_info_dir: Optional[str] = None #: Private API, internal use only. 
def finalize_options(self): orig.build_py.finalize_options(self) @@ -41,18 +33,9 @@ def finalize_options(self): del self.__dict__['data_files'] self.__updated_files = [] - def copy_file(self, infile, outfile, preserve_mode=1, preserve_times=1, - link=None, level=1): - # Overwrite base class to allow using links - if link: - infile = str(Path(infile).resolve()) - outfile = str(Path(outfile).resolve()) - return super().copy_file(infile, outfile, preserve_mode, preserve_times, - link, level) - def run(self): """Build modules, packages, and copy data files to build directory""" - if not (self.py_modules or self.packages) or self.editable_mode: + if not self.py_modules and not self.packages: return if self.py_modules: @@ -115,7 +98,7 @@ def find_data_files(self, package, src_dir): package, src_dir, ) - globs_expanded = map(partial(glob, recursive=True), patterns) + globs_expanded = map(glob, patterns) # flatten the expanded globs into an iterable of matches globs_matches = itertools.chain.from_iterable(globs_expanded) glob_files = filter(os.path.isfile, globs_matches) @@ -125,41 +108,16 @@ def find_data_files(self, package, src_dir): ) return self.exclude_data_files(package, src_dir, files) - def get_outputs(self, include_bytecode=1) -> List[str]: - """See :class:`setuptools.commands.build.SubCommand`""" - if self.editable_mode: - return list(self.get_output_mapping().keys()) - return super().get_outputs(include_bytecode) - - def get_output_mapping(self) -> Dict[str, str]: - """See :class:`setuptools.commands.build.SubCommand`""" - mapping = itertools.chain( - self._get_package_data_output_mapping(), - self._get_module_mapping(), - ) - return dict(sorted(mapping, key=lambda x: x[0])) - - def _get_module_mapping(self) -> Iterator[Tuple[str, str]]: - """Iterate over all modules producing (dest, src) pairs.""" - for (package, module, module_file) in self.find_all_modules(): - package = package.split('.') - filename = self.get_module_outfile(self.build_lib, package, module) - yield (filename, module_file) - - def _get_package_data_output_mapping(self) -> Iterator[Tuple[str, str]]: - """Iterate over package data producing (dest, src) pairs.""" + def build_package_data(self): + """Copy data files into build directory""" for package, src_dir, build_dir, filenames in self.data_files: for filename in filenames: target = os.path.join(build_dir, filename) + self.mkpath(os.path.dirname(target)) srcfile = os.path.join(src_dir, filename) - yield (target, srcfile) - - def build_package_data(self): - """Copy data files into build directory""" - for target, srcfile in self._get_package_data_output_mapping(): - self.mkpath(os.path.dirname(target)) - _outf, _copied = self.copy_file(srcfile, target) - make_writable(target) + outf, copied = self.copy_file(srcfile, target) + make_writable(target) + srcfile = os.path.abspath(srcfile) def analyze_manifest(self): self.manifest_files = mf = {} @@ -170,21 +128,9 @@ def analyze_manifest(self): # Locate package source directory src_dirs[assert_relative(self.get_package_dir(package))] = package - if ( - getattr(self, 'existing_egg_info_dir', None) - and Path(self.existing_egg_info_dir, "SOURCES.txt").exists() - ): - egg_info_dir = self.existing_egg_info_dir - manifest = Path(egg_info_dir, "SOURCES.txt") - files = manifest.read_text(encoding="utf-8").splitlines() - else: - self.run_command('egg_info') - ei_cmd = self.get_finalized_command('egg_info') - egg_info_dir = ei_cmd.egg_info - files = ei_cmd.filelist.files - - check = _IncludePackageDataAbuse() - for path in 
self._filter_build_files(files, egg_info_dir): + self.run_command('egg_info') + ei_cmd = self.get_finalized_command('egg_info') + for path in ei_cmd.filelist.files: d, f = os.path.split(assert_relative(path)) prev = None oldf = f @@ -193,34 +139,10 @@ def analyze_manifest(self): d, df = os.path.split(d) f = os.path.join(df, f) if d in src_dirs: - if f == oldf: - if check.is_module(f): - continue # it's a module, not data - else: - importable = check.importable_subpackage(src_dirs[d], f) - if importable: - check.warn(importable) + if path.endswith('.py') and f == oldf: + continue # it's a module, not data mf.setdefault(src_dirs[d], []).append(path) - def _filter_build_files(self, files: Iterable[str], egg_info: str) -> Iterator[str]: - """ - ``build_meta`` may try to create egg_info outside of the project directory, - and this can be problematic for certain plugins (reported in issue #3500). - - Extensions might also include between their sources files created on the - ``build_lib`` and ``build_temp`` directories. - - This function should filter this case of invalid files out. - """ - build = self.get_finalized_command("build") - build_dirs = (egg_info, self.build_lib, build.build_temp, build.build_base) - norm_dirs = [os.path.normpath(p) for p in build_dirs if p] - - for file in files: - norm_path = os.path.normpath(file) - if not os.path.isabs(file) or all(d not in norm_path for d in norm_dirs): - yield file - def get_data_files(self): pass # Lazily compute data files in _get_data_files() function. @@ -257,8 +179,6 @@ def check_package(self, package, package_dir): def initialize_options(self): self.packages_checked = {} orig.build_py.initialize_options(self) - self.editable_mode = False - self.existing_egg_info_dir = None def get_package_dir(self, package): res = orig.build_py.get_package_dir(self, package) @@ -320,49 +240,3 @@ def assert_relative(path): % path ) raise DistutilsSetupError(msg) - - -class _IncludePackageDataAbuse: - """Inform users that package or module is included as 'data file'""" - - MESSAGE = """\ - Installing {importable!r} as data is deprecated, please list it in `packages`. - !!\n\n - ############################ - # Package would be ignored # - ############################ - Python recognizes {importable!r} as an importable package, - but it is not listed in the `packages` configuration of setuptools. - - {importable!r} has been automatically added to the distribution only - because it may contain data files, but this behavior is likely to change - in future versions of setuptools (and therefore is considered deprecated). - - Please make sure that {importable!r} is included as a package by using - the `packages` configuration field or the proper discovery methods - (for example by using `find_namespace_packages(...)`/`find_namespace:` - instead of `find_packages(...)`/`find:`). - - You can read more about "package discovery" and "data files" on setuptools - documentation page. - \n\n!! 
- """ - - def __init__(self): - self._already_warned = set() - - def is_module(self, file): - return file.endswith(".py") and file[:-len(".py")].isidentifier() - - def importable_subpackage(self, parent, file): - pkg = Path(file).parent - parts = list(itertools.takewhile(str.isidentifier, pkg.parts)) - if parts: - return ".".join([parent, *parts]) - return None - - def warn(self, importable): - if importable not in self._already_warned: - msg = textwrap.dedent(self.MESSAGE).format(importable=importable) - warnings.warn(msg, SetuptoolsDeprecationWarning, stacklevel=2) - self._already_warned.add(importable) diff --git a/venv/lib/python3.10/site-packages/setuptools/command/dist_info.py b/venv/lib/python3.10/site-packages/setuptools/command/dist_info.py index 0685c94..c45258f 100644 --- a/venv/lib/python3.10/site-packages/setuptools/command/dist_info.py +++ b/venv/lib/python3.10/site-packages/setuptools/command/dist_info.py @@ -4,18 +4,9 @@ """ import os -import re -import shutil -import sys -import warnings -from contextlib import contextmanager -from inspect import cleandoc -from pathlib import Path from distutils.core import Command from distutils import log -from setuptools.extern import packaging -from setuptools._deprecation_warning import SetuptoolsDeprecationWarning class dist_info(Command): @@ -24,119 +15,22 @@ class dist_info(Command): user_options = [ ('egg-base=', 'e', "directory containing .egg-info directories" - " (default: top of the source tree)" - " DEPRECATED: use --output-dir."), - ('output-dir=', 'o', "directory inside of which the .dist-info will be" - "created (default: top of the source tree)"), - ('tag-date', 'd', "Add date stamp (e.g. 20050528) to version number"), - ('tag-build=', 'b', "Specify explicit tag to add to version number"), - ('no-date', 'D', "Don't include date stamp [default]"), - ('keep-egg-info', None, "*TRANSITIONAL* will be removed in the future"), + " (default: top of the source tree)"), ] - boolean_options = ['tag-date', 'keep-egg-info'] - negative_opt = {'no-date': 'tag-date'} - def initialize_options(self): self.egg_base = None - self.output_dir = None - self.name = None - self.dist_info_dir = None - self.tag_date = None - self.tag_build = None - self.keep_egg_info = False def finalize_options(self): - if self.egg_base: - msg = "--egg-base is deprecated for dist_info command. Use --output-dir." 
- warnings.warn(msg, SetuptoolsDeprecationWarning) - self.output_dir = self.egg_base or self.output_dir - - dist = self.distribution - project_dir = dist.src_root or os.curdir - self.output_dir = Path(self.output_dir or project_dir) - - egg_info = self.reinitialize_command("egg_info") - egg_info.egg_base = str(self.output_dir) - - if self.tag_date: - egg_info.tag_date = self.tag_date - else: - self.tag_date = egg_info.tag_date - - if self.tag_build: - egg_info.tag_build = self.tag_build - else: - self.tag_build = egg_info.tag_build - - egg_info.finalize_options() - self.egg_info = egg_info - - name = _safe(dist.get_name()) - version = _version(dist.get_version()) - self.name = f"{name}-{version}" - self.dist_info_dir = os.path.join(self.output_dir, f"{self.name}.dist-info") - - @contextmanager - def _maybe_bkp_dir(self, dir_path: str, requires_bkp: bool): - if requires_bkp: - bkp_name = f"{dir_path}.__bkp__" - _rm(bkp_name, ignore_errors=True) - _copy(dir_path, bkp_name, dirs_exist_ok=True, symlinks=True) - try: - yield - finally: - _rm(dir_path, ignore_errors=True) - shutil.move(bkp_name, dir_path) - else: - yield + pass def run(self): - self.output_dir.mkdir(parents=True, exist_ok=True) - self.egg_info.run() - egg_info_dir = self.egg_info.egg_info - assert os.path.isdir(egg_info_dir), ".egg-info dir should have been created" + egg_info = self.get_finalized_command('egg_info') + egg_info.egg_base = self.egg_base + egg_info.finalize_options() + egg_info.run() + dist_info_dir = egg_info.egg_info[:-len('.egg-info')] + '.dist-info' + log.info("creating '{}'".format(os.path.abspath(dist_info_dir))) - log.info("creating '{}'".format(os.path.abspath(self.dist_info_dir))) bdist_wheel = self.get_finalized_command('bdist_wheel') - - # TODO: if bdist_wheel if merged into setuptools, just add "keep_egg_info" there - with self._maybe_bkp_dir(egg_info_dir, self.keep_egg_info): - bdist_wheel.egg2dist(egg_info_dir, self.dist_info_dir) - - -def _safe(component: str) -> str: - """Escape a component used to form a wheel name according to PEP 491""" - return re.sub(r"[^\w\d.]+", "_", component) - - -def _version(version: str) -> str: - """Convert an arbitrary string to a version string.""" - v = version.replace(' ', '.') - try: - return str(packaging.version.Version(v)).replace("-", "_") - except packaging.version.InvalidVersion: - msg = f"""Invalid version: {version!r}. - !!\n\n - ################### - # Invalid version # - ################### - {version!r} is not valid according to PEP 440.\n - Please make sure specify a valid version for your package. - Also note that future releases of setuptools may halt the build process - if an invalid version is given. - \n\n!! 
- """ - warnings.warn(cleandoc(msg)) - return _safe(v).strip("_") - - -def _rm(dir_name, **opts): - if os.path.isdir(dir_name): - shutil.rmtree(dir_name, **opts) - - -def _copy(src, dst, **opts): - if sys.version_info < (3, 8): - opts.pop("dirs_exist_ok", None) - shutil.copytree(src, dst, **opts) + bdist_wheel.egg2dist(egg_info.egg_info, dist_info_dir) diff --git a/venv/lib/python3.10/site-packages/setuptools/command/easy_install.py b/venv/lib/python3.10/site-packages/setuptools/command/easy_install.py index 444d3b3..2ebf9e5 100644 --- a/venv/lib/python3.10/site-packages/setuptools/command/easy_install.py +++ b/venv/lib/python3.10/site-packages/setuptools/command/easy_install.py @@ -17,10 +17,10 @@ DistutilsArgError, DistutilsOptionError, DistutilsError, DistutilsPlatformError, ) +from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS from distutils import log, dir_util from distutils.command.build_scripts import first_line_re from distutils.spawn import find_executable -from distutils.command import install import sys import os import zipimport @@ -39,10 +39,9 @@ import shlex import io import configparser -import sysconfig -from sysconfig import get_path +from sysconfig import get_config_vars, get_path from setuptools import SetuptoolsDeprecationWarning @@ -56,21 +55,18 @@ from setuptools.command import bdist_egg, egg_info from setuptools.wheel import Wheel from pkg_resources import ( - normalize_path, resource_string, + yield_lines, normalize_path, resource_string, ensure_directory, get_distribution, find_distributions, Environment, Requirement, Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound, VersionConflict, DEVELOP_DIST, ) import pkg_resources -from .._path import ensure_directory -from ..extern.jaraco.text import yield_lines - # Turn on PEP440Warnings warnings.filterwarnings("default", category=pkg_resources.PEP440Warning) __all__ = [ - 'easy_install', 'PthDistributions', 'extract_wininst_cfg', + 'samefile', 'easy_install', 'PthDistributions', 'extract_wininst_cfg', 'get_exe_prefixes', ] @@ -79,6 +75,22 @@ def is_64bit(): return struct.calcsize("P") == 8 +def samefile(p1, p2): + """ + Determine if two paths reference the same file. + + Augments os.path.samefile to work on Windows and + suppresses errors if the path doesn't exist. 
+ """ + both_exist = os.path.exists(p1) and os.path.exists(p2) + use_samefile = hasattr(os.path, 'samefile') and both_exist + if use_samefile: + return os.path.samefile(p1, p2) + norm_p1 = os.path.normpath(os.path.normcase(p1)) + norm_p2 = os.path.normpath(os.path.normcase(p2)) + return norm_p1 == norm_p2 + + def _to_bytes(s): return s.encode('utf8') @@ -126,6 +138,8 @@ class easy_install(Command): ('local-snapshots-ok', 'l', "allow building eggs from local checkouts"), ('version', None, "print version information and exit"), + ('install-layout=', None, "installation layout to choose (known values: deb)"), + ('force-installation-into-system-dir', '0', "force installation into /usr"), ('no-find-links', None, "Don't load find-links defined in packages being installed"), ('user', None, "install in user site-package '%s'" % site.USER_SITE) @@ -133,7 +147,7 @@ class easy_install(Command): boolean_options = [ 'zip-ok', 'multi-version', 'exclude-scripts', 'upgrade', 'always-copy', 'editable', - 'no-deps', 'local-snapshots-ok', 'version', + 'no-deps', 'local-snapshots-ok', 'version', 'force-installation-into-system-dir' 'user' ] @@ -169,8 +183,12 @@ def initialize_options(self): self.install_data = None self.install_base = None self.install_platbase = None - self.install_userbase = site.USER_BASE - self.install_usersite = site.USER_SITE + if site.ENABLE_USER_SITE: + self.install_userbase = site.USER_BASE + self.install_usersite = site.USER_SITE + else: + self.install_userbase = None + self.install_usersite = None self.no_find_links = None # Options not specifiable via command line @@ -178,6 +196,11 @@ def initialize_options(self): self.pth_file = self.always_copy_from = None self.site_dirs = None self.installed_projects = {} + # enable custom installation, known values: deb + self.install_layout = None + self.force_installation_into_system_dir = None + self.multiarch = None + # Always read easy_install options, even if we are subclassed, or have # an independent instance created. 
This ensures that defaults will # always come from the standard configuration file(s)' "easy_install" @@ -220,38 +243,28 @@ def finalize_options(self): # noqa: C901 # is too complex (25) # FIXME self.version and self._render_version() py_version = sys.version.split()[0] + prefix, exec_prefix = get_config_vars('prefix', 'exec_prefix') - self.config_vars = dict(sysconfig.get_config_vars()) - - self.config_vars.update({ + self.config_vars = { 'dist_name': self.distribution.get_name(), 'dist_version': self.distribution.get_version(), 'dist_fullname': self.distribution.get_fullname(), 'py_version': py_version, - 'py_version_short': f'{sys.version_info.major}.{sys.version_info.minor}', - 'py_version_nodot': f'{sys.version_info.major}{sys.version_info.minor}', - 'sys_prefix': self.config_vars['prefix'], - 'sys_exec_prefix': self.config_vars['exec_prefix'], + 'py_version_short': py_version[0:3], + 'py_version_nodot': py_version[0] + py_version[2], + 'sys_prefix': prefix, + 'prefix': prefix, + 'sys_exec_prefix': exec_prefix, + 'exec_prefix': exec_prefix, # Only python 3.2+ has abiflags 'abiflags': getattr(sys, 'abiflags', ''), - 'platlibdir': getattr(sys, 'platlibdir', 'lib'), - }) - with contextlib.suppress(AttributeError): - # only for distutils outside stdlib - self.config_vars.update({ - 'implementation_lower': install._get_implementation().lower(), - 'implementation': install._get_implementation(), - }) - - # pypa/distutils#113 Python 3.9 compat - self.config_vars.setdefault( - 'py_version_nodot_plat', - getattr(sys, 'windir', '').replace('.', ''), - ) + } + + if site.ENABLE_USER_SITE: + self.config_vars['userbase'] = self.install_userbase + self.config_vars['usersite'] = self.install_usersite - self.config_vars['userbase'] = self.install_userbase - self.config_vars['usersite'] = self.install_usersite - if self.user and not site.ENABLE_USER_SITE: + elif self.user: log.warn("WARNING: The user site-packages directory is disabled.") self._fix_install_dir_for_user_site() @@ -259,6 +272,15 @@ def finalize_options(self): # noqa: C901 # is too complex (25) # FIXME self.expand_basedirs() self.expand_dirs() + if self.install_layout: + if not self.install_layout.lower() in ['deb']: + raise DistutilsOptionError("unknown value for --install-layout") + self.install_layout = self.install_layout.lower() + + import sysconfig + if sys.version_info[:2] >= (3, 3): + self.multiarch = sysconfig.get_config_var('MULTIARCH') + self._expand( 'install_dir', 'script_dir', 'build_directory', 'site_dirs', @@ -285,16 +307,38 @@ def finalize_options(self): # noqa: C901 # is too complex (25) # FIXME if self.user and self.install_purelib: self.install_dir = self.install_purelib self.script_dir = self.install_scripts + + if self.prefix == '/usr' and not self.force_installation_into_system_dir: + raise DistutilsOptionError("""installation into /usr + +Trying to install into the system managed parts of the file system. Please +consider to install to another location, or use the option +--force-installation-into-system-dir to overwrite this warning. +""") + # default --record from the install command self.set_undefined_options('install', ('record', 'record')) + # Should this be moved to the if statement below? 
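# --- editor's note (not part of the original diff) -------------------------
# The two sides of the config_vars hunk above compute `py_version_short`
# differently, and the difference matters from Python 3.10 onward: slicing
# the version string keeps only three characters, so "3.10.4" degrades to
# "3.1", whereas building the value from `sys.version_info` stays correct:
import sys

py_version = "3.10.4"                # e.g. sys.version.split()[0]
old_short = py_version[0:3]          # "3.1"  -- wrong on 3.10+
new_short = f"{sys.version_info.major}.{sys.version_info.minor}"
assert old_short == "3.1"
# ---------------------------------------------------------------------------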
It's not used + # elsewhere + normpath = map(normalize_path, sys.path) self.all_site_dirs = get_site_dirs() - self.all_site_dirs.extend(self._process_site_dirs(self.site_dirs)) - + if self.site_dirs is not None: + site_dirs = [ + os.path.expanduser(s.strip()) for s in + self.site_dirs.split(',') + ] + for d in site_dirs: + if not os.path.isdir(d): + log.warn("%s (in --site-dirs) does not exist", d) + elif normalize_path(d) not in normpath: + raise DistutilsOptionError( + d + " (in --site-dirs) is not on sys.path" + ) + else: + self.all_site_dirs.append(normalize_path(d)) if not self.editable: self.check_site_dir() - default_index = os.getenv("__EASYINSTALL_INDEX", "https://pypi.org/simple/") - # ^ Private API for testing purposes only - self.index_url = self.index_url or default_index + self.index_url = self.index_url or "https://pypi.org/simple/" self.shadow_path = self.all_site_dirs[:] for path_item in self.install_dir, normalize_path(self.script_dir): if path_item not in self.shadow_path: @@ -320,7 +364,15 @@ def finalize_options(self): # noqa: C901 # is too complex (25) # FIXME if not self.no_find_links: self.package_index.add_find_links(self.find_links) self.set_undefined_options('install_lib', ('optimize', 'optimize')) - self.optimize = self._validate_optimize(self.optimize) + if not isinstance(self.optimize, int): + try: + self.optimize = int(self.optimize) + if not (0 <= self.optimize <= 2): + raise ValueError + except ValueError as e: + raise DistutilsOptionError( + "--optimize must be 0, 1, or 2" + ) from e if self.editable and not self.build_directory: raise DistutilsArgError( @@ -332,44 +384,11 @@ def finalize_options(self): # noqa: C901 # is too complex (25) # FIXME self.outputs = [] - @staticmethod - def _process_site_dirs(site_dirs): - if site_dirs is None: - return - - normpath = map(normalize_path, sys.path) - site_dirs = [ - os.path.expanduser(s.strip()) for s in - site_dirs.split(',') - ] - for d in site_dirs: - if not os.path.isdir(d): - log.warn("%s (in --site-dirs) does not exist", d) - elif normalize_path(d) not in normpath: - raise DistutilsOptionError( - d + " (in --site-dirs) is not on sys.path" - ) - else: - yield normalize_path(d) - - @staticmethod - def _validate_optimize(value): - try: - value = int(value) - if value not in range(3): - raise ValueError - except ValueError as e: - raise DistutilsOptionError( - "--optimize must be 0, 1, or 2" - ) from e - - return value - def _fix_install_dir_for_user_site(self): """ Fix the install_dir if "--user" was used. 
""" - if not self.user: + if not self.user or not site.ENABLE_USER_SITE: return self.create_home_path() @@ -377,7 +396,7 @@ def _fix_install_dir_for_user_site(self): msg = "User base directory is not specified" raise DistutilsPlatformError(msg) self.install_base = self.install_platbase = self.install_userbase - scheme_name = f'{os.name}_user' + scheme_name = os.name.replace('posix', 'unix') + '_user' self.select_scheme(scheme_name) def _expand_attrs(self, attrs): @@ -419,7 +438,7 @@ def run(self, show_deprecation=True): for spec in self.args: self.easy_install(spec, not self.no_deps) if self.record: - outputs = self.outputs + outputs = list(sorted(self.outputs)) if self.root: # strip any package prefix root_len = len(self.root) for counter in range(len(outputs)): @@ -717,11 +736,13 @@ def install_item(self, spec, download, tmpdir, deps, install_needed=False): return dist def select_scheme(self, name): - try: - install._select_scheme(self, name) - except AttributeError: - # stdlib distutils - install.install.select_scheme(self, name.replace('posix', 'unix')) + """Sets the install directories by applying the install schemes.""" + # it's the caller's problem if they supply a bad name! + scheme = INSTALL_SCHEMES[name] + for key in SCHEME_KEYS: + attrname = 'install_' + key + if getattr(self, attrname) is None: + setattr(self, attrname, scheme[key]) # FIXME: 'easy_install.process_distribution' is too complex (12) def process_distribution( # noqa: C901 @@ -918,9 +939,7 @@ def install_egg(self, egg_path, tmpdir): # noqa: C901 ensure_directory(destination) dist = self.egg_distribution(egg_path) - if not ( - os.path.exists(destination) and os.path.samefile(egg_path, destination) - ): + if not samefile(egg_path, destination): if os.path.isdir(destination) and not os.path.islink(destination): dir_util.remove_tree(destination, dry_run=self.dry_run) elif os.path.exists(destination): @@ -1329,16 +1348,33 @@ def create_home_path(self): if not self.user: return home = convert_path(os.path.expanduser("~")) - for path in only_strs(self.config_vars.values()): + for name, path in self.config_vars.items(): if path.startswith(home) and not os.path.isdir(path): self.debug_print("os.makedirs('%s', 0o700)" % path) os.makedirs(path, 0o700) + if sys.version[:3] in ('2.3', '2.4', '2.5') or 'real_prefix' in sys.__dict__: + sitedir_name = 'site-packages' + else: + sitedir_name = 'dist-packages' + INSTALL_SCHEMES = dict( posix=dict( install_dir='$base/lib/python$py_version_short/site-packages', script_dir='$base/bin', ), + unix_local = dict( + install_dir = '$base/local/lib/python$py_version_short/%s' % sitedir_name, + script_dir = '$base/local/bin', + ), + posix_local = dict( + install_dir = '$base/local/lib/python$py_version_short/%s' % sitedir_name, + script_dir = '$base/local/bin', + ), + deb_system = dict( + install_dir = '$base/lib/python3/%s' % sitedir_name, + script_dir = '$base/bin', + ), ) DEFAULT_SCHEME = dict( @@ -1349,11 +1385,18 @@ def create_home_path(self): def _expand(self, *attrs): config_vars = self.get_finalized_command('install').config_vars - if self.prefix: + if self.prefix or self.install_layout: + if self.install_layout and self.install_layout in ['deb']: + scheme_name = "deb_system" + self.prefix = '/usr' + elif self.prefix or 'real_prefix' in sys.__dict__: + scheme_name = os.name + else: + scheme_name = "posix_local" # Set default install_dir/scripts from --prefix - config_vars = dict(config_vars) + config_vars = config_vars.copy() config_vars['base'] = self.prefix - scheme = 
self.INSTALL_SCHEMES.get(os.name, self.DEFAULT_SCHEME) + scheme = self.INSTALL_SCHEMES.get(scheme_name,self.DEFAULT_SCHEME) for attr, val in scheme.items(): if getattr(self, attr, None) is None: setattr(self, attr, val) @@ -1395,11 +1438,17 @@ def get_site_dirs(): sitedirs.append(os.path.join(prefix, "Lib", "site-packages")) elif os.sep == '/': sitedirs.extend([ + os.path.join( + prefix, + "local/lib", + "python" + sys.version[:3], + "dist-packages", + ), os.path.join( prefix, "lib", "python{}.{}".format(*sys.version_info), - "site-packages", + "dist-packages", ), os.path.join(prefix, "lib", "site-python"), ]) @@ -1578,7 +1627,7 @@ def __init__(self, filename, sitedirs=()): self.sitedirs = list(map(normalize_path, sitedirs)) self.basedir = normalize_path(os.path.dirname(self.filename)) self._load() - super().__init__([], None, None) + Environment.__init__(self, [], None, None) for path in yield_lines(self.paths): list(map(self.add, find_distributions(path, True))) @@ -1651,14 +1700,14 @@ def add(self, dist): if new_path: self.paths.append(dist.location) self.dirty = True - super().add(dist) + Environment.add(self, dist) def remove(self, dist): """Remove `dist` from the distribution map""" while dist.location in self.paths: self.paths.remove(dist.location) self.dirty = True - super().remove(dist) + Environment.remove(self, dist) def make_relative(self, path): npath, last = os.path.split(normalize_path(path)) @@ -2299,13 +2348,6 @@ def current_umask(): return tmp -def only_strs(values): - """ - Exclude non-str values. Ref #3063. - """ - return filter(lambda val: isinstance(val, str), values) - - class EasyInstallDeprecationWarning(SetuptoolsDeprecationWarning): """ Warning for EasyInstall deprecations, bypassing suppression. diff --git a/venv/lib/python3.10/site-packages/setuptools/command/editable_wheel.py b/venv/lib/python3.10/site-packages/setuptools/command/editable_wheel.py deleted file mode 100644 index d60cfbe..0000000 --- a/venv/lib/python3.10/site-packages/setuptools/command/editable_wheel.py +++ /dev/null @@ -1,844 +0,0 @@ -""" -Create a wheel that, when installed, will make the source package 'editable' -(add it to the interpreter's path, including metadata) per PEP 660. Replaces -'setup.py develop'. - -.. note:: - One of the mechanisms briefly mentioned in PEP 660 to implement editable installs is - to create a separated directory inside ``build`` and use a .pth file to point to that - directory. In the context of this file such directory is referred as - *auxiliary build directory* or ``auxiliary_dir``. 
-""" - -import logging -import os -import re -import shutil -import sys -import traceback -import warnings -from contextlib import suppress -from enum import Enum -from inspect import cleandoc -from itertools import chain -from pathlib import Path -from tempfile import TemporaryDirectory -from typing import ( - TYPE_CHECKING, - Dict, - Iterable, - Iterator, - List, - Mapping, - Optional, - Tuple, - TypeVar, - Union, -) - -from setuptools import Command, SetuptoolsDeprecationWarning, errors, namespaces -from setuptools.command.build_py import build_py as build_py_cls -from setuptools.discovery import find_package_path -from setuptools.dist import Distribution - -if TYPE_CHECKING: - from wheel.wheelfile import WheelFile # noqa - -if sys.version_info >= (3, 8): - from typing import Protocol -elif TYPE_CHECKING: - from typing_extensions import Protocol -else: - from abc import ABC as Protocol - -_Path = Union[str, Path] -_P = TypeVar("_P", bound=_Path) -_logger = logging.getLogger(__name__) - - -class _EditableMode(Enum): - """ - Possible editable installation modes: - `lenient` (new files automatically added to the package - DEFAULT); - `strict` (requires a new installation when files are added/removed); or - `compat` (attempts to emulate `python setup.py develop` - DEPRECATED). - """ - - STRICT = "strict" - LENIENT = "lenient" - COMPAT = "compat" # TODO: Remove `compat` after Dec/2022. - - @classmethod - def convert(cls, mode: Optional[str]) -> "_EditableMode": - if not mode: - return _EditableMode.LENIENT # default - - _mode = mode.upper() - if _mode not in _EditableMode.__members__: - raise errors.OptionError(f"Invalid editable mode: {mode!r}. Try: 'strict'.") - - if _mode == "COMPAT": - msg = """ - The 'compat' editable mode is transitional and will be removed - in future versions of `setuptools`. - Please adapt your code accordingly to use either the 'strict' or the - 'lenient' modes. - - For more information, please check: - https://setuptools.pypa.io/en/latest/userguide/development_mode.html - """ - warnings.warn(msg, SetuptoolsDeprecationWarning) - - return _EditableMode[_mode] - - -_STRICT_WARNING = """ -New or renamed files may not be automatically picked up without a new installation. -""" - -_LENIENT_WARNING = """ -Options like `package-data`, `include/exclude-package-data` or -`packages.find.exclude/include` may have no effect. -""" - - -class editable_wheel(Command): - """Build 'editable' wheel for development. - (This command is reserved for internal use of setuptools). 
- """ - - description = "create a PEP 660 'editable' wheel" - - user_options = [ - ("dist-dir=", "d", "directory to put final built distributions in"), - ("dist-info-dir=", "I", "path to a pre-build .dist-info directory"), - ("mode=", None, cleandoc(_EditableMode.__doc__ or "")), - ] - - def initialize_options(self): - self.dist_dir = None - self.dist_info_dir = None - self.project_dir = None - self.mode = None - - def finalize_options(self): - dist = self.distribution - self.project_dir = dist.src_root or os.curdir - self.package_dir = dist.package_dir or {} - self.dist_dir = Path(self.dist_dir or os.path.join(self.project_dir, "dist")) - - def run(self): - try: - self.dist_dir.mkdir(exist_ok=True) - self._ensure_dist_info() - - # Add missing dist_info files - self.reinitialize_command("bdist_wheel") - bdist_wheel = self.get_finalized_command("bdist_wheel") - bdist_wheel.write_wheelfile(self.dist_info_dir) - - self._create_wheel_file(bdist_wheel) - except Exception as ex: - traceback.print_exc() - msg = """ - Support for editable installs via PEP 660 was recently introduced - in `setuptools`. If you are seeing this error, please report to: - - https://github.com/pypa/setuptools/issues - - Meanwhile you can try the legacy behavior by setting an - environment variable and trying to install again: - - SETUPTOOLS_ENABLE_FEATURES="legacy-editable" - """ - raise errors.InternalError(cleandoc(msg)) from ex - - def _ensure_dist_info(self): - if self.dist_info_dir is None: - dist_info = self.reinitialize_command("dist_info") - dist_info.output_dir = self.dist_dir - dist_info.ensure_finalized() - dist_info.run() - self.dist_info_dir = dist_info.dist_info_dir - else: - assert str(self.dist_info_dir).endswith(".dist-info") - assert Path(self.dist_info_dir, "METADATA").exists() - - def _install_namespaces(self, installation_dir, pth_prefix): - # XXX: Only required to support the deprecated namespace practice - dist = self.distribution - if not dist.namespace_packages: - return - - src_root = Path(self.project_dir, self.package_dir.get("", ".")).resolve() - installer = _NamespaceInstaller(dist, installation_dir, pth_prefix, src_root) - installer.install_namespaces() - - def _find_egg_info_dir(self) -> Optional[str]: - parent_dir = Path(self.dist_info_dir).parent if self.dist_info_dir else Path() - candidates = map(str, parent_dir.glob("*.egg-info")) - return next(candidates, None) - - def _configure_build( - self, name: str, unpacked_wheel: _Path, build_lib: _Path, tmp_dir: _Path - ): - """Configure commands to behave in the following ways: - - - Build commands can write to ``build_lib`` if they really want to... - (but this folder is expected to be ignored and modules are expected to live - in the project directory...) - - Binary extensions should be built in-place (editable_mode = True) - - Data/header/script files are not part of the "editable" specification - so they are written directly to the unpacked_wheel directory. 
- """ - # Non-editable files (data, headers, scripts) are written directly to the - # unpacked_wheel - - dist = self.distribution - wheel = str(unpacked_wheel) - build_lib = str(build_lib) - data = str(Path(unpacked_wheel, f"{name}.data", "data")) - headers = str(Path(unpacked_wheel, f"{name}.data", "headers")) - scripts = str(Path(unpacked_wheel, f"{name}.data", "scripts")) - - # egg-info may be generated again to create a manifest (used for package data) - egg_info = dist.reinitialize_command("egg_info", reinit_subcommands=True) - egg_info.egg_base = str(tmp_dir) - egg_info.ignore_egg_info_in_manifest = True - - build = dist.reinitialize_command("build", reinit_subcommands=True) - install = dist.reinitialize_command("install", reinit_subcommands=True) - - build.build_platlib = build.build_purelib = build.build_lib = build_lib - install.install_purelib = install.install_platlib = install.install_lib = wheel - install.install_scripts = build.build_scripts = scripts - install.install_headers = headers - install.install_data = data - - install_scripts = dist.get_command_obj("install_scripts") - install_scripts.no_ep = True - - build.build_temp = str(tmp_dir) - - build_py = dist.get_command_obj("build_py") - build_py.compile = False - build_py.existing_egg_info_dir = self._find_egg_info_dir() - - self._set_editable_mode() - - build.ensure_finalized() - install.ensure_finalized() - - def _set_editable_mode(self): - """Set the ``editable_mode`` flag in the build sub-commands""" - dist = self.distribution - build = dist.get_command_obj("build") - for cmd_name in build.get_sub_commands(): - cmd = dist.get_command_obj(cmd_name) - if hasattr(cmd, "editable_mode"): - cmd.editable_mode = True - elif hasattr(cmd, "inplace"): - cmd.inplace = True # backward compatibility with distutils - - def _collect_build_outputs(self) -> Tuple[List[str], Dict[str, str]]: - files: List[str] = [] - mapping: Dict[str, str] = {} - build = self.get_finalized_command("build") - - for cmd_name in build.get_sub_commands(): - cmd = self.get_finalized_command(cmd_name) - if hasattr(cmd, "get_outputs"): - files.extend(cmd.get_outputs() or []) - if hasattr(cmd, "get_output_mapping"): - mapping.update(cmd.get_output_mapping() or {}) - - return files, mapping - - def _run_build_commands( - self, dist_name: str, unpacked_wheel: _Path, build_lib: _Path, tmp_dir: _Path - ) -> Tuple[List[str], Dict[str, str]]: - self._configure_build(dist_name, unpacked_wheel, build_lib, tmp_dir) - self._run_build_subcommands() - files, mapping = self._collect_build_outputs() - self._run_install("headers") - self._run_install("scripts") - self._run_install("data") - return files, mapping - - def _run_build_subcommands(self): - """ - Issue #3501 indicates that some plugins/customizations might rely on: - - 1. ``build_py`` not running - 2. ``build_py`` always copying files to ``build_lib`` - - However both these assumptions may be false in editable_wheel. - This method implements a temporary workaround to support the ecosystem - while the implementations catch up. - """ - # TODO: Once plugins/customisations had the chance to catch up, replace - # `self._run_build_subcommands()` with `self.run_command("build")`. - # Also remove _safely_run, TestCustomBuildPy. Suggested date: Aug/2023. 
- build: Command = self.get_finalized_command("build") - for name in build.get_sub_commands(): - cmd = self.get_finalized_command(name) - if name == "build_py" and type(cmd) != build_py_cls: - self._safely_run(name) - else: - self.run_command(name) - - def _safely_run(self, cmd_name: str): - try: - return self.run_command(cmd_name) - except Exception: - msg = f"""{traceback.format_exc()}\n - If you are seeing this warning it is very likely that a setuptools - plugin or customization overrides the `{cmd_name}` command, without - taking into consideration how editable installs run build steps - starting from v64.0.0. - - Plugin authors and developers relying on custom build steps are encouraged - to update their `{cmd_name}` implementation considering the information in - https://setuptools.pypa.io/en/latest/userguide/extension.html - about editable installs. - - For the time being `setuptools` will silence this error and ignore - the faulty command, but this behaviour will change in future versions.\n - """ - warnings.warn(msg, SetuptoolsDeprecationWarning, stacklevel=2) - - def _create_wheel_file(self, bdist_wheel): - from wheel.wheelfile import WheelFile - - dist_info = self.get_finalized_command("dist_info") - dist_name = dist_info.name - tag = "-".join(bdist_wheel.get_tag()) - build_tag = "0.editable" # According to PEP 427 needs to start with digit - archive_name = f"{dist_name}-{build_tag}-{tag}.whl" - wheel_path = Path(self.dist_dir, archive_name) - if wheel_path.exists(): - wheel_path.unlink() - - unpacked_wheel = TemporaryDirectory(suffix=archive_name) - build_lib = TemporaryDirectory(suffix=".build-lib") - build_tmp = TemporaryDirectory(suffix=".build-temp") - - with unpacked_wheel as unpacked, build_lib as lib, build_tmp as tmp: - unpacked_dist_info = Path(unpacked, Path(self.dist_info_dir).name) - shutil.copytree(self.dist_info_dir, unpacked_dist_info) - self._install_namespaces(unpacked, dist_info.name) - files, mapping = self._run_build_commands(dist_name, unpacked, lib, tmp) - strategy = self._select_strategy(dist_name, tag, lib) - with strategy, WheelFile(wheel_path, "w") as wheel_obj: - strategy(wheel_obj, files, mapping) - wheel_obj.write_files(unpacked) - - return wheel_path - - def _run_install(self, category: str): - has_category = getattr(self.distribution, f"has_{category}", None) - if has_category and has_category(): - _logger.info(f"Installing {category} as non editable") - self.run_command(f"install_{category}") - - def _select_strategy( - self, - name: str, - tag: str, - build_lib: _Path, - ) -> "EditableStrategy": - """Decides which strategy to use to implement an editable installation.""" - build_name = f"__editable__.{name}-{tag}" - project_dir = Path(self.project_dir) - mode = _EditableMode.convert(self.mode) - - if mode is _EditableMode.STRICT: - auxiliary_dir = _empty_dir(Path(self.project_dir, "build", build_name)) - return _LinkTree(self.distribution, name, auxiliary_dir, build_lib) - - packages = _find_packages(self.distribution) - has_simple_layout = _simple_layout(packages, self.package_dir, project_dir) - is_compat_mode = mode is _EditableMode.COMPAT - if set(self.package_dir) == {""} and has_simple_layout or is_compat_mode: - # src-layout(ish) is relatively safe for a simple pth file - src_dir = self.package_dir.get("", ".") - return _StaticPth(self.distribution, name, [Path(project_dir, src_dir)]) - - # Use a MetaPathFinder to avoid adding accidental top-level packages/modules - return _TopLevelFinder(self.distribution, name) - - -class 
EditableStrategy(Protocol): - def __call__(self, wheel: "WheelFile", files: List[str], mapping: Dict[str, str]): - ... - - def __enter__(self): - ... - - def __exit__(self, _exc_type, _exc_value, _traceback): - ... - - -class _StaticPth: - def __init__(self, dist: Distribution, name: str, path_entries: List[Path]): - self.dist = dist - self.name = name - self.path_entries = path_entries - - def __call__(self, wheel: "WheelFile", files: List[str], mapping: Dict[str, str]): - entries = "\n".join((str(p.resolve()) for p in self.path_entries)) - contents = bytes(f"{entries}\n", "utf-8") - wheel.writestr(f"__editable__.{self.name}.pth", contents) - - def __enter__(self): - msg = f""" - Editable install will be performed using .pth file to extend `sys.path` with: - {list(map(os.fspath, self.path_entries))!r} - """ - _logger.warning(msg + _LENIENT_WARNING) - return self - - def __exit__(self, _exc_type, _exc_value, _traceback): - ... - - -class _LinkTree(_StaticPth): - """ - Creates a ``.pth`` file that points to a link tree in the ``auxiliary_dir``. - - This strategy will only link files (not dirs), so it can be implemented in - any OS, even if that means using hardlinks instead of symlinks. - - By collocating ``auxiliary_dir`` and the original source code, limitations - with hardlinks should be avoided. - """ - def __init__( - self, dist: Distribution, - name: str, - auxiliary_dir: _Path, - build_lib: _Path, - ): - self.auxiliary_dir = Path(auxiliary_dir) - self.build_lib = Path(build_lib).resolve() - self._file = dist.get_command_obj("build_py").copy_file - super().__init__(dist, name, [self.auxiliary_dir]) - - def __call__(self, wheel: "WheelFile", files: List[str], mapping: Dict[str, str]): - self._create_links(files, mapping) - super().__call__(wheel, files, mapping) - - def _normalize_output(self, file: str) -> Optional[str]: - # Files relative to build_lib will be normalized to None - with suppress(ValueError): - path = Path(file).resolve().relative_to(self.build_lib) - return str(path).replace(os.sep, '/') - return None - - def _create_file(self, relative_output: str, src_file: str, link=None): - dest = self.auxiliary_dir / relative_output - if not dest.parent.is_dir(): - dest.parent.mkdir(parents=True) - self._file(src_file, dest, link=link) - - def _create_links(self, outputs, output_mapping): - self.auxiliary_dir.mkdir(parents=True, exist_ok=True) - link_type = "sym" if _can_symlink_files(self.auxiliary_dir) else "hard" - mappings = { - self._normalize_output(k): v - for k, v in output_mapping.items() - } - mappings.pop(None, None) # remove files that are not relative to build_lib - - for output in outputs: - relative = self._normalize_output(output) - if relative and relative not in mappings: - self._create_file(relative, output) - - for relative, src in mappings.items(): - self._create_file(relative, src, link=link_type) - - def __enter__(self): - msg = "Strict editable install will be performed using a link tree.\n" - _logger.warning(msg + _STRICT_WARNING) - return self - - def __exit__(self, _exc_type, _exc_value, _traceback): - msg = f"""\n - Strict editable installation performed using the auxiliary directory: - {self.auxiliary_dir} - - Please be careful to not remove this directory, otherwise you might not be able - to import/use your package. 
- """ - warnings.warn(msg, InformationOnly) - - -class _TopLevelFinder: - def __init__(self, dist: Distribution, name: str): - self.dist = dist - self.name = name - - def __call__(self, wheel: "WheelFile", files: List[str], mapping: Dict[str, str]): - src_root = self.dist.src_root or os.curdir - top_level = chain(_find_packages(self.dist), _find_top_level_modules(self.dist)) - package_dir = self.dist.package_dir or {} - roots = _find_package_roots(top_level, package_dir, src_root) - - namespaces_: Dict[str, List[str]] = dict(chain( - _find_namespaces(self.dist.packages or [], roots), - ((ns, []) for ns in _find_virtual_namespaces(roots)), - )) - - name = f"__editable__.{self.name}.finder" - finder = _make_identifier(name) - content = bytes(_finder_template(name, roots, namespaces_), "utf-8") - wheel.writestr(f"{finder}.py", content) - - content = bytes(f"import {finder}; {finder}.install()", "utf-8") - wheel.writestr(f"__editable__.{self.name}.pth", content) - - def __enter__(self): - msg = "Editable install will be performed using a meta path finder.\n" - _logger.warning(msg + _LENIENT_WARNING) - return self - - def __exit__(self, _exc_type, _exc_value, _traceback): - msg = """\n - Please be careful with folders in your working directory with the same - name as your package as they may take precedence during imports. - """ - warnings.warn(msg, InformationOnly) - - -def _can_symlink_files(base_dir: Path) -> bool: - with TemporaryDirectory(dir=str(base_dir.resolve())) as tmp: - path1, path2 = Path(tmp, "file1.txt"), Path(tmp, "file2.txt") - path1.write_text("file1", encoding="utf-8") - with suppress(AttributeError, NotImplementedError, OSError): - os.symlink(path1, path2) - if path2.is_symlink() and path2.read_text(encoding="utf-8") == "file1": - return True - - try: - os.link(path1, path2) # Ensure hard links can be created - except Exception as ex: - msg = ( - "File system does not seem to support either symlinks or hard links. " - "Strict editable installs require one of them to be supported." - ) - raise LinksNotSupported(msg) from ex - return False - - -def _simple_layout( - packages: Iterable[str], package_dir: Dict[str, str], project_dir: Path -) -> bool: - """Return ``True`` if: - - all packages are contained by the same parent directory, **and** - - all packages become importable if the parent directory is added to ``sys.path``. 
- - >>> _simple_layout(['a'], {"": "src"}, "/tmp/myproj") - True - >>> _simple_layout(['a', 'a.b'], {"": "src"}, "/tmp/myproj") - True - >>> _simple_layout(['a', 'a.b'], {}, "/tmp/myproj") - True - >>> _simple_layout(['a', 'a.a1', 'a.a1.a2', 'b'], {"": "src"}, "/tmp/myproj") - True - >>> _simple_layout(['a', 'a.a1', 'a.a1.a2', 'b'], {"a": "a", "b": "b"}, ".") - True - >>> _simple_layout(['a', 'a.a1', 'a.a1.a2', 'b'], {"a": "_a", "b": "_b"}, ".") - False - >>> _simple_layout(['a', 'a.a1', 'a.a1.a2', 'b'], {"a": "_a"}, "/tmp/myproj") - False - >>> _simple_layout(['a', 'a.a1', 'a.a1.a2', 'b'], {"a.a1.a2": "_a2"}, ".") - False - >>> _simple_layout(['a', 'a.b'], {"": "src", "a.b": "_ab"}, "/tmp/myproj") - False - >>> # Special cases, no packages yet: - >>> _simple_layout([], {"": "src"}, "/tmp/myproj") - True - >>> _simple_layout([], {"a": "_a", "": "src"}, "/tmp/myproj") - False - """ - layout = { - pkg: find_package_path(pkg, package_dir, project_dir) - for pkg in packages - } - if not layout: - return set(package_dir) in ({}, {""}) - parent = os.path.commonpath([_parent_path(k, v) for k, v in layout.items()]) - return all( - _normalize_path(Path(parent, *key.split('.'))) == _normalize_path(value) - for key, value in layout.items() - ) - - -def _parent_path(pkg, pkg_path): - """Infer the parent path containing a package, that if added to ``sys.path`` would - allow importing that package. - When ``pkg`` is directly mapped into a directory with a different name, return its - own path. - >>> _parent_path("a", "src/a") - 'src' - >>> _parent_path("b", "src/c") - 'src/c' - """ - parent = pkg_path[:-len(pkg)] if pkg_path.endswith(pkg) else pkg_path - return parent.rstrip("/" + os.sep) - - -def _find_packages(dist: Distribution) -> Iterator[str]: - yield from iter(dist.packages or []) - - py_modules = dist.py_modules or [] - nested_modules = [mod for mod in py_modules if "." in mod] - if dist.ext_package: - yield dist.ext_package - else: - ext_modules = dist.ext_modules or [] - nested_modules += [x.name for x in ext_modules if "." in x.name] - - for module in nested_modules: - package, _, _ = module.rpartition(".") - yield package - - -def _find_top_level_modules(dist: Distribution) -> Iterator[str]: - py_modules = dist.py_modules or [] - yield from (mod for mod in py_modules if "." not in mod) - - if not dist.ext_package: - ext_modules = dist.ext_modules or [] - yield from (x.name for x in ext_modules if "." not in x.name) - - -def _find_package_roots( - packages: Iterable[str], - package_dir: Mapping[str, str], - src_root: _Path, -) -> Dict[str, str]: - pkg_roots: Dict[str, str] = { - pkg: _absolute_root(find_package_path(pkg, package_dir, src_root)) - for pkg in sorted(packages) - } - - return _remove_nested(pkg_roots) - - -def _absolute_root(path: _Path) -> str: - """Works for packages and top-level modules""" - path_ = Path(path) - parent = path_.parent - - if path_.exists(): - return str(path_.resolve()) - else: - return str(parent.resolve() / path_.name) - - -def _find_virtual_namespaces(pkg_roots: Dict[str, str]) -> Iterator[str]: - """By carefully designing ``package_dir``, it is possible to implement the logical - structure of PEP 420 in a package without the corresponding directories. - - Moreover a parent package can be purposefully/accidentally skipped in the discovery - phase (e.g. ``find_packages(include=["mypkg.*"])``, when ``mypkg.foo`` is included - by ``mypkg`` itself is not). 
- We consider this case to also be a virtual namespace (ignoring the original - directory) to emulate a non-editable installation. - - This function will try to find these kinds of namespaces. - """ - for pkg in pkg_roots: - if "." not in pkg: - continue - parts = pkg.split(".") - for i in range(len(parts) - 1, 0, -1): - partial_name = ".".join(parts[:i]) - path = Path(find_package_path(partial_name, pkg_roots, "")) - if not path.exists() or partial_name not in pkg_roots: - # partial_name not in pkg_roots ==> purposefully/accidentally skipped - yield partial_name - - -def _find_namespaces( - packages: List[str], pkg_roots: Dict[str, str] -) -> Iterator[Tuple[str, List[str]]]: - for pkg in packages: - path = find_package_path(pkg, pkg_roots, "") - if Path(path).exists() and not Path(path, "__init__.py").exists(): - yield (pkg, [path]) - - -def _remove_nested(pkg_roots: Dict[str, str]) -> Dict[str, str]: - output = dict(pkg_roots.copy()) - - for pkg, path in reversed(list(pkg_roots.items())): - if any( - pkg != other and _is_nested(pkg, path, other, other_path) - for other, other_path in pkg_roots.items() - ): - output.pop(pkg) - - return output - - -def _is_nested(pkg: str, pkg_path: str, parent: str, parent_path: str) -> bool: - """ - Return ``True`` if ``pkg`` is nested inside ``parent`` both logically and in the - file system. - >>> _is_nested("a.b", "path/a/b", "a", "path/a") - True - >>> _is_nested("a.b", "path/a/b", "a", "otherpath/a") - False - >>> _is_nested("a.b", "path/a/b", "c", "path/c") - False - >>> _is_nested("a.a", "path/a/a", "a", "path/a") - True - >>> _is_nested("b.a", "path/b/a", "a", "path/a") - False - """ - norm_pkg_path = _normalize_path(pkg_path) - rest = pkg.replace(parent, "", 1).strip(".").split(".") - return ( - pkg.startswith(parent) - and norm_pkg_path == _normalize_path(Path(parent_path, *rest)) - ) - - -def _normalize_path(filename: _Path) -> str: - """Normalize a file/dir name for comparison purposes""" - # See pkg_resources.normalize_path - file = os.path.abspath(filename) if sys.platform == 'cygwin' else filename - return os.path.normcase(os.path.realpath(os.path.normpath(file))) - - -def _empty_dir(dir_: _P) -> _P: - """Create a directory ensured to be empty. Existing files may be removed.""" - shutil.rmtree(dir_, ignore_errors=True) - os.makedirs(dir_) - return dir_ - - -def _make_identifier(name: str) -> str: - """Make a string safe to be used as Python identifier. 
- >>> _make_identifier("12abc") - '_12abc' - >>> _make_identifier("__editable__.myns.pkg-78.9.3_local") - '__editable___myns_pkg_78_9_3_local' - """ - safe = re.sub(r'\W|^(?=\d)', '_', name) - assert safe.isidentifier() - return safe - - -class _NamespaceInstaller(namespaces.Installer): - def __init__(self, distribution, installation_dir, editable_name, src_root): - self.distribution = distribution - self.src_root = src_root - self.installation_dir = installation_dir - self.editable_name = editable_name - self.outputs = [] - self.dry_run = False - - def _get_target(self): - """Installation target.""" - return os.path.join(self.installation_dir, self.editable_name) - - def _get_root(self): - """Where the modules/packages should be loaded from.""" - return repr(str(self.src_root)) - - -_FINDER_TEMPLATE = """\ -import sys -from importlib.machinery import ModuleSpec -from importlib.machinery import all_suffixes as module_suffixes -from importlib.util import spec_from_file_location -from itertools import chain -from pathlib import Path - -MAPPING = {mapping!r} -NAMESPACES = {namespaces!r} -PATH_PLACEHOLDER = {name!r} + ".__path_hook__" - - -class _EditableFinder: # MetaPathFinder - @classmethod - def find_spec(cls, fullname, path=None, target=None): - for pkg, pkg_path in reversed(list(MAPPING.items())): - if fullname == pkg or fullname.startswith(f"{{pkg}}."): - rest = fullname.replace(pkg, "", 1).strip(".").split(".") - return cls._find_spec(fullname, Path(pkg_path, *rest)) - - return None - - @classmethod - def _find_spec(cls, fullname, candidate_path): - init = candidate_path / "__init__.py" - candidates = (candidate_path.with_suffix(x) for x in module_suffixes()) - for candidate in chain([init], candidates): - if candidate.exists(): - return spec_from_file_location(fullname, candidate) - - -class _EditableNamespaceFinder: # PathEntryFinder - @classmethod - def _path_hook(cls, path): - if path == PATH_PLACEHOLDER: - return cls - raise ImportError - - @classmethod - def _paths(cls, fullname): - # Ensure __path__ is not empty for the spec to be considered a namespace. - return NAMESPACES[fullname] or MAPPING.get(fullname) or [PATH_PLACEHOLDER] - - @classmethod - def find_spec(cls, fullname, target=None): - if fullname in NAMESPACES: - spec = ModuleSpec(fullname, None, is_package=True) - spec.submodule_search_locations = cls._paths(fullname) - return spec - return None - - @classmethod - def find_module(cls, fullname): - return None - - -def install(): - if not any(finder == _EditableFinder for finder in sys.meta_path): - sys.meta_path.append(_EditableFinder) - - if not NAMESPACES: - return - - if not any(hook == _EditableNamespaceFinder._path_hook for hook in sys.path_hooks): - # PathEntryFinder is needed to create NamespaceSpec without private APIS - sys.path_hooks.append(_EditableNamespaceFinder._path_hook) - if PATH_PLACEHOLDER not in sys.path: - sys.path.append(PATH_PLACEHOLDER) # Used just to trigger the path hook -""" - - -def _finder_template( - name: str, mapping: Mapping[str, str], namespaces: Dict[str, List[str]] -) -> str: - """Create a string containing the code for the``MetaPathFinder`` and - ``PathEntryFinder``. - """ - mapping = dict(sorted(mapping.items(), key=lambda p: p[0])) - return _FINDER_TEMPLATE.format(name=name, mapping=mapping, namespaces=namespaces) - - -class InformationOnly(UserWarning): - """Currently there is no clear way of displaying messages to the users - that use the setuptools backend directly via ``pip``. 
- The only thing that might work is a warning, although it is not the - most appropriate tool for the job... - """ - - -class LinksNotSupported(errors.FileError): - """File system does not seem to support either symlinks or hard links.""" diff --git a/venv/lib/python3.10/site-packages/setuptools/command/egg_info.py b/venv/lib/python3.10/site-packages/setuptools/command/egg_info.py index 25888ed..37f59a2 100644 --- a/venv/lib/python3.10/site-packages/setuptools/command/egg_info.py +++ b/venv/lib/python3.10/site-packages/setuptools/command/egg_info.py @@ -17,22 +17,18 @@ import time import collections -from .._importlib import metadata -from .. import _entry_points - from setuptools import Command from setuptools.command.sdist import sdist from setuptools.command.sdist import walk_revctrl from setuptools.command.setopt import edit_config from setuptools.command import bdist_egg from pkg_resources import ( - Requirement, safe_name, parse_version, - safe_version, to_filename) + parse_requirements, safe_name, parse_version, + safe_version, yield_lines, EntryPoint, iter_entry_points, to_filename) import setuptools.unicode_utils as unicode_utils from setuptools.glob import glob from setuptools.extern import packaging -from setuptools.extern.jaraco.text import yield_lines from setuptools import SetuptoolsDeprecationWarning @@ -136,21 +132,11 @@ def _maybe_tag(self, version): in which case the version string already contains all tags. """ return ( - version if self.vtags and self._already_tagged(version) + version if self.vtags and version.endswith(self.vtags) else version + self.vtags ) - def _already_tagged(self, version: str) -> bool: - # Depending on their format, tags may change with version normalization. - # So in addition the regular tags, we have to search for the normalized ones. - return version.endswith(self.vtags) or version.endswith(self._safe_tags()) - - def _safe_tags(self) -> str: - # To implement this we can rely on `safe_version` pretending to be version 0 - # followed by tags. 
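# --- editor's note (not part of the original diff) -------------------------
# The comment split across this point describes the removed `_safe_tags`
# trick: PEP 440 normalization may respell a tag, so a plain
# `version.endswith(self.vtags)` check can miss an already-tagged version;
# prefixing a fake "0" release, normalizing, and slicing it off recovers the
# normalized spelling of the tag alone. A standalone sketch using the same
# `safe_version` helper this module imports from pkg_resources:
from pkg_resources import safe_version

def normalized_tag(vtags: str) -> str:
    # Pretend the tag is the version "0<tag>", normalize, drop the fake "0".
    return safe_version(f"0{vtags}")[1:]
# ---------------------------------------------------------------------------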
Then we simply discard the starting 0 (fake version number) - return safe_version(f"0{self.vtags}")[1:] - - def tags(self) -> str: + def tags(self): version = '' if self.tag_build: version += self.tag_build @@ -182,7 +168,6 @@ def initialize_options(self): self.egg_info = None self.egg_version = None self.broken_egg_info = False - self.ignore_egg_info_in_manifest = False #################################### # allow the 'tag_svn_revision' to be detected and @@ -220,8 +205,12 @@ def finalize_options(self): try: is_version = isinstance(parsed_version, packaging.version.Version) - spec = "%s==%s" if is_version else "%s===%s" - Requirement(spec % (self.egg_name, self.egg_version)) + spec = ( + "%s==%s" if is_version else "%s===%s" + ) + list( + parse_requirements(spec % (self.egg_name, self.egg_version)) + ) except ValueError as e: raise distutils.errors.DistutilsOptionError( "Invalid distribution name or version syntax: %s-%s" % @@ -296,8 +285,10 @@ def delete_file(self, filename): def run(self): self.mkpath(self.egg_info) os.utime(self.egg_info, None) - for ep in metadata.entry_points(group='egg_info.writers'): - writer = ep.load() + installer = self.distribution.fetch_build_egg + for ep in iter_entry_points('egg_info.writers'): + ep.require(installer=installer) + writer = ep.resolve() writer(self, ep.name, os.path.join(self.egg_info, ep.name)) # Get rid of native_libs.txt if it was put there by older bdist_egg @@ -311,7 +302,6 @@ def find_sources(self): """Generate SOURCES.txt manifest file""" manifest_filename = os.path.join(self.egg_info, "SOURCES.txt") mm = manifest_maker(self.distribution) - mm.ignore_egg_info_dir = self.ignore_egg_info_in_manifest mm.manifest = manifest_filename mm.run() self.filelist = mm.filelist @@ -335,10 +325,6 @@ def check_broken_egg_info(self): class FileList(_FileList): # Implementations of the various MANIFEST.in commands - def __init__(self, warn=None, debug_print=None, ignore_egg_info_dir=False): - super().__init__(warn, debug_print) - self.ignore_egg_info_dir = ignore_egg_info_dir - def process_template_line(self, line): # Parse the line: split it up, make sure the right number of words # is there, and return the relevant words. 
'action' is always @@ -528,10 +514,6 @@ def _safe_path(self, path): return False try: - # ignore egg-info paths - is_egg_info = ".egg-info" in u_path or b".egg-info" in utf8_path - if self.ignore_egg_info_dir and is_egg_info: - return False # accept is either way checks out if os.path.exists(u_path) or os.path.exists(utf8_path): return True @@ -548,13 +530,12 @@ def initialize_options(self): self.prune = 1 self.manifest_only = 1 self.force_manifest = 1 - self.ignore_egg_info_dir = False def finalize_options(self): pass def run(self): - self.filelist = FileList(ignore_egg_info_dir=self.ignore_egg_info_dir) + self.filelist = FileList() if not os.path.exists(self.manifest): self.write_manifest() # it must exist so it'll get in the list self.add_defaults() @@ -694,7 +675,7 @@ def _write_requirements(stream, reqs): def append_cr(line): return line + '\n' - lines = map(append_cr, lines) + lines = map(append_cr, sorted(lines)) stream.writelines(lines) @@ -738,9 +719,20 @@ def write_arg(cmd, basename, filename, force=False): def write_entries(cmd, basename, filename): - eps = _entry_points.load(cmd.distribution.entry_points) - defn = _entry_points.render(eps) - cmd.write_or_delete_file('entry points', filename, defn, True) + ep = cmd.distribution.entry_points + + if isinstance(ep, str) or ep is None: + data = ep + elif ep is not None: + data = [] + for section, contents in sorted(ep.items()): + if not isinstance(contents, str): + contents = EntryPoint.parse_group(section, contents) + contents = '\n'.join(sorted(map(str, contents.values()))) + data.append('[%s]\n%s\n\n' % (section, contents)) + data = ''.join(data) + + cmd.write_or_delete_file('entry points', filename, data, True) def get_pkg_info_revision(): diff --git a/venv/lib/python3.10/site-packages/setuptools/command/install.py b/venv/lib/python3.10/site-packages/setuptools/command/install.py index 55fdb12..35e54d2 100644 --- a/venv/lib/python3.10/site-packages/setuptools/command/install.py +++ b/venv/lib/python3.10/site-packages/setuptools/command/install.py @@ -91,21 +91,14 @@ def _called_from_setup(run_frame): msg = "For best results, pass -X:Frames to enable call stack." 
warnings.warn(msg) return True - - frames = inspect.getouterframes(run_frame) - for frame in frames[2:4]: - caller, = frame[:1] - info = inspect.getframeinfo(caller) - caller_module = caller.f_globals.get('__name__', '') - - if caller_module == "setuptools.dist" and info.function == "run_command": - # Starting from v61.0.0 setuptools overwrites dist.run_command - continue - - return ( - caller_module == 'distutils.dist' - and info.function == 'run_commands' - ) + res = inspect.getouterframes(run_frame)[2] + caller, = res[:1] + info = inspect.getframeinfo(caller) + caller_module = caller.f_globals.get('__name__', '') + return ( + caller_module == 'distutils.dist' + and info.function == 'run_commands' + ) def do_egg_install(self): diff --git a/venv/lib/python3.10/site-packages/setuptools/command/install_egg_info.py b/venv/lib/python3.10/site-packages/setuptools/command/install_egg_info.py index 65ede40..5f405bc 100644 --- a/venv/lib/python3.10/site-packages/setuptools/command/install_egg_info.py +++ b/venv/lib/python3.10/site-packages/setuptools/command/install_egg_info.py @@ -1,10 +1,9 @@ from distutils import log, dir_util -import os +import os, sys from setuptools import Command from setuptools import namespaces from setuptools.archive_util import unpack_archive -from .._path import ensure_directory import pkg_resources @@ -19,14 +18,31 @@ class install_egg_info(namespaces.Installer, Command): def initialize_options(self): self.install_dir = None + self.install_layout = None + self.prefix_option = None def finalize_options(self): self.set_undefined_options('install_lib', ('install_dir', 'install_dir')) + self.set_undefined_options('install',('install_layout','install_layout')) + if sys.hexversion > 0x2060000: + self.set_undefined_options('install',('prefix_option','prefix_option')) ei_cmd = self.get_finalized_command("egg_info") basename = pkg_resources.Distribution( None, None, ei_cmd.egg_name, ei_cmd.egg_version ).egg_name() + '.egg-info' + + if self.install_layout: + if not self.install_layout.lower() in ['deb']: + raise DistutilsOptionError("unknown value for --install-layout") + self.install_layout = self.install_layout.lower() + basename = basename.replace('-py%s' % pkg_resources.PY_MAJOR, '') + elif self.prefix_option or 'real_prefix' in sys.__dict__: + # don't modify for virtualenv + pass + else: + basename = basename.replace('-py%s' % pkg_resources.PY_MAJOR, '') + self.source = ei_cmd.egg_info self.target = os.path.join(self.install_dir, basename) self.outputs = [] @@ -38,7 +54,7 @@ def run(self): elif os.path.exists(self.target): self.execute(os.unlink, (self.target,), "Removing " + self.target) if not self.dry_run: - ensure_directory(self.target) + pkg_resources.ensure_directory(self.target) self.execute( self.copytree, (), "Copying %s to %s" % (self.source, self.target) ) @@ -56,6 +72,9 @@ def skimmer(src, dst): for skip in '.svn/', 'CVS/': if src.startswith(skip) or '/' + skip in src: return None + if self.install_layout and self.install_layout in ['deb'] and src.startswith('SOURCES.txt'): + log.info("Skipping SOURCES.txt") + return None self.outputs.append(dst) log.debug("Copying %s to %s", src, dst) return dst diff --git a/venv/lib/python3.10/site-packages/setuptools/command/install_lib.py b/venv/lib/python3.10/site-packages/setuptools/command/install_lib.py index 2e9d875..65e318e 100644 --- a/venv/lib/python3.10/site-packages/setuptools/command/install_lib.py +++ b/venv/lib/python3.10/site-packages/setuptools/command/install_lib.py @@ -7,6 +7,18 @@ class 
install_lib(orig.install_lib): """Don't add compiled flags to filenames of non-Python files""" + def initialize_options(self): + orig.install_lib.initialize_options(self) + self.multiarch = None + self.install_layout = None + + def finalize_options(self): + orig.install_lib.finalize_options(self) + self.set_undefined_options('install',('install_layout','install_layout')) + if self.install_layout == 'deb' and sys.version_info[:2] >= (3, 3): + import sysconfig + self.multiarch = sysconfig.get_config_var('MULTIARCH') + def run(self): self.build() outfiles = self.install() @@ -92,6 +104,8 @@ def copy_tree( exclude = self.get_exclusions() if not exclude: + import distutils.dir_util + distutils.dir_util._multiarch = self.multiarch return orig.install_lib.copy_tree(self, infile, outfile) # Exclude namespace package __init__.py* files from the output @@ -101,12 +115,24 @@ def copy_tree( outfiles = [] + if self.multiarch: + import sysconfig + ext_suffix = sysconfig.get_config_var ('EXT_SUFFIX') + if ext_suffix.endswith(self.multiarch + ext_suffix[-3:]): + new_suffix = None + else: + new_suffix = "%s-%s%s" % (ext_suffix[:-3], self.multiarch, ext_suffix[-3:]) + def pf(src, dst): if dst in exclude: log.warn("Skipping installation of %s (namespace package)", dst) return False + if self.multiarch and new_suffix and dst.endswith(ext_suffix) and not dst.endswith(new_suffix): + dst = dst.replace(ext_suffix, new_suffix) + log.info("renaming extension to %s", os.path.basename(dst)) + log.info("copying %s -> %s", src, os.path.dirname(dst)) outfiles.append(dst) return dst diff --git a/venv/lib/python3.10/site-packages/setuptools/command/install_scripts.py b/venv/lib/python3.10/site-packages/setuptools/command/install_scripts.py index aeb0e42..9cd8eb0 100644 --- a/venv/lib/python3.10/site-packages/setuptools/command/install_scripts.py +++ b/venv/lib/python3.10/site-packages/setuptools/command/install_scripts.py @@ -4,8 +4,7 @@ import os import sys -from pkg_resources import Distribution, PathMetadata -from .._path import ensure_directory +from pkg_resources import Distribution, PathMetadata, ensure_directory class install_scripts(orig.install_scripts): diff --git a/venv/lib/python3.10/site-packages/setuptools/command/sdist.py b/venv/lib/python3.10/site-packages/setuptools/command/sdist.py index 4a8cde7..0285b69 100644 --- a/venv/lib/python3.10/site-packages/setuptools/command/sdist.py +++ b/venv/lib/python3.10/site-packages/setuptools/command/sdist.py @@ -4,19 +4,17 @@ import sys import io import contextlib -from itertools import chain from .py36compat import sdist_add_defaults -from .._importlib import metadata -from .build import _ORIGINAL_SUBCOMMANDS +import pkg_resources _default_revctrl = list def walk_revctrl(dirname=''): """Find all files under revision control""" - for ep in metadata.entry_points(group='setuptools.file_finders'): + for ep in pkg_resources.iter_entry_points('setuptools.file_finders'): for item in ep.load()(dirname): yield item @@ -102,10 +100,6 @@ class NoValue: if orig_val is not NoValue: setattr(os, 'link', orig_val) - def add_defaults(self): - super().add_defaults() - self._add_defaults_build_sub_commands() - def _add_defaults_optional(self): super()._add_defaults_optional() if os.path.isfile('pyproject.toml'): @@ -118,14 +112,6 @@ def _add_defaults_python(self): self.filelist.extend(build_py.get_source_files()) self._add_data_files(self._safe_data_files(build_py)) - def _add_defaults_build_sub_commands(self): - build = self.get_finalized_command("build") - missing_cmds = 
set(build.get_sub_commands()) - _ORIGINAL_SUBCOMMANDS - # ^-- the original built-in sub-commands are already handled by default. - cmds = (self.get_finalized_command(c) for c in missing_cmds) - files = (c.get_source_files() for c in cmds if hasattr(c, "get_source_files")) - self.filelist.extend(chain.from_iterable(files)) - def _safe_data_files(self, build_py): """ Since the ``sdist`` class is also used to compute the MANIFEST diff --git a/venv/lib/python3.10/site-packages/setuptools/command/test.py b/venv/lib/python3.10/site-packages/setuptools/command/test.py index 8dde513..4a389e4 100644 --- a/venv/lib/python3.10/site-packages/setuptools/command/test.py +++ b/venv/lib/python3.10/site-packages/setuptools/command/test.py @@ -16,11 +16,10 @@ evaluate_marker, add_activation_listener, require, + EntryPoint, ) -from .._importlib import metadata from setuptools import Command from setuptools.extern.more_itertools import unique_everseen -from setuptools.extern.jaraco.functools import pass_none class ScanningLoader(TestLoader): @@ -118,7 +117,7 @@ def test_args(self): return list(self._test_args()) def _test_args(self): - if not self.test_suite: + if not self.test_suite and sys.version_info >= (2, 7): yield 'discover' if self.verbose: yield '--verbose' @@ -242,10 +241,12 @@ def _argv(self): return ['unittest'] + self.test_args @staticmethod - @pass_none def _resolve_as_ep(val): """ Load the indicated attribute value, called, as a as if it were specified as an entry point. """ - return metadata.EntryPoint(value=val, name=None, group=None).load()() + if val is None: + return + parsed = EntryPoint.parse("x=" + val) + return parsed.resolve()() diff --git a/venv/lib/python3.10/site-packages/setuptools/command/upload_docs.py b/venv/lib/python3.10/site-packages/setuptools/command/upload_docs.py index 3263f07..845bff4 100644 --- a/venv/lib/python3.10/site-packages/setuptools/command/upload_docs.py +++ b/venv/lib/python3.10/site-packages/setuptools/command/upload_docs.py @@ -17,11 +17,8 @@ import functools import http.client import urllib.parse -import warnings - -from .._importlib import metadata -from .. import SetuptoolsDeprecationWarning +from pkg_resources import iter_entry_points from .upload import upload @@ -46,10 +43,9 @@ class upload_docs(upload): boolean_options = upload.boolean_options def has_sphinx(self): - return bool( - self.upload_dir is None - and metadata.entry_points(group='distutils.commands', name='build_sphinx') - ) + if self.upload_dir is None: + for ep in iter_entry_points('distutils.commands', 'build_sphinx'): + return True sub_commands = [('build_sphinx', has_sphinx)] @@ -59,9 +55,6 @@ def initialize_options(self): self.target_dir = None def finalize_options(self): - log.warn( - "Upload_docs command is deprecated. Use Read the Docs " - "(https://readthedocs.org) instead.") upload.finalize_options(self) if self.upload_dir is None: if self.has_sphinx(): @@ -73,6 +66,8 @@ def finalize_options(self): else: self.ensure_dirname('upload_dir') self.target_dir = self.upload_dir + if 'pypi.python.org' in self.repository: + log.warn("Upload_docs command is deprecated for PyPi. Use RTD instead.") self.announce('Using upload directory %s' % self.target_dir) def create_zipfile(self, filename): @@ -92,12 +87,6 @@ def create_zipfile(self, filename): zip_file.close() def run(self): - warnings.warn( - "upload_docs is deprecated and will be removed in a future " - "version. 
Use tools like httpie or curl instead.", - SetuptoolsDeprecationWarning, - ) - # Run sub commands for cmd_name in self.get_sub_commands(): self.run_command(cmd_name) diff --git a/venv/lib/python3.10/site-packages/setuptools/config/__init__.py b/venv/lib/python3.10/site-packages/setuptools/config/__init__.py deleted file mode 100644 index 1a5153a..0000000 --- a/venv/lib/python3.10/site-packages/setuptools/config/__init__.py +++ /dev/null @@ -1,35 +0,0 @@ -"""For backward compatibility, expose main functions from -``setuptools.config.setupcfg`` -""" -import warnings -from functools import wraps -from textwrap import dedent -from typing import Callable, TypeVar, cast - -from .._deprecation_warning import SetuptoolsDeprecationWarning -from . import setupcfg - -Fn = TypeVar("Fn", bound=Callable) - -__all__ = ('parse_configuration', 'read_configuration') - - -def _deprecation_notice(fn: Fn) -> Fn: - @wraps(fn) - def _wrapper(*args, **kwargs): - msg = f"""\ - As setuptools moves its configuration towards `pyproject.toml`, - `{__name__}.{fn.__name__}` became deprecated. - - For the time being, you can use the `{setupcfg.__name__}` module - to access a backward compatible API, but this module is provisional - and might be removed in the future. - """ - warnings.warn(dedent(msg), SetuptoolsDeprecationWarning, stacklevel=2) - return fn(*args, **kwargs) - - return cast(Fn, _wrapper) - - -read_configuration = _deprecation_notice(setupcfg.read_configuration) -parse_configuration = _deprecation_notice(setupcfg.parse_configuration) diff --git a/venv/lib/python3.10/site-packages/setuptools/config/_apply_pyprojecttoml.py b/venv/lib/python3.10/site-packages/setuptools/config/_apply_pyprojecttoml.py deleted file mode 100644 index 8af5561..0000000 --- a/venv/lib/python3.10/site-packages/setuptools/config/_apply_pyprojecttoml.py +++ /dev/null @@ -1,377 +0,0 @@ -"""Translation layer between pyproject config and setuptools distribution and -metadata objects. - -The distribution and metadata objects are modeled after (an old version of) -core metadata, therefore configs in the format specified for ``pyproject.toml`` -need to be processed before being applied. - -**PRIVATE MODULE**: API reserved for setuptools internal usage only. -""" -import logging -import os -import warnings -from collections.abc import Mapping -from email.headerregistry import Address -from functools import partial, reduce -from itertools import chain -from types import MappingProxyType -from typing import (TYPE_CHECKING, Any, Callable, Dict, List, Optional, Set, Tuple, - Type, Union) - -from setuptools._deprecation_warning import SetuptoolsDeprecationWarning - -if TYPE_CHECKING: - from setuptools._importlib import metadata # noqa - from setuptools.dist import Distribution # noqa - -EMPTY: Mapping = MappingProxyType({}) # Immutable dict-like -_Path = Union[os.PathLike, str] -_DictOrStr = Union[dict, str] -_CorrespFn = Callable[["Distribution", Any, _Path], None] -_Correspondence = Union[str, _CorrespFn] - -_logger = logging.getLogger(__name__) - - -def apply(dist: "Distribution", config: dict, filename: _Path) -> "Distribution": - """Apply configuration dict read with :func:`read_configuration`""" - - if not config: - return dist # short-circuit unrelated pyproject.toml file - - root_dir = os.path.dirname(filename) or "." 
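The deleted setuptools/config/__init__.py shim shown above keeps the historical import path working while nudging callers toward setuptools.config.setupcfg. A minimal standalone sketch of that wrapper pattern, assuming the stdlib DeprecationWarning in place of setuptools' internal SetuptoolsDeprecationWarning, and with a placeholder function standing in for setupcfg.read_configuration:

import warnings
from functools import wraps
from textwrap import dedent


def _deprecation_notice(fn):
    @wraps(fn)
    def _wrapper(*args, **kwargs):
        msg = f"""\
        `{fn.__name__}` became deprecated; use the `setupcfg` module instead.
        """
        # stacklevel=2 attributes the warning to the caller, not to this wrapper.
        warnings.warn(dedent(msg), DeprecationWarning, stacklevel=2)
        return fn(*args, **kwargs)

    return _wrapper


@_deprecation_notice
def read_configuration(filepath):
    return {}  # placeholder body; the real shim forwards to setupcfg.read_configuration

Calling read_configuration("setup.cfg") then emits the notice at each call site while still returning the wrapped function's result.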
- - _apply_project_table(dist, config, root_dir) - _apply_tool_table(dist, config, filename) - - current_directory = os.getcwd() - os.chdir(root_dir) - try: - dist._finalize_requires() - dist._finalize_license_files() - finally: - os.chdir(current_directory) - - return dist - - -def _apply_project_table(dist: "Distribution", config: dict, root_dir: _Path): - project_table = config.get("project", {}).copy() - if not project_table: - return # short-circuit - - _handle_missing_dynamic(dist, project_table) - _unify_entry_points(project_table) - - for field, value in project_table.items(): - norm_key = json_compatible_key(field) - corresp = PYPROJECT_CORRESPONDENCE.get(norm_key, norm_key) - if callable(corresp): - corresp(dist, value, root_dir) - else: - _set_config(dist, corresp, value) - - -def _apply_tool_table(dist: "Distribution", config: dict, filename: _Path): - tool_table = config.get("tool", {}).get("setuptools", {}) - if not tool_table: - return # short-circuit - - for field, value in tool_table.items(): - norm_key = json_compatible_key(field) - - if norm_key in TOOL_TABLE_DEPRECATIONS: - suggestion = TOOL_TABLE_DEPRECATIONS[norm_key] - msg = f"The parameter `{norm_key}` is deprecated, {suggestion}" - warnings.warn(msg, SetuptoolsDeprecationWarning) - - norm_key = TOOL_TABLE_RENAMES.get(norm_key, norm_key) - _set_config(dist, norm_key, value) - - _copy_command_options(config, dist, filename) - - -def _handle_missing_dynamic(dist: "Distribution", project_table: dict): - """Be temporarily forgiving with ``dynamic`` fields not listed in ``dynamic``""" - # TODO: Set fields back to `None` once the feature stabilizes - dynamic = set(project_table.get("dynamic", [])) - for field, getter in _PREVIOUSLY_DEFINED.items(): - if not (field in project_table or field in dynamic): - value = getter(dist) - if value: - msg = _WouldIgnoreField.message(field, value) - warnings.warn(msg, _WouldIgnoreField) - - -def json_compatible_key(key: str) -> str: - """As defined in :pep:`566#json-compatible-metadata`""" - return key.lower().replace("-", "_") - - -def _set_config(dist: "Distribution", field: str, value: Any): - setter = getattr(dist.metadata, f"set_{field}", None) - if setter: - setter(value) - elif hasattr(dist.metadata, field) or field in SETUPTOOLS_PATCHES: - setattr(dist.metadata, field, value) - else: - setattr(dist, field, value) - - -_CONTENT_TYPES = { - ".md": "text/markdown", - ".rst": "text/x-rst", - ".txt": "text/plain", -} - - -def _guess_content_type(file: str) -> Optional[str]: - _, ext = os.path.splitext(file.lower()) - if not ext: - return None - - if ext in _CONTENT_TYPES: - return _CONTENT_TYPES[ext] - - valid = ", ".join(f"{k} ({v})" for k, v in _CONTENT_TYPES.items()) - msg = f"only the following file extensions are recognized: {valid}." 
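Two of the helpers above are easy to exercise on their own: json_compatible_key applies the PEP 566 normalization (lowercase, dashes to underscores) that the translation layer runs on every [project] field name before dispatching, and _guess_content_type maps a README's file extension onto a core-metadata content type. A short self-contained sketch; this guess_content_type variant returns None instead of raising, purely to keep the example compact:

import os
from typing import Optional

_CONTENT_TYPES = {
    ".md": "text/markdown",
    ".rst": "text/x-rst",
    ".txt": "text/plain",
}


def json_compatible_key(key: str) -> str:
    # PEP 566 "JSON-compatible metadata": lowercase, dashes become underscores.
    return key.lower().replace("-", "_")


def guess_content_type(file: str) -> Optional[str]:
    _, ext = os.path.splitext(file.lower())
    return _CONTENT_TYPES.get(ext)


assert json_compatible_key("Optional-Dependencies") == "optional_dependencies"
assert guess_content_type("README.md") == "text/markdown"
assert guess_content_type("README") is None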
- raise ValueError(f"Undefined content type for {file}, {msg}") - - -def _long_description(dist: "Distribution", val: _DictOrStr, root_dir: _Path): - from setuptools.config import expand - - if isinstance(val, str): - text = expand.read_files(val, root_dir) - ctype = _guess_content_type(val) - else: - text = val.get("text") or expand.read_files(val.get("file", []), root_dir) - ctype = val["content-type"] - - _set_config(dist, "long_description", text) - if ctype: - _set_config(dist, "long_description_content_type", ctype) - - -def _license(dist: "Distribution", val: dict, root_dir: _Path): - from setuptools.config import expand - - if "file" in val: - _set_config(dist, "license", expand.read_files([val["file"]], root_dir)) - else: - _set_config(dist, "license", val["text"]) - - -def _people(dist: "Distribution", val: List[dict], _root_dir: _Path, kind: str): - field = [] - email_field = [] - for person in val: - if "name" not in person: - email_field.append(person["email"]) - elif "email" not in person: - field.append(person["name"]) - else: - addr = Address(display_name=person["name"], addr_spec=person["email"]) - email_field.append(str(addr)) - - if field: - _set_config(dist, kind, ", ".join(field)) - if email_field: - _set_config(dist, f"{kind}_email", ", ".join(email_field)) - - -def _project_urls(dist: "Distribution", val: dict, _root_dir): - _set_config(dist, "project_urls", val) - - -def _python_requires(dist: "Distribution", val: dict, _root_dir): - from setuptools.extern.packaging.specifiers import SpecifierSet - - _set_config(dist, "python_requires", SpecifierSet(val)) - - -def _dependencies(dist: "Distribution", val: list, _root_dir): - if getattr(dist, "install_requires", []): - msg = "`install_requires` overwritten in `pyproject.toml` (dependencies)" - warnings.warn(msg) - _set_config(dist, "install_requires", val) - - -def _optional_dependencies(dist: "Distribution", val: dict, _root_dir): - existing = getattr(dist, "extras_require", {}) - _set_config(dist, "extras_require", {**existing, **val}) - - -def _unify_entry_points(project_table: dict): - project = project_table - entry_points = project.pop("entry-points", project.pop("entry_points", {})) - renaming = {"scripts": "console_scripts", "gui_scripts": "gui_scripts"} - for key, value in list(project.items()): # eager to allow modifications - norm_key = json_compatible_key(key) - if norm_key in renaming and value: - entry_points[renaming[norm_key]] = project.pop(key) - - if entry_points: - project["entry-points"] = { - name: [f"{k} = {v}" for k, v in group.items()] - for name, group in entry_points.items() - } - - -def _copy_command_options(pyproject: dict, dist: "Distribution", filename: _Path): - tool_table = pyproject.get("tool", {}) - cmdclass = tool_table.get("setuptools", {}).get("cmdclass", {}) - valid_options = _valid_command_options(cmdclass) - - cmd_opts = dist.command_options - for cmd, config in pyproject.get("tool", {}).get("distutils", {}).items(): - cmd = json_compatible_key(cmd) - valid = valid_options.get(cmd, set()) - cmd_opts.setdefault(cmd, {}) - for key, value in config.items(): - key = json_compatible_key(key) - cmd_opts[cmd][key] = (str(filename), value) - if key not in valid: - # To avoid removing options that are specified dynamically we - # just log a warn... 
- _logger.warning(f"Command option {cmd}.{key} is not defined") - - -def _valid_command_options(cmdclass: Mapping = EMPTY) -> Dict[str, Set[str]]: - from .._importlib import metadata - from setuptools.dist import Distribution - - valid_options = {"global": _normalise_cmd_options(Distribution.global_options)} - - unloaded_entry_points = metadata.entry_points(group='distutils.commands') - loaded_entry_points = (_load_ep(ep) for ep in unloaded_entry_points) - entry_points = (ep for ep in loaded_entry_points if ep) - for cmd, cmd_class in chain(entry_points, cmdclass.items()): - opts = valid_options.get(cmd, set()) - opts = opts | _normalise_cmd_options(getattr(cmd_class, "user_options", [])) - valid_options[cmd] = opts - - return valid_options - - -def _load_ep(ep: "metadata.EntryPoint") -> Optional[Tuple[str, Type]]: - # Ignore all the errors - try: - return (ep.name, ep.load()) - except Exception as ex: - msg = f"{ex.__class__.__name__} while trying to load entry-point {ep.name}" - _logger.warning(f"{msg}: {ex}") - return None - - -def _normalise_cmd_option_key(name: str) -> str: - return json_compatible_key(name).strip("_=") - - -def _normalise_cmd_options(desc: List[Tuple[str, Optional[str], str]]) -> Set[str]: - return {_normalise_cmd_option_key(fancy_option[0]) for fancy_option in desc} - - -def _attrgetter(attr): - """ - Similar to ``operator.attrgetter`` but returns None if ``attr`` is not found - >>> from types import SimpleNamespace - >>> obj = SimpleNamespace(a=42, b=SimpleNamespace(c=13)) - >>> _attrgetter("a")(obj) - 42 - >>> _attrgetter("b.c")(obj) - 13 - >>> _attrgetter("d")(obj) is None - True - """ - return partial(reduce, lambda acc, x: getattr(acc, x, None), attr.split(".")) - - -def _some_attrgetter(*items): - """ - Return the first "truth-y" attribute or None - >>> from types import SimpleNamespace - >>> obj = SimpleNamespace(a=42, b=SimpleNamespace(c=13)) - >>> _some_attrgetter("d", "a", "b.c")(obj) - 42 - >>> _some_attrgetter("d", "e", "b.c", "a")(obj) - 13 - >>> _some_attrgetter("d", "e", "f")(obj) is None - True - """ - def _acessor(obj): - values = (_attrgetter(i)(obj) for i in items) - return next((i for i in values if i is not None), None) - return _acessor - - -PYPROJECT_CORRESPONDENCE: Dict[str, _Correspondence] = { - "readme": _long_description, - "license": _license, - "authors": partial(_people, kind="author"), - "maintainers": partial(_people, kind="maintainer"), - "urls": _project_urls, - "dependencies": _dependencies, - "optional_dependencies": _optional_dependencies, - "requires_python": _python_requires, -} - -TOOL_TABLE_RENAMES = {"script_files": "scripts"} -TOOL_TABLE_DEPRECATIONS = { - "namespace_packages": "consider using implicit namespaces instead (PEP 420)." 
-} - -SETUPTOOLS_PATCHES = {"long_description_content_type", "project_urls", - "provides_extras", "license_file", "license_files"} - -_PREVIOUSLY_DEFINED = { - "name": _attrgetter("metadata.name"), - "version": _attrgetter("metadata.version"), - "description": _attrgetter("metadata.description"), - "readme": _attrgetter("metadata.long_description"), - "requires-python": _some_attrgetter("python_requires", "metadata.python_requires"), - "license": _attrgetter("metadata.license"), - "authors": _some_attrgetter("metadata.author", "metadata.author_email"), - "maintainers": _some_attrgetter("metadata.maintainer", "metadata.maintainer_email"), - "keywords": _attrgetter("metadata.keywords"), - "classifiers": _attrgetter("metadata.classifiers"), - "urls": _attrgetter("metadata.project_urls"), - "entry-points": _attrgetter("entry_points"), - "dependencies": _some_attrgetter("_orig_install_requires", "install_requires"), - "optional-dependencies": _some_attrgetter("_orig_extras_require", "extras_require"), -} - - -class _WouldIgnoreField(UserWarning): - """Inform users that ``pyproject.toml`` would overwrite previous metadata.""" - - MESSAGE = """\ - {field!r} defined outside of `pyproject.toml` would be ignored. - !!\n\n - ########################################################################## - # configuration would be ignored/result in error due to `pyproject.toml` # - ########################################################################## - - The following seems to be defined outside of `pyproject.toml`: - - `{field} = {value!r}` - - According to the spec (see the link below), however, setuptools CANNOT - consider this value unless {field!r} is listed as `dynamic`. - - https://packaging.python.org/en/latest/specifications/declaring-project-metadata/ - - For the time being, `setuptools` will still consider the given value (as a - **transitional** measure), but please note that future releases of setuptools will - follow strictly the standard. - - To prevent this warning, you can list {field!r} under `dynamic` or alternatively - remove the `[project]` table from your file and rely entirely on other means of - configuration. - \n\n!! - """ - - @classmethod - def message(cls, field, value): - from inspect import cleandoc - return cleandoc(cls.MESSAGE.format(field=field, value=value)) diff --git a/venv/lib/python3.10/site-packages/setuptools/config/_validate_pyproject/__init__.py b/venv/lib/python3.10/site-packages/setuptools/config/_validate_pyproject/__init__.py deleted file mode 100644 index dbe6cb4..0000000 --- a/venv/lib/python3.10/site-packages/setuptools/config/_validate_pyproject/__init__.py +++ /dev/null @@ -1,34 +0,0 @@ -from functools import reduce -from typing import Any, Callable, Dict - -from . import formats -from .error_reporting import detailed_errors, ValidationError -from .extra_validations import EXTRA_VALIDATIONS -from .fastjsonschema_exceptions import JsonSchemaException, JsonSchemaValueException -from .fastjsonschema_validations import validate as _validate - -__all__ = [ - "validate", - "FORMAT_FUNCTIONS", - "EXTRA_VALIDATIONS", - "ValidationError", - "JsonSchemaException", - "JsonSchemaValueException", -] - - -FORMAT_FUNCTIONS: Dict[str, Callable[[str], bool]] = { - fn.__name__.replace("_", "-"): fn - for fn in formats.__dict__.values() - if callable(fn) and not fn.__name__.startswith("_") -} - - -def validate(data: Any) -> bool: - """Validate the given ``data`` object using JSON Schema - This function raises ``ValidationError`` if ``data`` is invalid. 
- """ - with detailed_errors(): - _validate(data, custom_formats=FORMAT_FUNCTIONS) - reduce(lambda acc, fn: fn(acc), EXTRA_VALIDATIONS, data) - return True diff --git a/venv/lib/python3.10/site-packages/setuptools/config/_validate_pyproject/error_reporting.py b/venv/lib/python3.10/site-packages/setuptools/config/_validate_pyproject/error_reporting.py deleted file mode 100644 index f78e483..0000000 --- a/venv/lib/python3.10/site-packages/setuptools/config/_validate_pyproject/error_reporting.py +++ /dev/null @@ -1,318 +0,0 @@ -import io -import json -import logging -import os -import re -from contextlib import contextmanager -from textwrap import indent, wrap -from typing import Any, Dict, Iterator, List, Optional, Sequence, Union, cast - -from .fastjsonschema_exceptions import JsonSchemaValueException - -_logger = logging.getLogger(__name__) - -_MESSAGE_REPLACEMENTS = { - "must be named by propertyName definition": "keys must be named by", - "one of contains definition": "at least one item that matches", - " same as const definition:": "", - "only specified items": "only items matching the definition", -} - -_SKIP_DETAILS = ( - "must not be empty", - "is always invalid", - "must not be there", -) - -_NEED_DETAILS = {"anyOf", "oneOf", "anyOf", "contains", "propertyNames", "not", "items"} - -_CAMEL_CASE_SPLITTER = re.compile(r"\W+|([A-Z][^A-Z\W]*)") -_IDENTIFIER = re.compile(r"^[\w_]+$", re.I) - -_TOML_JARGON = { - "object": "table", - "property": "key", - "properties": "keys", - "property names": "keys", -} - - -class ValidationError(JsonSchemaValueException): - """Report violations of a given JSON schema. - - This class extends :exc:`~fastjsonschema.JsonSchemaValueException` - by adding the following properties: - - - ``summary``: an improved version of the ``JsonSchemaValueException`` error message - with only the necessary information) - - - ``details``: more contextual information about the error like the failing schema - itself and the value that violates the schema. - - Depending on the level of the verbosity of the ``logging`` configuration - the exception message will be only ``summary`` (default) or a combination of - ``summary`` and ``details`` (when the logging level is set to :obj:`logging.DEBUG`). 
- """ - - summary = "" - details = "" - _original_message = "" - - @classmethod - def _from_jsonschema(cls, ex: JsonSchemaValueException): - formatter = _ErrorFormatting(ex) - obj = cls(str(formatter), ex.value, formatter.name, ex.definition, ex.rule) - debug_code = os.getenv("JSONSCHEMA_DEBUG_CODE_GENERATION", "false").lower() - if debug_code != "false": # pragma: no cover - obj.__cause__, obj.__traceback__ = ex.__cause__, ex.__traceback__ - obj._original_message = ex.message - obj.summary = formatter.summary - obj.details = formatter.details - return obj - - -@contextmanager -def detailed_errors(): - try: - yield - except JsonSchemaValueException as ex: - raise ValidationError._from_jsonschema(ex) from None - - -class _ErrorFormatting: - def __init__(self, ex: JsonSchemaValueException): - self.ex = ex - self.name = f"`{self._simplify_name(ex.name)}`" - self._original_message = self.ex.message.replace(ex.name, self.name) - self._summary = "" - self._details = "" - - def __str__(self) -> str: - if _logger.getEffectiveLevel() <= logging.DEBUG and self.details: - return f"{self.summary}\n\n{self.details}" - - return self.summary - - @property - def summary(self) -> str: - if not self._summary: - self._summary = self._expand_summary() - - return self._summary - - @property - def details(self) -> str: - if not self._details: - self._details = self._expand_details() - - return self._details - - def _simplify_name(self, name): - x = len("data.") - return name[x:] if name.startswith("data.") else name - - def _expand_summary(self): - msg = self._original_message - - for bad, repl in _MESSAGE_REPLACEMENTS.items(): - msg = msg.replace(bad, repl) - - if any(substring in msg for substring in _SKIP_DETAILS): - return msg - - schema = self.ex.rule_definition - if self.ex.rule in _NEED_DETAILS and schema: - summary = _SummaryWriter(_TOML_JARGON) - return f"{msg}:\n\n{indent(summary(schema), ' ')}" - - return msg - - def _expand_details(self) -> str: - optional = [] - desc_lines = self.ex.definition.pop("$$description", []) - desc = self.ex.definition.pop("description", None) or " ".join(desc_lines) - if desc: - description = "\n".join( - wrap( - desc, - width=80, - initial_indent=" ", - subsequent_indent=" ", - break_long_words=False, - ) - ) - optional.append(f"DESCRIPTION:\n{description}") - schema = json.dumps(self.ex.definition, indent=4) - value = json.dumps(self.ex.value, indent=4) - defaults = [ - f"GIVEN VALUE:\n{indent(value, ' ')}", - f"OFFENDING RULE: {self.ex.rule!r}", - f"DEFINITION:\n{indent(schema, ' ')}", - ] - return "\n\n".join(optional + defaults) - - -class _SummaryWriter: - _IGNORE = {"description", "default", "title", "examples"} - - def __init__(self, jargon: Optional[Dict[str, str]] = None): - self.jargon: Dict[str, str] = jargon or {} - # Clarify confusing terms - self._terms = { - "anyOf": "at least one of the following", - "oneOf": "exactly one of the following", - "allOf": "all of the following", - "not": "(*NOT* the following)", - "prefixItems": f"{self._jargon('items')} (in order)", - "items": "items", - "contains": "contains at least one of", - "propertyNames": ( - f"non-predefined acceptable {self._jargon('property names')}" - ), - "patternProperties": f"{self._jargon('properties')} named via pattern", - "const": "predefined value", - "enum": "one of", - } - # Attributes that indicate that the definition is easy and can be done - # inline (e.g. 
string and number) - self._guess_inline_defs = [ - "enum", - "const", - "maxLength", - "minLength", - "pattern", - "format", - "minimum", - "maximum", - "exclusiveMinimum", - "exclusiveMaximum", - "multipleOf", - ] - - def _jargon(self, term: Union[str, List[str]]) -> Union[str, List[str]]: - if isinstance(term, list): - return [self.jargon.get(t, t) for t in term] - return self.jargon.get(term, term) - - def __call__( - self, - schema: Union[dict, List[dict]], - prefix: str = "", - *, - _path: Sequence[str] = (), - ) -> str: - if isinstance(schema, list): - return self._handle_list(schema, prefix, _path) - - filtered = self._filter_unecessary(schema, _path) - simple = self._handle_simple_dict(filtered, _path) - if simple: - return f"{prefix}{simple}" - - child_prefix = self._child_prefix(prefix, " ") - item_prefix = self._child_prefix(prefix, "- ") - indent = len(prefix) * " " - with io.StringIO() as buffer: - for i, (key, value) in enumerate(filtered.items()): - child_path = [*_path, key] - line_prefix = prefix if i == 0 else indent - buffer.write(f"{line_prefix}{self._label(child_path)}:") - # ^ just the first item should receive the complete prefix - if isinstance(value, dict): - filtered = self._filter_unecessary(value, child_path) - simple = self._handle_simple_dict(filtered, child_path) - buffer.write( - f" {simple}" - if simple - else f"\n{self(value, child_prefix, _path=child_path)}" - ) - elif isinstance(value, list) and ( - key != "type" or self._is_property(child_path) - ): - children = self._handle_list(value, item_prefix, child_path) - sep = " " if children.startswith("[") else "\n" - buffer.write(f"{sep}{children}") - else: - buffer.write(f" {self._value(value, child_path)}\n") - return buffer.getvalue() - - def _is_unecessary(self, path: Sequence[str]) -> bool: - if self._is_property(path) or not path: # empty path => instruction @ root - return False - key = path[-1] - return any(key.startswith(k) for k in "$_") or key in self._IGNORE - - def _filter_unecessary(self, schema: dict, path: Sequence[str]): - return { - key: value - for key, value in schema.items() - if not self._is_unecessary([*path, key]) - } - - def _handle_simple_dict(self, value: dict, path: Sequence[str]) -> Optional[str]: - inline = any(p in value for p in self._guess_inline_defs) - simple = not any(isinstance(v, (list, dict)) for v in value.values()) - if inline or simple: - return f"{{{', '.join(self._inline_attrs(value, path))}}}\n" - return None - - def _handle_list( - self, schemas: list, prefix: str = "", path: Sequence[str] = () - ) -> str: - if self._is_unecessary(path): - return "" - - repr_ = repr(schemas) - if all(not isinstance(e, (dict, list)) for e in schemas) and len(repr_) < 60: - return f"{repr_}\n" - - item_prefix = self._child_prefix(prefix, "- ") - return "".join( - self(v, item_prefix, _path=[*path, f"[{i}]"]) for i, v in enumerate(schemas) - ) - - def _is_property(self, path: Sequence[str]): - """Check if the given path can correspond to an arbitrarily named property""" - counter = 0 - for key in path[-2::-1]: - if key not in {"properties", "patternProperties"}: - break - counter += 1 - - # If the counter if even, the path correspond to a JSON Schema keyword - # otherwise it can be any arbitrary string naming a property - return counter % 2 == 1 - - def _label(self, path: Sequence[str]) -> str: - *parents, key = path - if not self._is_property(path): - norm_key = _separate_terms(key) - return self._terms.get(key) or " ".join(self._jargon(norm_key)) - - if parents[-1] == 
"patternProperties": - return f"(regex {key!r})" - return repr(key) # property name - - def _value(self, value: Any, path: Sequence[str]) -> str: - if path[-1] == "type" and not self._is_property(path): - type_ = self._jargon(value) - return ( - f"[{', '.join(type_)}]" if isinstance(value, list) else cast(str, type_) - ) - return repr(value) - - def _inline_attrs(self, schema: dict, path: Sequence[str]) -> Iterator[str]: - for key, value in schema.items(): - child_path = [*path, key] - yield f"{self._label(child_path)}: {self._value(value, child_path)}" - - def _child_prefix(self, parent_prefix: str, child_prefix: str) -> str: - return len(parent_prefix) * " " + child_prefix - - -def _separate_terms(word: str) -> List[str]: - """ - >>> _separate_terms("FooBar-foo") - ['foo', 'bar', 'foo'] - """ - return [w.lower() for w in _CAMEL_CASE_SPLITTER.split(word) if w] diff --git a/venv/lib/python3.10/site-packages/setuptools/config/_validate_pyproject/extra_validations.py b/venv/lib/python3.10/site-packages/setuptools/config/_validate_pyproject/extra_validations.py deleted file mode 100644 index 4130a42..0000000 --- a/venv/lib/python3.10/site-packages/setuptools/config/_validate_pyproject/extra_validations.py +++ /dev/null @@ -1,36 +0,0 @@ -"""The purpose of this module is implement PEP 621 validations that are -difficult to express as a JSON Schema (or that are not supported by the current -JSON Schema library). -""" - -from typing import Mapping, TypeVar - -from .error_reporting import ValidationError - -T = TypeVar("T", bound=Mapping) - - -class RedefiningStaticFieldAsDynamic(ValidationError): - """According to PEP 621: - - Build back-ends MUST raise an error if the metadata specifies a field - statically as well as being listed in dynamic. - """ - - -def validate_project_dynamic(pyproject: T) -> T: - project_table = pyproject.get("project", {}) - dynamic = project_table.get("dynamic", []) - - for field in dynamic: - if field in project_table: - msg = f"You cannot provide a value for `project.{field}` and " - msg += "list it under `project.dynamic` at the same time" - name = f"data.project.{field}" - value = {field: project_table[field], "...": " # ...", "dynamic": dynamic} - raise RedefiningStaticFieldAsDynamic(msg, value, name, rule="PEP 621") - - return pyproject - - -EXTRA_VALIDATIONS = (validate_project_dynamic,) diff --git a/venv/lib/python3.10/site-packages/setuptools/config/_validate_pyproject/fastjsonschema_exceptions.py b/venv/lib/python3.10/site-packages/setuptools/config/_validate_pyproject/fastjsonschema_exceptions.py deleted file mode 100644 index d2dddd6..0000000 --- a/venv/lib/python3.10/site-packages/setuptools/config/_validate_pyproject/fastjsonschema_exceptions.py +++ /dev/null @@ -1,51 +0,0 @@ -import re - - -SPLIT_RE = re.compile(r'[\.\[\]]+') - - -class JsonSchemaException(ValueError): - """ - Base exception of ``fastjsonschema`` library. - """ - - -class JsonSchemaValueException(JsonSchemaException): - """ - Exception raised by validation function. Available properties: - - * ``message`` containing human-readable information what is wrong (e.g. ``data.property[index] must be smaller than or equal to 42``), - * invalid ``value`` (e.g. ``60``), - * ``name`` of a path in the data structure (e.g. ``data.property[index]``), - * ``path`` as an array in the data structure (e.g. ``['data', 'property', 'index']``), - * the whole ``definition`` which the ``value`` has to fulfil (e.g. ``{'type': 'number', 'maximum': 42}``), - * ``rule`` which the ``value`` is breaking (e.g. 
``maximum``) - * and ``rule_definition`` (e.g. ``42``). - - .. versionchanged:: 2.14.0 - Added all extra properties. - """ - - def __init__(self, message, value=None, name=None, definition=None, rule=None): - super().__init__(message) - self.message = message - self.value = value - self.name = name - self.definition = definition - self.rule = rule - - @property - def path(self): - return [item for item in SPLIT_RE.split(self.name) if item != ''] - - @property - def rule_definition(self): - if not self.rule or not self.definition: - return None - return self.definition.get(self.rule) - - -class JsonSchemaDefinitionException(JsonSchemaException): - """ - Exception raised by generator of validation function. - """ diff --git a/venv/lib/python3.10/site-packages/setuptools/config/_validate_pyproject/fastjsonschema_validations.py b/venv/lib/python3.10/site-packages/setuptools/config/_validate_pyproject/fastjsonschema_validations.py deleted file mode 100644 index ad5ee31..0000000 --- a/venv/lib/python3.10/site-packages/setuptools/config/_validate_pyproject/fastjsonschema_validations.py +++ /dev/null @@ -1,1035 +0,0 @@ -# noqa -# type: ignore -# flake8: noqa -# pylint: skip-file -# mypy: ignore-errors -# yapf: disable -# pylama:skip=1 - - -# *** PLEASE DO NOT MODIFY DIRECTLY: Automatically generated code *** - - -VERSION = "2.15.3" -import re -from .fastjsonschema_exceptions import JsonSchemaValueException - - -REGEX_PATTERNS = { - '^.*$': re.compile('^.*$'), - '.+': re.compile('.+'), - '^.+$': re.compile('^.+$'), - 'idn-email_re_pattern': re.compile('^[^@]+@[^@]+\\.[^@]+\\Z') -} - -NoneType = type(None) - -def validate(data, custom_formats={}, name_prefix=None): - validate_https___packaging_python_org_en_latest_specifications_declaring_build_dependencies(data, custom_formats, (name_prefix or "data") + "") - return data - -def validate_https___packaging_python_org_en_latest_specifications_declaring_build_dependencies(data, custom_formats={}, name_prefix=None): - if not isinstance(data, (dict)): - raise JsonSchemaValueException("" + (name_prefix or "data") + " must be object", value=data, name="" + (name_prefix or "data") + "", definition={'$schema': 'http://json-schema.org/draft-07/schema', '$id': 'https://packaging.python.org/en/latest/specifications/declaring-build-dependencies/', 'title': 'Data structure for ``pyproject.toml`` files', '$$description': ['File format containing build-time configurations for the Python ecosystem. ', ':pep:`517` initially defined a build-system independent format for source trees', 'which was complemented by :pep:`518` to provide a way of specifying dependencies ', 'for building Python projects.', 'Please notice the ``project`` table (as initially defined in :pep:`621`) is not included', 'in this schema and should be considered separately.'], 'type': 'object', 'additionalProperties': False, 'properties': {'build-system': {'type': 'object', 'description': 'Table used to store build-related data', 'additionalProperties': False, 'properties': {'requires': {'type': 'array', '$$description': ['List of dependencies in the :pep:`508` format required to execute the build', 'system. 
Please notice that the resulting dependency graph', '**MUST NOT contain cycles**'], 'items': {'type': 'string'}}, 'build-backend': {'type': 'string', 'description': 'Python object that will be used to perform the build according to :pep:`517`', 'format': 'pep517-backend-reference'}, 'backend-path': {'type': 'array', '$$description': ['List of directories to be prepended to ``sys.path`` when loading the', 'back-end, and running its hooks'], 'items': {'type': 'string', '$comment': 'Should be a path (TODO: enforce it with format?)'}}}, 'required': ['requires']}, 'project': {'$schema': 'http://json-schema.org/draft-07/schema', '$id': 'https://packaging.python.org/en/latest/specifications/declaring-project-metadata/', 'title': 'Package metadata stored in the ``project`` table', '$$description': ['Data structure for the **project** table inside ``pyproject.toml``', '(as initially defined in :pep:`621`)'], 'type': 'object', 'properties': {'name': {'type': 'string', 'description': 'The name (primary identifier) of the project. MUST be statically defined.', 'format': 'pep508-identifier'}, 'version': {'type': 'string', 'description': 'The version of the project as supported by :pep:`440`.', 'format': 'pep440'}, 'description': {'type': 'string', '$$description': ['The `summary description of the project', '`_']}, 'readme': {'$$description': ['`Full/detailed description of the project in the form of a README', '`_', "with meaning similar to the one defined in `core metadata's Description", '`_'], 'oneOf': [{'type': 'string', '$$description': ['Relative path to a text file (UTF-8) containing the full description', 'of the project. If the file path ends in case-insensitive ``.md`` or', '``.rst`` suffixes, then the content-type is respectively', '``text/markdown`` or ``text/x-rst``']}, {'type': 'object', 'allOf': [{'anyOf': [{'properties': {'file': {'type': 'string', '$$description': ['Relative path to a text file containing the full description', 'of the project.']}}, 'required': ['file']}, {'properties': {'text': {'type': 'string', 'description': 'Full text describing the project.'}}, 'required': ['text']}]}, {'properties': {'content-type': {'type': 'string', '$$description': ['Content-type (:rfc:`1341`) of the full description', '(e.g. ``text/markdown``). The ``charset`` parameter is assumed', 'UTF-8 when not present.'], '$comment': 'TODO: add regex pattern or format?'}}, 'required': ['content-type']}]}]}, 'requires-python': {'type': 'string', 'format': 'pep508-versionspec', '$$description': ['`The Python version requirements of the project', '`_.']}, 'license': {'description': '`Project license `_.', 'oneOf': [{'properties': {'file': {'type': 'string', '$$description': ['Relative path to the file (UTF-8) which contains the license for the', 'project.']}}, 'required': ['file']}, {'properties': {'text': {'type': 'string', '$$description': ['The license of the project whose meaning is that of the', '`License field from the core metadata', '`_.']}}, 'required': ['text']}]}, 'authors': {'type': 'array', 'items': {'$ref': '#/definitions/author'}, '$$description': ["The people or organizations considered to be the 'authors' of the project.", 'The exact meaning is open to interpretation (e.g. 
original or primary authors,', 'current maintainers, or owners of the package).']}, 'maintainers': {'type': 'array', 'items': {'$ref': '#/definitions/author'}, '$$description': ["The people or organizations considered to be the 'maintainers' of the project.", 'Similarly to ``authors``, the exact meaning is open to interpretation.']}, 'keywords': {'type': 'array', 'items': {'type': 'string'}, 'description': 'List of keywords to assist searching for the distribution in a larger catalog.'}, 'classifiers': {'type': 'array', 'items': {'type': 'string', 'format': 'trove-classifier', 'description': '`PyPI classifier `_.'}, '$$description': ['`Trove classifiers `_', 'which apply to the project.']}, 'urls': {'type': 'object', 'description': 'URLs associated with the project in the form ``label => value``.', 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', 'format': 'url'}}}, 'scripts': {'$ref': '#/definitions/entry-point-group', '$$description': ['Instruct the installer to create command-line wrappers for the given', '`entry points `_.']}, 'gui-scripts': {'$ref': '#/definitions/entry-point-group', '$$description': ['Instruct the installer to create GUI wrappers for the given', '`entry points `_.', 'The difference between ``scripts`` and ``gui-scripts`` is only relevant in', 'Windows.']}, 'entry-points': {'$$description': ['Instruct the installer to expose the given modules/functions via', '``entry-point`` discovery mechanism (useful for plugins).', 'More information available in the `Python packaging guide', '`_.'], 'propertyNames': {'format': 'python-entrypoint-group'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'$ref': '#/definitions/entry-point-group'}}}, 'dependencies': {'type': 'array', 'description': 'Project (mandatory) dependencies.', 'items': {'$ref': '#/definitions/dependency'}}, 'optional-dependencies': {'type': 'object', 'description': 'Optional dependency for the project', 'propertyNames': {'format': 'pep508-identifier'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'array', 'items': {'$ref': '#/definitions/dependency'}}}}, 'dynamic': {'type': 'array', '$$description': ['Specifies which fields are intentionally unspecified and expected to be', 'dynamically provided by build tools'], 'items': {'enum': ['version', 'description', 'readme', 'requires-python', 'license', 'authors', 'maintainers', 'keywords', 'classifiers', 'urls', 'scripts', 'gui-scripts', 'entry-points', 'dependencies', 'optional-dependencies']}}}, 'required': ['name'], 'additionalProperties': False, 'if': {'not': {'required': ['dynamic'], 'properties': {'dynamic': {'contains': {'const': 'version'}, '$$description': ['version is listed in ``dynamic``']}}}, '$$comment': ['According to :pep:`621`:', ' If the core metadata specification lists a field as "Required", then', ' the metadata MUST specify the field statically or list it in dynamic', 'In turn, `core metadata`_ defines:', ' The required fields are: Metadata-Version, Name, Version.', ' All the other fields are optional.', 'Since ``Metadata-Version`` is defined by the build back-end, ``name`` and', '``version`` are the only mandatory information in ``pyproject.toml``.', '.. 
- [… long run of removed (`-`) lines of machine-generated fastjsonschema validation code; the embedded URLs were lost in extraction and the generated one-liners are wrapped mid-string, so the run is summarized here. The removed code validates the top level of a parsed pyproject.toml: the build-system table must be an object whose required requires key is an array of strings (PEP 508 build dependencies), with optional build-backend (a string in pep517-backend-reference format) and backend-path (an array of strings to prepend to sys.path when loading the backend); the project table is delegated to validate_https___packaging_python_org_en_latest_specifications_declaring_project_metadata; the tool table dispatches its distutils subtable to validate_https___docs_python_org_3_install and its setuptools subtable to validate_https___setuptools_pypa_io_en_latest_references_keywords_html; any other top-level key fails with rule='additionalProperties'. Every failed check raises JsonSchemaValueException with a dotted field path built from name_prefix, the offending value, the violated rule ('type', 'required', 'format', or 'additionalProperties'), and the complete embedded draft-07 schema fragment, which the generator repeats verbatim inside every raise. The removed file then opens validate_https___setuptools_pypa_io_en_latest_references_keywords_html, which checks the tool.setuptools table against a schema covering platforms, provides, obsoletes, zip-safe, script-files, eager-resources, packages, package-dir, package-data, include-package-data, exclude-package-data, namespace-packages, py-modules, data-files, cmdclass, license-files and dynamic. …]
Supports callables and iterables;', 'unsupported types are cast via ``str()``'], 'type': 'object', 'additionalProperties': False, 'properties': {'attr': {'type': 'string'}}, 'required': ['attr']}, {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}]}, 'classifiers': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, 'description': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, 'dependencies': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, 'entry-points': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, 'optional-dependencies': {'type': 'object', 'propertyNames': {'format': 'python-identifier'}, 'additionalProperties': False, 'patternProperties': {'.+': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}}}, 'readme': {'anyOf': [{'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, {'properties': {'content-type': {'type': 'string'}}}], 'required': ['file']}}}}, 'definitions': {'file-directive': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, 'attr-directive': {'title': "'attr:' directive", '$id': '#/definitions/attr-directive', '$$description': ['Value is read from a module attribute. 
Supports callables and iterables;', 'unsupported types are cast via ``str()``'], 'type': 'object', 'additionalProperties': False, 'properties': {'attr': {'type': 'string'}}, 'required': ['attr']}, 'find-directive': {'$id': '#/definitions/find-directive', 'title': "'find:' directive", 'type': 'object', 'additionalProperties': False, 'properties': {'find': {'type': 'object', '$$description': ['Dynamic `package discovery', '`_.'], 'additionalProperties': False, 'properties': {'where': {'description': 'Directories to be searched for packages (Unix-style relative path)', 'type': 'array', 'items': {'type': 'string'}}, 'exclude': {'type': 'array', '$$description': ['Exclude packages that match the values listed in this field.', "Can container shell-style wildcards (e.g. ``'pkg.*'``)"], 'items': {'type': 'string'}}, 'include': {'type': 'array', '$$description': ['Restrict the found packages to just the ones listed in this field.', "Can container shell-style wildcards (e.g. ``'pkg.*'``)"], 'items': {'type': 'string'}}, 'namespaces': {'type': 'boolean', '$$description': ['When ``True``, directories without a ``__init__.py`` file will also', 'be scanned for :pep:`420`-style implicit namespaces']}}}}}}}, rule='type') - data_is_dict = isinstance(data, dict) - if data_is_dict: - data_keys = set(data.keys()) - if "platforms" in data_keys: - data_keys.remove("platforms") - data__platforms = data["platforms"] - if not isinstance(data__platforms, (list, tuple)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".platforms must be array", value=data__platforms, name="" + (name_prefix or "data") + ".platforms", definition={'type': 'array', 'items': {'type': 'string'}}, rule='type') - data__platforms_is_list = isinstance(data__platforms, (list, tuple)) - if data__platforms_is_list: - data__platforms_len = len(data__platforms) - for data__platforms_x, data__platforms_item in enumerate(data__platforms): - if not isinstance(data__platforms_item, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".platforms[{data__platforms_x}]".format(**locals()) + " must be string", value=data__platforms_item, name="" + (name_prefix or "data") + ".platforms[{data__platforms_x}]".format(**locals()) + "", definition={'type': 'string'}, rule='type') - if "provides" in data_keys: - data_keys.remove("provides") - data__provides = data["provides"] - if not isinstance(data__provides, (list, tuple)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".provides must be array", value=data__provides, name="" + (name_prefix or "data") + ".provides", definition={'$$description': ['Package and virtual package names contained within this package', '**(not supported by pip)**'], 'type': 'array', 'items': {'type': 'string', 'format': 'pep508-identifier'}}, rule='type') - data__provides_is_list = isinstance(data__provides, (list, tuple)) - if data__provides_is_list: - data__provides_len = len(data__provides) - for data__provides_x, data__provides_item in enumerate(data__provides): - if not isinstance(data__provides_item, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".provides[{data__provides_x}]".format(**locals()) + " must be string", value=data__provides_item, name="" + (name_prefix or "data") + ".provides[{data__provides_x}]".format(**locals()) + "", definition={'type': 'string', 'format': 'pep508-identifier'}, rule='type') - if isinstance(data__provides_item, str): - if not custom_formats["pep508-identifier"](data__provides_item): - raise JsonSchemaValueException("" 
+ (name_prefix or "data") + ".provides[{data__provides_x}]".format(**locals()) + " must be pep508-identifier", value=data__provides_item, name="" + (name_prefix or "data") + ".provides[{data__provides_x}]".format(**locals()) + "", definition={'type': 'string', 'format': 'pep508-identifier'}, rule='format') - if "obsoletes" in data_keys: - data_keys.remove("obsoletes") - data__obsoletes = data["obsoletes"] - if not isinstance(data__obsoletes, (list, tuple)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".obsoletes must be array", value=data__obsoletes, name="" + (name_prefix or "data") + ".obsoletes", definition={'$$description': ['Packages which this package renders obsolete', '**(not supported by pip)**'], 'type': 'array', 'items': {'type': 'string', 'format': 'pep508-identifier'}}, rule='type') - data__obsoletes_is_list = isinstance(data__obsoletes, (list, tuple)) - if data__obsoletes_is_list: - data__obsoletes_len = len(data__obsoletes) - for data__obsoletes_x, data__obsoletes_item in enumerate(data__obsoletes): - if not isinstance(data__obsoletes_item, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".obsoletes[{data__obsoletes_x}]".format(**locals()) + " must be string", value=data__obsoletes_item, name="" + (name_prefix or "data") + ".obsoletes[{data__obsoletes_x}]".format(**locals()) + "", definition={'type': 'string', 'format': 'pep508-identifier'}, rule='type') - if isinstance(data__obsoletes_item, str): - if not custom_formats["pep508-identifier"](data__obsoletes_item): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".obsoletes[{data__obsoletes_x}]".format(**locals()) + " must be pep508-identifier", value=data__obsoletes_item, name="" + (name_prefix or "data") + ".obsoletes[{data__obsoletes_x}]".format(**locals()) + "", definition={'type': 'string', 'format': 'pep508-identifier'}, rule='format') - if "zip-safe" in data_keys: - data_keys.remove("zip-safe") - data__zipsafe = data["zip-safe"] - if not isinstance(data__zipsafe, (bool)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".zip-safe must be boolean", value=data__zipsafe, name="" + (name_prefix or "data") + ".zip-safe", definition={'description': 'Whether the project can be safely installed and run from a zip file.', 'type': 'boolean'}, rule='type') - if "script-files" in data_keys: - data_keys.remove("script-files") - data__scriptfiles = data["script-files"] - if not isinstance(data__scriptfiles, (list, tuple)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".script-files must be array", value=data__scriptfiles, name="" + (name_prefix or "data") + ".script-files", definition={'description': 'Legacy way of defining scripts (entry-points are preferred).', 'type': 'array', 'items': {'type': 'string'}, '$comment': 'TODO: is this field deprecated/should be removed?'}, rule='type') - data__scriptfiles_is_list = isinstance(data__scriptfiles, (list, tuple)) - if data__scriptfiles_is_list: - data__scriptfiles_len = len(data__scriptfiles) - for data__scriptfiles_x, data__scriptfiles_item in enumerate(data__scriptfiles): - if not isinstance(data__scriptfiles_item, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".script-files[{data__scriptfiles_x}]".format(**locals()) + " must be string", value=data__scriptfiles_item, name="" + (name_prefix or "data") + ".script-files[{data__scriptfiles_x}]".format(**locals()) + "", definition={'type': 'string'}, rule='type') - if "eager-resources" in data_keys: - 
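# --- editor's sketch, not part of the deleted file ---------------------------
# Every generated check above shares one shape: remove each recognized key
# from the key set, test the value's type/format, and raise fastjsonschema's
# JsonSchemaValueException carrying the offending value, its dotted name, the
# schema fragment, and the violated rule. A self-contained miniature of the
# "zip-safe" check (hypothetical helper, for illustration only):
from fastjsonschema import JsonSchemaValueException

def check_zip_safe(data, name_prefix=None):
    # mirrors the generated type check: zip-safe, when present, must be bool
    if "zip-safe" in data and not isinstance(data["zip-safe"], bool):
        raise JsonSchemaValueException(
            (name_prefix or "data") + ".zip-safe must be boolean",
            value=data["zip-safe"],
            name=(name_prefix or "data") + ".zip-safe",
            definition={"type": "boolean"},
            rule="type",
        )
    return data

check_zip_safe({"zip-safe": True})      # passes
# check_zip_safe({"zip-safe": "yes"})   # would raise: data.zip-safe must be boolean
# -----------------------------------------------------------------------------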
data_keys.remove("eager-resources") - data__eagerresources = data["eager-resources"] - if not isinstance(data__eagerresources, (list, tuple)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".eager-resources must be array", value=data__eagerresources, name="" + (name_prefix or "data") + ".eager-resources", definition={'$$description': ['Resources that should be extracted together, if any of them is needed,', 'or if any C extensions included in the project are imported.'], 'type': 'array', 'items': {'type': 'string'}}, rule='type') - data__eagerresources_is_list = isinstance(data__eagerresources, (list, tuple)) - if data__eagerresources_is_list: - data__eagerresources_len = len(data__eagerresources) - for data__eagerresources_x, data__eagerresources_item in enumerate(data__eagerresources): - if not isinstance(data__eagerresources_item, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".eager-resources[{data__eagerresources_x}]".format(**locals()) + " must be string", value=data__eagerresources_item, name="" + (name_prefix or "data") + ".eager-resources[{data__eagerresources_x}]".format(**locals()) + "", definition={'type': 'string'}, rule='type') - if "packages" in data_keys: - data_keys.remove("packages") - data__packages = data["packages"] - data__packages_one_of_count1 = 0 - if data__packages_one_of_count1 < 2: - try: - if not isinstance(data__packages, (list, tuple)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".packages must be array", value=data__packages, name="" + (name_prefix or "data") + ".packages", definition={'title': 'Array of Python package identifiers', 'type': 'array', 'items': {'type': 'string', 'format': 'python-module-name'}}, rule='type') - data__packages_is_list = isinstance(data__packages, (list, tuple)) - if data__packages_is_list: - data__packages_len = len(data__packages) - for data__packages_x, data__packages_item in enumerate(data__packages): - if not isinstance(data__packages_item, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".packages[{data__packages_x}]".format(**locals()) + " must be string", value=data__packages_item, name="" + (name_prefix or "data") + ".packages[{data__packages_x}]".format(**locals()) + "", definition={'type': 'string', 'format': 'python-module-name'}, rule='type') - if isinstance(data__packages_item, str): - if not custom_formats["python-module-name"](data__packages_item): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".packages[{data__packages_x}]".format(**locals()) + " must be python-module-name", value=data__packages_item, name="" + (name_prefix or "data") + ".packages[{data__packages_x}]".format(**locals()) + "", definition={'type': 'string', 'format': 'python-module-name'}, rule='format') - data__packages_one_of_count1 += 1 - except JsonSchemaValueException: pass - if data__packages_one_of_count1 < 2: - try: - validate_https___setuptools_pypa_io_en_latest_references_keywords_html__definitions_find_directive(data__packages, custom_formats, (name_prefix or "data") + ".packages") - data__packages_one_of_count1 += 1 - except JsonSchemaValueException: pass - if data__packages_one_of_count1 != 1: - raise JsonSchemaValueException("" + (name_prefix or "data") + ".packages must be valid exactly by one definition" + (" (" + str(data__packages_one_of_count1) + " matches found)"), value=data__packages, name="" + (name_prefix or "data") + ".packages", definition={'$$description': ['Packages that should be included in the distribution.', 'It 
can be given either as a list of package identifiers', 'or as a ``dict``-like structure with a single key ``find``', 'which corresponds to a dynamic call to', '``setuptools.config.expand.find_packages`` function.', 'The ``find`` key is associated with a nested ``dict``-like structure that can', 'contain ``where``, ``include``, ``exclude`` and ``namespaces`` keys,', 'mimicking the keyword arguments of the associated function.'], 'oneOf': [{'title': 'Array of Python package identifiers', 'type': 'array', 'items': {'type': 'string', 'format': 'python-module-name'}}, {'$id': '#/definitions/find-directive', 'title': "'find:' directive", 'type': 'object', 'additionalProperties': False, 'properties': {'find': {'type': 'object', '$$description': ['Dynamic `package discovery', '`_.'], 'additionalProperties': False, 'properties': {'where': {'description': 'Directories to be searched for packages (Unix-style relative path)', 'type': 'array', 'items': {'type': 'string'}}, 'exclude': {'type': 'array', '$$description': ['Exclude packages that match the values listed in this field.', "Can container shell-style wildcards (e.g. ``'pkg.*'``)"], 'items': {'type': 'string'}}, 'include': {'type': 'array', '$$description': ['Restrict the found packages to just the ones listed in this field.', "Can container shell-style wildcards (e.g. ``'pkg.*'``)"], 'items': {'type': 'string'}}, 'namespaces': {'type': 'boolean', '$$description': ['When ``True``, directories without a ``__init__.py`` file will also', 'be scanned for :pep:`420`-style implicit namespaces']}}}}}]}, rule='oneOf') - if "package-dir" in data_keys: - data_keys.remove("package-dir") - data__packagedir = data["package-dir"] - if not isinstance(data__packagedir, (dict)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".package-dir must be object", value=data__packagedir, name="" + (name_prefix or "data") + ".package-dir", definition={'$$description': [':class:`dict`-like structure mapping from package names to directories where their', 'code can be found.', 'The empty string (as key) means that all packages are contained inside', 'the given directory will be included in the distribution.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'format': 'python-module-name'}, {'const': ''}]}, 'patternProperties': {'^.*$': {'type': 'string'}}}, rule='type') - data__packagedir_is_dict = isinstance(data__packagedir, dict) - if data__packagedir_is_dict: - data__packagedir_keys = set(data__packagedir.keys()) - for data__packagedir_key, data__packagedir_val in data__packagedir.items(): - if REGEX_PATTERNS['^.*$'].search(data__packagedir_key): - if data__packagedir_key in data__packagedir_keys: - data__packagedir_keys.remove(data__packagedir_key) - if not isinstance(data__packagedir_val, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".package-dir.{data__packagedir_key}".format(**locals()) + " must be string", value=data__packagedir_val, name="" + (name_prefix or "data") + ".package-dir.{data__packagedir_key}".format(**locals()) + "", definition={'type': 'string'}, rule='type') - if data__packagedir_keys: - raise JsonSchemaValueException("" + (name_prefix or "data") + ".package-dir must not contain "+str(data__packagedir_keys)+" properties", value=data__packagedir, name="" + (name_prefix or "data") + ".package-dir", definition={'$$description': [':class:`dict`-like structure mapping from package names to directories where their', 'code can be found.', 'The empty string (as key) means that all 
packages are contained inside', 'the given directory will be included in the distribution.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'format': 'python-module-name'}, {'const': ''}]}, 'patternProperties': {'^.*$': {'type': 'string'}}}, rule='additionalProperties') - data__packagedir_len = len(data__packagedir) - if data__packagedir_len != 0: - data__packagedir_property_names = True - for data__packagedir_key in data__packagedir: - try: - data__packagedir_key_one_of_count2 = 0 - if data__packagedir_key_one_of_count2 < 2: - try: - if isinstance(data__packagedir_key, str): - if not custom_formats["python-module-name"](data__packagedir_key): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".package-dir must be python-module-name", value=data__packagedir_key, name="" + (name_prefix or "data") + ".package-dir", definition={'format': 'python-module-name'}, rule='format') - data__packagedir_key_one_of_count2 += 1 - except JsonSchemaValueException: pass - if data__packagedir_key_one_of_count2 < 2: - try: - if data__packagedir_key != "": - raise JsonSchemaValueException("" + (name_prefix or "data") + ".package-dir must be same as const definition: ", value=data__packagedir_key, name="" + (name_prefix or "data") + ".package-dir", definition={'const': ''}, rule='const') - data__packagedir_key_one_of_count2 += 1 - except JsonSchemaValueException: pass - if data__packagedir_key_one_of_count2 != 1: - raise JsonSchemaValueException("" + (name_prefix or "data") + ".package-dir must be valid exactly by one definition" + (" (" + str(data__packagedir_key_one_of_count2) + " matches found)"), value=data__packagedir_key, name="" + (name_prefix or "data") + ".package-dir", definition={'oneOf': [{'format': 'python-module-name'}, {'const': ''}]}, rule='oneOf') - except JsonSchemaValueException: - data__packagedir_property_names = False - if not data__packagedir_property_names: - raise JsonSchemaValueException("" + (name_prefix or "data") + ".package-dir must be named by propertyName definition", value=data__packagedir, name="" + (name_prefix or "data") + ".package-dir", definition={'$$description': [':class:`dict`-like structure mapping from package names to directories where their', 'code can be found.', 'The empty string (as key) means that all packages are contained inside', 'the given directory will be included in the distribution.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'format': 'python-module-name'}, {'const': ''}]}, 'patternProperties': {'^.*$': {'type': 'string'}}}, rule='propertyNames') - if "package-data" in data_keys: - data_keys.remove("package-data") - data__packagedata = data["package-data"] - if not isinstance(data__packagedata, (dict)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".package-data must be object", value=data__packagedata, name="" + (name_prefix or "data") + ".package-data", definition={'$$description': ['Mapping from package names to lists of glob patterns.', 'Usually this option is not needed when using ``include-package-data = true``', 'For more information on how to include data files, check ``setuptools`` `docs', '`_.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'format': 'python-module-name'}, {'const': '*'}]}, 'patternProperties': {'^.*$': {'type': 'array', 'items': {'type': 'string'}}}}, rule='type') - data__packagedata_is_dict = isinstance(data__packagedata, dict) - if data__packagedata_is_dict: - data__packagedata_keys = 
set(data__packagedata.keys()) - for data__packagedata_key, data__packagedata_val in data__packagedata.items(): - if REGEX_PATTERNS['^.*$'].search(data__packagedata_key): - if data__packagedata_key in data__packagedata_keys: - data__packagedata_keys.remove(data__packagedata_key) - if not isinstance(data__packagedata_val, (list, tuple)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".package-data.{data__packagedata_key}".format(**locals()) + " must be array", value=data__packagedata_val, name="" + (name_prefix or "data") + ".package-data.{data__packagedata_key}".format(**locals()) + "", definition={'type': 'array', 'items': {'type': 'string'}}, rule='type') - data__packagedata_val_is_list = isinstance(data__packagedata_val, (list, tuple)) - if data__packagedata_val_is_list: - data__packagedata_val_len = len(data__packagedata_val) - for data__packagedata_val_x, data__packagedata_val_item in enumerate(data__packagedata_val): - if not isinstance(data__packagedata_val_item, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".package-data.{data__packagedata_key}[{data__packagedata_val_x}]".format(**locals()) + " must be string", value=data__packagedata_val_item, name="" + (name_prefix or "data") + ".package-data.{data__packagedata_key}[{data__packagedata_val_x}]".format(**locals()) + "", definition={'type': 'string'}, rule='type') - if data__packagedata_keys: - raise JsonSchemaValueException("" + (name_prefix or "data") + ".package-data must not contain "+str(data__packagedata_keys)+" properties", value=data__packagedata, name="" + (name_prefix or "data") + ".package-data", definition={'$$description': ['Mapping from package names to lists of glob patterns.', 'Usually this option is not needed when using ``include-package-data = true``', 'For more information on how to include data files, check ``setuptools`` `docs', '`_.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'format': 'python-module-name'}, {'const': '*'}]}, 'patternProperties': {'^.*$': {'type': 'array', 'items': {'type': 'string'}}}}, rule='additionalProperties') - data__packagedata_len = len(data__packagedata) - if data__packagedata_len != 0: - data__packagedata_property_names = True - for data__packagedata_key in data__packagedata: - try: - data__packagedata_key_one_of_count3 = 0 - if data__packagedata_key_one_of_count3 < 2: - try: - if isinstance(data__packagedata_key, str): - if not custom_formats["python-module-name"](data__packagedata_key): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".package-data must be python-module-name", value=data__packagedata_key, name="" + (name_prefix or "data") + ".package-data", definition={'format': 'python-module-name'}, rule='format') - data__packagedata_key_one_of_count3 += 1 - except JsonSchemaValueException: pass - if data__packagedata_key_one_of_count3 < 2: - try: - if data__packagedata_key != "*": - raise JsonSchemaValueException("" + (name_prefix or "data") + ".package-data must be same as const definition: *", value=data__packagedata_key, name="" + (name_prefix or "data") + ".package-data", definition={'const': '*'}, rule='const') - data__packagedata_key_one_of_count3 += 1 - except JsonSchemaValueException: pass - if data__packagedata_key_one_of_count3 != 1: - raise JsonSchemaValueException("" + (name_prefix or "data") + ".package-data must be valid exactly by one definition" + (" (" + str(data__packagedata_key_one_of_count3) + " matches found)"), value=data__packagedata_key, name="" + 
(name_prefix or "data") + ".package-data", definition={'oneOf': [{'format': 'python-module-name'}, {'const': '*'}]}, rule='oneOf') - except JsonSchemaValueException: - data__packagedata_property_names = False - if not data__packagedata_property_names: - raise JsonSchemaValueException("" + (name_prefix or "data") + ".package-data must be named by propertyName definition", value=data__packagedata, name="" + (name_prefix or "data") + ".package-data", definition={'$$description': ['Mapping from package names to lists of glob patterns.', 'Usually this option is not needed when using ``include-package-data = true``', 'For more information on how to include data files, check ``setuptools`` `docs', '`_.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'format': 'python-module-name'}, {'const': '*'}]}, 'patternProperties': {'^.*$': {'type': 'array', 'items': {'type': 'string'}}}}, rule='propertyNames') - if "include-package-data" in data_keys: - data_keys.remove("include-package-data") - data__includepackagedata = data["include-package-data"] - if not isinstance(data__includepackagedata, (bool)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".include-package-data must be boolean", value=data__includepackagedata, name="" + (name_prefix or "data") + ".include-package-data", definition={'$$description': ['Automatically include any data files inside the package directories', 'that are specified by ``MANIFEST.in``', 'For more information on how to include data files, check ``setuptools`` `docs', '`_.'], 'type': 'boolean'}, rule='type') - if "exclude-package-data" in data_keys: - data_keys.remove("exclude-package-data") - data__excludepackagedata = data["exclude-package-data"] - if not isinstance(data__excludepackagedata, (dict)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".exclude-package-data must be object", value=data__excludepackagedata, name="" + (name_prefix or "data") + ".exclude-package-data", definition={'$$description': ['Mapping from package names to lists of glob patterns that should be excluded', 'For more information on how to include data files, check ``setuptools`` `docs', '`_.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'format': 'python-module-name'}, {'const': '*'}]}, 'patternProperties': {'^.*$': {'type': 'array', 'items': {'type': 'string'}}}}, rule='type') - data__excludepackagedata_is_dict = isinstance(data__excludepackagedata, dict) - if data__excludepackagedata_is_dict: - data__excludepackagedata_keys = set(data__excludepackagedata.keys()) - for data__excludepackagedata_key, data__excludepackagedata_val in data__excludepackagedata.items(): - if REGEX_PATTERNS['^.*$'].search(data__excludepackagedata_key): - if data__excludepackagedata_key in data__excludepackagedata_keys: - data__excludepackagedata_keys.remove(data__excludepackagedata_key) - if not isinstance(data__excludepackagedata_val, (list, tuple)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".exclude-package-data.{data__excludepackagedata_key}".format(**locals()) + " must be array", value=data__excludepackagedata_val, name="" + (name_prefix or "data") + ".exclude-package-data.{data__excludepackagedata_key}".format(**locals()) + "", definition={'type': 'array', 'items': {'type': 'string'}}, rule='type') - data__excludepackagedata_val_is_list = isinstance(data__excludepackagedata_val, (list, tuple)) - if data__excludepackagedata_val_is_list: - data__excludepackagedata_val_len = 
len(data__excludepackagedata_val) - for data__excludepackagedata_val_x, data__excludepackagedata_val_item in enumerate(data__excludepackagedata_val): - if not isinstance(data__excludepackagedata_val_item, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".exclude-package-data.{data__excludepackagedata_key}[{data__excludepackagedata_val_x}]".format(**locals()) + " must be string", value=data__excludepackagedata_val_item, name="" + (name_prefix or "data") + ".exclude-package-data.{data__excludepackagedata_key}[{data__excludepackagedata_val_x}]".format(**locals()) + "", definition={'type': 'string'}, rule='type') - if data__excludepackagedata_keys: - raise JsonSchemaValueException("" + (name_prefix or "data") + ".exclude-package-data must not contain "+str(data__excludepackagedata_keys)+" properties", value=data__excludepackagedata, name="" + (name_prefix or "data") + ".exclude-package-data", definition={'$$description': ['Mapping from package names to lists of glob patterns that should be excluded', 'For more information on how to include data files, check ``setuptools`` `docs', '`_.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'format': 'python-module-name'}, {'const': '*'}]}, 'patternProperties': {'^.*$': {'type': 'array', 'items': {'type': 'string'}}}}, rule='additionalProperties') - data__excludepackagedata_len = len(data__excludepackagedata) - if data__excludepackagedata_len != 0: - data__excludepackagedata_property_names = True - for data__excludepackagedata_key in data__excludepackagedata: - try: - data__excludepackagedata_key_one_of_count4 = 0 - if data__excludepackagedata_key_one_of_count4 < 2: - try: - if isinstance(data__excludepackagedata_key, str): - if not custom_formats["python-module-name"](data__excludepackagedata_key): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".exclude-package-data must be python-module-name", value=data__excludepackagedata_key, name="" + (name_prefix or "data") + ".exclude-package-data", definition={'format': 'python-module-name'}, rule='format') - data__excludepackagedata_key_one_of_count4 += 1 - except JsonSchemaValueException: pass - if data__excludepackagedata_key_one_of_count4 < 2: - try: - if data__excludepackagedata_key != "*": - raise JsonSchemaValueException("" + (name_prefix or "data") + ".exclude-package-data must be same as const definition: *", value=data__excludepackagedata_key, name="" + (name_prefix or "data") + ".exclude-package-data", definition={'const': '*'}, rule='const') - data__excludepackagedata_key_one_of_count4 += 1 - except JsonSchemaValueException: pass - if data__excludepackagedata_key_one_of_count4 != 1: - raise JsonSchemaValueException("" + (name_prefix or "data") + ".exclude-package-data must be valid exactly by one definition" + (" (" + str(data__excludepackagedata_key_one_of_count4) + " matches found)"), value=data__excludepackagedata_key, name="" + (name_prefix or "data") + ".exclude-package-data", definition={'oneOf': [{'format': 'python-module-name'}, {'const': '*'}]}, rule='oneOf') - except JsonSchemaValueException: - data__excludepackagedata_property_names = False - if not data__excludepackagedata_property_names: - raise JsonSchemaValueException("" + (name_prefix or "data") + ".exclude-package-data must be named by propertyName definition", value=data__excludepackagedata, name="" + (name_prefix or "data") + ".exclude-package-data", definition={'$$description': ['Mapping from package names to lists of glob patterns that should be 
excluded', 'For more information on how to include data files, check ``setuptools`` `docs', '`_.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'format': 'python-module-name'}, {'const': '*'}]}, 'patternProperties': {'^.*$': {'type': 'array', 'items': {'type': 'string'}}}}, rule='propertyNames') - if "namespace-packages" in data_keys: - data_keys.remove("namespace-packages") - data__namespacepackages = data["namespace-packages"] - if not isinstance(data__namespacepackages, (list, tuple)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".namespace-packages must be array", value=data__namespacepackages, name="" + (name_prefix or "data") + ".namespace-packages", definition={'type': 'array', 'items': {'type': 'string', 'format': 'python-module-name'}, '$comment': 'https://setuptools.pypa.io/en/latest/userguide/package_discovery.html'}, rule='type') - data__namespacepackages_is_list = isinstance(data__namespacepackages, (list, tuple)) - if data__namespacepackages_is_list: - data__namespacepackages_len = len(data__namespacepackages) - for data__namespacepackages_x, data__namespacepackages_item in enumerate(data__namespacepackages): - if not isinstance(data__namespacepackages_item, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".namespace-packages[{data__namespacepackages_x}]".format(**locals()) + " must be string", value=data__namespacepackages_item, name="" + (name_prefix or "data") + ".namespace-packages[{data__namespacepackages_x}]".format(**locals()) + "", definition={'type': 'string', 'format': 'python-module-name'}, rule='type') - if isinstance(data__namespacepackages_item, str): - if not custom_formats["python-module-name"](data__namespacepackages_item): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".namespace-packages[{data__namespacepackages_x}]".format(**locals()) + " must be python-module-name", value=data__namespacepackages_item, name="" + (name_prefix or "data") + ".namespace-packages[{data__namespacepackages_x}]".format(**locals()) + "", definition={'type': 'string', 'format': 'python-module-name'}, rule='format') - if "py-modules" in data_keys: - data_keys.remove("py-modules") - data__pymodules = data["py-modules"] - if not isinstance(data__pymodules, (list, tuple)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".py-modules must be array", value=data__pymodules, name="" + (name_prefix or "data") + ".py-modules", definition={'description': 'Modules that setuptools will manipulate', 'type': 'array', 'items': {'type': 'string', 'format': 'python-module-name'}, '$comment': 'TODO: clarify the relationship with ``packages``'}, rule='type') - data__pymodules_is_list = isinstance(data__pymodules, (list, tuple)) - if data__pymodules_is_list: - data__pymodules_len = len(data__pymodules) - for data__pymodules_x, data__pymodules_item in enumerate(data__pymodules): - if not isinstance(data__pymodules_item, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".py-modules[{data__pymodules_x}]".format(**locals()) + " must be string", value=data__pymodules_item, name="" + (name_prefix or "data") + ".py-modules[{data__pymodules_x}]".format(**locals()) + "", definition={'type': 'string', 'format': 'python-module-name'}, rule='type') - if isinstance(data__pymodules_item, str): - if not custom_formats["python-module-name"](data__pymodules_item): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".py-modules[{data__pymodules_x}]".format(**locals()) + " 
must be python-module-name", value=data__pymodules_item, name="" + (name_prefix or "data") + ".py-modules[{data__pymodules_x}]".format(**locals()) + "", definition={'type': 'string', 'format': 'python-module-name'}, rule='format') - if "data-files" in data_keys: - data_keys.remove("data-files") - data__datafiles = data["data-files"] - if not isinstance(data__datafiles, (dict)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".data-files must be object", value=data__datafiles, name="" + (name_prefix or "data") + ".data-files", definition={'$$description': ['**DEPRECATED**: dict-like structure where each key represents a directory and', 'the value is a list of glob patterns that should be installed in them.', "Please notice this don't work with wheels. See `data files support", '`_'], 'type': 'object', 'patternProperties': {'^.*$': {'type': 'array', 'items': {'type': 'string'}}}}, rule='type') - data__datafiles_is_dict = isinstance(data__datafiles, dict) - if data__datafiles_is_dict: - data__datafiles_keys = set(data__datafiles.keys()) - for data__datafiles_key, data__datafiles_val in data__datafiles.items(): - if REGEX_PATTERNS['^.*$'].search(data__datafiles_key): - if data__datafiles_key in data__datafiles_keys: - data__datafiles_keys.remove(data__datafiles_key) - if not isinstance(data__datafiles_val, (list, tuple)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".data-files.{data__datafiles_key}".format(**locals()) + " must be array", value=data__datafiles_val, name="" + (name_prefix or "data") + ".data-files.{data__datafiles_key}".format(**locals()) + "", definition={'type': 'array', 'items': {'type': 'string'}}, rule='type') - data__datafiles_val_is_list = isinstance(data__datafiles_val, (list, tuple)) - if data__datafiles_val_is_list: - data__datafiles_val_len = len(data__datafiles_val) - for data__datafiles_val_x, data__datafiles_val_item in enumerate(data__datafiles_val): - if not isinstance(data__datafiles_val_item, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".data-files.{data__datafiles_key}[{data__datafiles_val_x}]".format(**locals()) + " must be string", value=data__datafiles_val_item, name="" + (name_prefix or "data") + ".data-files.{data__datafiles_key}[{data__datafiles_val_x}]".format(**locals()) + "", definition={'type': 'string'}, rule='type') - if "cmdclass" in data_keys: - data_keys.remove("cmdclass") - data__cmdclass = data["cmdclass"] - if not isinstance(data__cmdclass, (dict)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".cmdclass must be object", value=data__cmdclass, name="" + (name_prefix or "data") + ".cmdclass", definition={'$$description': ['Mapping of distutils-style command names to ``setuptools.Command`` subclasses', 'which in turn should be represented by strings with a qualified class name', '(i.e., "dotted" form with module), e.g.::\n\n', ' cmdclass = {mycmd = "pkg.subpkg.module.CommandClass"}\n\n', 'The command class should be a directly defined at the top-level of the', 'containing module (no class nesting).'], 'type': 'object', 'patternProperties': {'^.*$': {'type': 'string', 'format': 'python-qualified-identifier'}}}, rule='type') - data__cmdclass_is_dict = isinstance(data__cmdclass, dict) - if data__cmdclass_is_dict: - data__cmdclass_keys = set(data__cmdclass.keys()) - for data__cmdclass_key, data__cmdclass_val in data__cmdclass.items(): - if REGEX_PATTERNS['^.*$'].search(data__cmdclass_key): - if data__cmdclass_key in data__cmdclass_keys: - 
data__cmdclass_keys.remove(data__cmdclass_key) - if not isinstance(data__cmdclass_val, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".cmdclass.{data__cmdclass_key}".format(**locals()) + " must be string", value=data__cmdclass_val, name="" + (name_prefix or "data") + ".cmdclass.{data__cmdclass_key}".format(**locals()) + "", definition={'type': 'string', 'format': 'python-qualified-identifier'}, rule='type') - if isinstance(data__cmdclass_val, str): - if not custom_formats["python-qualified-identifier"](data__cmdclass_val): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".cmdclass.{data__cmdclass_key}".format(**locals()) + " must be python-qualified-identifier", value=data__cmdclass_val, name="" + (name_prefix or "data") + ".cmdclass.{data__cmdclass_key}".format(**locals()) + "", definition={'type': 'string', 'format': 'python-qualified-identifier'}, rule='format') - if "license-files" in data_keys: - data_keys.remove("license-files") - data__licensefiles = data["license-files"] - if not isinstance(data__licensefiles, (list, tuple)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".license-files must be array", value=data__licensefiles, name="" + (name_prefix or "data") + ".license-files", definition={'type': 'array', 'items': {'type': 'string'}, '$$description': ['PROVISIONAL: List of glob patterns for all license files being distributed.', '(might become standard with PEP 639).'], 'default': ['LICEN[CS]E*', ' COPYING*', ' NOTICE*', 'AUTHORS*'], '$comment': 'TODO: revise if PEP 639 is accepted. Probably ``project.license-files``?'}, rule='type') - data__licensefiles_is_list = isinstance(data__licensefiles, (list, tuple)) - if data__licensefiles_is_list: - data__licensefiles_len = len(data__licensefiles) - for data__licensefiles_x, data__licensefiles_item in enumerate(data__licensefiles): - if not isinstance(data__licensefiles_item, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".license-files[{data__licensefiles_x}]".format(**locals()) + " must be string", value=data__licensefiles_item, name="" + (name_prefix or "data") + ".license-files[{data__licensefiles_x}]".format(**locals()) + "", definition={'type': 'string'}, rule='type') - else: data["license-files"] = ['LICEN[CS]E*', ' COPYING*', ' NOTICE*', 'AUTHORS*'] - if "dynamic" in data_keys: - data_keys.remove("dynamic") - data__dynamic = data["dynamic"] - if not isinstance(data__dynamic, (dict)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".dynamic must be object", value=data__dynamic, name="" + (name_prefix or "data") + ".dynamic", definition={'type': 'object', 'description': 'Instructions for loading :pep:`621`-related metadata dynamically', 'additionalProperties': False, 'properties': {'version': {'$$description': ['A version dynamically loaded via either the ``attr:`` or ``file:``', 'directives. Please make sure the given file or attribute respects :pep:`440`.'], 'oneOf': [{'title': "'attr:' directive", '$id': '#/definitions/attr-directive', '$$description': ['Value is read from a module attribute. 
Supports callables and iterables;', 'unsupported types are cast via ``str()``'], 'type': 'object', 'additionalProperties': False, 'properties': {'attr': {'type': 'string'}}, 'required': ['attr']}, {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}]}, 'classifiers': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, 'description': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, 'dependencies': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, 'entry-points': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, 'optional-dependencies': {'type': 'object', 'propertyNames': {'format': 'python-identifier'}, 'additionalProperties': False, 'patternProperties': {'.+': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}}}, 'readme': {'anyOf': [{'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, {'properties': {'content-type': {'type': 'string'}}}], 'required': ['file']}}}, rule='type') - data__dynamic_is_dict = isinstance(data__dynamic, dict) - if data__dynamic_is_dict: - data__dynamic_keys = set(data__dynamic.keys()) - if "version" in data__dynamic_keys: - data__dynamic_keys.remove("version") - data__dynamic__version = data__dynamic["version"] - data__dynamic__version_one_of_count5 = 0 - if data__dynamic__version_one_of_count5 < 2: - try: - validate_https___setuptools_pypa_io_en_latest_references_keywords_html__definitions_attr_directive(data__dynamic__version, custom_formats, (name_prefix or "data") + ".dynamic.version") - data__dynamic__version_one_of_count5 += 1 - except JsonSchemaValueException: pass - if data__dynamic__version_one_of_count5 < 2: - try: - 
[… elided: the remainder of this deletion hunk, several hundred auto-generated lines from setuptools' vendored ``_validate_pyproject/fastjsonschema_validations.py`` (fastjsonschema-generated JSON-schema validators, flattened beyond recovery here). The deleted code finishes the ``tool.setuptools`` table validator — the ``dynamic`` sub-table handling for ``version``, ``classifiers``, ``description``, ``dependencies``, ``entry-points``, ``optional-dependencies`` and ``readme`` via the ``attr:``/``file:`` directives — and then removes the following generated validator functions, whose signatures are kept for reference: …]

-def validate_https___setuptools_pypa_io_en_latest_references_keywords_html__definitions_file_directive(data, custom_formats={}, name_prefix=None):
-def validate_https___setuptools_pypa_io_en_latest_references_keywords_html__definitions_attr_directive(data, custom_formats={}, name_prefix=None):
-def validate_https___setuptools_pypa_io_en_latest_references_keywords_html__definitions_find_directive(data, custom_formats={}, name_prefix=None):
-def validate_https___docs_python_org_3_install(data, custom_formats={}, name_prefix=None):
-def validate_https___packaging_python_org_en_latest_specifications_declaring_project_metadata(data, custom_formats={}, name_prefix=None):

[… the deletion of the PEP 621 ``project`` table validator continues past this excerpt …]
``text/markdown``). The ``charset`` parameter is assumed', 'UTF-8 when not present.'], '$comment': 'TODO: add regex pattern or format?'}}, 'required': ['content-type']}, rule='required') - data__readme_keys = set(data__readme.keys()) - if "content-type" in data__readme_keys: - data__readme_keys.remove("content-type") - data__readme__contenttype = data__readme["content-type"] - if not isinstance(data__readme__contenttype, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".readme.content-type must be string", value=data__readme__contenttype, name="" + (name_prefix or "data") + ".readme.content-type", definition={'type': 'string', '$$description': ['Content-type (:rfc:`1341`) of the full description', '(e.g. ``text/markdown``). The ``charset`` parameter is assumed', 'UTF-8 when not present.'], '$comment': 'TODO: add regex pattern or format?'}, rule='type') - data__readme_one_of_count8 += 1 - except JsonSchemaValueException: pass - if data__readme_one_of_count8 != 1: - raise JsonSchemaValueException("" + (name_prefix or "data") + ".readme must be valid exactly by one definition" + (" (" + str(data__readme_one_of_count8) + " matches found)"), value=data__readme, name="" + (name_prefix or "data") + ".readme", definition={'$$description': ['`Full/detailed description of the project in the form of a README', '`_', "with meaning similar to the one defined in `core metadata's Description", '`_'], 'oneOf': [{'type': 'string', '$$description': ['Relative path to a text file (UTF-8) containing the full description', 'of the project. If the file path ends in case-insensitive ``.md`` or', '``.rst`` suffixes, then the content-type is respectively', '``text/markdown`` or ``text/x-rst``']}, {'type': 'object', 'allOf': [{'anyOf': [{'properties': {'file': {'type': 'string', '$$description': ['Relative path to a text file containing the full description', 'of the project.']}}, 'required': ['file']}, {'properties': {'text': {'type': 'string', 'description': 'Full text describing the project.'}}, 'required': ['text']}]}, {'properties': {'content-type': {'type': 'string', '$$description': ['Content-type (:rfc:`1341`) of the full description', '(e.g. ``text/markdown``). 
The ``charset`` parameter is assumed', 'UTF-8 when not present.'], '$comment': 'TODO: add regex pattern or format?'}}, 'required': ['content-type']}]}]}, rule='oneOf') - if "requires-python" in data_keys: - data_keys.remove("requires-python") - data__requirespython = data["requires-python"] - if not isinstance(data__requirespython, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".requires-python must be string", value=data__requirespython, name="" + (name_prefix or "data") + ".requires-python", definition={'type': 'string', 'format': 'pep508-versionspec', '$$description': ['`The Python version requirements of the project', '`_.']}, rule='type') - if isinstance(data__requirespython, str): - if not custom_formats["pep508-versionspec"](data__requirespython): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".requires-python must be pep508-versionspec", value=data__requirespython, name="" + (name_prefix or "data") + ".requires-python", definition={'type': 'string', 'format': 'pep508-versionspec', '$$description': ['`The Python version requirements of the project', '`_.']}, rule='format') - if "license" in data_keys: - data_keys.remove("license") - data__license = data["license"] - data__license_one_of_count10 = 0 - if data__license_one_of_count10 < 2: - try: - data__license_is_dict = isinstance(data__license, dict) - if data__license_is_dict: - data__license_len = len(data__license) - if not all(prop in data__license for prop in ['file']): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".license must contain ['file'] properties", value=data__license, name="" + (name_prefix or "data") + ".license", definition={'properties': {'file': {'type': 'string', '$$description': ['Relative path to the file (UTF-8) which contains the license for the', 'project.']}}, 'required': ['file']}, rule='required') - data__license_keys = set(data__license.keys()) - if "file" in data__license_keys: - data__license_keys.remove("file") - data__license__file = data__license["file"] - if not isinstance(data__license__file, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".license.file must be string", value=data__license__file, name="" + (name_prefix or "data") + ".license.file", definition={'type': 'string', '$$description': ['Relative path to the file (UTF-8) which contains the license for the', 'project.']}, rule='type') - data__license_one_of_count10 += 1 - except JsonSchemaValueException: pass - if data__license_one_of_count10 < 2: - try: - data__license_is_dict = isinstance(data__license, dict) - if data__license_is_dict: - data__license_len = len(data__license) - if not all(prop in data__license for prop in ['text']): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".license must contain ['text'] properties", value=data__license, name="" + (name_prefix or "data") + ".license", definition={'properties': {'text': {'type': 'string', '$$description': ['The license of the project whose meaning is that of the', '`License field from the core metadata', '`_.']}}, 'required': ['text']}, rule='required') - data__license_keys = set(data__license.keys()) - if "text" in data__license_keys: - data__license_keys.remove("text") - data__license__text = data__license["text"] - if not isinstance(data__license__text, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".license.text must be string", value=data__license__text, name="" + (name_prefix or "data") + ".license.text", definition={'type': 'string', 
'$$description': ['The license of the project whose meaning is that of the', '`License field from the core metadata', '`_.']}, rule='type') - data__license_one_of_count10 += 1 - except JsonSchemaValueException: pass - if data__license_one_of_count10 != 1: - raise JsonSchemaValueException("" + (name_prefix or "data") + ".license must be valid exactly by one definition" + (" (" + str(data__license_one_of_count10) + " matches found)"), value=data__license, name="" + (name_prefix or "data") + ".license", definition={'description': '`Project license `_.', 'oneOf': [{'properties': {'file': {'type': 'string', '$$description': ['Relative path to the file (UTF-8) which contains the license for the', 'project.']}}, 'required': ['file']}, {'properties': {'text': {'type': 'string', '$$description': ['The license of the project whose meaning is that of the', '`License field from the core metadata', '`_.']}}, 'required': ['text']}]}, rule='oneOf') - if "authors" in data_keys: - data_keys.remove("authors") - data__authors = data["authors"] - if not isinstance(data__authors, (list, tuple)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".authors must be array", value=data__authors, name="" + (name_prefix or "data") + ".authors", definition={'type': 'array', 'items': {'$id': '#/definitions/author', 'title': 'Author or Maintainer', '$comment': 'https://www.python.org/dev/peps/pep-0621/#authors-maintainers', 'type': 'object', 'properties': {'name': {'type': 'string', '$$description': ['MUST be a valid email name, i.e. whatever can be put as a name, before an', 'email, in :rfc:`822`.']}, 'email': {'type': 'string', 'format': 'idn-email', 'description': 'MUST be a valid email address'}}}, '$$description': ["The people or organizations considered to be the 'authors' of the project.", 'The exact meaning is open to interpretation (e.g. original or primary authors,', 'current maintainers, or owners of the package).']}, rule='type') - data__authors_is_list = isinstance(data__authors, (list, tuple)) - if data__authors_is_list: - data__authors_len = len(data__authors) - for data__authors_x, data__authors_item in enumerate(data__authors): - validate_https___packaging_python_org_en_latest_specifications_declaring_project_metadata___definitions_author(data__authors_item, custom_formats, (name_prefix or "data") + ".authors[{data__authors_x}]") - if "maintainers" in data_keys: - data_keys.remove("maintainers") - data__maintainers = data["maintainers"] - if not isinstance(data__maintainers, (list, tuple)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".maintainers must be array", value=data__maintainers, name="" + (name_prefix or "data") + ".maintainers", definition={'type': 'array', 'items': {'$id': '#/definitions/author', 'title': 'Author or Maintainer', '$comment': 'https://www.python.org/dev/peps/pep-0621/#authors-maintainers', 'type': 'object', 'properties': {'name': {'type': 'string', '$$description': ['MUST be a valid email name, i.e. 
whatever can be put as a name, before an', 'email, in :rfc:`822`.']}, 'email': {'type': 'string', 'format': 'idn-email', 'description': 'MUST be a valid email address'}}}, '$$description': ["The people or organizations considered to be the 'maintainers' of the project.", 'Similarly to ``authors``, the exact meaning is open to interpretation.']}, rule='type') - data__maintainers_is_list = isinstance(data__maintainers, (list, tuple)) - if data__maintainers_is_list: - data__maintainers_len = len(data__maintainers) - for data__maintainers_x, data__maintainers_item in enumerate(data__maintainers): - validate_https___packaging_python_org_en_latest_specifications_declaring_project_metadata___definitions_author(data__maintainers_item, custom_formats, (name_prefix or "data") + ".maintainers[{data__maintainers_x}]") - if "keywords" in data_keys: - data_keys.remove("keywords") - data__keywords = data["keywords"] - if not isinstance(data__keywords, (list, tuple)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".keywords must be array", value=data__keywords, name="" + (name_prefix or "data") + ".keywords", definition={'type': 'array', 'items': {'type': 'string'}, 'description': 'List of keywords to assist searching for the distribution in a larger catalog.'}, rule='type') - data__keywords_is_list = isinstance(data__keywords, (list, tuple)) - if data__keywords_is_list: - data__keywords_len = len(data__keywords) - for data__keywords_x, data__keywords_item in enumerate(data__keywords): - if not isinstance(data__keywords_item, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".keywords[{data__keywords_x}]".format(**locals()) + " must be string", value=data__keywords_item, name="" + (name_prefix or "data") + ".keywords[{data__keywords_x}]".format(**locals()) + "", definition={'type': 'string'}, rule='type') - if "classifiers" in data_keys: - data_keys.remove("classifiers") - data__classifiers = data["classifiers"] - if not isinstance(data__classifiers, (list, tuple)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".classifiers must be array", value=data__classifiers, name="" + (name_prefix or "data") + ".classifiers", definition={'type': 'array', 'items': {'type': 'string', 'format': 'trove-classifier', 'description': '`PyPI classifier `_.'}, '$$description': ['`Trove classifiers `_', 'which apply to the project.']}, rule='type') - data__classifiers_is_list = isinstance(data__classifiers, (list, tuple)) - if data__classifiers_is_list: - data__classifiers_len = len(data__classifiers) - for data__classifiers_x, data__classifiers_item in enumerate(data__classifiers): - if not isinstance(data__classifiers_item, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".classifiers[{data__classifiers_x}]".format(**locals()) + " must be string", value=data__classifiers_item, name="" + (name_prefix or "data") + ".classifiers[{data__classifiers_x}]".format(**locals()) + "", definition={'type': 'string', 'format': 'trove-classifier', 'description': '`PyPI classifier `_.'}, rule='type') - if isinstance(data__classifiers_item, str): - if not custom_formats["trove-classifier"](data__classifiers_item): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".classifiers[{data__classifiers_x}]".format(**locals()) + " must be trove-classifier", value=data__classifiers_item, name="" + (name_prefix or "data") + ".classifiers[{data__classifiers_x}]".format(**locals()) + "", definition={'type': 'string', 'format': 'trove-classifier', 
'description': '`PyPI classifier `_.'}, rule='format') - if "urls" in data_keys: - data_keys.remove("urls") - data__urls = data["urls"] - if not isinstance(data__urls, (dict)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".urls must be object", value=data__urls, name="" + (name_prefix or "data") + ".urls", definition={'type': 'object', 'description': 'URLs associated with the project in the form ``label => value``.', 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', 'format': 'url'}}}, rule='type') - data__urls_is_dict = isinstance(data__urls, dict) - if data__urls_is_dict: - data__urls_keys = set(data__urls.keys()) - for data__urls_key, data__urls_val in data__urls.items(): - if REGEX_PATTERNS['^.+$'].search(data__urls_key): - if data__urls_key in data__urls_keys: - data__urls_keys.remove(data__urls_key) - if not isinstance(data__urls_val, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".urls.{data__urls_key}".format(**locals()) + " must be string", value=data__urls_val, name="" + (name_prefix or "data") + ".urls.{data__urls_key}".format(**locals()) + "", definition={'type': 'string', 'format': 'url'}, rule='type') - if isinstance(data__urls_val, str): - if not custom_formats["url"](data__urls_val): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".urls.{data__urls_key}".format(**locals()) + " must be url", value=data__urls_val, name="" + (name_prefix or "data") + ".urls.{data__urls_key}".format(**locals()) + "", definition={'type': 'string', 'format': 'url'}, rule='format') - if data__urls_keys: - raise JsonSchemaValueException("" + (name_prefix or "data") + ".urls must not contain "+str(data__urls_keys)+" properties", value=data__urls, name="" + (name_prefix or "data") + ".urls", definition={'type': 'object', 'description': 'URLs associated with the project in the form ``label => value``.', 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', 'format': 'url'}}}, rule='additionalProperties') - if "scripts" in data_keys: - data_keys.remove("scripts") - data__scripts = data["scripts"] - validate_https___packaging_python_org_en_latest_specifications_declaring_project_metadata___definitions_entry_point_group(data__scripts, custom_formats, (name_prefix or "data") + ".scripts") - if "gui-scripts" in data_keys: - data_keys.remove("gui-scripts") - data__guiscripts = data["gui-scripts"] - validate_https___packaging_python_org_en_latest_specifications_declaring_project_metadata___definitions_entry_point_group(data__guiscripts, custom_formats, (name_prefix or "data") + ".gui-scripts") - if "entry-points" in data_keys: - data_keys.remove("entry-points") - data__entrypoints = data["entry-points"] - data__entrypoints_is_dict = isinstance(data__entrypoints, dict) - if data__entrypoints_is_dict: - data__entrypoints_keys = set(data__entrypoints.keys()) - for data__entrypoints_key, data__entrypoints_val in data__entrypoints.items(): - if REGEX_PATTERNS['^.+$'].search(data__entrypoints_key): - if data__entrypoints_key in data__entrypoints_keys: - data__entrypoints_keys.remove(data__entrypoints_key) - validate_https___packaging_python_org_en_latest_specifications_declaring_project_metadata___definitions_entry_point_group(data__entrypoints_val, custom_formats, (name_prefix or "data") + ".entry-points.{data__entrypoints_key}") - if data__entrypoints_keys: - raise JsonSchemaValueException("" + (name_prefix or "data") + ".entry-points must not contain "+str(data__entrypoints_keys)+" properties", 
value=data__entrypoints, name="" + (name_prefix or "data") + ".entry-points", definition={'$$description': ['Instruct the installer to expose the given modules/functions via', '``entry-point`` discovery mechanism (useful for plugins).', 'More information available in the `Python packaging guide', '`_.'], 'propertyNames': {'format': 'python-entrypoint-group'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'$id': '#/definitions/entry-point-group', 'title': 'Entry-points', 'type': 'object', '$$description': ['Entry-points are grouped together to indicate what sort of capabilities they', 'provide.', 'See the `packaging guides', '`_', 'and `setuptools docs', '`_', 'for more information.'], 'propertyNames': {'format': 'python-entrypoint-name'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', '$$description': ['Reference to a Python object. It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}}}}}, rule='additionalProperties') - data__entrypoints_len = len(data__entrypoints) - if data__entrypoints_len != 0: - data__entrypoints_property_names = True - for data__entrypoints_key in data__entrypoints: - try: - if isinstance(data__entrypoints_key, str): - if not custom_formats["python-entrypoint-group"](data__entrypoints_key): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".entry-points must be python-entrypoint-group", value=data__entrypoints_key, name="" + (name_prefix or "data") + ".entry-points", definition={'format': 'python-entrypoint-group'}, rule='format') - except JsonSchemaValueException: - data__entrypoints_property_names = False - if not data__entrypoints_property_names: - raise JsonSchemaValueException("" + (name_prefix or "data") + ".entry-points must be named by propertyName definition", value=data__entrypoints, name="" + (name_prefix or "data") + ".entry-points", definition={'$$description': ['Instruct the installer to expose the given modules/functions via', '``entry-point`` discovery mechanism (useful for plugins).', 'More information available in the `Python packaging guide', '`_.'], 'propertyNames': {'format': 'python-entrypoint-group'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'$id': '#/definitions/entry-point-group', 'title': 'Entry-points', 'type': 'object', '$$description': ['Entry-points are grouped together to indicate what sort of capabilities they', 'provide.', 'See the `packaging guides', '`_', 'and `setuptools docs', '`_', 'for more information.'], 'propertyNames': {'format': 'python-entrypoint-name'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', '$$description': ['Reference to a Python object. 
It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}}}}}, rule='propertyNames') - if "dependencies" in data_keys: - data_keys.remove("dependencies") - data__dependencies = data["dependencies"] - if not isinstance(data__dependencies, (list, tuple)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".dependencies must be array", value=data__dependencies, name="" + (name_prefix or "data") + ".dependencies", definition={'type': 'array', 'description': 'Project (mandatory) dependencies.', 'items': {'$id': '#/definitions/dependency', 'title': 'Dependency', 'type': 'string', 'description': 'Project dependency specification according to PEP 508', 'format': 'pep508'}}, rule='type') - data__dependencies_is_list = isinstance(data__dependencies, (list, tuple)) - if data__dependencies_is_list: - data__dependencies_len = len(data__dependencies) - for data__dependencies_x, data__dependencies_item in enumerate(data__dependencies): - validate_https___packaging_python_org_en_latest_specifications_declaring_project_metadata___definitions_dependency(data__dependencies_item, custom_formats, (name_prefix or "data") + ".dependencies[{data__dependencies_x}]") - if "optional-dependencies" in data_keys: - data_keys.remove("optional-dependencies") - data__optionaldependencies = data["optional-dependencies"] - if not isinstance(data__optionaldependencies, (dict)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".optional-dependencies must be object", value=data__optionaldependencies, name="" + (name_prefix or "data") + ".optional-dependencies", definition={'type': 'object', 'description': 'Optional dependency for the project', 'propertyNames': {'format': 'pep508-identifier'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'array', 'items': {'$id': '#/definitions/dependency', 'title': 'Dependency', 'type': 'string', 'description': 'Project dependency specification according to PEP 508', 'format': 'pep508'}}}}, rule='type') - data__optionaldependencies_is_dict = isinstance(data__optionaldependencies, dict) - if data__optionaldependencies_is_dict: - data__optionaldependencies_keys = set(data__optionaldependencies.keys()) - for data__optionaldependencies_key, data__optionaldependencies_val in data__optionaldependencies.items(): - if REGEX_PATTERNS['^.+$'].search(data__optionaldependencies_key): - if data__optionaldependencies_key in data__optionaldependencies_keys: - data__optionaldependencies_keys.remove(data__optionaldependencies_key) - if not isinstance(data__optionaldependencies_val, (list, tuple)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".optional-dependencies.{data__optionaldependencies_key}".format(**locals()) + " must be array", value=data__optionaldependencies_val, name="" + (name_prefix or "data") + ".optional-dependencies.{data__optionaldependencies_key}".format(**locals()) + "", definition={'type': 'array', 'items': {'$id': '#/definitions/dependency', 'title': 'Dependency', 'type': 'string', 'description': 'Project dependency specification according to PEP 508', 'format': 'pep508'}}, rule='type') - data__optionaldependencies_val_is_list = isinstance(data__optionaldependencies_val, (list, tuple)) - if data__optionaldependencies_val_is_list: - data__optionaldependencies_val_len = len(data__optionaldependencies_val) - for data__optionaldependencies_val_x, 
data__optionaldependencies_val_item in enumerate(data__optionaldependencies_val): - validate_https___packaging_python_org_en_latest_specifications_declaring_project_metadata___definitions_dependency(data__optionaldependencies_val_item, custom_formats, (name_prefix or "data") + ".optional-dependencies.{data__optionaldependencies_key}[{data__optionaldependencies_val_x}]") - if data__optionaldependencies_keys: - raise JsonSchemaValueException("" + (name_prefix or "data") + ".optional-dependencies must not contain "+str(data__optionaldependencies_keys)+" properties", value=data__optionaldependencies, name="" + (name_prefix or "data") + ".optional-dependencies", definition={'type': 'object', 'description': 'Optional dependency for the project', 'propertyNames': {'format': 'pep508-identifier'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'array', 'items': {'$id': '#/definitions/dependency', 'title': 'Dependency', 'type': 'string', 'description': 'Project dependency specification according to PEP 508', 'format': 'pep508'}}}}, rule='additionalProperties') - data__optionaldependencies_len = len(data__optionaldependencies) - if data__optionaldependencies_len != 0: - data__optionaldependencies_property_names = True - for data__optionaldependencies_key in data__optionaldependencies: - try: - if isinstance(data__optionaldependencies_key, str): - if not custom_formats["pep508-identifier"](data__optionaldependencies_key): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".optional-dependencies must be pep508-identifier", value=data__optionaldependencies_key, name="" + (name_prefix or "data") + ".optional-dependencies", definition={'format': 'pep508-identifier'}, rule='format') - except JsonSchemaValueException: - data__optionaldependencies_property_names = False - if not data__optionaldependencies_property_names: - raise JsonSchemaValueException("" + (name_prefix or "data") + ".optional-dependencies must be named by propertyName definition", value=data__optionaldependencies, name="" + (name_prefix or "data") + ".optional-dependencies", definition={'type': 'object', 'description': 'Optional dependency for the project', 'propertyNames': {'format': 'pep508-identifier'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'array', 'items': {'$id': '#/definitions/dependency', 'title': 'Dependency', 'type': 'string', 'description': 'Project dependency specification according to PEP 508', 'format': 'pep508'}}}}, rule='propertyNames') - if "dynamic" in data_keys: - data_keys.remove("dynamic") - data__dynamic = data["dynamic"] - if not isinstance(data__dynamic, (list, tuple)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".dynamic must be array", value=data__dynamic, name="" + (name_prefix or "data") + ".dynamic", definition={'type': 'array', '$$description': ['Specifies which fields are intentionally unspecified and expected to be', 'dynamically provided by build tools'], 'items': {'enum': ['version', 'description', 'readme', 'requires-python', 'license', 'authors', 'maintainers', 'keywords', 'classifiers', 'urls', 'scripts', 'gui-scripts', 'entry-points', 'dependencies', 'optional-dependencies']}}, rule='type') - data__dynamic_is_list = isinstance(data__dynamic, (list, tuple)) - if data__dynamic_is_list: - data__dynamic_len = len(data__dynamic) - for data__dynamic_x, data__dynamic_item in enumerate(data__dynamic): - if data__dynamic_item not in ['version', 'description', 'readme', 'requires-python', 'license', 'authors', 
'maintainers', 'keywords', 'classifiers', 'urls', 'scripts', 'gui-scripts', 'entry-points', 'dependencies', 'optional-dependencies']: - raise JsonSchemaValueException("" + (name_prefix or "data") + ".dynamic[{data__dynamic_x}]".format(**locals()) + " must be one of ['version', 'description', 'readme', 'requires-python', 'license', 'authors', 'maintainers', 'keywords', 'classifiers', 'urls', 'scripts', 'gui-scripts', 'entry-points', 'dependencies', 'optional-dependencies']", value=data__dynamic_item, name="" + (name_prefix or "data") + ".dynamic[{data__dynamic_x}]".format(**locals()) + "", definition={'enum': ['version', 'description', 'readme', 'requires-python', 'license', 'authors', 'maintainers', 'keywords', 'classifiers', 'urls', 'scripts', 'gui-scripts', 'entry-points', 'dependencies', 'optional-dependencies']}, rule='enum') - if data_keys: - raise JsonSchemaValueException("" + (name_prefix or "data") + " must not contain "+str(data_keys)+" properties", value=data, name="" + (name_prefix or "data") + "", definition={'$schema': 'http://json-schema.org/draft-07/schema', '$id': 'https://packaging.python.org/en/latest/specifications/declaring-project-metadata/', 'title': 'Package metadata stored in the ``project`` table', '$$description': ['Data structure for the **project** table inside ``pyproject.toml``', '(as initially defined in :pep:`621`)'], 'type': 'object', 'properties': {'name': {'type': 'string', 'description': 'The name (primary identifier) of the project. MUST be statically defined.', 'format': 'pep508-identifier'}, 'version': {'type': 'string', 'description': 'The version of the project as supported by :pep:`440`.', 'format': 'pep440'}, 'description': {'type': 'string', '$$description': ['The `summary description of the project', '`_']}, 'readme': {'$$description': ['`Full/detailed description of the project in the form of a README', '`_', "with meaning similar to the one defined in `core metadata's Description", '`_'], 'oneOf': [{'type': 'string', '$$description': ['Relative path to a text file (UTF-8) containing the full description', 'of the project. If the file path ends in case-insensitive ``.md`` or', '``.rst`` suffixes, then the content-type is respectively', '``text/markdown`` or ``text/x-rst``']}, {'type': 'object', 'allOf': [{'anyOf': [{'properties': {'file': {'type': 'string', '$$description': ['Relative path to a text file containing the full description', 'of the project.']}}, 'required': ['file']}, {'properties': {'text': {'type': 'string', 'description': 'Full text describing the project.'}}, 'required': ['text']}]}, {'properties': {'content-type': {'type': 'string', '$$description': ['Content-type (:rfc:`1341`) of the full description', '(e.g. ``text/markdown``). 
The ``charset`` parameter is assumed', 'UTF-8 when not present.'], '$comment': 'TODO: add regex pattern or format?'}}, 'required': ['content-type']}]}]}, 'requires-python': {'type': 'string', 'format': 'pep508-versionspec', '$$description': ['`The Python version requirements of the project', '`_.']}, 'license': {'description': '`Project license `_.', 'oneOf': [{'properties': {'file': {'type': 'string', '$$description': ['Relative path to the file (UTF-8) which contains the license for the', 'project.']}}, 'required': ['file']}, {'properties': {'text': {'type': 'string', '$$description': ['The license of the project whose meaning is that of the', '`License field from the core metadata', '`_.']}}, 'required': ['text']}]}, 'authors': {'type': 'array', 'items': {'$id': '#/definitions/author', 'title': 'Author or Maintainer', '$comment': 'https://www.python.org/dev/peps/pep-0621/#authors-maintainers', 'type': 'object', 'properties': {'name': {'type': 'string', '$$description': ['MUST be a valid email name, i.e. whatever can be put as a name, before an', 'email, in :rfc:`822`.']}, 'email': {'type': 'string', 'format': 'idn-email', 'description': 'MUST be a valid email address'}}}, '$$description': ["The people or organizations considered to be the 'authors' of the project.", 'The exact meaning is open to interpretation (e.g. original or primary authors,', 'current maintainers, or owners of the package).']}, 'maintainers': {'type': 'array', 'items': {'$id': '#/definitions/author', 'title': 'Author or Maintainer', '$comment': 'https://www.python.org/dev/peps/pep-0621/#authors-maintainers', 'type': 'object', 'properties': {'name': {'type': 'string', '$$description': ['MUST be a valid email name, i.e. whatever can be put as a name, before an', 'email, in :rfc:`822`.']}, 'email': {'type': 'string', 'format': 'idn-email', 'description': 'MUST be a valid email address'}}}, '$$description': ["The people or organizations considered to be the 'maintainers' of the project.", 'Similarly to ``authors``, the exact meaning is open to interpretation.']}, 'keywords': {'type': 'array', 'items': {'type': 'string'}, 'description': 'List of keywords to assist searching for the distribution in a larger catalog.'}, 'classifiers': {'type': 'array', 'items': {'type': 'string', 'format': 'trove-classifier', 'description': '`PyPI classifier `_.'}, '$$description': ['`Trove classifiers `_', 'which apply to the project.']}, 'urls': {'type': 'object', 'description': 'URLs associated with the project in the form ``label => value``.', 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', 'format': 'url'}}}, 'scripts': {'$id': '#/definitions/entry-point-group', 'title': 'Entry-points', 'type': 'object', '$$description': ['Entry-points are grouped together to indicate what sort of capabilities they', 'provide.', 'See the `packaging guides', '`_', 'and `setuptools docs', '`_', 'for more information.'], 'propertyNames': {'format': 'python-entrypoint-name'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', '$$description': ['Reference to a Python object. 
It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}}}, 'gui-scripts': {'$id': '#/definitions/entry-point-group', 'title': 'Entry-points', 'type': 'object', '$$description': ['Entry-points are grouped together to indicate what sort of capabilities they', 'provide.', 'See the `packaging guides', '`_', 'and `setuptools docs', '`_', 'for more information.'], 'propertyNames': {'format': 'python-entrypoint-name'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', '$$description': ['Reference to a Python object. It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}}}, 'entry-points': {'$$description': ['Instruct the installer to expose the given modules/functions via', '``entry-point`` discovery mechanism (useful for plugins).', 'More information available in the `Python packaging guide', '`_.'], 'propertyNames': {'format': 'python-entrypoint-group'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'$id': '#/definitions/entry-point-group', 'title': 'Entry-points', 'type': 'object', '$$description': ['Entry-points are grouped together to indicate what sort of capabilities they', 'provide.', 'See the `packaging guides', '`_', 'and `setuptools docs', '`_', 'for more information.'], 'propertyNames': {'format': 'python-entrypoint-name'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', '$$description': ['Reference to a Python object. It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}}}}}, 'dependencies': {'type': 'array', 'description': 'Project (mandatory) dependencies.', 'items': {'$id': '#/definitions/dependency', 'title': 'Dependency', 'type': 'string', 'description': 'Project dependency specification according to PEP 508', 'format': 'pep508'}}, 'optional-dependencies': {'type': 'object', 'description': 'Optional dependency for the project', 'propertyNames': {'format': 'pep508-identifier'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'array', 'items': {'$id': '#/definitions/dependency', 'title': 'Dependency', 'type': 'string', 'description': 'Project dependency specification according to PEP 508', 'format': 'pep508'}}}}, 'dynamic': {'type': 'array', '$$description': ['Specifies which fields are intentionally unspecified and expected to be', 'dynamically provided by build tools'], 'items': {'enum': ['version', 'description', 'readme', 'requires-python', 'license', 'authors', 'maintainers', 'keywords', 'classifiers', 'urls', 'scripts', 'gui-scripts', 'entry-points', 'dependencies', 'optional-dependencies']}}}, 'required': ['name'], 'additionalProperties': False, 'if': {'not': {'required': ['dynamic'], 'properties': {'dynamic': {'contains': {'const': 'version'}, '$$description': ['version is listed in ``dynamic``']}}}, '$$comment': ['According to :pep:`621`:', ' If the core metadata specification lists a field as "Required", then', ' the metadata MUST specify the field statically or list it in dynamic', 'In turn, `core metadata`_ defines:', ' The required fields are: Metadata-Version, Name, Version.', ' All the other fields are optional.', 'Since 
``Metadata-Version`` is defined by the build back-end, ``name`` and', '``version`` are the only mandatory information in ``pyproject.toml``.', '.. _core metadata: https://packaging.python.org/specifications/core-metadata/']}, 'then': {'required': ['version'], '$$description': ['version should be statically defined in the ``version`` field']}, 'definitions': {'author': {'$id': '#/definitions/author', 'title': 'Author or Maintainer', '$comment': 'https://www.python.org/dev/peps/pep-0621/#authors-maintainers', 'type': 'object', 'properties': {'name': {'type': 'string', '$$description': ['MUST be a valid email name, i.e. whatever can be put as a name, before an', 'email, in :rfc:`822`.']}, 'email': {'type': 'string', 'format': 'idn-email', 'description': 'MUST be a valid email address'}}}, 'entry-point-group': {'$id': '#/definitions/entry-point-group', 'title': 'Entry-points', 'type': 'object', '$$description': ['Entry-points are grouped together to indicate what sort of capabilities they', 'provide.', 'See the `packaging guides', '`_', 'and `setuptools docs', '`_', 'for more information.'], 'propertyNames': {'format': 'python-entrypoint-name'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', '$$description': ['Reference to a Python object. It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}}}, 'dependency': {'$id': '#/definitions/dependency', 'title': 'Dependency', 'type': 'string', 'description': 'Project dependency specification according to PEP 508', 'format': 'pep508'}}}, rule='additionalProperties') - try: - try: - data_is_dict = isinstance(data, dict) - if data_is_dict: - data_len = len(data) - if not all(prop in data for prop in ['dynamic']): - raise JsonSchemaValueException("" + (name_prefix or "data") + " must contain ['dynamic'] properties", value=data, name="" + (name_prefix or "data") + "", definition={'required': ['dynamic'], 'properties': {'dynamic': {'contains': {'const': 'version'}, '$$description': ['version is listed in ``dynamic``']}}}, rule='required') - data_keys = set(data.keys()) - if "dynamic" in data_keys: - data_keys.remove("dynamic") - data__dynamic = data["dynamic"] - data__dynamic_is_list = isinstance(data__dynamic, (list, tuple)) - if data__dynamic_is_list: - data__dynamic_contains = False - for data__dynamic_key in data__dynamic: - try: - if data__dynamic_key != "version": - raise JsonSchemaValueException("" + (name_prefix or "data") + ".dynamic must be same as const definition: version", value=data__dynamic_key, name="" + (name_prefix or "data") + ".dynamic", definition={'const': 'version'}, rule='const') - data__dynamic_contains = True - break - except JsonSchemaValueException: pass - if not data__dynamic_contains: - raise JsonSchemaValueException("" + (name_prefix or "data") + ".dynamic must contain one of contains definition", value=data__dynamic, name="" + (name_prefix or "data") + ".dynamic", definition={'contains': {'const': 'version'}, '$$description': ['version is listed in ``dynamic``']}, rule='contains') - except JsonSchemaValueException: pass - else: - raise JsonSchemaValueException("" + (name_prefix or "data") + " must NOT match a disallowed definition", value=data, name="" + (name_prefix or "data") + "", definition={'not': {'required': ['dynamic'], 'properties': {'dynamic': {'contains': {'const': 'version'}, '$$description': ['version is listed in ``dynamic``']}}}, 
'$$comment': ['According to :pep:`621`:', ' If the core metadata specification lists a field as "Required", then', ' the metadata MUST specify the field statically or list it in dynamic', 'In turn, `core metadata`_ defines:', ' The required fields are: Metadata-Version, Name, Version.', ' All the other fields are optional.', 'Since ``Metadata-Version`` is defined by the build back-end, ``name`` and', '``version`` are the only mandatory information in ``pyproject.toml``.', '.. _core metadata: https://packaging.python.org/specifications/core-metadata/']}, rule='not') - except JsonSchemaValueException: - pass - else: - data_is_dict = isinstance(data, dict) - if data_is_dict: - data_len = len(data) - if not all(prop in data for prop in ['version']): - raise JsonSchemaValueException("" + (name_prefix or "data") + " must contain ['version'] properties", value=data, name="" + (name_prefix or "data") + "", definition={'required': ['version'], '$$description': ['version should be statically defined in the ``version`` field']}, rule='required') - return data - -def validate_https___packaging_python_org_en_latest_specifications_declaring_project_metadata___definitions_dependency(data, custom_formats={}, name_prefix=None): - if not isinstance(data, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + " must be string", value=data, name="" + (name_prefix or "data") + "", definition={'$id': '#/definitions/dependency', 'title': 'Dependency', 'type': 'string', 'description': 'Project dependency specification according to PEP 508', 'format': 'pep508'}, rule='type') - if isinstance(data, str): - if not custom_formats["pep508"](data): - raise JsonSchemaValueException("" + (name_prefix or "data") + " must be pep508", value=data, name="" + (name_prefix or "data") + "", definition={'$id': '#/definitions/dependency', 'title': 'Dependency', 'type': 'string', 'description': 'Project dependency specification according to PEP 508', 'format': 'pep508'}, rule='format') - return data - -def validate_https___packaging_python_org_en_latest_specifications_declaring_project_metadata___definitions_entry_point_group(data, custom_formats={}, name_prefix=None): - if not isinstance(data, (dict)): - raise JsonSchemaValueException("" + (name_prefix or "data") + " must be object", value=data, name="" + (name_prefix or "data") + "", definition={'$id': '#/definitions/entry-point-group', 'title': 'Entry-points', 'type': 'object', '$$description': ['Entry-points are grouped together to indicate what sort of capabilities they', 'provide.', 'See the `packaging guides', '`_', 'and `setuptools docs', '`_', 'for more information.'], 'propertyNames': {'format': 'python-entrypoint-name'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', '$$description': ['Reference to a Python object. 
It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}}}, rule='type') - data_is_dict = isinstance(data, dict) - if data_is_dict: - data_keys = set(data.keys()) - for data_key, data_val in data.items(): - if REGEX_PATTERNS['^.+$'].search(data_key): - if data_key in data_keys: - data_keys.remove(data_key) - if not isinstance(data_val, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".{data_key}".format(**locals()) + " must be string", value=data_val, name="" + (name_prefix or "data") + ".{data_key}".format(**locals()) + "", definition={'type': 'string', '$$description': ['Reference to a Python object. It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}, rule='type') - if isinstance(data_val, str): - if not custom_formats["python-entrypoint-reference"](data_val): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".{data_key}".format(**locals()) + " must be python-entrypoint-reference", value=data_val, name="" + (name_prefix or "data") + ".{data_key}".format(**locals()) + "", definition={'type': 'string', '$$description': ['Reference to a Python object. It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}, rule='format') - if data_keys: - raise JsonSchemaValueException("" + (name_prefix or "data") + " must not contain "+str(data_keys)+" properties", value=data, name="" + (name_prefix or "data") + "", definition={'$id': '#/definitions/entry-point-group', 'title': 'Entry-points', 'type': 'object', '$$description': ['Entry-points are grouped together to indicate what sort of capabilities they', 'provide.', 'See the `packaging guides', '`_', 'and `setuptools docs', '`_', 'for more information.'], 'propertyNames': {'format': 'python-entrypoint-name'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', '$$description': ['Reference to a Python object. 
It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}}}, rule='additionalProperties') - data_len = len(data) - if data_len != 0: - data_property_names = True - for data_key in data: - try: - if isinstance(data_key, str): - if not custom_formats["python-entrypoint-name"](data_key): - raise JsonSchemaValueException("" + (name_prefix or "data") + " must be python-entrypoint-name", value=data_key, name="" + (name_prefix or "data") + "", definition={'format': 'python-entrypoint-name'}, rule='format') - except JsonSchemaValueException: - data_property_names = False - if not data_property_names: - raise JsonSchemaValueException("" + (name_prefix or "data") + " must be named by propertyName definition", value=data, name="" + (name_prefix or "data") + "", definition={'$id': '#/definitions/entry-point-group', 'title': 'Entry-points', 'type': 'object', '$$description': ['Entry-points are grouped together to indicate what sort of capabilities they', 'provide.', 'See the `packaging guides', '`_', 'and `setuptools docs', '`_', 'for more information.'], 'propertyNames': {'format': 'python-entrypoint-name'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', '$$description': ['Reference to a Python object. It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}}}, rule='propertyNames') - return data - -def validate_https___packaging_python_org_en_latest_specifications_declaring_project_metadata___definitions_author(data, custom_formats={}, name_prefix=None): - if not isinstance(data, (dict)): - raise JsonSchemaValueException("" + (name_prefix or "data") + " must be object", value=data, name="" + (name_prefix or "data") + "", definition={'$id': '#/definitions/author', 'title': 'Author or Maintainer', '$comment': 'https://www.python.org/dev/peps/pep-0621/#authors-maintainers', 'type': 'object', 'properties': {'name': {'type': 'string', '$$description': ['MUST be a valid email name, i.e. whatever can be put as a name, before an', 'email, in :rfc:`822`.']}, 'email': {'type': 'string', 'format': 'idn-email', 'description': 'MUST be a valid email address'}}}, rule='type') - data_is_dict = isinstance(data, dict) - if data_is_dict: - data_keys = set(data.keys()) - if "name" in data_keys: - data_keys.remove("name") - data__name = data["name"] - if not isinstance(data__name, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".name must be string", value=data__name, name="" + (name_prefix or "data") + ".name", definition={'type': 'string', '$$description': ['MUST be a valid email name, i.e. 
whatever can be put as a name, before an', 'email, in :rfc:`822`.']}, rule='type') - if "email" in data_keys: - data_keys.remove("email") - data__email = data["email"] - if not isinstance(data__email, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".email must be string", value=data__email, name="" + (name_prefix or "data") + ".email", definition={'type': 'string', 'format': 'idn-email', 'description': 'MUST be a valid email address'}, rule='type') - if isinstance(data__email, str): - if not REGEX_PATTERNS["idn-email_re_pattern"].match(data__email): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".email must be idn-email", value=data__email, name="" + (name_prefix or "data") + ".email", definition={'type': 'string', 'format': 'idn-email', 'description': 'MUST be a valid email address'}, rule='format') - return data \ No newline at end of file diff --git a/venv/lib/python3.10/site-packages/setuptools/config/_validate_pyproject/formats.py b/venv/lib/python3.10/site-packages/setuptools/config/_validate_pyproject/formats.py deleted file mode 100644 index 638ac11..0000000 --- a/venv/lib/python3.10/site-packages/setuptools/config/_validate_pyproject/formats.py +++ /dev/null @@ -1,259 +0,0 @@ -import logging -import os -import re -import string -import typing -from itertools import chain as _chain - -_logger = logging.getLogger(__name__) - -# ------------------------------------------------------------------------------------- -# PEP 440 - -VERSION_PATTERN = r""" - v? - (?: - (?:(?P<epoch>[0-9]+)!)? # epoch - (?P<release>[0-9]+(?:\.[0-9]+)*) # release segment - (?P<pre> # pre-release
-            [-_\.]?
-            (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
-            [-_\.]?
-            (?P<pre_n>[0-9]+)?
-        )?
-        (?P<post>                                         # post release
-            (?:-(?P<post_n1>[0-9]+))
-            |
-            (?:
-                [-_\.]?
-                (?P<post_l>post|rev|r)
-                [-_\.]?
-                (?P<post_n2>[0-9]+)?
-            )
-        )?
-        (?P<dev>                                          # dev release
-            [-_\.]?
-            (?P<dev_l>dev)
-            [-_\.]?
-            (?P<dev_n>[0-9]+)?
-        )?
-    )
-    (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
-"""
-
-VERSION_REGEX = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.X | re.I)
-
-
-def pep440(version: str) -> bool:
-    return VERSION_REGEX.match(version) is not None
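
A quick usage sketch for the deleted helper above, assuming a setuptools build that still vendors this module at the path shown in the diff header:

from setuptools.config._validate_pyproject import formats

for candidate in ["1.0", "2.0.0rc1", "1!2.3.post4.dev5+local", "not-a-version"]:
    print(candidate, "->", formats.pep440(candidate))
# The first three are valid PEP 440 versions; the last one prints False.
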
-
-
-# -------------------------------------------------------------------------------------
-# PEP 508
-
-PEP508_IDENTIFIER_PATTERN = r"([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])"
-PEP508_IDENTIFIER_REGEX = re.compile(f"^{PEP508_IDENTIFIER_PATTERN}$", re.I)
-
-
-def pep508_identifier(name: str) -> bool:
-    return PEP508_IDENTIFIER_REGEX.match(name) is not None
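
The identifier pattern above encodes the PEP 508 naming rule: a project name must start and end with a letter or digit, with dots, underscores and hyphens allowed only in between. A self-contained sketch of the same check:

import re

PEP508_IDENTIFIER_REGEX = re.compile(
    r"^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$", re.I
)

print(bool(PEP508_IDENTIFIER_REGEX.match("my-package")))   # True
print(bool(PEP508_IDENTIFIER_REGEX.match("my-package-")))  # False: trailing hyphen
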
-
-
-try:
-    try:
-        from packaging import requirements as _req
-    except ImportError:  # pragma: no cover
-        # let's try setuptools vendored version
-        from setuptools._vendor.packaging import requirements as _req  # type: ignore
-
-    def pep508(value: str) -> bool:
-        try:
-            _req.Requirement(value)
-            return True
-        except _req.InvalidRequirement:
-            return False
-
-except ImportError:  # pragma: no cover
-    _logger.warning(
-        "Could not find an installation of `packaging`. Requirements, dependencies and "
-        "versions might not be validated. "
-        "To enforce validation, please install `packaging`."
-    )
-
-    def pep508(value: str) -> bool:
-        return True
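
Outside the vendored code, the same dependency check can be reproduced directly with the `packaging` distribution; this sketch mirrors the try/except branch above (`is_pep508` is a hypothetical name):

from packaging.requirements import InvalidRequirement, Requirement

def is_pep508(value: str) -> bool:
    try:
        Requirement(value)
        return True
    except InvalidRequirement:
        return False

print(is_pep508("requests>=2.28,<3"))  # True
print(is_pep508("requests >== 2"))     # False: malformed specifier
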
-
-
-def pep508_versionspec(value: str) -> bool:
-    """Expression that can be used to specify/lock versions (including ranges)"""
-    if any(c in value for c in (";", "]", "@")):
-        # In PEP 508:
-        # conditional markers, extras and URL specs are not included in the
-        # versionspec
-        return False
-    # Let's pretend we have a dependency called `requirement` with the given
-    # version spec, then we can re-use the pep508 function for validation:
-    return pep508(f"requirement{value}")
-
-
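The dummy-name trick used by `pep508_versionspec` can be shown in isolation (a sketch under the same assumption that `packaging` is installed):

    from packaging.requirements import InvalidRequirement, Requirement

    def is_pep508(value: str) -> bool:
        try:
            Requirement(value)
            return True
        except InvalidRequirement:
            return False

    # A bare version specifier is validated by gluing it onto a placeholder
    # name, exactly as pep508_versionspec does above.
    print(is_pep508("requests>=2.28,<3"))           # True
    print(is_pep508("requests =="))                 # False (dangling operator)
    print(is_pep508("requirement" + ">=1.0,<2.0"))  # True -> valid versionspec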
-# -------------------------------------------------------------------------------------
-# PEP 517
-
-
-def pep517_backend_reference(value: str) -> bool:
-    module, _, obj = value.partition(":")
-    identifiers = (i.strip() for i in _chain(module.split("."), obj.split(".")))
-    return all(python_identifier(i) for i in identifiers if i)
-
-
-# -------------------------------------------------------------------------------------
-# Classifiers - PEP 301
-
-
-def _download_classifiers() -> str:
-    import ssl
-    from email.message import Message
-    from urllib.request import urlopen
-
-    url = "https://pypi.org/pypi?:action=list_classifiers"
-    context = ssl.create_default_context()
-    with urlopen(url, context=context) as response:
-        headers = Message()
-        headers["content_type"] = response.getheader("content-type", "text/plain")
-        return response.read().decode(headers.get_param("charset", "utf-8"))
-
-
-class _TroveClassifier:
-    """The ``trove_classifiers`` package is the official way of validating classifiers,
-    however this package might not be always available.
-    As a workaround we can still download a list from PyPI.
-    We also don't want to be over strict about it, so simply skipping silently is an
-    option (classifiers will be validated anyway during the upload to PyPI).
-    """
-
-    def __init__(self):
-        self.downloaded: typing.Union[None, False, typing.Set[str]] = None
-        self._skip_download = False
-        # None => not cached yet
-        # False => cache not available
-        self.__name__ = "trove_classifier"  # Emulate a public function
-
-    def _disable_download(self):
-        # This is a private API. Only setuptools has the consent of using it.
-        self._skip_download = True
-
-    def __call__(self, value: str) -> bool:
-        if self.downloaded is False or self._skip_download is True:
-            return True
-
-        if os.getenv("NO_NETWORK") or os.getenv("VALIDATE_PYPROJECT_NO_NETWORK"):
-            self.downloaded = False
-            msg = (
-                "Install ``trove-classifiers`` to ensure proper validation. "
-                "Skipping download of classifiers list from PyPI (NO_NETWORK)."
-            )
-            _logger.debug(msg)
-            return True
-
-        if self.downloaded is None:
-            msg = (
-                "Install ``trove-classifiers`` to ensure proper validation. "
-                "Meanwhile a list of classifiers will be downloaded from PyPI."
-            )
-            _logger.debug(msg)
-            try:
-                self.downloaded = set(_download_classifiers().splitlines())
-            except Exception:
-                self.downloaded = False
-                _logger.debug("Problem with download, skipping validation")
-                return True
-
-        return value in self.downloaded or value.lower().startswith("private ::")
-
-
-try:
-    from trove_classifiers import classifiers as _trove_classifiers
-
-    def trove_classifier(value: str) -> bool:
-        return value in _trove_classifiers or value.lower().startswith("private ::")
-
-except ImportError:  # pragma: no cover
-    trove_classifier = _TroveClassifier()
-
-
-# -------------------------------------------------------------------------------------
-# Non-PEP related
-
-
-def url(value: str) -> bool:
-    from urllib.parse import urlparse
-
-    try:
-        parts = urlparse(value)
-        if not parts.scheme:
-            _logger.warning(
-                "For maximum compatibility please make sure to include a "
-                "`scheme` prefix in your URL (e.g. 'http://'). "
-                f"Given value: {value}"
-            )
-            if not (value.startswith("/") or value.startswith("\\") or "@" in value):
-                parts = urlparse(f"http://{value}")
-
-        return bool(parts.scheme and parts.netloc)
-    except Exception:
-        return False
-
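A runnable illustration of the scheme/netloc test `url` performs (standard library only; output comments show the expected results):

    from urllib.parse import urlparse

    for candidate in ("https://example.com/docs", "example.com/docs", "/usr/local"):
        parts = urlparse(candidate)
        print(candidate, "->", bool(parts.scheme and parts.netloc))
    # https://example.com/docs -> True
    # example.com/docs -> False (no scheme; url() above retries with 'http://')
    # /usr/local -> False (looks like a filesystem path, not a URL)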
-
-# https://packaging.python.org/specifications/entry-points/
-ENTRYPOINT_PATTERN = r"[^\[\s=]([^=]*[^\s=])?"
-ENTRYPOINT_REGEX = re.compile(f"^{ENTRYPOINT_PATTERN}$", re.I)
-RECOMMEDED_ENTRYPOINT_PATTERN = r"[\w.-]+"
-RECOMMEDED_ENTRYPOINT_REGEX = re.compile(f"^{RECOMMEDED_ENTRYPOINT_PATTERN}$", re.I)
-ENTRYPOINT_GROUP_PATTERN = r"\w+(\.\w+)*"
-ENTRYPOINT_GROUP_REGEX = re.compile(f"^{ENTRYPOINT_GROUP_PATTERN}$", re.I)
-
-
-def python_identifier(value: str) -> bool:
-    return value.isidentifier()
-
-
-def python_qualified_identifier(value: str) -> bool:
-    if value.startswith(".") or value.endswith("."):
-        return False
-    return all(python_identifier(m) for m in value.split("."))
-
-
-def python_module_name(value: str) -> bool:
-    return python_qualified_identifier(value)
-
-
-def python_entrypoint_group(value: str) -> bool:
-    return ENTRYPOINT_GROUP_REGEX.match(value) is not None
-
-
-def python_entrypoint_name(value: str) -> bool:
-    if not ENTRYPOINT_REGEX.match(value):
-        return False
-    if not RECOMMEDED_ENTRYPOINT_REGEX.match(value):
-        msg = f"Entry point `{value}` does not follow recommended pattern: "
-        msg += RECOMMEDED_ENTRYPOINT_PATTERN
-        _logger.warning(msg)
-    return True
-
-
-def python_entrypoint_reference(value: str) -> bool:
-    module, _, rest = value.partition(":")
-    if "[" in rest:
-        obj, _, extras_ = rest.partition("[")
-        if extras_.strip()[-1] != "]":
-            return False
-        extras = (x.strip() for x in extras_.strip(string.whitespace + "[]").split(","))
-        if not all(pep508_identifier(e) for e in extras):
-            return False
-        _logger.warning(f"`{value}` - using extras for entry points is not recommended")
-    else:
-        obj = rest
-
-    module_parts = module.split(".")
-    identifiers = _chain(module_parts, obj.split(".")) if rest else module_parts
-    return all(python_identifier(i.strip()) for i in identifiers)
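To make the split logic of `python_entrypoint_reference` concrete, here is a simplified, self-contained restatement (extras validation omitted; names are hypothetical):

    from itertools import chain

    def valid_reference(value: str) -> bool:
        # "pkg.module:obj.attr" -> every dotted part must be a Python identifier.
        module, _, rest = value.partition(":")
        obj = rest.partition("[")[0]
        parts = chain(module.split("."), obj.split(".")) if rest else module.split(".")
        return all(p.strip().isidentifier() for p in parts)

    print(valid_reference("mypkg.cli:main"))         # True
    print(valid_reference("mypkg.cli:App.factory"))  # True
    print(valid_reference("my-pkg.cli:main"))        # False ('-' not an identifier)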
diff --git a/venv/lib/python3.10/site-packages/setuptools/config/expand.py b/venv/lib/python3.10/site-packages/setuptools/config/expand.py
deleted file mode 100644
index c8db2c4..0000000
--- a/venv/lib/python3.10/site-packages/setuptools/config/expand.py
+++ /dev/null
@@ -1,462 +0,0 @@
-"""Utility functions to expand configuration directives or special values
-(such glob patterns).
-
-We can split the process of interpreting configuration files into 2 steps:
-
-1. Parsing the file contents from strings into value objects
-   that can be understood by Python (for example, a string with a comma-
-   separated list of keywords into an actual Python list of strings).
-
-2. The expansion (or post-processing) of these values according to the
-   semantics ``setuptools`` assigns to them (for example, a configuration field
-   with the ``file:`` directive should be expanded from a list of file paths to
-   a single string with the contents of those files concatenated).
-
-This module focuses on the second step, and therefore allows sharing the
-expansion functions among several configuration file formats.
-
-**PRIVATE MODULE**: API reserved for setuptools internal usage only.
-"""
-import ast
-import importlib
-import io
-import os
-import pathlib
-import sys
-import warnings
-from glob import iglob
-from configparser import ConfigParser
-from importlib.machinery import ModuleSpec
-from itertools import chain
-from typing import (
-    TYPE_CHECKING,
-    Callable,
-    Dict,
-    Iterable,
-    Iterator,
-    List,
-    Mapping,
-    Optional,
-    Tuple,
-    TypeVar,
-    Union,
-    cast
-)
-from pathlib import Path
-from types import ModuleType
-
-from distutils.errors import DistutilsOptionError
-
-from .._path import same_path as _same_path
-
-if TYPE_CHECKING:
-    from setuptools.dist import Distribution  # noqa
-    from setuptools.discovery import ConfigDiscovery  # noqa
-    from distutils.dist import DistributionMetadata  # noqa
-
-chain_iter = chain.from_iterable
-_Path = Union[str, os.PathLike]
-_K = TypeVar("_K")
-_V = TypeVar("_V", covariant=True)
-
-
-class StaticModule:
-    """Proxy to a module object that avoids executing arbitrary code."""
-
-    def __init__(self, name: str, spec: ModuleSpec):
-        module = ast.parse(pathlib.Path(spec.origin).read_bytes())
-        vars(self).update(locals())
-        del self.self
-
-    def _find_assignments(self) -> Iterator[Tuple[ast.AST, ast.AST]]:
-        for statement in self.module.body:
-            if isinstance(statement, ast.Assign):
-                yield from ((target, statement.value) for target in statement.targets)
-            elif isinstance(statement, ast.AnnAssign) and statement.value:
-                yield (statement.target, statement.value)
-
-    def __getattr__(self, attr):
-        """Attempt to load an attribute "statically", via :func:`ast.literal_eval`."""
-        try:
-            return next(
-                ast.literal_eval(value)
-                for target, value in self._find_assignments()
-                if isinstance(target, ast.Name) and target.id == attr
-            )
-        except Exception as e:
-            raise AttributeError(f"{self.name} has no attribute {attr}") from e
-
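The core technique behind ``StaticModule``, shown in isolation: module-level constants are read from source text without importing (and therefore without executing) the module. A minimal self-contained sketch:

    import ast

    source = "VERSION = '1.2.3'\nDEBUG = False\n"
    tree = ast.parse(source)
    constants = {
        target.id: ast.literal_eval(node.value)
        for node in tree.body
        if isinstance(node, ast.Assign)
        for target in node.targets
        if isinstance(target, ast.Name)
    }
    print(constants)  # {'VERSION': '1.2.3', 'DEBUG': False}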
-
-def glob_relative(
-    patterns: Iterable[str], root_dir: Optional[_Path] = None
-) -> List[str]:
-    """Expand the list of glob patterns, but preserving relative paths.
-
-    :param list[str] patterns: List of glob patterns
-    :param str root_dir: Path to which globs should be relative
-                         (current directory by default)
-    :rtype: list
-    """
-    glob_characters = {'*', '?', '[', ']', '{', '}'}
-    expanded_values = []
-    root_dir = root_dir or os.getcwd()
-    for value in patterns:
-
-        # Has globby characters?
-        if any(char in value for char in glob_characters):
-            # then expand the glob pattern while keeping paths *relative*:
-            glob_path = os.path.abspath(os.path.join(root_dir, value))
-            expanded_values.extend(sorted(
-                os.path.relpath(path, root_dir).replace(os.sep, "/")
-                for path in iglob(glob_path, recursive=True)))
-
-        else:
-            # take the value as-is
-            path = os.path.relpath(value, root_dir).replace(os.sep, "/")
-            expanded_values.append(path)
-
-    return expanded_values
-
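The essential move in `glob_relative`, standalone: expand an absolute glob, then report matches relative to the root with forward slashes. A sketch run against the current directory (output depends on its contents):

    import os
    from glob import iglob

    root_dir = os.getcwd()
    pattern = os.path.abspath(os.path.join(root_dir, "**", "*.py"))
    matches = sorted(
        os.path.relpath(p, root_dir).replace(os.sep, "/")
        for p in iglob(pattern, recursive=True)
    )
    print(matches[:5])  # e.g. ['pkg/__init__.py', 'setup.py', ...]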
-
-def read_files(filepaths: Union[str, bytes, Iterable[_Path]], root_dir=None) -> str:
-    """Return the content of the files concatenated using ``\n`` as str
-
-    This function is sandboxed and won't reach anything outside ``root_dir``
-
-    (By default ``root_dir`` is the current directory).
-    """
-    from setuptools.extern.more_itertools import always_iterable
-
-    root_dir = os.path.abspath(root_dir or os.getcwd())
-    _filepaths = (os.path.join(root_dir, path) for path in always_iterable(filepaths))
-    return '\n'.join(
-        _read_file(path)
-        for path in _filter_existing_files(_filepaths)
-        if _assert_local(path, root_dir)
-    )
-
-
-def _filter_existing_files(filepaths: Iterable[_Path]) -> Iterator[_Path]:
-    for path in filepaths:
-        if os.path.isfile(path):
-            yield path
-        else:
-            warnings.warn(f"File {path!r} cannot be found")
-
-
-def _read_file(filepath: Union[bytes, _Path]) -> str:
-    with io.open(filepath, encoding='utf-8') as f:
-        return f.read()
-
-
-def _assert_local(filepath: _Path, root_dir: str):
-    if Path(os.path.abspath(root_dir)) not in Path(os.path.abspath(filepath)).parents:
-        msg = f"Cannot access {filepath!r} (or anything outside {root_dir!r})"
-        raise DistutilsOptionError(msg)
-
-    return True
-
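The containment test `_assert_local` relies on, restated as a small predicate (hypothetical paths; POSIX-style for brevity):

    import os
    from pathlib import Path

    def is_local(filepath: str, root_dir: str) -> bool:
        # Same check as _assert_local: root_dir must be an ancestor of filepath
        # once both are made absolute (so '..' escapes are caught).
        return Path(os.path.abspath(root_dir)) in Path(os.path.abspath(filepath)).parents

    print(is_local("/project/README.md", "/project"))      # True
    print(is_local("/project/../etc/passwd", "/project"))  # False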
-
-def read_attr(
-    attr_desc: str,
-    package_dir: Optional[Mapping[str, str]] = None,
-    root_dir: Optional[_Path] = None
-):
-    """Reads the value of an attribute from a module.
-
-    This function will try to read the attribute statically first
-    (via :func:`ast.literal_eval`), and only evaluate the module if it fails.
-
-    Examples:
-        read_attr("package.attr")
-        read_attr("package.module.attr")
-
-    :param str attr_desc: Dot-separated string describing how to reach the
-        attribute (see examples above)
-    :param dict[str, str] package_dir: Mapping of package names to their
-        location on disk (represented by paths relative to ``root_dir``).
-    :param str root_dir: Path to directory containing all the packages in
-        ``package_dir`` (current directory by default).
-    :rtype: str
-    """
-    root_dir = root_dir or os.getcwd()
-    attrs_path = attr_desc.strip().split('.')
-    attr_name = attrs_path.pop()
-    module_name = '.'.join(attrs_path)
-    module_name = module_name or '__init__'
-    _parent_path, path, module_name = _find_module(module_name, package_dir, root_dir)
-    spec = _find_spec(module_name, path)
-
-    try:
-        return getattr(StaticModule(module_name, spec), attr_name)
-    except Exception:
-        # fallback to evaluate module
-        module = _load_spec(spec, module_name)
-        return getattr(module, attr_name)
-
-
-def _find_spec(module_name: str, module_path: Optional[_Path]) -> ModuleSpec:
-    spec = importlib.util.spec_from_file_location(module_name, module_path)
-    spec = spec or importlib.util.find_spec(module_name)
-
-    if spec is None:
-        raise ModuleNotFoundError(module_name)
-
-    return spec
-
-
-def _load_spec(spec: ModuleSpec, module_name: str) -> ModuleType:
-    name = getattr(spec, "__name__", module_name)
-    if name in sys.modules:
-        return sys.modules[name]
-    module = importlib.util.module_from_spec(spec)
-    sys.modules[name] = module  # cache (it also ensures `==` works on loaded items)
-    spec.loader.exec_module(module)  # type: ignore
-    return module
-
-
-def _find_module(
-    module_name: str, package_dir: Optional[Mapping[str, str]], root_dir: _Path
-) -> Tuple[_Path, Optional[str], str]:
-    """Given a module (that could normally be imported by ``module_name``
-    after the build is complete), find the path to the parent directory where
-    it is contained and the canonical name that could be used to import it
-    considering the ``package_dir`` in the build configuration and ``root_dir``
-    """
-    parent_path = root_dir
-    module_parts = module_name.split('.')
-    if package_dir:
-        if module_parts[0] in package_dir:
-            # A custom path was specified for the module we want to import
-            custom_path = package_dir[module_parts[0]]
-            parts = custom_path.rsplit('/', 1)
-            if len(parts) > 1:
-                parent_path = os.path.join(root_dir, parts[0])
-                parent_module = parts[1]
-            else:
-                parent_module = custom_path
-            module_name = ".".join([parent_module, *module_parts[1:]])
-        elif '' in package_dir:
-            # A custom parent directory was specified for all root modules
-            parent_path = os.path.join(root_dir, package_dir[''])
-
-    path_start = os.path.join(parent_path, *module_name.split("."))
-    candidates = chain(
-        (f"{path_start}.py", os.path.join(path_start, "__init__.py")),
-        iglob(f"{path_start}.*")
-    )
-    module_path = next((x for x in candidates if os.path.isfile(x)), None)
-    return parent_path, module_path, module_name
-
-
-def resolve_class(
-    qualified_class_name: str,
-    package_dir: Optional[Mapping[str, str]] = None,
-    root_dir: Optional[_Path] = None
-) -> Callable:
-    """Given a qualified class name, return the associated class object"""
-    root_dir = root_dir or os.getcwd()
-    idx = qualified_class_name.rfind('.')
-    class_name = qualified_class_name[idx + 1 :]
-    pkg_name = qualified_class_name[:idx]
-
-    _parent_path, path, module_name = _find_module(pkg_name, package_dir, root_dir)
-    module = _load_spec(_find_spec(module_name, path), module_name)
-    return getattr(module, class_name)
-
-
-def cmdclass(
-    values: Dict[str, str],
-    package_dir: Optional[Mapping[str, str]] = None,
-    root_dir: Optional[_Path] = None
-) -> Dict[str, Callable]:
-    """Given a dictionary mapping command names to strings for qualified class
-    names, apply :func:`resolve_class` to the dict values.
-    """
-    return {k: resolve_class(v, package_dir, root_dir) for k, v in values.items()}
-
-
-def find_packages(
-    *,
-    namespaces=True,
-    fill_package_dir: Optional[Dict[str, str]] = None,
-    root_dir: Optional[_Path] = None,
-    **kwargs
-) -> List[str]:
-    """Works similarly to :func:`setuptools.find_packages`, but with all
-    arguments given as keyword arguments. Moreover, ``where`` can be given
-    as a list (the results will be simply concatenated).
-
-    When the additional keyword argument ``namespaces`` is ``True``, it will
-    behave like :func:`setuptools.find_namespace_packages` (i.e. include
-    implicit namespaces as per :pep:`420`).
-
-    The ``where`` argument will be considered relative to ``root_dir`` (or the current
-    working directory when ``root_dir`` is not given).
-
-    If the ``fill_package_dir`` argument is passed, this function will consider it as a
-    data structure similar to the ``package_dir`` configuration parameter and fill in
-    any missing package locations.
-
-    :rtype: list
-    """
-    from setuptools.discovery import construct_package_dir
-    from setuptools.extern.more_itertools import unique_everseen, always_iterable
-
-    if namespaces:
-        from setuptools.discovery import PEP420PackageFinder as PackageFinder
-    else:
-        from setuptools.discovery import PackageFinder  # type: ignore
-
-    root_dir = root_dir or os.curdir
-    where = kwargs.pop('where', ['.'])
-    packages: List[str] = []
-    fill_package_dir = {} if fill_package_dir is None else fill_package_dir
-    search = list(unique_everseen(always_iterable(where)))
-
-    if len(search) == 1 and all(not _same_path(search[0], x) for x in (".", root_dir)):
-        fill_package_dir.setdefault("", search[0])
-
-    for path in search:
-        package_path = _nest_path(root_dir, path)
-        pkgs = PackageFinder.find(package_path, **kwargs)
-        packages.extend(pkgs)
-        if pkgs and not (
-            fill_package_dir.get("") == path
-            or os.path.samefile(package_path, root_dir)
-        ):
-            fill_package_dir.update(construct_package_dir(pkgs, path))
-
-    return packages
-
-
-def _nest_path(parent: _Path, path: _Path) -> str:
-    path = parent if path in {".", ""} else os.path.join(parent, path)
-    return os.path.normpath(path)
-
-
-def version(value: Union[Callable, Iterable[Union[str, int]], str]) -> str:
-    """When getting the version directly from an attribute,
-    it should be normalised to string.
-    """
-    if callable(value):
-        value = value()
-
-    value = cast(Iterable[Union[str, int]], value)
-
-    if not isinstance(value, str):
-        if hasattr(value, '__iter__'):
-            value = '.'.join(map(str, value))
-        else:
-            value = '%s' % value
-
-    return value
-
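The normalisation rules above in a standalone form: callables are called, iterables are dot-joined, and anything else is coerced to str. A minimal sketch:

    def normalise_version(value):
        if callable(value):
            value = value()
        if not isinstance(value, str) and hasattr(value, "__iter__"):
            return ".".join(map(str, value))
        return str(value)

    print(normalise_version((1, 2, 3)))      # 1.2.3
    print(normalise_version(lambda: "4.5"))  # 4.5
    print(normalise_version(6))              # 6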
-
-def canonic_package_data(package_data: dict) -> dict:
-    if "*" in package_data:
-        package_data[""] = package_data.pop("*")
-    return package_data
-
-
-def canonic_data_files(
-    data_files: Union[list, dict], root_dir: Optional[_Path] = None
-) -> List[Tuple[str, List[str]]]:
-    """For compatibility with ``setup.py``, ``data_files`` should be a list
-    of pairs instead of a dict.
-
-    This function also expands glob patterns.
-    """
-    if isinstance(data_files, list):
-        return data_files
-
-    return [
-        (dest, glob_relative(patterns, root_dir))
-        for dest, patterns in data_files.items()
-    ]
-
-
-def entry_points(text: str, text_source="entry-points") -> Dict[str, dict]:
-    """Given the contents of entry-points file,
-    process it into a 2-level dictionary (``dict[str, dict[str, str]]``).
-    The first level keys are entry-point groups, the second level keys are
-    entry-point names, and the second level values are references to objects
-    (that correspond to the entry-point value).
-    """
-    parser = ConfigParser(default_section=None, delimiters=("=",))  # type: ignore
-    parser.optionxform = str  # case sensitive
-    parser.read_string(text, text_source)
-    groups = {k: dict(v.items()) for k, v in parser.items()}
-    groups.pop(parser.default_section, None)
-    return groups
-
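The same ConfigParser configuration applied to a small entry-points payload (group and names are hypothetical); this mirrors the function above step by step:

    from configparser import ConfigParser

    text = (
        "[console_scripts]\n"
        "mycmd = mypkg.cli:main\n"
        "\n"
        "[myplugin.hooks]\n"
        "Default = mypkg.hooks:Default\n"
    )
    parser = ConfigParser(default_section=None, delimiters=("=",))
    parser.optionxform = str  # keep entry-point names case sensitive
    parser.read_string(text, "entry-points")
    groups = {k: dict(v.items()) for k, v in parser.items()}
    groups.pop(parser.default_section, None)  # drop the (empty) default section
    print(groups)
    # {'console_scripts': {'mycmd': 'mypkg.cli:main'},
    #  'myplugin.hooks': {'Default': 'mypkg.hooks:Default'}}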
-
-class EnsurePackagesDiscovered:
-    """Some expand functions require all the packages to already be discovered before
-    they run, e.g. :func:`read_attr`, :func:`resolve_class`, :func:`cmdclass`.
-
-    Therefore in some cases we will need to run autodiscovery during the evaluation of
-    the configuration. However, it is better to postpone calling package discovery as
-    much as possible, because some parameters can influence it (e.g. ``package_dir``),
-    and those might not have been processed yet.
-    """
-
-    def __init__(self, distribution: "Distribution"):
-        self._dist = distribution
-        self._called = False
-
-    def __call__(self):
-        """Trigger the automatic package discovery, if it is still necessary."""
-        if not self._called:
-            self._called = True
-            self._dist.set_defaults(name=False)  # Skip name, we can still be parsing
-
-    def __enter__(self):
-        return self
-
-    def __exit__(self, _exc_type, _exc_value, _traceback):
-        if self._called:
-            self._dist.set_defaults.analyse_name()  # Now we can set a default name
-
-    def _get_package_dir(self) -> Mapping[str, str]:
-        self()
-        pkg_dir = self._dist.package_dir
-        return {} if pkg_dir is None else pkg_dir
-
-    @property
-    def package_dir(self) -> Mapping[str, str]:
-        """Proxy to ``package_dir`` that may trigger auto-discovery when used."""
-        return LazyMappingProxy(self._get_package_dir)
-
-
-class LazyMappingProxy(Mapping[_K, _V]):
-    """Mapping proxy that delays resolving the target object, until really needed.
-
-    >>> def obtain_mapping():
-    ...     print("Running expensive function!")
-    ...     return {"key": "value", "other key": "other value"}
-    >>> mapping = LazyMappingProxy(obtain_mapping)
-    >>> mapping["key"]
-    Running expensive function!
-    'value'
-    >>> mapping["other key"]
-    'other value'
-    """
-
-    def __init__(self, obtain_mapping_value: Callable[[], Mapping[_K, _V]]):
-        self._obtain = obtain_mapping_value
-        self._value: Optional[Mapping[_K, _V]] = None
-
-    def _target(self) -> Mapping[_K, _V]:
-        if self._value is None:
-            self._value = self._obtain()
-        return self._value
-
-    def __getitem__(self, key: _K) -> _V:
-        return self._target()[key]
-
-    def __len__(self) -> int:
-        return len(self._target())
-
-    def __iter__(self) -> Iterator[_K]:
-        return iter(self._target())
diff --git a/venv/lib/python3.10/site-packages/setuptools/config/pyprojecttoml.py b/venv/lib/python3.10/site-packages/setuptools/config/pyprojecttoml.py
deleted file mode 100644
index d995f0b..0000000
--- a/venv/lib/python3.10/site-packages/setuptools/config/pyprojecttoml.py
+++ /dev/null
@@ -1,493 +0,0 @@
-"""
-Load setuptools configuration from ``pyproject.toml`` files.
-
-**PRIVATE MODULE**: API reserved for setuptools internal usage only.
-"""
-import logging
-import os
-import warnings
-from contextlib import contextmanager
-from functools import partial
-from typing import TYPE_CHECKING, Callable, Dict, Optional, Mapping, Union
-
-from setuptools.errors import FileError, OptionError
-
-from . import expand as _expand
-from ._apply_pyprojecttoml import apply as _apply
-from ._apply_pyprojecttoml import _PREVIOUSLY_DEFINED, _WouldIgnoreField
-
-if TYPE_CHECKING:
-    from setuptools.dist import Distribution  # noqa
-
-_Path = Union[str, os.PathLike]
-_logger = logging.getLogger(__name__)
-
-
-def load_file(filepath: _Path) -> dict:
-    from setuptools.extern import tomli  # type: ignore
-
-    with open(filepath, "rb") as file:
-        return tomli.load(file)
-
-
-def validate(config: dict, filepath: _Path) -> bool:
-    from . import _validate_pyproject as validator
-
-    trove_classifier = validator.FORMAT_FUNCTIONS.get("trove-classifier")
-    if hasattr(trove_classifier, "_disable_download"):
-        # Improve reproducibility by default. See issue 31 for validate-pyproject.
-        trove_classifier._disable_download()  # type: ignore
-
-    try:
-        return validator.validate(config)
-    except validator.ValidationError as ex:
-        summary = f"configuration error: {ex.summary}"
-        if ex.name.strip("`") != "project":
-            # Probably it is just a missing/misnamed field, not worth the verbosity...
-            _logger.debug(summary)
-            _logger.debug(ex.details)
-
-        error = f"invalid pyproject.toml config: {ex.name}."
-        raise ValueError(f"{error}\n{summary}") from None
-
-
-def apply_configuration(
-    dist: "Distribution",
-    filepath: _Path,
-    ignore_option_errors=False,
-) -> "Distribution":
-    """Apply the configuration from a ``pyproject.toml`` file into an existing
-    distribution object.
-    """
-    config = read_configuration(filepath, True, ignore_option_errors, dist)
-    return _apply(dist, config, filepath)
-
-
-def read_configuration(
-    filepath: _Path,
-    expand=True,
-    ignore_option_errors=False,
-    dist: Optional["Distribution"] = None,
-):
-    """Read given configuration file and returns options from it as a dict.
-
-    :param str|unicode filepath: Path to configuration file in the ``pyproject.toml``
-        format.
-
-    :param bool expand: Whether to expand directives and other computed values
-        (i.e. post-process the given configuration)
-
-    :param bool ignore_option_errors: Whether to silently ignore
-        options, values of which could not be resolved (e.g. due to exceptions
-        in directives such as file:, attr:, etc.).
-        If False exceptions are propagated as expected.
-
-    :param Distribution|None dist: Distribution object to which the configuration refers.
-        If not given a dummy object will be created and discarded after the
-        configuration is read. This is used for auto-discovery of packages in the case
-        a dynamic configuration (e.g. ``attr`` or ``cmdclass``) is expanded.
-        When ``expand=False`` this object is simply ignored.
-
-    :rtype: dict
-    """
-    filepath = os.path.abspath(filepath)
-
-    if not os.path.isfile(filepath):
-        raise FileError(f"Configuration file {filepath!r} does not exist.")
-
-    asdict = load_file(filepath) or {}
-    project_table = asdict.get("project", {})
-    tool_table = asdict.get("tool", {})
-    setuptools_table = tool_table.get("setuptools", {})
-    if not asdict or not (project_table or setuptools_table):
-        return {}  # User is not using pyproject to configure setuptools
-
-    if setuptools_table:
-        # TODO: Remove the following once the feature stabilizes:
-        msg = "Support for `[tool.setuptools]` in `pyproject.toml` is still *beta*."
-        warnings.warn(msg, _BetaConfiguration)
-
-    # There is an overall sense in the community that making include_package_data=True
-    # the default would be an improvement.
-    # `ini2toml` backfills include_package_data=False when nothing is explicitly given,
-    # therefore setting a default here is backwards compatible.
-    orig_setuptools_table = setuptools_table.copy()
-    if dist and getattr(dist, "include_package_data") is not None:
-        setuptools_table.setdefault("include-package-data", dist.include_package_data)
-    else:
-        setuptools_table.setdefault("include-package-data", True)
-    # Persist changes:
-    asdict["tool"] = tool_table
-    tool_table["setuptools"] = setuptools_table
-
-    try:
-        # Don't complain about unrelated errors (e.g. tools not using the "tool" table)
-        subset = {"project": project_table, "tool": {"setuptools": setuptools_table}}
-        validate(subset, filepath)
-    except Exception as ex:
-        # TODO: Remove the following once the feature stabilizes:
-        if _skip_bad_config(project_table, orig_setuptools_table, dist):
-            return {}
-        # TODO: After the previous statement is removed the try/except can be replaced
-        # by the _ignore_errors context manager.
-        if ignore_option_errors:
-            _logger.debug(f"ignored error: {ex.__class__.__name__} - {ex}")
-        else:
-            raise  # re-raise exception
-
-    if expand:
-        root_dir = os.path.dirname(filepath)
-        return expand_configuration(asdict, root_dir, ignore_option_errors, dist)
-
-    return asdict
-
-
-def _skip_bad_config(
-    project_cfg: dict, setuptools_cfg: dict, dist: Optional["Distribution"]
-) -> bool:
-    """Be temporarily forgiving with invalid ``pyproject.toml``"""
-    # See pypa/setuptools#3199 and pypa/cibuildwheel#1064
-
-    if dist is None or (
-        dist.metadata.name is None
-        and dist.metadata.version is None
-        and dist.install_requires is None
-    ):
-        # It seems that the build is not getting any configuration from other places
-        return False
-
-    if setuptools_cfg:
-        # If `[tool.setuptools]` is set, then `pyproject.toml` config is intentional
-        return False
-
-    given_config = set(project_cfg.keys())
-    popular_subset = {"name", "version", "python_requires", "requires-python"}
-    if given_config <= popular_subset:
-        # It seems that the docs in cibuildwheel have been inadvertently encouraging users
-        # to create `pyproject.toml` files that are not compliant with the standards.
-        # Let's be forgiving for the time being.
-        warnings.warn(_InvalidFile.message(), _InvalidFile, stacklevel=2)
-        return True
-
-    return False
-
-
-def expand_configuration(
-    config: dict,
-    root_dir: Optional[_Path] = None,
-    ignore_option_errors: bool = False,
-    dist: Optional["Distribution"] = None,
-) -> dict:
-    """Given a configuration with unresolved fields (e.g. dynamic, cmdclass, ...)
-    find their final values.
-
-    :param dict config: Dict containing the configuration for the distribution
-    :param str root_dir: Top-level directory for the distribution/project
-        (the same directory where ``pyproject.toml`` is placed)
-    :param bool ignore_option_errors: see :func:`read_configuration`
-    :param Distribution|None dist: Distribution object to which the configuration refers.
-        If not given, a dummy object will be created and discarded after the
-        configuration is read. Used in the case a dynamic configuration
-        (e.g. ``attr`` or ``cmdclass``) is expanded.
-
-    :rtype: dict
-    """
-    return _ConfigExpander(config, root_dir, ignore_option_errors, dist).expand()
-
-
-class _ConfigExpander:
-    def __init__(
-        self,
-        config: dict,
-        root_dir: Optional[_Path] = None,
-        ignore_option_errors: bool = False,
-        dist: Optional["Distribution"] = None,
-    ):
-        self.config = config
-        self.root_dir = root_dir or os.getcwd()
-        self.project_cfg = config.get("project", {})
-        self.dynamic = self.project_cfg.get("dynamic", [])
-        self.setuptools_cfg = config.get("tool", {}).get("setuptools", {})
-        self.dynamic_cfg = self.setuptools_cfg.get("dynamic", {})
-        self.ignore_option_errors = ignore_option_errors
-        self._dist = dist
-
-    def _ensure_dist(self) -> "Distribution":
-        from setuptools.dist import Distribution
-
-        attrs = {"src_root": self.root_dir, "name": self.project_cfg.get("name", None)}
-        return self._dist or Distribution(attrs)
-
-    def _process_field(self, container: dict, field: str, fn: Callable):
-        if field in container:
-            with _ignore_errors(self.ignore_option_errors):
-                container[field] = fn(container[field])
-
-    def _canonic_package_data(self, field="package-data"):
-        package_data = self.setuptools_cfg.get(field, {})
-        return _expand.canonic_package_data(package_data)
-
-    def expand(self):
-        self._expand_packages()
-        self._canonic_package_data()
-        self._canonic_package_data("exclude-package-data")
-
-        # A distribution object is required for discovering the correct package_dir
-        dist = self._ensure_dist()
-        ctx = _EnsurePackagesDiscovered(dist, self.project_cfg, self.setuptools_cfg)
-        with ctx as ensure_discovered:
-            package_dir = ensure_discovered.package_dir
-            self._expand_data_files()
-            self._expand_cmdclass(package_dir)
-            self._expand_all_dynamic(dist, package_dir)
-
-        return self.config
-
-    def _expand_packages(self):
-        packages = self.setuptools_cfg.get("packages")
-        if packages is None or isinstance(packages, (list, tuple)):
-            return
-
-        find = packages.get("find")
-        if isinstance(find, dict):
-            find["root_dir"] = self.root_dir
-            find["fill_package_dir"] = self.setuptools_cfg.setdefault("package-dir", {})
-            with _ignore_errors(self.ignore_option_errors):
-                self.setuptools_cfg["packages"] = _expand.find_packages(**find)
-
-    def _expand_data_files(self):
-        data_files = partial(_expand.canonic_data_files, root_dir=self.root_dir)
-        self._process_field(self.setuptools_cfg, "data-files", data_files)
-
-    def _expand_cmdclass(self, package_dir: Mapping[str, str]):
-        root_dir = self.root_dir
-        cmdclass = partial(_expand.cmdclass, package_dir=package_dir, root_dir=root_dir)
-        self._process_field(self.setuptools_cfg, "cmdclass", cmdclass)
-
-    def _expand_all_dynamic(self, dist: "Distribution", package_dir: Mapping[str, str]):
-        special = (  # need special handling
-            "version",
-            "readme",
-            "entry-points",
-            "scripts",
-            "gui-scripts",
-            "classifiers",
-            "dependencies",
-            "optional-dependencies",
-        )
-        # `_obtain` functions are assumed to raise appropriate exceptions/warnings.
-        obtained_dynamic = {
-            field: self._obtain(dist, field, package_dir)
-            for field in self.dynamic
-            if field not in special
-        }
-        obtained_dynamic.update(
-            self._obtain_entry_points(dist, package_dir) or {},
-            version=self._obtain_version(dist, package_dir),
-            readme=self._obtain_readme(dist),
-            classifiers=self._obtain_classifiers(dist),
-            dependencies=self._obtain_dependencies(dist),
-            optional_dependencies=self._obtain_optional_dependencies(dist),
-        )
-        # `None` indicates there is nothing in `tool.setuptools.dynamic` but the value
-        # might have already been set by setup.py/extensions, so avoid overwriting.
-        updates = {k: v for k, v in obtained_dynamic.items() if v is not None}
-        self.project_cfg.update(updates)
-
-    def _ensure_previously_set(self, dist: "Distribution", field: str):
-        previous = _PREVIOUSLY_DEFINED[field](dist)
-        if previous is None and not self.ignore_option_errors:
-            msg = (
-                f"No configuration found for dynamic {field!r}.\n"
-                "Some dynamic fields need to be specified via `tool.setuptools.dynamic`"
-                "\nothers must be specified via the equivalent attribute in `setup.py`."
-            )
-            raise OptionError(msg)
-
-    def _expand_directive(
-        self, specifier: str, directive, package_dir: Mapping[str, str]
-    ):
-        with _ignore_errors(self.ignore_option_errors):
-            root_dir = self.root_dir
-            if "file" in directive:
-                return _expand.read_files(directive["file"], root_dir)
-            if "attr" in directive:
-                return _expand.read_attr(directive["attr"], package_dir, root_dir)
-            raise ValueError(f"invalid `{specifier}`: {directive!r}")
-        return None
-
-    def _obtain(self, dist: "Distribution", field: str, package_dir: Mapping[str, str]):
-        if field in self.dynamic_cfg:
-            return self._expand_directive(
-                f"tool.setuptools.dynamic.{field}",
-                self.dynamic_cfg[field],
-                package_dir,
-            )
-        self._ensure_previously_set(dist, field)
-        return None
-
-    def _obtain_version(self, dist: "Distribution", package_dir: Mapping[str, str]):
-        # Since plugins can set version, let's silently skip if it cannot be obtained
-        if "version" in self.dynamic and "version" in self.dynamic_cfg:
-            return _expand.version(self._obtain(dist, "version", package_dir))
-        return None
-
-    def _obtain_readme(self, dist: "Distribution") -> Optional[Dict[str, str]]:
-        if "readme" not in self.dynamic:
-            return None
-
-        dynamic_cfg = self.dynamic_cfg
-        if "readme" in dynamic_cfg:
-            return {
-                "text": self._obtain(dist, "readme", {}),
-                "content-type": dynamic_cfg["readme"].get("content-type", "text/x-rst"),
-            }
-
-        self._ensure_previously_set(dist, "readme")
-        return None
-
-    def _obtain_entry_points(
-        self, dist: "Distribution", package_dir: Mapping[str, str]
-    ) -> Optional[Dict[str, dict]]:
-        fields = ("entry-points", "scripts", "gui-scripts")
-        if not any(field in self.dynamic for field in fields):
-            return None
-
-        text = self._obtain(dist, "entry-points", package_dir)
-        if text is None:
-            return None
-
-        groups = _expand.entry_points(text)
-        expanded = {"entry-points": groups}
-
-        def _set_scripts(field: str, group: str):
-            if group in groups:
-                value = groups.pop(group)
-                if field not in self.dynamic:
-                    msg = _WouldIgnoreField.message(field, value)
-                    warnings.warn(msg, _WouldIgnoreField)
-                # TODO: Don't set field when support for pyproject.toml stabilizes
-                #       instead raise an error as specified in PEP 621
-                expanded[field] = value
-
-        _set_scripts("scripts", "console_scripts")
-        _set_scripts("gui-scripts", "gui_scripts")
-
-        return expanded
-
-    def _obtain_classifiers(self, dist: "Distribution"):
-        if "classifiers" in self.dynamic:
-            value = self._obtain(dist, "classifiers", {})
-            if value:
-                return value.splitlines()
-        return None
-
-    def _obtain_dependencies(self, dist: "Distribution"):
-        if "dependencies" in self.dynamic:
-            value = self._obtain(dist, "dependencies", {})
-            if value:
-                return _parse_requirements_list(value)
-        return None
-
-    def _obtain_optional_dependencies(self, dist: "Distribution"):
-        if "optional-dependencies" not in self.dynamic:
-            return None
-        if "optional-dependencies" in self.dynamic_cfg:
-            optional_dependencies_map = self.dynamic_cfg["optional-dependencies"]
-            assert isinstance(optional_dependencies_map, dict)
-            return {
-                group: _parse_requirements_list(self._expand_directive(
-                    f"tool.setuptools.dynamic.optional-dependencies.{group}",
-                    directive,
-                    {},
-                ))
-                for group, directive in optional_dependencies_map.items()
-            }
-        self._ensure_previously_set(dist, "optional-dependencies")
-        return None
-
-
-def _parse_requirements_list(value):
-    return [
-        line
-        for line in value.splitlines()
-        if line.strip() and not line.strip().startswith("#")
-    ]
-
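What the filter above keeps and drops, demonstrated on a small hypothetical requirements blob (blank lines and comment lines are discarded):

    text = "\n# core deps\nrequests>=2.28\n\ntomli; python_version < '3.11'\n"
    parsed = [
        line
        for line in text.splitlines()
        if line.strip() and not line.strip().startswith("#")
    ]
    print(parsed)  # ['requests>=2.28', "tomli; python_version < '3.11'"]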
-
-@contextmanager
-def _ignore_errors(ignore_option_errors: bool):
-    if not ignore_option_errors:
-        yield
-        return
-
-    try:
-        yield
-    except Exception as ex:
-        _logger.debug(f"ignored error: {ex.__class__.__name__} - {ex}")
-
-
-class _EnsurePackagesDiscovered(_expand.EnsurePackagesDiscovered):
-    def __init__(
-        self, distribution: "Distribution", project_cfg: dict, setuptools_cfg: dict
-    ):
-        super().__init__(distribution)
-        self._project_cfg = project_cfg
-        self._setuptools_cfg = setuptools_cfg
-
-    def __enter__(self):
-        """When entering the context, the values of ``packages``, ``py_modules`` and
-        ``package_dir`` that are missing in ``dist`` are copied from ``setuptools_cfg``.
-        """
-        dist, cfg = self._dist, self._setuptools_cfg
-        package_dir: Dict[str, str] = cfg.setdefault("package-dir", {})
-        package_dir.update(dist.package_dir or {})
-        dist.package_dir = package_dir  # needs to be the same object
-
-        dist.set_defaults._ignore_ext_modules()  # pyproject.toml-specific behaviour
-
-        # Set `name`, `py_modules` and `packages` in dist to short-circuit
-        # auto-discovery, but avoid overwriting empty lists purposefully set by users.
-        if dist.metadata.name is None:
-            dist.metadata.name = self._project_cfg.get("name")
-        if dist.py_modules is None:
-            dist.py_modules = cfg.get("py-modules")
-        if dist.packages is None:
-            dist.packages = cfg.get("packages")
-
-        return super().__enter__()
-
-    def __exit__(self, exc_type, exc_value, traceback):
-        """When exiting the context, if values of ``packages``, ``py_modules`` and
-        ``package_dir`` are missing in ``setuptools_cfg``, copy from ``dist``.
-        """
-        # If anything was discovered set them back, so they count in the final config.
-        self._setuptools_cfg.setdefault("packages", self._dist.packages)
-        self._setuptools_cfg.setdefault("py-modules", self._dist.py_modules)
-        return super().__exit__(exc_type, exc_value, traceback)
-
-
-class _BetaConfiguration(UserWarning):
-    """Explicitly inform users that some `pyproject.toml` configuration is *beta*"""
-
-
-class _InvalidFile(UserWarning):
-    """The given `pyproject.toml` file is invalid and would be ignored.
-    !!\n\n
-    ############################
-    # Invalid `pyproject.toml` #
-    ############################
-
-    Any configurations in `pyproject.toml` will be ignored.
-    Please note that future releases of setuptools will halt the build process
-    if an invalid file is given.
-
-    To prevent setuptools from considering `pyproject.toml` please
-    DO NOT include the `[project]` or `[tool.setuptools]` tables in your file.
-    \n\n!!
-    """
-
-    @classmethod
-    def message(cls):
-        from inspect import cleandoc
-        return cleandoc(cls.__doc__)
diff --git a/venv/lib/python3.10/site-packages/setuptools/config/setupcfg.py b/venv/lib/python3.10/site-packages/setuptools/config/setupcfg.py
deleted file mode 100644
index c2a974d..0000000
--- a/venv/lib/python3.10/site-packages/setuptools/config/setupcfg.py
+++ /dev/null
@@ -1,762 +0,0 @@
-"""
-Load setuptools configuration from ``setup.cfg`` files.
-
-**API will be made private in the future**
-"""
-import os
-
-import contextlib
-import functools
-import warnings
-from collections import defaultdict
-from functools import partial
-from functools import wraps
-from typing import (TYPE_CHECKING, Callable, Any, Dict, Generic, Iterable, List,
-                    Optional, Tuple, TypeVar, Union)
-
-from distutils.errors import DistutilsOptionError, DistutilsFileError
-from setuptools.extern.packaging.requirements import Requirement, InvalidRequirement
-from setuptools.extern.packaging.version import Version, InvalidVersion
-from setuptools.extern.packaging.specifiers import SpecifierSet
-from setuptools._deprecation_warning import SetuptoolsDeprecationWarning
-
-from . import expand
-
-if TYPE_CHECKING:
-    from setuptools.dist import Distribution  # noqa
-    from distutils.dist import DistributionMetadata  # noqa
-
-_Path = Union[str, os.PathLike]
-SingleCommandOptions = Dict["str", Tuple["str", Any]]
-"""Dict that associate the name of the options of a particular command to a
-tuple. The first element of the tuple indicates the origin of the option value
-(e.g. the name of the configuration file where it was read from),
-while the second element of the tuple is the option value itself
-"""
-AllCommandOptions = Dict["str", SingleCommandOptions]  # cmd name => its options
-Target = TypeVar("Target", bound=Union["Distribution", "DistributionMetadata"])
-
-
-def read_configuration(
-    filepath: _Path,
-    find_others=False,
-    ignore_option_errors=False
-) -> dict:
-    """Read given configuration file and returns options from it as a dict.
-
-    :param str|unicode filepath: Path to configuration file
-        to get options from.
-
-    :param bool find_others: Whether to search for other configuration files
-        which could be in various places.
-
-    :param bool ignore_option_errors: Whether to silently ignore
-        options, values of which could not be resolved (e.g. due to exceptions
-        in directives such as file:, attr:, etc.).
-        If False exceptions are propagated as expected.
-
-    :rtype: dict
-    """
-    from setuptools.dist import Distribution
-
-    dist = Distribution()
-    filenames = dist.find_config_files() if find_others else []
-    handlers = _apply(dist, filepath, filenames, ignore_option_errors)
-    return configuration_to_dict(handlers)
-
-
-def apply_configuration(dist: "Distribution", filepath: _Path) -> "Distribution":
-    """Apply the configuration from a ``setup.cfg`` file into an existing
-    distribution object.
-    """
-    _apply(dist, filepath)
-    dist._finalize_requires()
-    return dist
-
-
-def _apply(
-    dist: "Distribution", filepath: _Path,
-    other_files: Iterable[_Path] = (),
-    ignore_option_errors: bool = False,
-) -> Tuple["ConfigHandler", ...]:
-    """Read configuration from ``filepath`` and applies to the ``dist`` object."""
-    from setuptools.dist import _Distribution
-
-    filepath = os.path.abspath(filepath)
-
-    if not os.path.isfile(filepath):
-        raise DistutilsFileError('Configuration file %s does not exist.' % filepath)
-
-    current_directory = os.getcwd()
-    os.chdir(os.path.dirname(filepath))
-    filenames = [*other_files, filepath]
-
-    try:
-        _Distribution.parse_config_files(dist, filenames=filenames)
-        handlers = parse_configuration(
-            dist, dist.command_options, ignore_option_errors=ignore_option_errors
-        )
-        dist._finalize_license_files()
-    finally:
-        os.chdir(current_directory)
-
-    return handlers
-
-
-def _get_option(target_obj: Target, key: str):
-    """
-    Given a target object and option key, get that option from
-    the target object, either through a get_{key} method or
-    from an attribute directly.
-    """
-    getter_name = 'get_{key}'.format(**locals())
-    by_attribute = functools.partial(getattr, target_obj, key)
-    getter = getattr(target_obj, getter_name, by_attribute)
-    return getter()
-
-
-def configuration_to_dict(handlers: Tuple["ConfigHandler", ...]) -> dict:
-    """Returns configuration data gathered by given handlers as a dict.
-
-    :param list[ConfigHandler] handlers: Handlers list,
-        usually from parse_configuration()
-
-    :rtype: dict
-    """
-    config_dict: dict = defaultdict(dict)
-
-    for handler in handlers:
-        for option in handler.set_options:
-            value = _get_option(handler.target_obj, option)
-            config_dict[handler.section_prefix][option] = value
-
-    return config_dict
-
-
-def parse_configuration(
-    distribution: "Distribution",
-    command_options: AllCommandOptions,
-    ignore_option_errors=False
-) -> Tuple["ConfigMetadataHandler", "ConfigOptionsHandler"]:
-    """Performs additional parsing of configuration options
-    for a distribution.
-
-    Returns a list of used option handlers.
-
-    :param Distribution distribution:
-    :param dict command_options:
-    :param bool ignore_option_errors: Whether to silently ignore
-        options, values of which could not be resolved (e.g. due to exceptions
-        in directives such as file:, attr:, etc.).
-        If False exceptions are propagated as expected.
-    :rtype: list
-    """
-    with expand.EnsurePackagesDiscovered(distribution) as ensure_discovered:
-        options = ConfigOptionsHandler(
-            distribution,
-            command_options,
-            ignore_option_errors,
-            ensure_discovered,
-        )
-
-        options.parse()
-        if not distribution.package_dir:
-            distribution.package_dir = options.package_dir  # Filled by `find_packages`
-
-        meta = ConfigMetadataHandler(
-            distribution.metadata,
-            command_options,
-            ignore_option_errors,
-            ensure_discovered,
-            distribution.package_dir,
-            distribution.src_root,
-        )
-        meta.parse()
-
-    return meta, options
-
-
-def _warn_accidental_env_marker_misconfig(label: str, orig_value: str, parsed: list):
-    """Because users sometimes misinterpret this configuration:
-
-    [options.extras_require]
-    foo = bar;python_version<"4"
-
-    It looks like one requirement with an environment marker
-    but because there is no newline, it's parsed as two requirements
-    with a semicolon as separator.
-
-    Therefore, if:
-        * input string does not contain a newline AND
-        * parsed result contains two requirements AND
-        * re-joining the two parts with ";" and parsing the result
-          leads to a valid Requirement with a valid marker,
-    a UserWarning is shown to inform the user about the possible problem.
-    """
-    if "\n" in orig_value or len(parsed) != 2:
-        return
-
-    with contextlib.suppress(InvalidRequirement):
-        original_requirements_str = ";".join(parsed)
-        req = Requirement(original_requirements_str)
-        if req.marker is not None:
-            msg = (
-                f"One of the parsed requirements in `{label}` "
-                f"looks like a valid environment marker: '{parsed[1]}'\n"
-                "Make sure that the config is correct and check "
-                "https://setuptools.pypa.io/en/latest/userguide/declarative_config.html#opt-2"  # noqa: E501
-            )
-            warnings.warn(msg, UserWarning)
-
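Why the heuristic above fires, as a runnable sketch (it uses the stand-alone `packaging` distribution, whereas the deleted module imports setuptools' vendored copy):

    from packaging.requirements import Requirement

    # Without a newline, 'foo = bar;python_version<"4"' is split on ';' into
    # two "requirements"; re-joined, it parses as ONE requirement with an
    # environment marker -- a strong hint the user meant the marker form.
    parsed = ['bar', 'python_version<"4"']
    req = Requirement(";".join(parsed))
    print(req.name, "|", req.marker)  # bar | python_version < "4"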
-
-class ConfigHandler(Generic[Target]):
-    """Handles metadata supplied in configuration files."""
-
-    section_prefix: str
-    """Prefix for config sections handled by this handler.
-    Must be provided by subclasses.
-
-    """
-
-    aliases: Dict[str, str] = {}
-    """Options aliases.
-    For compatibility with various packages. E.g.: d2to1 and pbr.
-    Note: `-` in keys is replaced with `_` by config parser.
-
-    """
-
-    def __init__(
-        self,
-        target_obj: Target,
-        options: AllCommandOptions,
-        ignore_option_errors,
-        ensure_discovered: expand.EnsurePackagesDiscovered,
-    ):
-        sections: AllCommandOptions = {}
-
-        section_prefix = self.section_prefix
-        for section_name, section_options in options.items():
-            if not section_name.startswith(section_prefix):
-                continue
-
-            section_name = section_name.replace(section_prefix, '').strip('.')
-            sections[section_name] = section_options
-
-        self.ignore_option_errors = ignore_option_errors
-        self.target_obj = target_obj
-        self.sections = sections
-        self.set_options: List[str] = []
-        self.ensure_discovered = ensure_discovered
-
-    @property
-    def parsers(self):
-        """Metadata item name to parser function mapping."""
-        raise NotImplementedError(
-            '%s must provide .parsers property' % self.__class__.__name__
-        )
-
-    def __setitem__(self, option_name, value):
-        unknown = tuple()
-        target_obj = self.target_obj
-
-        # Translate alias into real name.
-        option_name = self.aliases.get(option_name, option_name)
-
-        current_value = getattr(target_obj, option_name, unknown)
-
-        if current_value is unknown:
-            raise KeyError(option_name)
-
-        if current_value:
-            # Already inhabited. Skipping.
-            return
-
-        skip_option = False
-        parser = self.parsers.get(option_name)
-        if parser:
-            try:
-                value = parser(value)
-
-            except Exception:
-                skip_option = True
-                if not self.ignore_option_errors:
-                    raise
-
-        if skip_option:
-            return
-
-        setter = getattr(target_obj, 'set_%s' % option_name, None)
-        if setter is None:
-            setattr(target_obj, option_name, value)
-        else:
-            setter(value)
-
-        self.set_options.append(option_name)
-
-    @classmethod
-    def _parse_list(cls, value, separator=','):
-        """Represents value as a list.
-
-        Value is split either by separator (defaults to comma) or by lines.
-
-        :param value:
-        :param separator: List items separator character.
-        :rtype: list
-        """
-        if isinstance(value, list):  # _get_parser_compound case
-            return value
-
-        if '\n' in value:
-            value = value.splitlines()
-        else:
-            value = value.split(separator)
-
-        return [chunk.strip() for chunk in value if chunk.strip()]
-
-    @classmethod
-    def _parse_dict(cls, value):
-        """Represents value as a dict.
-
-        :param value:
-        :rtype: dict
-        """
-        separator = '='
-        result = {}
-        for line in cls._parse_list(value):
-            key, sep, val = line.partition(separator)
-            if sep != separator:
-                raise DistutilsOptionError(
-                    'Unable to parse option value to dict: %s' % value
-                )
-            result[key.strip()] = val.strip()
-
-        return result
-
-    @classmethod
-    def _parse_bool(cls, value):
-        """Represents value as boolean.
-
-        :param value:
-        :rtype: bool
-        """
-        value = value.lower()
-        return value in ('1', 'true', 'yes')
-
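The three coercions above, restated as standalone functions with sample inputs (error handling omitted for brevity):

    def parse_list(value, separator=","):
        # Split on newlines if present, otherwise on the separator.
        parts = value.splitlines() if "\n" in value else value.split(separator)
        return [chunk.strip() for chunk in parts if chunk.strip()]

    def parse_dict(value):
        result = {}
        for line in parse_list(value):
            key, _, val = line.partition("=")
            result[key.strip()] = val.strip()
        return result

    def parse_bool(value):
        return value.lower() in ("1", "true", "yes")

    print(parse_list("a, b, c"))       # ['a', 'b', 'c']
    print(parse_dict("x = 1\ny = 2"))  # {'x': '1', 'y': '2'}
    print(parse_bool("Yes"))           # True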
-    @classmethod
-    def _exclude_files_parser(cls, key):
-        """Returns a parser function to make sure field inputs
-        are not files.
-
-        Parses a value after getting the key so error messages are
-        more informative.
-
-        :param key:
-        :rtype: callable
-        """
-
-        def parser(value):
-            exclude_directive = 'file:'
-            if value.startswith(exclude_directive):
-                raise ValueError(
-                    'Only strings are accepted for the {0} field, '
-                    'files are not accepted'.format(key)
-                )
-            return value
-
-        return parser
-
-    @classmethod
-    def _parse_file(cls, value, root_dir: _Path):
-        """Represents value as a string, allowing including text
-        from nearest files using `file:` directive.
-
-        The directive is sandboxed and won't reach anything outside the
-        directory containing setup.py.
-
-        Examples:
-            file: README.rst, CHANGELOG.md, src/file.txt
-
-        :param str value:
-        :rtype: str
-        """
-        include_directive = 'file:'
-
-        if not isinstance(value, str):
-            return value
-
-        if not value.startswith(include_directive):
-            return value
-
-        spec = value[len(include_directive) :]
-        filepaths = (path.strip() for path in spec.split(','))
-        return expand.read_files(filepaths, root_dir)
-
-    def _parse_attr(self, value, package_dir, root_dir: _Path):
-        """Represents value as a module attribute.
-
-        Examples:
-            attr: package.attr
-            attr: package.module.attr
-
-        :param str value:
-        :rtype: str
-        """
-        attr_directive = 'attr:'
-        if not value.startswith(attr_directive):
-            return value
-
-        attr_desc = value.replace(attr_directive, '')
-
-        # Make sure package_dir is populated correctly, so `attr:` directives can work
-        package_dir.update(self.ensure_discovered.package_dir)
-        return expand.read_attr(attr_desc, package_dir, root_dir)
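-        # Illustrative setup.cfg usage (hypothetical package name):
-        #
-        #   [metadata]
-        #   version = attr: mypkg.__version__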
-
-    @classmethod
-    def _get_parser_compound(cls, *parse_methods):
-        """Returns parser function to represents value as a list.
-
-        Parses a value applying given methods one after another.
-
-        :param parse_methods:
-        :rtype: callable
-        """
-
-        def parse(value):
-            parsed = value
-
-            for method in parse_methods:
-                parsed = method(parsed)
-
-            return parsed
-
-        return parse
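-        # e.g. _get_parser_compound(parse_file, parse_list) first resolves a
-        # possible `file:` directive and then splits the result into a list;
-        # this is how the `classifiers` option is handled below.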
-
-    @classmethod
-    def _parse_section_to_dict_with_key(cls, section_options, values_parser):
-        """Parses section options into a dictionary.
-
-        Applies a given parser to each option in a section.
-
-        :param dict section_options:
-        :param callable values_parser: function with 2 args corresponding to key, value
-        :rtype: dict
-        """
-        value = {}
-        for key, (_, val) in section_options.items():
-            value[key] = values_parser(key, val)
-        return value
-
-    @classmethod
-    def _parse_section_to_dict(cls, section_options, values_parser=None):
-        """Parses section options into a dictionary.
-
-        Optionally applies a given parser to each value.
-
-        :param dict section_options:
-        :param callable values_parser: function with 1 arg corresponding to option value
-        :rtype: dict
-        """
-        parser = (lambda _, v: values_parser(v)) if values_parser else (lambda _, v: v)
-        return cls._parse_section_to_dict_with_key(section_options, parser)
-
-    def parse_section(self, section_options):
-        """Parses configuration file section.
-
-        :param dict section_options:
-        """
-        for (name, (_, value)) in section_options.items():
-            with contextlib.suppress(KeyError):
-                # Keep silent, as a new option may appear at any time.
-                self[name] = value
-
-    def parse(self):
-        """Parses configuration file items from one
-        or more related sections.
-
-        """
-        for section_name, section_options in self.sections.items():
-
-            method_postfix = ''
-            if section_name:  # [section.option] variant
-                method_postfix = '_%s' % section_name
-
-            section_parser_method: Optional[Callable] = getattr(
-                self,
-                # Dots in section names are translated into dunderscores.
-                ('parse_section%s' % method_postfix).replace('.', '__'),
-                None,
-            )
-
-            if section_parser_method is None:
-                raise DistutilsOptionError(
-                    'Unsupported distribution option section: [%s.%s]'
-                    % (self.section_prefix, section_name)
-                )
-
-            section_parser_method(section_options)
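-        # e.g. an `[options.packages.find]` section is dispatched to
-        # `parse_section_packages__find` (dots become double underscores).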
-
-    def _deprecated_config_handler(self, func, msg, warning_class):
-        """this function will wrap around parameters that are deprecated
-
-        :param msg: deprecation message
-        :param warning_class: class of warning exception to be raised
-        :param func: function to be wrapped around
-        """
-
-        @wraps(func)
-        def config_handler(*args, **kwargs):
-            warnings.warn(msg, warning_class)
-            return func(*args, **kwargs)
-
-        return config_handler
-
-
-class ConfigMetadataHandler(ConfigHandler["DistributionMetadata"]):
-
-    section_prefix = 'metadata'
-
-    aliases = {
-        'home_page': 'url',
-        'summary': 'description',
-        'classifier': 'classifiers',
-        'platform': 'platforms',
-    }
-
-    strict_mode = False
-    """We need to keep it loose, to be partially compatible with
-    `pbr` and `d2to1` packages which also uses `metadata` section.
-
-    """
-
-    def __init__(
-        self,
-        target_obj: "DistributionMetadata",
-        options: AllCommandOptions,
-        ignore_option_errors: bool,
-        ensure_discovered: expand.EnsurePackagesDiscovered,
-        package_dir: Optional[dict] = None,
-        root_dir: _Path = os.curdir
-    ):
-        super().__init__(target_obj, options, ignore_option_errors, ensure_discovered)
-        self.package_dir = package_dir
-        self.root_dir = root_dir
-
-    @property
-    def parsers(self):
-        """Metadata item name to parser function mapping."""
-        parse_list = self._parse_list
-        parse_file = partial(self._parse_file, root_dir=self.root_dir)
-        parse_dict = self._parse_dict
-        exclude_files_parser = self._exclude_files_parser
-
-        return {
-            'platforms': parse_list,
-            'keywords': parse_list,
-            'provides': parse_list,
-            'requires': self._deprecated_config_handler(
-                parse_list,
-                "The requires parameter is deprecated, please use "
-                "install_requires for runtime dependencies.",
-                SetuptoolsDeprecationWarning,
-            ),
-            'obsoletes': parse_list,
-            'classifiers': self._get_parser_compound(parse_file, parse_list),
-            'license': exclude_files_parser('license'),
-            'license_file': self._deprecated_config_handler(
-                exclude_files_parser('license_file'),
-                "The license_file parameter is deprecated, "
-                "use license_files instead.",
-                SetuptoolsDeprecationWarning,
-            ),
-            'license_files': parse_list,
-            'description': parse_file,
-            'long_description': parse_file,
-            'version': self._parse_version,
-            'project_urls': parse_dict,
-        }
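-        # Illustrative [metadata] section exercising several of these parsers
-        # (hypothetical values):
-        #
-        #   [metadata]
-        #   keywords = packaging, config
-        #   long_description = file: README.rst
-        #   project_urls =
-        #       Tracker = https://example.com/issues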
-
-    def _parse_version(self, value):
-        """Parses `version` option value.
-
-        :param value:
-        :rtype: str
-
-        """
-        version = self._parse_file(value, self.root_dir)
-
-        if version != value:
-            version = version.strip()
-            # Be strict about versions loaded from file because it's easy to
-            # accidentally include newlines and other unintended content
-            try:
-                Version(version)
-            except InvalidVersion:
-                tmpl = (
-                    'Version loaded from {value} does not '
-                    'comply with PEP 440: {version}'
-                )
-                raise DistutilsOptionError(tmpl.format(**locals()))
-
-            return version
-
-        return expand.version(self._parse_attr(value, self.package_dir, self.root_dir))
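-        # Illustrative setup.cfg usages (hypothetical names):
-        #
-        #   [metadata]
-        #   version = file: VERSION.txt           (validated against PEP 440)
-        #   version = attr: mypkg.__version__     (resolved via expand.version)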
-
-
-class ConfigOptionsHandler(ConfigHandler["Distribution"]):
-
-    section_prefix = 'options'
-
-    def __init__(
-        self,
-        target_obj: "Distribution",
-        options: AllCommandOptions,
-        ignore_option_errors: bool,
-        ensure_discovered: expand.EnsurePackagesDiscovered,
-    ):
-        super().__init__(target_obj, options, ignore_option_errors, ensure_discovered)
-        self.root_dir = target_obj.src_root
-        self.package_dir: Dict[str, str] = {}  # To be filled by `find_packages`
-
-    @classmethod
-    def _parse_list_semicolon(cls, value):
-        return cls._parse_list(value, separator=';')
-
-    def _parse_file_in_root(self, value):
-        return self._parse_file(value, root_dir=self.root_dir)
-
-    def _parse_requirements_list(self, label: str, value: str):
-        # Parse a requirements list, either by reading in a `file:`, or a list.
-        parsed = self._parse_list_semicolon(self._parse_file_in_root(value))
-        _warn_accidental_env_marker_misconfig(label, value, parsed)
-        # Filter it to only include lines that are not comments. `parse_list`
-        # will have stripped each line and filtered out empties.
-        return [line for line in parsed if not line.startswith("#")]
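-        # The value may be an inline (newline-separated) list or a `file:`
-        # directive pointing at a requirements-style file; in both cases,
-        # comment lines starting with '#' are dropped here.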
-
-    @property
-    def parsers(self):
-        """Metadata item name to parser function mapping."""
-        parse_list = self._parse_list
-        parse_bool = self._parse_bool
-        parse_dict = self._parse_dict
-        parse_cmdclass = self._parse_cmdclass
-
-        return {
-            'zip_safe': parse_bool,
-            'include_package_data': parse_bool,
-            'package_dir': parse_dict,
-            'scripts': parse_list,
-            'eager_resources': parse_list,
-            'dependency_links': parse_list,
-            'namespace_packages': self._deprecated_config_handler(
-                parse_list,
-                "The namespace_packages parameter is deprecated, "
-                "consider using implicit namespaces instead (PEP 420).",
-                SetuptoolsDeprecationWarning,
-            ),
-            'install_requires': partial(
-                self._parse_requirements_list, "install_requires"
-            ),
-            'setup_requires': self._parse_list_semicolon,
-            'tests_require': self._parse_list_semicolon,
-            'packages': self._parse_packages,
-            'entry_points': self._parse_file_in_root,
-            'py_modules': parse_list,
-            'python_requires': SpecifierSet,
-            'cmdclass': parse_cmdclass,
-        }
-
-    def _parse_cmdclass(self, value):
-        package_dir = self.ensure_discovered.package_dir
-        return expand.cmdclass(self._parse_dict(value), package_dir, self.root_dir)
-
-    def _parse_packages(self, value):
-        """Parses `packages` option value.
-
-        :param value:
-        :rtype: list
-        """
-        find_directives = ['find:', 'find_namespace:']
-        trimmed_value = value.strip()
-
-        if trimmed_value not in find_directives:
-            return self._parse_list(value)
-
-        # Read function arguments from a dedicated section.
-        find_kwargs = self.parse_section_packages__find(
-            self.sections.get('packages.find', {})
-        )
-
-        find_kwargs.update(
-            namespaces=(trimmed_value == find_directives[1]),
-            root_dir=self.root_dir,
-            fill_package_dir=self.package_dir,
-        )
-
-        return expand.find_packages(**find_kwargs)
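-        # Illustrative setup.cfg usage:
-        #
-        #   [options]
-        #   packages = find:
-        #
-        #   [options.packages.find]
-        #   where = src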
-
-    def parse_section_packages__find(self, section_options):
-        """Parses `packages.find` configuration file section.
-
-        To be used in conjunction with _parse_packages().
-
-        :param dict section_options:
-        """
-        section_data = self._parse_section_to_dict(section_options, self._parse_list)
-
-        valid_keys = ['where', 'include', 'exclude']
-
-        find_kwargs = dict(
-            [(k, v) for k, v in section_data.items() if k in valid_keys and v]
-        )
-
-        where = find_kwargs.get('where')
-        if where is not None:
-            find_kwargs['where'] = where[0]  # cast list to single val
-
-        return find_kwargs
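-        # e.g. (hypothetical) after list-parsing, {'where': ['src', 'lib']}
-        # collapses to {'where': 'src'} -- only the first entry is kept.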
-
-    def parse_section_entry_points(self, section_options):
-        """Parses `entry_points` configuration file section.
-
-        :param dict section_options:
-        """
-        parsed = self._parse_section_to_dict(section_options, self._parse_list)
-        self['entry_points'] = parsed
-
-    def _parse_package_data(self, section_options):
-        package_data = self._parse_section_to_dict(section_options, self._parse_list)
-        return expand.canonic_package_data(package_data)
-
-    def parse_section_package_data(self, section_options):
-        """Parses `package_data` configuration file section.
-
-        :param dict section_options:
-        """
-        self['package_data'] = self._parse_package_data(section_options)
-
-    def parse_section_exclude_package_data(self, section_options):
-        """Parses `exclude_package_data` configuration file section.
-
-        :param dict section_options:
-        """
-        self['exclude_package_data'] = self._parse_package_data(section_options)
-
-    def parse_section_extras_require(self, section_options):
-        """Parses `extras_require` configuration file section.
-
-        :param dict section_options:
-        """
-        parsed = self._parse_section_to_dict_with_key(
-            section_options,
-            lambda k, v: self._parse_requirements_list(f"extras_require[{k}]", v)
-        )
-
-        self['extras_require'] = parsed
-
-    def parse_section_data_files(self, section_options):
-        """Parses `data_files` configuration file section.
-
-        :param dict section_options:
-        """
-        parsed = self._parse_section_to_dict(section_options, self._parse_list)
-        self['data_files'] = expand.canonic_data_files(parsed, self.root_dir)
diff --git a/venv/lib/python3.10/site-packages/setuptools/discovery.py b/venv/lib/python3.10/site-packages/setuptools/discovery.py
deleted file mode 100644
index 98fc2a7..0000000
--- a/venv/lib/python3.10/site-packages/setuptools/discovery.py
+++ /dev/null
@@ -1,600 +0,0 @@
-"""Automatic discovery of Python modules and packages (for inclusion in the
-distribution) and other config values.
-
-For the purposes of this module, the following nomenclature is used:
-
-- "src-layout": a directory representing a Python project that contains a "src"
-  folder. Everything under the "src" folder is meant to be included in the
-  distribution when packaging the project. Example::
-
-    .
-    ├── tox.ini
-    ├── pyproject.toml
-    └── src/
-        └── mypkg/
-            ├── __init__.py
-            ├── mymodule.py
-            └── my_data_file.txt
-
-- "flat-layout": a Python project that does not use "src-layout" but instead
-  has a directory under the project root for each package::
-
-    .
-    ├── tox.ini
-    ├── pyproject.toml
-    └── mypkg/
-        ├── __init__.py
-        ├── mymodule.py
-        └── my_data_file.txt
-
-- "single-module": a project that contains a single Python script direct under
-  the project root (no directory used)::
-
-    .
-    ├── tox.ini
-    ├── pyproject.toml
-    └── mymodule.py
-
-"""
-
-import itertools
-import os
-from fnmatch import fnmatchcase
-from glob import glob
-from pathlib import Path
-from typing import (
-    TYPE_CHECKING,
-    Callable,
-    Dict,
-    Iterable,
-    Iterator,
-    List,
-    Mapping,
-    Optional,
-    Tuple,
-    Union
-)
-
-import _distutils_hack.override  # noqa: F401
-
-from distutils import log
-from distutils.util import convert_path
-
-_Path = Union[str, os.PathLike]
-_Filter = Callable[[str], bool]
-StrIter = Iterator[str]
-
-chain_iter = itertools.chain.from_iterable
-
-if TYPE_CHECKING:
-    from setuptools import Distribution  # noqa
-
-
-def _valid_name(path: _Path) -> bool:
-    # Ignore invalid names that cannot be imported directly
-    return os.path.basename(path).isidentifier()
-
-
-class _Finder:
-    """Base class that exposes functionality for module/package finders"""
-
-    ALWAYS_EXCLUDE: Tuple[str, ...] = ()
-    DEFAULT_EXCLUDE: Tuple[str, ...] = ()
-
-    @classmethod
-    def find(
-        cls,
-        where: _Path = '.',
-        exclude: Iterable[str] = (),
-        include: Iterable[str] = ('*',)
-    ) -> List[str]:
-        """Return a list of all Python items (packages or modules, depending on
-        the finder implementation) found within directory 'where'.
-
-        'where' is the root directory which will be searched.
-        It should be supplied as a "cross-platform" (i.e. URL-style) path;
-        it will be converted to the appropriate local path syntax.
-
-        'exclude' is a sequence of names to exclude; '*' can be used
-        as a wildcard in the names.
-        When finding packages, 'foo.*' will exclude all subpackages of 'foo'
-        (but not 'foo' itself).
-
-        'include' is a sequence of names to include.
-        If it's specified, only the named items will be included.
-        If it's not specified, all found items will be included.
-        'include' can contain shell style wildcard patterns just like
-        'exclude'.
-        """
-
-        exclude = exclude or cls.DEFAULT_EXCLUDE
-        return list(
-            cls._find_iter(
-                convert_path(str(where)),
-                cls._build_filter(*cls.ALWAYS_EXCLUDE, *exclude),
-                cls._build_filter(*include),
-            )
-        )
-
-    @classmethod
-    def _find_iter(cls, where: _Path, exclude: _Filter, include: _Filter) -> StrIter:
-        raise NotImplementedError
-
-    @staticmethod
-    def _build_filter(*patterns: str) -> _Filter:
-        """
-        Given a list of patterns, return a callable that will be true only if
-        the input matches at least one of the patterns.
-        """
-        return lambda name: any(fnmatchcase(name, pat) for pat in patterns)
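-        # e.g. _build_filter('foo', 'foo.*')('foo.bar') -> True, while
-        # _build_filter('foo')('foobar') -> False (fnmatch is not a prefix match).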
-
-
-class PackageFinder(_Finder):
-    """
-    Generate a list of all Python packages found within a directory
-    """
-
-    ALWAYS_EXCLUDE = ("ez_setup", "*__pycache__")
-
-    @classmethod
-    def _find_iter(cls, where: _Path, exclude: _Filter, include: _Filter) -> StrIter:
-        """
-        All the packages found in 'where' that pass the 'include' filter, but
-        not the 'exclude' filter.
-        """
-        for root, dirs, files in os.walk(str(where), followlinks=True):
-            # Take a snapshot of dirs to iterate over, then empty dirs in place.
-            all_dirs = dirs[:]
-            dirs[:] = []
-
-            for dir in all_dirs:
-                full_path = os.path.join(root, dir)
-                rel_path = os.path.relpath(full_path, where)
-                package = rel_path.replace(os.path.sep, '.')
-
-                # Skip directory trees that are not valid packages
-                if '.' in dir or not cls._looks_like_package(full_path, package):
-                    continue
-
-                # Should this package be included?
-                if include(package) and not exclude(package):
-                    yield package
-
-                # Keep searching subdirectories, as there may be more packages
-                # down there, even if the parent was excluded.
-                dirs.append(dir)
-
-    @staticmethod
-    def _looks_like_package(path: _Path, _package_name: str) -> bool:
-        """Does a directory look like a package?"""
-        return os.path.isfile(os.path.join(path, '__init__.py'))
-
-
-class PEP420PackageFinder(PackageFinder):
-    @staticmethod
-    def _looks_like_package(_path: _Path, _package_name: str) -> bool:
-        return True
-
-
-class ModuleFinder(_Finder):
-    """Find isolated Python modules.
-    This function will **not** recurse subdirectories.
-    """
-
-    @classmethod
-    def _find_iter(cls, where: _Path, exclude: _Filter, include: _Filter) -> StrIter:
-        for file in glob(os.path.join(where, "*.py")):
-            module, _ext = os.path.splitext(os.path.basename(file))
-
-            if not cls._looks_like_module(module):
-                continue
-
-            if include(module) and not exclude(module):
-                yield module
-
-    _looks_like_module = staticmethod(_valid_name)
-
-
-# We have to be extra careful in the case of flat layout to not include files
-# and directories not meant for distribution (e.g. tool-related)
-
-
-class FlatLayoutPackageFinder(PEP420PackageFinder):
-    _EXCLUDE = (
-        "ci",
-        "bin",
-        "doc",
-        "docs",
-        "documentation",
-        "manpages",
-        "news",
-        "changelog",
-        "test",
-        "tests",
-        "unit_test",
-        "unit_tests",
-        "example",
-        "examples",
-        "scripts",
-        "tools",
-        "util",
-        "utils",
-        "python",
-        "build",
-        "dist",
-        "venv",
-        "env",
-        "requirements",
-        # ---- Task runners / Build tools ----
-        "tasks",  # invoke
-        "fabfile",  # fabric
-        "site_scons",  # SCons
-        # ---- Other tools ----
-        "benchmark",
-        "benchmarks",
-        "exercise",
-        "exercises",
-        # ---- Hidden directories/Private packages ----
-        "[._]*",
-    )
-
-    DEFAULT_EXCLUDE = tuple(chain_iter((p, f"{p}.*") for p in _EXCLUDE))
-    """Reserved package names"""
-
-    @staticmethod
-    def _looks_like_package(_path: _Path, package_name: str) -> bool:
-        names = package_name.split('.')
-        # Consider PEP 561
-        root_pkg_is_valid = names[0].isidentifier() or names[0].endswith("-stubs")
-        return root_pkg_is_valid and all(name.isidentifier() for name in names[1:])
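-        # e.g. "mypkg-stubs" is accepted as a root package name (PEP 561),
-        # while "my-pkg" is rejected (not a valid identifier).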
-
-
-class FlatLayoutModuleFinder(ModuleFinder):
-    DEFAULT_EXCLUDE = (
-        "setup",
-        "conftest",
-        "test",
-        "tests",
-        "example",
-        "examples",
-        "build",
-        # ---- Task runners ----
-        "toxfile",
-        "noxfile",
-        "pavement",
-        "dodo",
-        "tasks",
-        "fabfile",
-        # ---- Other tools ----
-        "[Ss][Cc]onstruct",  # SCons
-        "conanfile",  # Connan: C/C++ build tool
-        "manage",  # Django
-        "benchmark",
-        "benchmarks",
-        "exercise",
-        "exercises",
-        # ---- Hidden files/Private modules ----
-        "[._]*",
-    )
-    """Reserved top-level module names"""
-
-
-def _find_packages_within(root_pkg: str, pkg_dir: _Path) -> List[str]:
-    nested = PEP420PackageFinder.find(pkg_dir)
-    return [root_pkg] + [".".join((root_pkg, n)) for n in nested]
-
-
-class ConfigDiscovery:
-    """Fill-in metadata and options that can be automatically derived
-    (from other metadata/options, the file system or conventions)
-    """
-
-    def __init__(self, distribution: "Distribution"):
-        self.dist = distribution
-        self._called = False
-        self._disabled = False
-        self._skip_ext_modules = False
-
-    def _disable(self):
-        """Internal API to disable automatic discovery"""
-        self._disabled = True
-
-    def _ignore_ext_modules(self):
-        """Internal API to disregard ext_modules.
-
-        Normally auto-discovery would not be triggered if ``ext_modules`` are set
-        (this is done for backward compatibility with existing packages relying on
-        ``setup.py`` or ``setup.cfg``). However, ``setuptools`` can call this function
-        to ignore given ``ext_modules`` and proceed with the auto-discovery if
-        ``packages`` and ``py_modules`` are not given (e.g. when using pyproject.toml
-        metadata).
-        """
-        self._skip_ext_modules = True
-
-    @property
-    def _root_dir(self) -> _Path:
-        # It is best to wait until `src_root` is set on the dist before using _root_dir.
-        return self.dist.src_root or os.curdir
-
-    @property
-    def _package_dir(self) -> Dict[str, str]:
-        if self.dist.package_dir is None:
-            return {}
-        return self.dist.package_dir
-
-    def __call__(self, force=False, name=True, ignore_ext_modules=False):
-        """Automatically discover missing configuration fields
-        and modifies the given ``distribution`` object in-place.
-
-        Note that by default this will only have an effect the first time the
-        ``ConfigDiscovery`` object is called.
-
-        To repeatedly invoke automatic discovery (e.g. when the project
-        directory changes), please use ``force=True`` (or create a new
-        ``ConfigDiscovery`` instance).
-        """
-        if force is False and (self._called or self._disabled):
-            # Avoid overhead of multiple calls
-            return
-
-        self._analyse_package_layout(ignore_ext_modules)
-        if name:
-            self.analyse_name()  # depends on ``packages`` and ``py_modules``
-
-        self._called = True
-
-    def _explicitly_specified(self, ignore_ext_modules: bool) -> bool:
-        """``True`` if the user has specified some form of package/module listing"""
-        ignore_ext_modules = ignore_ext_modules or self._skip_ext_modules
-        ext_modules = not (self.dist.ext_modules is None or ignore_ext_modules)
-        return (
-            self.dist.packages is not None
-            or self.dist.py_modules is not None
-            or ext_modules
-            or hasattr(self.dist, "configuration") and self.dist.configuration
-            # ^ Some projects use numpy.distutils.misc_util.Configuration
-        )
-
-    def _analyse_package_layout(self, ignore_ext_modules: bool) -> bool:
-        if self._explicitly_specified(ignore_ext_modules):
-            # For backward compatibility, just try to find modules/packages
-            # when nothing is given
-            return True
-
-        log.debug(
-            "No `packages` or `py_modules` configuration, performing "
-            "automatic discovery."
-        )
-
-        return (
-            self._analyse_explicit_layout()
-            or self._analyse_src_layout()
-            # flat-layout is the trickiest for discovery so it should be last
-            or self._analyse_flat_layout()
-        )
-
-    def _analyse_explicit_layout(self) -> bool:
-        """The user can explicitly give a package layout via ``package_dir``"""
-        package_dir = self._package_dir.copy()  # don't modify directly
-        package_dir.pop("", None)  # This falls under the "src-layout" umbrella
-        root_dir = self._root_dir
-
-        if not package_dir:
-            return False
-
-        log.debug(f"`explicit-layout` detected -- analysing {package_dir}")
-        pkgs = chain_iter(
-            _find_packages_within(pkg, os.path.join(root_dir, parent_dir))
-            for pkg, parent_dir in package_dir.items()
-        )
-        self.dist.packages = list(pkgs)
-        log.debug(f"discovered packages -- {self.dist.packages}")
-        return True
-
-    def _analyse_src_layout(self) -> bool:
-        """Try to find all packages or modules under the ``src`` directory
-        (or anything pointed by ``package_dir[""]``).
-
-        The "src-layout" is relatively safe for automatic discovery.
-        We assume that everything within is meant to be included in the
-        distribution.
-
-        If ``package_dir[""]`` is not given, but the ``src`` directory exists,
-        this function will set ``package_dir[""] = "src"``.
-        """
-        package_dir = self._package_dir
-        src_dir = os.path.join(self._root_dir, package_dir.get("", "src"))
-        if not os.path.isdir(src_dir):
-            return False
-
-        log.debug(f"`src-layout` detected -- analysing {src_dir}")
-        package_dir.setdefault("", os.path.basename(src_dir))
-        self.dist.package_dir = package_dir  # persist eventual modifications
-        self.dist.packages = PEP420PackageFinder.find(src_dir)
-        self.dist.py_modules = ModuleFinder.find(src_dir)
-        log.debug(f"discovered packages -- {self.dist.packages}")
-        log.debug(f"discovered py_modules -- {self.dist.py_modules}")
-        return True
-
-    def _analyse_flat_layout(self) -> bool:
-        """Try to find all packages and modules under the project root.
-
-        Since the ``flat-layout`` is more dangerous in terms of accidentally including
-        extra files/directories, this function is more conservative and will raise an
-        error if multiple packages or modules are found.
-
-        This assumes that multi-package dists are uncommon and refuses to support
-        that use case in order to prevent unintended errors.
-        """
-        log.debug(f"`flat-layout` detected -- analysing {self._root_dir}")
-        return self._analyse_flat_packages() or self._analyse_flat_modules()
-
-    def _analyse_flat_packages(self) -> bool:
-        self.dist.packages = FlatLayoutPackageFinder.find(self._root_dir)
-        top_level = remove_nested_packages(remove_stubs(self.dist.packages))
-        log.debug(f"discovered packages -- {self.dist.packages}")
-        self._ensure_no_accidental_inclusion(top_level, "packages")
-        return bool(top_level)
-
-    def _analyse_flat_modules(self) -> bool:
-        self.dist.py_modules = FlatLayoutModuleFinder.find(self._root_dir)
-        log.debug(f"discovered py_modules -- {self.dist.py_modules}")
-        self._ensure_no_accidental_inclusion(self.dist.py_modules, "modules")
-        return bool(self.dist.py_modules)
-
-    def _ensure_no_accidental_inclusion(self, detected: List[str], kind: str):
-        if len(detected) > 1:
-            from inspect import cleandoc
-
-            from setuptools.errors import PackageDiscoveryError
-
-            msg = f"""Multiple top-level {kind} discovered in a flat-layout: {detected}.
-
-            To avoid accidental inclusion of unwanted files or directories,
-            setuptools will not proceed with this build.
-
-            If you are trying to create a single distribution with multiple {kind}
-            on purpose, you should not rely on automatic discovery.
-            Instead, consider the following options:
-
-            1. set up custom discovery (`find` directive with `include` or `exclude`)
-            2. use a `src-layout`
-            3. explicitly set `py_modules` or `packages` with a list of names
-
-            To find more information, look for "package discovery" on setuptools docs.
-            """
-            raise PackageDiscoveryError(cleandoc(msg))
-
-    def analyse_name(self):
-        """The packages/modules are the essential contribution of the author.
-        Therefore the name of the distribution can be derived from them.
-        """
-        if self.dist.metadata.name or self.dist.name:
-            # get_name() is not reliable (can return "UNKNOWN")
-            return None
-
-        log.debug("No `name` configuration, performing automatic discovery")
-
-        name = (
-            self._find_name_single_package_or_module()
-            or self._find_name_from_packages()
-        )
-        if name:
-            self.dist.metadata.name = name
-
-    def _find_name_single_package_or_module(self) -> Optional[str]:
-        """Exactly one module or package"""
-        for field in ('packages', 'py_modules'):
-            items = getattr(self.dist, field, None) or []
-            if items and len(items) == 1:
-                log.debug(f"Single module/package detected, name: {items[0]}")
-                return items[0]
-
-        return None
-
-    def _find_name_from_packages(self) -> Optional[str]:
-        """Try to find the root package that is not a PEP 420 namespace"""
-        if not self.dist.packages:
-            return None
-
-        packages = remove_stubs(sorted(self.dist.packages, key=len))
-        package_dir = self.dist.package_dir or {}
-
-        parent_pkg = find_parent_package(packages, package_dir, self._root_dir)
-        if parent_pkg:
-            log.debug(f"Common parent package detected, name: {parent_pkg}")
-            return parent_pkg
-
-        log.warn("No parent package detected, impossible to derive `name`")
-        return None
-
-
-def remove_nested_packages(packages: List[str]) -> List[str]:
-    """Remove nested packages from a list of packages.
-
-    >>> remove_nested_packages(["a", "a.b1", "a.b2", "a.b1.c1"])
-    ['a']
-    >>> remove_nested_packages(["a", "b", "c.d", "c.d.e.f", "g.h", "a.a1"])
-    ['a', 'b', 'c.d', 'g.h']
-    """
-    pkgs = sorted(packages, key=len)
-    top_level = pkgs[:]
-    size = len(pkgs)
-    for i, name in enumerate(reversed(pkgs)):
-        if any(name.startswith(f"{other}.") for other in top_level):
-            top_level.pop(size - i - 1)
-
-    return top_level
-
-
-def remove_stubs(packages: List[str]) -> List[str]:
-    """Remove type stubs (:pep:`561`) from a list of packages.
-
-    >>> remove_stubs(["a", "a.b", "a-stubs", "a-stubs.b.c", "b", "c-stubs"])
-    ['a', 'a.b', 'b']
-    """
-    return [pkg for pkg in packages if not pkg.split(".")[0].endswith("-stubs")]
-
-
-def find_parent_package(
-    packages: List[str], package_dir: Mapping[str, str], root_dir: _Path
-) -> Optional[str]:
-    """Find the parent package that is not a namespace."""
-    packages = sorted(packages, key=len)
-    common_ancestors = []
-    for i, name in enumerate(packages):
-        if not all(n.startswith(f"{name}.") for n in packages[i+1:]):
-            # Since packages are sorted by length, this condition is able
-            # to find a list of all common ancestors.
-            # When there is divergence (e.g. multiple root packages)
-            # the list will be empty
-            break
-        common_ancestors.append(name)
-
-    for name in common_ancestors:
-        pkg_path = find_package_path(name, package_dir, root_dir)
-        init = os.path.join(pkg_path, "__init__.py")
-        if os.path.isfile(init):
-            return name
-
-    return None
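-    # e.g. (hypothetical) packages ['pkg', 'pkg.a', 'pkg.b'] with a regular
-    # pkg/__init__.py on disk yield 'pkg'; divergent roots such as ['a', 'b']
-    # yield None.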
-
-
-def find_package_path(
-    name: str, package_dir: Mapping[str, str], root_dir: _Path
-) -> str:
-    """Given a package name, return the path where it should be found on
-    disk, considering the ``package_dir`` option.
-
-    >>> path = find_package_path("my.pkg", {"": "root/is/nested"}, ".")
-    >>> path.replace(os.sep, "/")
-    './root/is/nested/my/pkg'
-
-    >>> path = find_package_path("my.pkg", {"my": "root/is/nested"}, ".")
-    >>> path.replace(os.sep, "/")
-    './root/is/nested/pkg'
-
-    >>> path = find_package_path("my.pkg", {"my.pkg": "root/is/nested"}, ".")
-    >>> path.replace(os.sep, "/")
-    './root/is/nested'
-
-    >>> path = find_package_path("other.pkg", {"my.pkg": "root/is/nested"}, ".")
-    >>> path.replace(os.sep, "/")
-    './other/pkg'
-    """
-    parts = name.split(".")
-    for i in range(len(parts), 0, -1):
-        # Look backwards, the most specific package_dir first
-        partial_name = ".".join(parts[:i])
-        if partial_name in package_dir:
-            parent = package_dir[partial_name]
-            return os.path.join(root_dir, parent, *parts[i:])
-
-    parent = package_dir.get("") or ""
-    return os.path.join(root_dir, *parent.split("/"), *parts)
-
-
-def construct_package_dir(packages: List[str], package_path: _Path) -> Dict[str, str]:
-    parent_pkgs = remove_nested_packages(packages)
-    prefix = Path(package_path).parts
-    return {pkg: "/".join([*prefix, *pkg.split(".")]) for pkg in parent_pkgs}
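-    # e.g. construct_package_dir(['mypkg'], 'src') -> {'mypkg': 'src/mypkg'}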
diff --git a/venv/lib/python3.10/site-packages/setuptools/dist.py b/venv/lib/python3.10/site-packages/setuptools/dist.py
index 8242354..4adb2d2 100644
--- a/venv/lib/python3.10/site-packages/setuptools/dist.py
+++ b/venv/lib/python3.10/site-packages/setuptools/dist.py
@@ -19,7 +19,6 @@
 import itertools
 import textwrap
 from typing import List, Optional, TYPE_CHECKING
-from pathlib import Path
 
 from collections import defaultdict
 from email import message_from_file
@@ -29,9 +28,7 @@
 
 from setuptools.extern import packaging
 from setuptools.extern import ordered_set
-from setuptools.extern.more_itertools import unique_everseen, partition
-
-from ._importlib import metadata
+from setuptools.extern.more_itertools import unique_everseen
 
 from . import SetuptoolsDeprecationWarning
 
@@ -39,13 +36,9 @@
 import setuptools.command
 from setuptools import windows_support
 from setuptools.monkey import get_unpatched
-from setuptools.config import setupcfg, pyprojecttoml
-from setuptools.discovery import ConfigDiscovery
-
+from setuptools.config import parse_configuration
 import pkg_resources
 from setuptools.extern.packaging import version
-from . import _reqs
-from . import _entry_points
 
 if TYPE_CHECKING:
     from email.message import Message
@@ -101,7 +94,7 @@ def _read_list_from_msg(msg: "Message", field: str) -> Optional[List[str]]:
 
 def _read_payload_from_msg(msg: "Message") -> Optional[str]:
     value = msg.get_payload().strip()
-    if value == 'UNKNOWN' or not value:
+    if value == 'UNKNOWN':
         return None
     return value
 
@@ -120,9 +113,13 @@ def read_pkg_file(self, file):
     self.author_email = _read_field_from_msg(msg, 'author-email')
     self.maintainer_email = None
     self.url = _read_field_from_msg(msg, 'home-page')
-    self.download_url = _read_field_from_msg(msg, 'download-url')
     self.license = _read_field_unescaped_from_msg(msg, 'license')
 
+    if 'download-url' in msg:
+        self.download_url = _read_field_from_msg(msg, 'download-url')
+    else:
+        self.download_url = None
+
     self.long_description = _read_field_unescaped_from_msg(msg, 'description')
     if (
         self.long_description is None and
@@ -173,14 +170,10 @@ def write_field(key, value):
     write_field('Metadata-Version', str(version))
     write_field('Name', self.get_name())
     write_field('Version', self.get_version())
-
-    summary = self.get_description()
-    if summary:
-        write_field('Summary', single_line(summary))
+    write_field('Summary', single_line(self.get_description()))
+    write_field('Home-page', self.get_url())
 
     optional_fields = (
-        ('Home-page', 'url'),
-        ('Download-URL', 'download_url'),
         ('Author', 'author'),
         ('Author-email', 'author_email'),
         ('Maintainer', 'maintainer'),
@@ -192,10 +185,10 @@ def write_field(key, value):
         if attr_val is not None:
             write_field(field, attr_val)
 
-    license = self.get_license()
-    if license:
-        write_field('License', rfc822_escape(license))
-
+    license = rfc822_escape(self.get_license())
+    write_field('License', license)
+    if self.download_url:
+        write_field('Download-URL', self.download_url)
     for project_url in self.project_urls.items():
         write_field('Project-URL', '%s, %s' % project_url)
 
@@ -203,8 +196,7 @@ def write_field(key, value):
     if keywords:
         write_field('Keywords', keywords)
 
-    platforms = self.get_platforms() or []
-    for platform in platforms:
+    for platform in self.get_platforms():
         write_field('Platform', platform)
 
     self._write_list(file, 'Classifier', self.get_classifiers())
@@ -222,16 +214,12 @@ def write_field(key, value):
     if self.long_description_content_type:
         write_field('Description-Content-Type', self.long_description_content_type)
     if self.provides_extras:
-        for extra in self.provides_extras:
+        for extra in sorted(self.provides_extras):
             write_field('Provides-Extra', extra)
 
     self._write_list(file, 'License-File', self.license_files or [])
 
-    long_description = self.get_long_description()
-    if long_description:
-        file.write("\n%s" % long_description)
-        if not long_description.endswith("\n"):
-            file.write("\n")
+    file.write("\n%s\n\n" % self.get_long_description())
 
 
 sequence = tuple, list
@@ -239,7 +227,7 @@ def write_field(key, value):
 
 def check_importable(dist, attr, value):
     try:
-        ep = metadata.EntryPoint(value=value, name=None, group=None)
+        ep = pkg_resources.EntryPoint.parse('x=' + value)
         assert not ep.extras
     except (TypeError, ValueError, AttributeError, AssertionError) as e:
         raise DistutilsSetupError(
@@ -279,11 +267,6 @@ def check_nsp(dist, attr, value):
                 nsp,
                 parent,
             )
-        msg = (
-            "The namespace_packages parameter is deprecated, "
-            "consider using implicit namespaces instead (PEP 420)."
-        )
-        warnings.warn(msg, SetuptoolsDeprecationWarning)
 
 
 def check_extras(dist, attr, value):
@@ -302,7 +285,7 @@ def _check_extra(extra, reqs):
     name, sep, marker = extra.partition(':')
     if marker and pkg_resources.invalid_marker(marker):
         raise DistutilsSetupError("Invalid environment marker: " + marker)
-    list(_reqs.parse(reqs))
+    list(pkg_resources.parse_requirements(reqs))
 
 
 def assert_bool(dist, attr, value):
@@ -322,7 +305,7 @@ def invalid_unless_false(dist, attr, value):
 def check_requirements(dist, attr, value):
     """Verify that install_requires is a valid requirements list"""
     try:
-        list(_reqs.parse(value))
+        list(pkg_resources.parse_requirements(value))
         if isinstance(value, (dict, set)):
             raise TypeError("Unordered types are not allowed")
     except (TypeError, ValueError) as error:
@@ -347,8 +330,8 @@ def check_specifier(dist, attr, value):
 def check_entry_points(dist, attr, value):
     """Verify that entry_points map is parseable"""
     try:
-        _entry_points.load(value)
-    except Exception as e:
+        pkg_resources.EntryPoint.parse_map(value)
+    except ValueError as e:
         raise DistutilsSetupError(e) from e
 
 
@@ -471,7 +454,7 @@ def __init__(self, attrs=None):
         self.patch_missing_pkg_info(attrs)
         self.dependency_links = attrs.pop('dependency_links', [])
         self.setup_requires = attrs.pop('setup_requires', [])
-        for ep in metadata.entry_points(group='distutils.setup_keywords'):
+        for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
             vars(self).setdefault(ep.name, None)
         _Distribution.__init__(
             self,
@@ -482,13 +465,6 @@ def __init__(self, attrs=None):
             },
         )
 
-        # Save the original dependencies before they are processed into the egg format
-        self._orig_extras_require = {}
-        self._orig_install_requires = []
-        self._tmp_extras_require = defaultdict(ordered_set.OrderedSet)
-
-        self.set_defaults = ConfigDiscovery(self)
-
         self._set_metadata_defaults(attrs)
 
         self.metadata.version = self._normalize_version(
@@ -496,19 +472,6 @@ def __init__(self, attrs=None):
         )
         self._finalize_requires()
 
-    def _validate_metadata(self):
-        required = {"name"}
-        provided = {
-            key
-            for key in vars(self.metadata)
-            if getattr(self.metadata, key, None) is not None
-        }
-        missing = required - provided
-
-        if missing:
-            msg = f"Required package metadata is missing: {missing}"
-            raise DistutilsSetupError(msg)
-
     def _set_metadata_defaults(self, attrs):
         """
         Fill-in missing metadata fields not supported by distutils.
@@ -559,8 +522,6 @@ def _finalize_requires(self):
             self.metadata.python_requires = self.python_requires
 
         if getattr(self, 'extras_require', None):
-            # Save original before it is messed by _convert_extras_requirements
-            self._orig_extras_require = self._orig_extras_require or self.extras_require
             for extra in self.extras_require.keys():
                 # Since this gets called multiple times at points where the
                 # keys have become 'converted' extras, ensure that we are only
@@ -569,10 +530,6 @@ def _finalize_requires(self):
                 if extra:
                     self.metadata.provides_extras.add(extra)
 
-        if getattr(self, 'install_requires', None) and not self._orig_install_requires:
-            # Save original before it is messed by _move_install_requirements_markers
-            self._orig_install_requires = self.install_requires
-
         self._convert_extras_requirements()
         self._move_install_requirements_markers()
 
@@ -583,12 +540,11 @@ def _convert_extras_requirements(self):
         `"extra:{marker}": ["barbazquux"]`.
         """
         spec_ext_reqs = getattr(self, 'extras_require', None) or {}
-        tmp = defaultdict(ordered_set.OrderedSet)
-        self._tmp_extras_require = getattr(self, '_tmp_extras_require', tmp)
+        self._tmp_extras_require = defaultdict(list)
         for section, v in spec_ext_reqs.items():
             # Do not strip empty sections.
             self._tmp_extras_require[section]
-            for r in _reqs.parse(v):
+            for r in pkg_resources.parse_requirements(v):
                 suffix = self._suffix_for(r)
                 self._tmp_extras_require[section + suffix].append(r)
 
@@ -614,7 +570,7 @@ def is_simple_req(req):
             return not req.marker
 
         spec_inst_reqs = getattr(self, 'install_requires', None) or ()
-        inst_reqs = list(_reqs.parse(spec_inst_reqs))
+        inst_reqs = list(pkg_resources.parse_requirements(spec_inst_reqs))
         simple_reqs = filter(is_simple_req, inst_reqs)
         complex_reqs = itertools.filterfalse(is_simple_req, inst_reqs)
         self.install_requires = list(map(str, simple_reqs))
@@ -622,8 +578,7 @@ def is_simple_req(req):
         for r in complex_reqs:
             self._tmp_extras_require[':' + str(r.marker)].append(r)
         self.extras_require = dict(
-            # list(dict.fromkeys(...))  ensures a list of unique strings
-            (k, list(dict.fromkeys(str(r) for r in map(self._clean_req, v))))
+            (k, [str(r) for r in map(self._clean_req, v)])
             for k, v in self._tmp_extras_require.items()
         )
 
@@ -756,10 +711,7 @@ def warn_dash_deprecation(self, opt, section):
             return opt
 
         underscore_opt = opt.replace('-', '_')
-        commands = list(itertools.chain(
-            distutils.command.__all__,
-            self._setuptools_commands(),
-        ))
+        commands = distutils.command.__all__ + self._setuptools_commands()
         if (
             not section.startswith('options')
             and section != 'metadata'
@@ -777,8 +729,9 @@ def warn_dash_deprecation(self, opt, section):
 
     def _setuptools_commands(self):
         try:
-            return metadata.distribution('setuptools').entry_points.names
-        except metadata.PackageNotFoundError:
+            dist = pkg_resources.get_distribution('setuptools')
+            return list(dist.get_entry_map('distutils.commands'))
+        except pkg_resources.DistributionNotFound:
             # during bootstrapping, distribution doesn't exist
             return []
 
@@ -841,39 +794,23 @@ def _set_command_options(self, command_obj, option_dict=None):  # noqa: C901
             except ValueError as e:
                 raise DistutilsOptionError(e) from e
 
-    def _get_project_config_files(self, filenames):
-        """Add default file and split between INI and TOML"""
-        tomlfiles = []
-        standard_project_metadata = Path(self.src_root or os.curdir, "pyproject.toml")
-        if filenames is not None:
-            parts = partition(lambda f: Path(f).suffix == ".toml", filenames)
-            filenames = list(parts[0])  # 1st element => predicate is False
-            tomlfiles = list(parts[1])  # 2nd element => predicate is True
-        elif standard_project_metadata.exists():
-            tomlfiles = [standard_project_metadata]
-        return filenames, tomlfiles
-
     def parse_config_files(self, filenames=None, ignore_option_errors=False):
         """Parses configuration files from various levels
         and loads configuration.
-        """
-        inifiles, tomlfiles = self._get_project_config_files(filenames)
 
-        self._parse_config_files(filenames=inifiles)
+        """
+        self._parse_config_files(filenames=filenames)
 
-        setupcfg.parse_configuration(
+        parse_configuration(
             self, self.command_options, ignore_option_errors=ignore_option_errors
         )
-        for filename in tomlfiles:
-            pyprojecttoml.apply_configuration(self, filename, ignore_option_errors)
-
         self._finalize_requires()
         self._finalize_license_files()
 
     def fetch_build_eggs(self, requires):
         """Resolve pre-setup requirements"""
         resolved_dists = pkg_resources.working_set.resolve(
-            _reqs.parse(requires),
+            pkg_resources.parse_requirements(requires),
             installer=self.fetch_build_egg,
             replace_conflicting=True,
         )
@@ -893,7 +830,7 @@ def finalize_options(self):
         def by_order(hook):
             return getattr(hook, 'order', 0)
 
-        defined = metadata.entry_points(group=group)
+        defined = pkg_resources.iter_entry_points(group)
         filtered = itertools.filterfalse(self._removed, defined)
         loaded = map(lambda e: e.load(), filtered)
         for ep in sorted(loaded, key=by_order):
@@ -914,9 +851,10 @@ def _removed(ep):
         return ep.name in removed
 
     def _finalize_setup_keywords(self):
-        for ep in metadata.entry_points(group='distutils.setup_keywords'):
+        for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
             value = getattr(self, ep.name, None)
             if value is not None:
+                ep.require(installer=self.fetch_build_egg)
                 ep.load()(self, ep.name, value)
 
     def get_egg_cache_dir(self):
@@ -949,24 +887,27 @@ def get_command_class(self, command):
         if command in self.cmdclass:
             return self.cmdclass[command]
 
-        eps = metadata.entry_points(group='distutils.commands', name=command)
+        eps = pkg_resources.iter_entry_points('distutils.commands', command)
         for ep in eps:
+            ep.require(installer=self.fetch_build_egg)
             self.cmdclass[command] = cmdclass = ep.load()
             return cmdclass
         else:
             return _Distribution.get_command_class(self, command)
 
     def print_commands(self):
-        for ep in metadata.entry_points(group='distutils.commands'):
+        for ep in pkg_resources.iter_entry_points('distutils.commands'):
             if ep.name not in self.cmdclass:
-                cmdclass = ep.load()
+                # don't require extras as the commands won't be invoked
+                cmdclass = ep.resolve()
                 self.cmdclass[ep.name] = cmdclass
         return _Distribution.print_commands(self)
 
     def get_command_list(self):
-        for ep in metadata.entry_points(group='distutils.commands'):
+        for ep in pkg_resources.iter_entry_points('distutils.commands'):
             if ep.name not in self.cmdclass:
-                cmdclass = ep.load()
+                # don't require extras as the commands won't be invoked
+                cmdclass = ep.resolve()
                 self.cmdclass[ep.name] = cmdclass
         return _Distribution.get_command_list(self)
 
@@ -1209,13 +1150,6 @@ def handle_display_options(self, option_order):
                 sys.stdout.detach(), encoding, errors, newline, line_buffering
             )
 
-    def run_command(self, command):
-        self.set_defaults()
-        # Postpone defaults until all explicit configuration is considered
-        # (setup() args, config files, command line and plugins)
-
-        super().run_command(command)
-
 
 class DistDeprecationWarning(SetuptoolsDeprecationWarning):
     """Class for warning about deprecations in dist in
diff --git a/venv/lib/python3.10/site-packages/setuptools/errors.py b/venv/lib/python3.10/site-packages/setuptools/errors.py
index ec7fb3b..f4d35a6 100644
--- a/venv/lib/python3.10/site-packages/setuptools/errors.py
+++ b/venv/lib/python3.10/site-packages/setuptools/errors.py
@@ -4,6 +4,17 @@
 """
 
 from distutils import errors as _distutils_errors
+from distutils.errors import DistutilsError
+
+
+class RemovedCommandError(DistutilsError, RuntimeError):
+    """Error used for commands that have been removed in setuptools.
+
+    Since ``setuptools`` is built on ``distutils``, simply removing a command
+    from ``setuptools`` will make the behavior fall back to ``distutils``; this
+    error is raised if a command exists in ``distutils`` but has been actively
+    removed in ``setuptools``.
+    """
 
 
 # Re-export errors from distutils to facilitate the migration to PEP632
@@ -27,32 +38,3 @@
 
 # The root error class in the hierarchy
 BaseError = _distutils_errors.DistutilsError
-
-
-class RemovedCommandError(BaseError, RuntimeError):
-    """Error used for commands that have been removed in setuptools.
-
-    Since ``setuptools`` is built on ``distutils``, simply removing a command
-    from ``setuptools`` will make the behavior fall back to ``distutils``; this
-    error is raised if a command exists in ``distutils`` but has been actively
-    removed in ``setuptools``.
-    """
-
-
-class PackageDiscoveryError(BaseError, RuntimeError):
-    """Impossible to perform automatic discovery of packages and/or modules.
-
-    The current project layout or given discovery options can lead to problems when
-    scanning the project directory.
-
-    Setuptools might also refuse to complete auto-discovery if an error-prone condition
-    is detected (e.g. when a project is organised as a flat-layout but contains
-    multiple directories that can be taken as top-level packages inside a single
-    distribution [*]_). In these situations users are encouraged to be explicit
-    about which packages to include or to make the discovery parameters more specific.
-
-    .. [*] Since multi-package distributions are uncommon it is very likely that the
-       developers did not intend for all the directories to be packaged, and are just
-       leaving auxiliary code in the repository top-level, such as maintenance-related
-       scripts.
-    """
diff --git a/venv/lib/python3.10/site-packages/setuptools/extension.py b/venv/lib/python3.10/site-packages/setuptools/extension.py
index 58c023f..1820722 100644
--- a/venv/lib/python3.10/site-packages/setuptools/extension.py
+++ b/venv/lib/python3.10/site-packages/setuptools/extension.py
@@ -28,106 +28,13 @@ def _have_cython():
 
 
 class Extension(_Extension):
-    """
-    Describes a single extension module.
-
-    This means that all source files will be compiled into a single binary file
-    ``<module path>.<suffix>`` (with ``<module path>`` derived from ``name`` and
-    ``<suffix>`` defined by one of the values in
-    ``importlib.machinery.EXTENSION_SUFFIXES``).
-
-    In the case ``.pyx`` files are passed as ``sources`` and ``Cython`` is **not**
-    installed in the build environment, ``setuptools`` may also try to look for the
-    equivalent ``.cpp`` or ``.c`` files.
-
-    :arg str name:
-      the full name of the extension, including any packages -- ie.
-      *not* a filename or pathname, but Python dotted name
-
-    :arg list[str] sources:
-      list of source filenames, relative to the distribution root
-      (where the setup script lives), in Unix form (slash-separated)
-      for portability.  Source files may be C, C++, SWIG (.i),
-      platform-specific resource files, or whatever else is recognized
-      by the "build_ext" command as source for a Python extension.
-
-    :keyword list[str] include_dirs:
-      list of directories to search for C/C++ header files (in Unix
-      form for portability)
-
-    :keyword list[tuple[str, str|None]] define_macros:
-      list of macros to define; each macro is defined using a 2-tuple:
-      the first item corresponding to the name of the macro and the second
-      item either a string with its value or None to
-      define it without a particular value (equivalent of "#define
-      FOO" in source or -DFOO on Unix C compiler command line)
-
-    :keyword list[str] undef_macros:
-      list of macros to undefine explicitly
-
-    :keyword list[str] library_dirs:
-      list of directories to search for C/C++ libraries at link time
-
-    :keyword list[str] libraries:
-      list of library names (not filenames or paths) to link against
-
-    :keyword list[str] runtime_library_dirs:
-      list of directories to search for C/C++ libraries at run time
-      (for shared extensions, this is when the extension is loaded).
-      Setting this will cause an exception during build on Windows
-      platforms.
-
-    :keyword list[str] extra_objects:
-      list of extra files to link with (eg. object files not implied
-      by 'sources', static library that must be explicitly specified,
-      binary resource files, etc.)
-
-    :keyword list[str] extra_compile_args:
-      any extra platform- and compiler-specific information to use
-      when compiling the source files in 'sources'.  For platforms and
-      compilers where "command line" makes sense, this is typically a
-      list of command-line arguments, but for other platforms it could
-      be anything.
-
-    :keyword list[str] extra_link_args:
-      any extra platform- and compiler-specific information to use
-      when linking object files together to create the extension (or
-      to create a new static Python interpreter).  Similar
-      interpretation as for 'extra_compile_args'.
-
-    :keyword list[str] export_symbols:
-      list of symbols to be exported from a shared extension.  Not
-      used on all platforms, and not generally necessary for Python
-      extensions, which typically export exactly one symbol: "init" +
-      extension_name.
-
-    :keyword list[str] swig_opts:
-      any extra options to pass to SWIG if a source file has the .i
-      extension.
-
-    :keyword list[str] depends:
-      list of files that the extension depends on
-
-    :keyword str language:
-      extension language (i.e. "c", "c++", "objc"). Will be detected
-      from the source extensions if not provided.
-
-    :keyword bool optional:
-      specifies that a build failure in the extension should not abort the
-      build process, but simply not install the failing extension.
-
-    :keyword bool py_limited_api:
-      opt-in flag for the usage of :doc:`Python's limited API <python:c-api/stable>`.
-
-    :raises setuptools.errors.PlatformError: if 'runtime_library_dirs' is
-      specified on Windows. (since v63)
-    """
+    """Extension that uses '.c' files in place of '.pyx' files"""
 
     def __init__(self, name, sources, *args, **kw):
         # The *args is needed for compatibility as calls may use positional
         # arguments. py_limited_api may be set only via keyword.
         self.py_limited_api = kw.pop("py_limited_api", False)
-        super().__init__(name, sources, *args, **kw)
+        _Extension.__init__(self, name, sources, *args, **kw)
 
     def _convert_pyx_sources_to_lang(self):
         """
diff --git a/venv/lib/python3.10/site-packages/setuptools/extern/__init__.py b/venv/lib/python3.10/site-packages/setuptools/extern/__init__.py
index d3a6dc9..baca1af 100644
--- a/venv/lib/python3.10/site-packages/setuptools/extern/__init__.py
+++ b/venv/lib/python3.10/site-packages/setuptools/extern/__init__.py
@@ -69,8 +69,5 @@ def install(self):
             sys.meta_path.append(self)
 
 
-names = (
-    'packaging', 'pyparsing', 'ordered_set', 'more_itertools', 'importlib_metadata',
-    'zipp', 'importlib_resources', 'jaraco', 'typing_extensions', 'tomli',
-)
+names = 'packaging', 'pyparsing', 'ordered_set', 'more_itertools',
 VendorImporter(__name__, names, 'setuptools._vendor').install()
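
> Both sides of this hunk configure `VendorImporter`, which redirects `setuptools.extern.<name>` imports to the bundled copies under `setuptools._vendor`, falling back to a top-level install. A rough sketch of that redirection idea (the helper below is hypothetical, not the real class):

```python
import importlib


def import_vendored(name, vendor_pkg='setuptools._vendor'):
    # Try the vendored copy first, then fall back to a top-level install.
    for candidate in (vendor_pkg + '.' + name, name):
        try:
            return importlib.import_module(candidate)
        except ImportError:
            continue
    raise ImportError(name)


# pkging = import_vendored('packaging')  # resolves to whichever copy exists
```
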
diff --git a/venv/lib/python3.10/site-packages/setuptools/logging.py b/venv/lib/python3.10/site-packages/setuptools/logging.py
deleted file mode 100644
index e99c1b9..0000000
--- a/venv/lib/python3.10/site-packages/setuptools/logging.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import sys
-import logging
-import distutils.log
-from . import monkey
-
-
-def _not_warning(record):
-    return record.levelno < logging.WARNING
-
-
-def configure():
-    """
-    Configure logging to emit warning and above to stderr
-    and everything else to stdout. This behavior is provided
-    for compatibility with distutils.log but may change in
-    the future.
-    """
-    err_handler = logging.StreamHandler()
-    err_handler.setLevel(logging.WARNING)
-    out_handler = logging.StreamHandler(sys.stdout)
-    out_handler.addFilter(_not_warning)
-    handlers = err_handler, out_handler
-    logging.basicConfig(
-        format="{message}", style='{', handlers=handlers, level=logging.DEBUG)
-    if hasattr(distutils.log, 'Log'):
-        monkey.patch_func(set_threshold, distutils.log, 'set_threshold')
-        # For some reason `distutils.log` module is getting cached in `distutils.dist`
-        # and then loaded again when patched,
-        # implying: id(distutils.log) != id(distutils.dist.log).
-        # Make sure the same module object is used everywhere:
-        distutils.dist.log = distutils.log
-
-
-def set_threshold(level):
-    logging.root.setLevel(level*10)
-    return set_threshold.unpatched(level)
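
> The deleted `logging.py` split output by severity: WARNING and above went to stderr, everything else to stdout, using a handler-level filter. The same pattern stands alone with just the stdlib:

```python
import logging
import sys


def _not_warning(record):
    return record.levelno < logging.WARNING


err_handler = logging.StreamHandler()            # defaults to stderr
err_handler.setLevel(logging.WARNING)
out_handler = logging.StreamHandler(sys.stdout)
out_handler.addFilter(_not_warning)              # drop WARNING and above

logging.basicConfig(format="{message}", style='{',
                    handlers=[err_handler, out_handler],
                    level=logging.DEBUG)

logging.info("goes to stdout")
logging.warning("goes to stderr")
```
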
diff --git a/venv/lib/python3.10/site-packages/setuptools/monkey.py b/venv/lib/python3.10/site-packages/setuptools/monkey.py
index 77a7adc..fb36dc1 100644
--- a/venv/lib/python3.10/site-packages/setuptools/monkey.py
+++ b/venv/lib/python3.10/site-packages/setuptools/monkey.py
@@ -71,6 +71,8 @@ def patch_all():
         distutils.filelist.findall = setuptools.findall
 
     needs_warehouse = (
+        sys.version_info < (2, 7, 13)
+        or
         (3, 4) < sys.version_info < (3, 4, 6)
         or
         (3, 5) < sys.version_info <= (3, 5, 3)
@@ -141,7 +143,7 @@ def patch_params(mod_name, func_name):
         """
         Prepare the parameters for patch_func to patch indicated function.
         """
-        repl_prefix = 'msvc14_'
+        repl_prefix = 'msvc9_' if 'msvc9' in mod_name else 'msvc14_'
         repl_name = repl_prefix + func_name.lstrip('_')
         repl = getattr(msvc, repl_name)
         mod = import_module(mod_name)
@@ -149,9 +151,19 @@ def patch_params(mod_name, func_name):
             raise ImportError(func_name)
         return repl, mod, func_name
 
+    # Python 2.7 to 3.4
+    msvc9 = functools.partial(patch_params, 'distutils.msvc9compiler')
+
     # Python 3.5+
     msvc14 = functools.partial(patch_params, 'distutils._msvccompiler')
 
+    try:
+        # Patch distutils.msvc9compiler
+        patch_func(*msvc9('find_vcvarsall'))
+        patch_func(*msvc9('query_vcvarsall'))
+    except ImportError:
+        pass
+
     try:
         # Patch distutils._msvccompiler._get_vc_env
         patch_func(*msvc14('_get_vc_env'))
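
> The `patch_func`/`get_unpatched` machinery used here conventionally stores the original callable on its replacement, so wrappers can delegate (as `set_threshold.unpatched(...)` did in the deleted logging module). A self-contained sketch of that pattern, with a toy module standing in for `distutils.msvc9compiler`:

```python
import types


def patch_func(replacement, module, name):
    original = getattr(module, name)
    replacement.unpatched = original   # let the wrapper delegate to it
    setattr(module, name, replacement)


target = types.ModuleType('target')    # toy target module
target.greet = lambda: 'original'


def patched_greet():
    return 'patched -> ' + patched_greet.unpatched()


patch_func(patched_greet, target, 'greet')
print(target.greet())  # patched -> original
```
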
diff --git a/venv/lib/python3.10/site-packages/setuptools/msvc.py b/venv/lib/python3.10/site-packages/setuptools/msvc.py
index 5d4d775..281ea1c 100644
--- a/venv/lib/python3.10/site-packages/setuptools/msvc.py
+++ b/venv/lib/python3.10/site-packages/setuptools/msvc.py
@@ -3,6 +3,14 @@
 
 Known supported compilers:
 --------------------------
+Microsoft Visual C++ 9.0:
+    Microsoft Visual C++ Compiler for Python 2.7 (x86, amd64)
+    Microsoft Windows SDK 6.1 (x86, x64, ia64)
+    Microsoft Windows SDK 7.0 (x86, x64, ia64)
+
+Microsoft Visual C++ 10.0:
+    Microsoft Windows SDK 7.1 (x86, x64, ia64)
+
 Microsoft Visual C++ 14.X:
     Microsoft Visual C++ Build Tools 2015 (x86, x64, arm)
     Microsoft Visual Studio Build Tools 2017 (x86, x64, arm, arm64)
@@ -40,6 +48,100 @@ class winreg:
 
     environ = dict()
 
+_msvc9_suppress_errors = (
+    # msvc9compiler isn't available on some platforms
+    ImportError,
+
+    # msvc9compiler raises DistutilsPlatformError in some
+    # environments. See #1118.
+    distutils.errors.DistutilsPlatformError,
+)
+
+try:
+    from distutils.msvc9compiler import Reg
+except _msvc9_suppress_errors:
+    pass
+
+
+def msvc9_find_vcvarsall(version):
+    """
+    Patched "distutils.msvc9compiler.find_vcvarsall" to use the standalone
+    compiler build for Python
+    (VCForPython / Microsoft Visual C++ Compiler for Python 2.7).
+
+    Fall back to original behavior when the standalone compiler is not
+    available.
+
+    Redirect the path of "vcvarsall.bat".
+
+    Parameters
+    ----------
+    version: float
+        Required Microsoft Visual C++ version.
+
+    Return
+    ------
+    str
+        vcvarsall.bat path
+    """
+    vc_base = r'Software\%sMicrosoft\DevDiv\VCForPython\%0.1f'
+    key = vc_base % ('', version)
+    try:
+        # Per-user installs register the compiler path here
+        productdir = Reg.get_value(key, "installdir")
+    except KeyError:
+        try:
+            # All-user installs on a 64-bit system register here
+            key = vc_base % ('Wow6432Node\\', version)
+            productdir = Reg.get_value(key, "installdir")
+        except KeyError:
+            productdir = None
+
+    if productdir:
+        vcvarsall = join(productdir, "vcvarsall.bat")
+        if isfile(vcvarsall):
+            return vcvarsall
+
+    return get_unpatched(msvc9_find_vcvarsall)(version)
+
+
+def msvc9_query_vcvarsall(ver, arch='x86', *args, **kwargs):
+    """
+    Patched "distutils.msvc9compiler.query_vcvarsall" for support extra
+    Microsoft Visual C++ 9.0 and 10.0 compilers.
+
+    Set environment without use of "vcvarsall.bat".
+
+    Parameters
+    ----------
+    ver: float
+        Required Microsoft Visual C++ version.
+    arch: str
+        Target architecture.
+
+    Return
+    ------
+    dict
+        environment
+    """
+    # Try to get environment from vcvarsall.bat (Classical way)
+    try:
+        orig = get_unpatched(msvc9_query_vcvarsall)
+        return orig(ver, arch, *args, **kwargs)
+    except distutils.errors.DistutilsPlatformError:
+        # Pass error if Vcvarsall.bat is missing
+        pass
+    except ValueError:
+        # Pass error if environment not set after executing vcvarsall.bat
+        pass
+
+    # If error, try to set environment directly
+    try:
+        return EnvironmentInfo(arch, ver).return_env()
+    except distutils.errors.DistutilsPlatformError as exc:
+        _augment_exception(exc, ver, arch)
+        raise
+
 
 def _msvc14_find_vc2015():
     """Python 3.8 "distutils/_msvccompiler.py" backport"""
diff --git a/venv/lib/python3.10/site-packages/setuptools/package_index.py b/venv/lib/python3.10/site-packages/setuptools/package_index.py
index 362e26f..e93fcc6 100644
--- a/venv/lib/python3.10/site-packages/setuptools/package_index.py
+++ b/venv/lib/python3.10/site-packages/setuptools/package_index.py
@@ -1,5 +1,4 @@
-"""PyPI and direct package downloading."""
-
+"""PyPI and direct package downloading"""
 import sys
 import os
 import re
@@ -20,20 +19,9 @@
 
 import setuptools
 from pkg_resources import (
-    CHECKOUT_DIST,
-    Distribution,
-    BINARY_DIST,
-    normalize_path,
-    SOURCE_DIST,
-    Environment,
-    find_distributions,
-    safe_name,
-    safe_version,
-    to_filename,
-    Requirement,
-    DEVELOP_DIST,
-    EGG_DIST,
-    parse_version,
+    CHECKOUT_DIST, Distribution, BINARY_DIST, normalize_path, SOURCE_DIST,
+    Environment, find_distributions, safe_name, safe_version,
+    to_filename, Requirement, DEVELOP_DIST, EGG_DIST, parse_version,
 )
 from distutils import log
 from distutils.errors import DistutilsError
@@ -52,9 +40,7 @@
 EXTENSIONS = ".tar.gz .tar.bz2 .tar .zip .tgz".split()
 
 __all__ = [
-    'PackageIndex',
-    'distros_for_url',
-    'parse_bdist_wininst',
+    'PackageIndex', 'distros_for_url', 'parse_bdist_wininst',
     'interpret_distro_name',
 ]
 
@@ -62,8 +48,7 @@
 
 _tmpl = "setuptools/{setuptools.__version__} Python-urllib/{py_major}"
 user_agent = _tmpl.format(
-    py_major='{}.{}'.format(*sys.version_info), setuptools=setuptools
-)
+    py_major='{}.{}'.format(*sys.version_info), setuptools=setuptools)
 
 
 def parse_requirement_arg(spec):
@@ -135,15 +120,13 @@ def distros_for_location(location, basename, metadata=None):
         wheel = Wheel(basename)
         if not wheel.is_compatible():
             return []
-        return [
-            Distribution(
-                location=location,
-                project_name=wheel.project_name,
-                version=wheel.version,
-                # Increase priority over eggs.
-                precedence=EGG_DIST + 1,
-            )
-        ]
+        return [Distribution(
+            location=location,
+            project_name=wheel.project_name,
+            version=wheel.version,
+            # Increase priority over eggs.
+            precedence=EGG_DIST + 1,
+        )]
     if basename.endswith('.exe'):
         win_base, py_ver, platform = parse_bdist_wininst(basename)
         if win_base is not None:
@@ -154,7 +137,7 @@ def distros_for_location(location, basename, metadata=None):
     #
     for ext in EXTENSIONS:
         if basename.endswith(ext):
-            basename = basename[: -len(ext)]
+            basename = basename[:-len(ext)]
             return interpret_distro_name(location, basename, metadata)
     return []  # no extension matched
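
> This hunk's loop strips a recognized archive suffix before the name/version split; note that `.tar.gz` must be tested before `.tar` for the negative slice to be correct. Standalone:

```python
EXTENSIONS = ".tar.gz .tar.bz2 .tar .zip .tgz".split()


def strip_archive_ext(basename):
    # Order matters: ".tar.gz" is checked before the shorter ".tar".
    for ext in EXTENSIONS:
        if basename.endswith(ext):
            return basename[:-len(ext)]
    return None  # no extension matched


print(strip_archive_ext("requests-2.31.0.tar.gz"))  # requests-2.31.0
```
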
 
@@ -167,7 +150,8 @@ def distros_for_filename(filename, metadata=None):
 
 
 def interpret_distro_name(
-    location, basename, metadata, py_version=None, precedence=SOURCE_DIST, platform=None
+        location, basename, metadata, py_version=None, precedence=SOURCE_DIST,
+        platform=None
 ):
     """Generate alternative interpretations of a source distro name
 
@@ -194,13 +178,9 @@ def interpret_distro_name(
 
     for p in range(1, len(parts) + 1):
         yield Distribution(
-            location,
-            metadata,
-            '-'.join(parts[:p]),
-            '-'.join(parts[p:]),
-            py_version=py_version,
-            precedence=precedence,
-            platform=platform,
+            location, metadata, '-'.join(parts[:p]), '-'.join(parts[p:]),
+            py_version=py_version, precedence=precedence,
+            platform=platform
         )
 
 
@@ -218,9 +198,7 @@ def wrapper(*args, **kwargs):
 
 
 REL = re.compile(r"""<([^>]*\srel\s{0,10}=\s{0,10}['"]?([^'" >]+)[^>]*)>""", re.I)
-"""
-Regex for an HTML tag with 'rel="val"' attributes.
-"""
+# this line is here to fix emacs' cruddy broken syntax highlighting
 
 
 @unique_values
@@ -304,16 +282,11 @@ class PackageIndex(Environment):
     """A distribution index that scans web pages for download URLs"""
 
     def __init__(
-        self,
-        index_url="https://pypi.org/simple/",
-        hosts=('*',),
-        ca_bundle=None,
-        verify_ssl=True,
-        *args,
-        **kw
+            self, index_url="https://pypi.org/simple/", hosts=('*',),
+            ca_bundle=None, verify_ssl=True, *args, **kw
     ):
-        super().__init__(*args, **kw)
-        self.index_url = index_url + "/"[: not index_url.endswith('/')]
+        Environment.__init__(self, *args, **kw)
+        self.index_url = index_url + "/" [:not index_url.endswith('/')]
         self.scanned_urls = {}
         self.fetched_urls = {}
         self.package_pages = {}
@@ -406,8 +379,7 @@ def url_ok(self, url, fatal=False):
             return True
         msg = (
             "\nNote: Bypassing %s (disallowed host; see "
-            "http://bit.ly/2hrImnY for details).\n"
-        )
+            "http://bit.ly/2hrImnY for details).\n")
         if fatal:
             raise DistutilsError(msg % url)
         else:
@@ -445,7 +417,9 @@ def _scan(self, link):
         if not link.startswith(self.index_url):
             return NO_MATCH_SENTINEL
 
-        parts = list(map(urllib.parse.unquote, link[len(self.index_url) :].split('/')))
+        parts = list(map(
+            urllib.parse.unquote, link[len(self.index_url):].split('/')
+        ))
         if len(parts) != 2 or '#' in parts[1]:
             return NO_MATCH_SENTINEL
 
@@ -487,15 +461,16 @@ def process_index(self, url, page):
     def need_version_info(self, url):
         self.scan_all(
             "Page at %s links to .py file(s) without version info; an index "
-            "scan is required.",
-            url,
+            "scan is required.", url
         )
 
     def scan_all(self, msg=None, *args):
         if self.index_url not in self.fetched_urls:
             if msg:
                 self.warn(msg, *args)
-            self.info("Scanning index of all packages (this may take a while)")
+            self.info(
+                "Scanning index of all packages (this may take a while)"
+            )
         self.scan_url(self.index_url)
 
     def find_packages(self, requirement):
@@ -526,7 +501,9 @@ def check_hash(self, checker, filename, tfp):
         """
         checker is a ContentChecker
         """
-        checker.report(self.debug, "Validating %%s checksum for %s" % filename)
+        checker.report(
+            self.debug,
+            "Validating %%s checksum for %s" % filename)
         if not checker.is_valid():
             tfp.close()
             os.unlink(filename)
@@ -563,8 +540,7 @@ def not_found_in_index(self, requirement):
         else:  # no distros seen for this name, might be misspelled
             meth, msg = (
                 self.warn,
-                "Couldn't find index page for %r (maybe misspelled?)",
-            )
+                "Couldn't find index page for %r (maybe misspelled?)")
         meth(msg, requirement.unsafe_name)
         self.scan_all()
 
@@ -603,14 +579,8 @@ def download(self, spec, tmpdir):
         return getattr(self.fetch_distribution(spec, tmpdir), 'location', None)
 
     def fetch_distribution(  # noqa: C901  # is too complex (14)  # FIXME
-        self,
-        requirement,
-        tmpdir,
-        force_scan=False,
-        source=False,
-        develop_ok=False,
-        local_index=None,
-    ):
+            self, requirement, tmpdir, force_scan=False, source=False,
+            develop_ok=False, local_index=None):
         """Obtain a distribution suitable for fulfilling `requirement`
 
         `requirement` must be a ``pkg_resources.Requirement`` instance.
@@ -642,13 +612,15 @@ def find(req, env=None):
                 if dist.precedence == DEVELOP_DIST and not develop_ok:
                     if dist not in skipped:
                         self.warn(
-                            "Skipping development or system egg: %s",
-                            dist,
+                            "Skipping development or system egg: %s", dist,
                         )
                         skipped[dist] = 1
                     continue
 
-                test = dist in req and (dist.precedence <= SOURCE_DIST or not source)
+                test = (
+                    dist in req
+                    and (dist.precedence <= SOURCE_DIST or not source)
+                )
                 if test:
                     loc = self.download(dist.location, tmpdir)
                     dist.download_location = loc
@@ -697,15 +669,10 @@ def fetch(self, requirement, tmpdir, force_scan=False, source=False):
 
     def gen_setup(self, filename, fragment, tmpdir):
         match = EGG_FRAGMENT.match(fragment)
-        dists = (
-            match
-            and [
-                d
-                for d in interpret_distro_name(filename, match.group(1), None)
-                if d.version
-            ]
-            or []
-        )
+        dists = match and [
+            d for d in
+            interpret_distro_name(filename, match.group(1), None) if d.version
+        ] or []
 
         if len(dists) == 1:  # unambiguous ``#egg`` fragment
             basename = os.path.basename(filename)
@@ -713,7 +680,8 @@ def gen_setup(self, filename, fragment, tmpdir):
             # Make sure the file has been downloaded to the temp dir.
             if os.path.dirname(filename) != tmpdir:
                 dst = os.path.join(tmpdir, basename)
-                if not (os.path.exists(dst) and os.path.samefile(filename, dst)):
+                from setuptools.command.easy_install import samefile
+                if not samefile(filename, dst):
                     shutil.copy2(filename, dst)
                     filename = dst
 
@@ -722,9 +690,8 @@ def gen_setup(self, filename, fragment, tmpdir):
                     "from setuptools import setup\n"
                     "setup(name=%r, version=%r, py_modules=[%r])\n"
                     % (
-                        dists[0].project_name,
-                        dists[0].version,
-                        os.path.splitext(basename)[0],
+                        dists[0].project_name, dists[0].version,
+                        os.path.splitext(basename)[0]
                     )
                 )
             return filename
@@ -800,22 +767,23 @@ def open_url(self, url, warning=None):  # noqa: C901  # is too complex (12)
             if warning:
                 self.warn(warning, v.reason)
             else:
-                raise DistutilsError(
-                    "Download error for %s: %s" % (url, v.reason)
-                ) from v
+                raise DistutilsError("Download error for %s: %s"
+                                     % (url, v.reason)) from v
         except http.client.BadStatusLine as v:
             if warning:
                 self.warn(warning, v.line)
             else:
                 raise DistutilsError(
                     '%s returned a bad status line. The server might be '
-                    'down, %s' % (url, v.line)
+                    'down, %s' %
+                    (url, v.line)
                 ) from v
         except (http.client.HTTPException, socket.error) as v:
             if warning:
                 self.warn(warning, v)
             else:
-                raise DistutilsError("Download error for %s: %s" % (url, v)) from v
+                raise DistutilsError("Download error for %s: %s"
+                                     % (url, v)) from v
 
     def _download_url(self, scheme, url, tmpdir):
         # Determine download filename
@@ -920,13 +888,10 @@ def _download_git(self, url, filename):
 
         if rev is not None:
             self.info("Checking out %s", rev)
-            os.system(
-                "git -C %s checkout --quiet %s"
-                % (
-                    filename,
-                    rev,
-                )
-            )
+            os.system("git -C %s checkout --quiet %s" % (
+                filename,
+                rev,
+            ))
 
         return filename
 
@@ -939,13 +904,10 @@ def _download_hg(self, url, filename):
 
         if rev is not None:
             self.info("Updating to %s", rev)
-            os.system(
-                "hg --cwd %s up -C -r %s -q"
-                % (
-                    filename,
-                    rev,
-                )
-            )
+            os.system("hg --cwd %s up -C -r %s -q" % (
+                filename,
+                rev,
+            ))
 
         return filename
 
@@ -1040,7 +1002,7 @@ def __init__(self):
         Load from ~/.pypirc
         """
         defaults = dict.fromkeys(['username', 'password', 'repository'], '')
-        super().__init__(defaults)
+        configparser.RawConfigParser.__init__(self, defaults)
 
         rc = os.path.join(os.path.expanduser('~'), '.pypirc')
         if os.path.exists(rc):
@@ -1049,8 +1011,7 @@ def __init__(self):
     @property
     def creds_by_repository(self):
         sections_with_repositories = [
-            section
-            for section in self.sections()
+            section for section in self.sections()
             if self.get(section, 'repository').strip()
         ]
 
@@ -1154,8 +1115,8 @@ def local_open(url):
             files.append('<a href="{name}">{name}</a>'.format(name=f))
         else:
             tmpl = (
-                "{url}" "{files}"
-            )
+                "{url}"
+                "{files}")
             body = tmpl.format(url=url, files='\n'.join(files))
         status, message = 200, "OK"
     else:
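
> The constructor reformatted earlier in this file keeps a terse idiom: `index_url + "/"[:not index_url.endswith('/')]` appends a slash only when one is missing, because the bool slices the one-character string `"/"` down to either `""` or `"/"`. Demonstrated in isolation:

```python
def ensure_trailing_slash(url):
    # bool is an int: "/"[:0] == "", "/"[:1] == "/"
    return url + "/"[:not url.endswith('/')]


print(ensure_trailing_slash("https://pypi.org/simple"))   # slash added
print(ensure_trailing_slash("https://pypi.org/simple/"))  # unchanged
```
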
diff --git a/venv/lib/python3.10/site-packages/setuptools/wheel.py b/venv/lib/python3.10/site-packages/setuptools/wheel.py
index 527ed3b..0be811a 100644
--- a/venv/lib/python3.10/site-packages/setuptools/wheel.py
+++ b/venv/lib/python3.10/site-packages/setuptools/wheel.py
@@ -1,14 +1,13 @@
 """Wheels support."""
 
+from distutils.util import get_platform
+from distutils import log
 import email
 import itertools
 import os
 import posixpath
 import re
 import zipfile
-import contextlib
-
-from distutils.util import get_platform
 
 import pkg_resources
 import setuptools
@@ -16,7 +15,6 @@
 from setuptools.extern.packaging.tags import sys_tags
 from setuptools.extern.packaging.utils import canonicalize_name
 from setuptools.command.egg_info import write_requirements
-from setuptools.archive_util import _unpack_zipfile_obj
 
 
 WHEEL_NAME = re.compile(
@@ -51,19 +49,6 @@ def unpack(src_dir, dst_dir):
         os.rmdir(dirpath)
 
 
-@contextlib.contextmanager
-def disable_info_traces():
-    """
-    Temporarily disable info traces.
-    """
-    from distutils import log
-    saved = log.set_threshold(log.WARN)
-    try:
-        yield
-    finally:
-        log.set_threshold(saved)
-
-
 class Wheel:
 
     def __init__(self, filename):
@@ -136,7 +121,8 @@ def get_metadata(name):
             raise ValueError(
                 'unsupported wheel format version: %s' % wheel_version)
         # Extract to target directory.
-        _unpack_zipfile_obj(zf, destination_eggdir)
+        os.mkdir(destination_eggdir)
+        zf.extractall(destination_eggdir)
         # Convert metadata.
         dist_info = os.path.join(destination_eggdir, dist_info)
         dist = pkg_resources.Distribution.from_location(
@@ -150,13 +136,13 @@ def get_metadata(name):
         def raw_req(req):
             req.marker = None
             return str(req)
-        install_requires = list(map(raw_req, dist.requires()))
+        install_requires = list(sorted(map(raw_req, dist.requires())))
         extras_require = {
-            extra: [
+            extra: sorted(
                 req
                 for req in map(raw_req, dist.requires((extra,)))
                 if req not in install_requires
-            ]
+            )
             for extra in dist.extras
         }
         os.rename(dist_info, egg_info)
@@ -170,12 +156,17 @@ def raw_req(req):
                 extras_require=extras_require,
             ),
         )
-        with disable_info_traces():
+        # Temporarily disable info traces.
+        log_threshold = log._global_log.threshold
+        log.set_threshold(log.WARN)
+        try:
             write_requirements(
                 setup_dist.get_command_obj('egg_info'),
                 None,
                 os.path.join(egg_info, 'requires.txt'),
             )
+        finally:
+            log.set_threshold(log_threshold)
 
     @staticmethod
     def _move_data_entries(destination_eggdir, dist_data):
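
> This hunk inlines what the removed `disable_info_traces()` context manager wrapped: snapshot the distutils log threshold, raise it to WARN, and restore it in `finally`. The equivalent context-manager shape, written against stdlib `logging` so it runs anywhere (distutils.log internals vary by Python version):

```python
import contextlib
import logging


@contextlib.contextmanager
def quiet_logs(logger=logging.getLogger(), level=logging.WARNING):
    saved = logger.level
    logger.setLevel(level)       # suppress info/debug for the duration
    try:
        yield
    finally:
        logger.setLevel(saved)   # always restore the previous threshold


logging.basicConfig(level=logging.INFO)
with quiet_logs():
    logging.info("suppressed")
logging.info("visible again")
```
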
diff --git a/venv/lib/python3.10/site-packages/setuptools/windows_support.py b/venv/lib/python3.10/site-packages/setuptools/windows_support.py
index 1ca64fb..cb977cf 100644
--- a/venv/lib/python3.10/site-packages/setuptools/windows_support.py
+++ b/venv/lib/python3.10/site-packages/setuptools/windows_support.py
@@ -1,4 +1,5 @@
 import platform
+import ctypes
 
 
 def windows_only(func):
@@ -16,7 +17,6 @@ def hide_file(path):
 
     `path` must be text.
     """
-    import ctypes
     __import__('ctypes.wintypes')
     SetFileAttributes = ctypes.windll.kernel32.SetFileAttributesW
     SetFileAttributes.argtypes = ctypes.wintypes.LPWSTR, ctypes.wintypes.DWORD
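
> Hoisting `import ctypes` to module scope trades a per-call import for an unconditional one on every platform. For reference, a guarded version of the same Win32 call (`FILE_ATTRIBUTE_HIDDEN` is `0x02`; the platform check makes this a no-op off Windows):

```python
import platform


def hide_file(path):
    """Set the 'hidden' attribute on a Windows file (no-op elsewhere)."""
    if platform.system() != 'Windows':
        return
    import ctypes
    import ctypes.wintypes
    SetFileAttributes = ctypes.windll.kernel32.SetFileAttributesW
    SetFileAttributes.argtypes = (ctypes.wintypes.LPWSTR,
                                  ctypes.wintypes.DWORD)
    SetFileAttributes.restype = ctypes.wintypes.BOOL
    FILE_ATTRIBUTE_HIDDEN = 0x02
    if not SetFileAttributes(path, FILE_ATTRIBUTE_HIDDEN):
        raise ctypes.WinError()
```
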
diff --git a/venv/lib/python3.10/site-packages/six-1.16.0.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/six-1.16.0.dist-info/INSTALLER
new file mode 100644
index 0000000..a1b589e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/six-1.16.0.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/venv/lib/python3.10/site-packages/six-1.16.0.dist-info/LICENSE b/venv/lib/python3.10/site-packages/six-1.16.0.dist-info/LICENSE
new file mode 100644
index 0000000..de66331
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/six-1.16.0.dist-info/LICENSE
@@ -0,0 +1,18 @@
+Copyright (c) 2010-2020 Benjamin Peterson
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/venv/lib/python3.10/site-packages/six-1.16.0.dist-info/METADATA b/venv/lib/python3.10/site-packages/six-1.16.0.dist-info/METADATA
new file mode 100644
index 0000000..6d7525c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/six-1.16.0.dist-info/METADATA
@@ -0,0 +1,49 @@
+Metadata-Version: 2.1
+Name: six
+Version: 1.16.0
+Summary: Python 2 and 3 compatibility utilities
+Home-page: https://github.com/benjaminp/six
+Author: Benjamin Peterson
+Author-email: benjamin@python.org
+License: MIT
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 3
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Topic :: Software Development :: Libraries
+Classifier: Topic :: Utilities
+Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*
+
+.. image:: https://img.shields.io/pypi/v/six.svg
+   :target: https://pypi.org/project/six/
+   :alt: six on PyPI
+
+.. image:: https://travis-ci.org/benjaminp/six.svg?branch=master
+   :target: https://travis-ci.org/benjaminp/six
+   :alt: six on TravisCI
+
+.. image:: https://readthedocs.org/projects/six/badge/?version=latest
+   :target: https://six.readthedocs.io/
+   :alt: six's documentation on Read the Docs
+
+.. image:: https://img.shields.io/badge/license-MIT-green.svg
+   :target: https://github.com/benjaminp/six/blob/master/LICENSE
+   :alt: MIT License badge
+
+Six is a Python 2 and 3 compatibility library.  It provides utility functions
+for smoothing over the differences between the Python versions with the goal of
+writing Python code that is compatible on both Python versions.  See the
+documentation for more information on what is provided.
+
+Six supports Python 2.7 and 3.3+.  It is contained in only one Python
+file, so it can be easily copied into your project. (The copyright and license
+notice must be retained.)
+
+Online documentation is at https://six.readthedocs.io/.
+
+Bugs can be reported to https://github.com/benjaminp/six.  The code can also
+be found there.
+
+
diff --git a/venv/lib/python3.10/site-packages/six-1.16.0.dist-info/RECORD b/venv/lib/python3.10/site-packages/six-1.16.0.dist-info/RECORD
new file mode 100644
index 0000000..ccfe214
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/six-1.16.0.dist-info/RECORD
@@ -0,0 +1,8 @@
+__pycache__/six.cpython-310.pyc,,
+six-1.16.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+six-1.16.0.dist-info/LICENSE,sha256=i7hQxWWqOJ_cFvOkaWWtI9gq3_YPI5P8J2K2MYXo5sk,1066
+six-1.16.0.dist-info/METADATA,sha256=VQcGIFCAEmfZcl77E5riPCN4v2TIsc_qtacnjxKHJoI,1795
+six-1.16.0.dist-info/RECORD,,
+six-1.16.0.dist-info/WHEEL,sha256=Z-nyYpwrcSqxfdux5Mbn_DQ525iP7J2DG3JgGvOYyTQ,110
+six-1.16.0.dist-info/top_level.txt,sha256=_iVH_iYEtEXnD8nYGQYpYFUvkUW9sEO1GYbkeKSAais,4
+six.py,sha256=TOOfQi7nFGfMrIvtdr6wX4wyHH8M7aknmuLfo2cBBrM,34549
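
> Each RECORD row is `path,sha256=<digest>,size`, where the digest is the urlsafe base64 of the file's SHA-256 with the `=` padding stripped (the RECORD file itself gets empty hash and size fields). A row can be reproduced like this:

```python
import base64
import hashlib


def record_entry(path):
    with open(path, 'rb') as fh:
        data = fh.read()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest())
    return "{},sha256={},{}".format(path, digest.rstrip(b'=').decode(),
                                    len(data))


# record_entry("six-1.16.0.dist-info/INSTALLER") reproduces the row above
```
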
diff --git a/venv/lib/python3.10/site-packages/six-1.16.0.dist-info/WHEEL b/venv/lib/python3.10/site-packages/six-1.16.0.dist-info/WHEEL
new file mode 100644
index 0000000..01b8fc7
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/six-1.16.0.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.36.2)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/venv/lib/python3.10/site-packages/six-1.16.0.dist-info/top_level.txt b/venv/lib/python3.10/site-packages/six-1.16.0.dist-info/top_level.txt
new file mode 100644
index 0000000..ffe2fce
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/six-1.16.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+six
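
> Before the full 998-line listing, typical consumer-side usage of the shims `six.py` defines (these are public six APIs):

```python
import six

print(six.PY3)                           # coarse version flags defined below
print(six.text_type("café"))             # unicode on Py2, str on Py3
print(six.ensure_text(b"caf\xc3\xa9"))   # bytes decoded to text -> 'café'
for i in six.moves.range(3):             # xrange on Py2, range on Py3
    print(i)
```
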
diff --git a/venv/lib/python3.10/site-packages/six.py b/venv/lib/python3.10/site-packages/six.py
new file mode 100644
index 0000000..4e15675
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/six.py
@@ -0,0 +1,998 @@
+# Copyright (c) 2010-2020 Benjamin Peterson
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+"""Utilities for writing code that runs on Python 2 and 3"""
+
+from __future__ import absolute_import
+
+import functools
+import itertools
+import operator
+import sys
+import types
+
+__author__ = "Benjamin Peterson "
+__version__ = "1.16.0"
+
+
+# Useful for very coarse version differentiation.
+PY2 = sys.version_info[0] == 2
+PY3 = sys.version_info[0] == 3
+PY34 = sys.version_info[0:2] >= (3, 4)
+
+if PY3:
+    string_types = str,
+    integer_types = int,
+    class_types = type,
+    text_type = str
+    binary_type = bytes
+
+    MAXSIZE = sys.maxsize
+else:
+    string_types = basestring,
+    integer_types = (int, long)
+    class_types = (type, types.ClassType)
+    text_type = unicode
+    binary_type = str
+
+    if sys.platform.startswith("java"):
+        # Jython always uses 32 bits.
+        MAXSIZE = int((1 << 31) - 1)
+    else:
+        # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
+        class X(object):
+
+            def __len__(self):
+                return 1 << 31
+        try:
+            len(X())
+        except OverflowError:
+            # 32-bit
+            MAXSIZE = int((1 << 31) - 1)
+        else:
+            # 64-bit
+            MAXSIZE = int((1 << 63) - 1)
+        del X
+
+if PY34:
+    from importlib.util import spec_from_loader
+else:
+    spec_from_loader = None
+
+
+def _add_doc(func, doc):
+    """Add documentation to a function."""
+    func.__doc__ = doc
+
+
+def _import_module(name):
+    """Import module, returning the module after the last dot."""
+    __import__(name)
+    return sys.modules[name]
+
+
+class _LazyDescr(object):
+
+    def __init__(self, name):
+        self.name = name
+
+    def __get__(self, obj, tp):
+        result = self._resolve()
+        setattr(obj, self.name, result)  # Invokes __set__.
+        try:
+            # This is a bit ugly, but it avoids running this again by
+            # removing this descriptor.
+            delattr(obj.__class__, self.name)
+        except AttributeError:
+            pass
+        return result
+
+
+class MovedModule(_LazyDescr):
+
+    def __init__(self, name, old, new=None):
+        super(MovedModule, self).__init__(name)
+        if PY3:
+            if new is None:
+                new = name
+            self.mod = new
+        else:
+            self.mod = old
+
+    def _resolve(self):
+        return _import_module(self.mod)
+
+    def __getattr__(self, attr):
+        _module = self._resolve()
+        value = getattr(_module, attr)
+        setattr(self, attr, value)
+        return value
+
+
+class _LazyModule(types.ModuleType):
+
+    def __init__(self, name):
+        super(_LazyModule, self).__init__(name)
+        self.__doc__ = self.__class__.__doc__
+
+    def __dir__(self):
+        attrs = ["__doc__", "__name__"]
+        attrs += [attr.name for attr in self._moved_attributes]
+        return attrs
+
+    # Subclasses should override this
+    _moved_attributes = []
+
+
+class MovedAttribute(_LazyDescr):
+
+    def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
+        super(MovedAttribute, self).__init__(name)
+        if PY3:
+            if new_mod is None:
+                new_mod = name
+            self.mod = new_mod
+            if new_attr is None:
+                if old_attr is None:
+                    new_attr = name
+                else:
+                    new_attr = old_attr
+            self.attr = new_attr
+        else:
+            self.mod = old_mod
+            if old_attr is None:
+                old_attr = name
+            self.attr = old_attr
+
+    def _resolve(self):
+        module = _import_module(self.mod)
+        return getattr(module, self.attr)
+
+
+class _SixMetaPathImporter(object):
+
+    """
+    A meta path importer to import six.moves and its submodules.
+
+    This class implements a PEP302 finder and loader. It should be compatible
+    with Python 2.5 and all existing versions of Python3
+    """
+
+    def __init__(self, six_module_name):
+        self.name = six_module_name
+        self.known_modules = {}
+
+    def _add_module(self, mod, *fullnames):
+        for fullname in fullnames:
+            self.known_modules[self.name + "." + fullname] = mod
+
+    def _get_module(self, fullname):
+        return self.known_modules[self.name + "." + fullname]
+
+    def find_module(self, fullname, path=None):
+        if fullname in self.known_modules:
+            return self
+        return None
+
+    def find_spec(self, fullname, path, target=None):
+        if fullname in self.known_modules:
+            return spec_from_loader(fullname, self)
+        return None
+
+    def __get_module(self, fullname):
+        try:
+            return self.known_modules[fullname]
+        except KeyError:
+            raise ImportError("This loader does not know module " + fullname)
+
+    def load_module(self, fullname):
+        try:
+            # in case of a reload
+            return sys.modules[fullname]
+        except KeyError:
+            pass
+        mod = self.__get_module(fullname)
+        if isinstance(mod, MovedModule):
+            mod = mod._resolve()
+        else:
+            mod.__loader__ = self
+        sys.modules[fullname] = mod
+        return mod
+
+    def is_package(self, fullname):
+        """
+        Return true, if the named module is a package.
+
+        We need this method to get correct spec objects with
+        Python 3.4 (see PEP451)
+        """
+        return hasattr(self.__get_module(fullname), "__path__")
+
+    def get_code(self, fullname):
+        """Return None
+
+        Required, if is_package is implemented"""
+        self.__get_module(fullname)  # eventually raises ImportError
+        return None
+    get_source = get_code  # same as get_code
+
+    def create_module(self, spec):
+        return self.load_module(spec.name)
+
+    def exec_module(self, module):
+        pass
+
+_importer = _SixMetaPathImporter(__name__)
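
> `_SixMetaPathImporter` implements both the legacy PEP 302 hooks (`find_module`/`load_module`) and the PEP 451 ones (`find_spec`/`create_module`/`exec_module`). A minimal PEP 451-only finder that serves one synthetic module shows the moving parts:

```python
import sys
import types
from importlib.abc import Loader, MetaPathFinder
from importlib.util import spec_from_loader


class VirtualFinder(MetaPathFinder, Loader):
    def __init__(self, modules):
        self.modules = modules  # name -> prebuilt module object

    def find_spec(self, fullname, path=None, target=None):
        if fullname in self.modules:
            return spec_from_loader(fullname, self)
        return None

    def create_module(self, spec):
        return self.modules[spec.name]  # hand back the prebuilt module

    def exec_module(self, module):
        pass  # nothing to execute; the module is already populated


mod = types.ModuleType("fake_mod")
mod.answer = 42
sys.meta_path.append(VirtualFinder({"fake_mod": mod}))

import fake_mod
print(fake_mod.answer)  # 42
```
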
+
+
+class _MovedItems(_LazyModule):
+
+    """Lazy loading of moved objects"""
+    __path__ = []  # mark as package
+
+
+_moved_attributes = [
+    MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
+    MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
+    MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
+    MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
+    MovedAttribute("intern", "__builtin__", "sys"),
+    MovedAttribute("map", "itertools", "builtins", "imap", "map"),
+    MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
+    MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
+    MovedAttribute("getoutput", "commands", "subprocess"),
+    MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
+    MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
+    MovedAttribute("reduce", "__builtin__", "functools"),
+    MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
+    MovedAttribute("StringIO", "StringIO", "io"),
+    MovedAttribute("UserDict", "UserDict", "collections"),
+    MovedAttribute("UserList", "UserList", "collections"),
+    MovedAttribute("UserString", "UserString", "collections"),
+    MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
+    MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
+    MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
+    MovedModule("builtins", "__builtin__"),
+    MovedModule("configparser", "ConfigParser"),
+    MovedModule("collections_abc", "collections", "collections.abc" if sys.version_info >= (3, 3) else "collections"),
+    MovedModule("copyreg", "copy_reg"),
+    MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
+    MovedModule("dbm_ndbm", "dbm", "dbm.ndbm"),
+    MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread" if sys.version_info < (3, 9) else "_thread"),
+    MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
+    MovedModule("http_cookies", "Cookie", "http.cookies"),
+    MovedModule("html_entities", "htmlentitydefs", "html.entities"),
+    MovedModule("html_parser", "HTMLParser", "html.parser"),
+    MovedModule("http_client", "httplib", "http.client"),
+    MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
+    MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"),
+    MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
+    MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
+    MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
+    MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
+    MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
+    MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
+    MovedModule("cPickle", "cPickle", "pickle"),
+    MovedModule("queue", "Queue"),
+    MovedModule("reprlib", "repr"),
+    MovedModule("socketserver", "SocketServer"),
+    MovedModule("_thread", "thread", "_thread"),
+    MovedModule("tkinter", "Tkinter"),
+    MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
+    MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
+    MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
+    MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
+    MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
+    MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
+    MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
+    MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
+    MovedModule("tkinter_colorchooser", "tkColorChooser",
+                "tkinter.colorchooser"),
+    MovedModule("tkinter_commondialog", "tkCommonDialog",
+                "tkinter.commondialog"),
+    MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
+    MovedModule("tkinter_font", "tkFont", "tkinter.font"),
+    MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
+    MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
+                "tkinter.simpledialog"),
+    MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
+    MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
+    MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
+    MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
+    MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
+    MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
+]
+# Add windows specific modules.
+if sys.platform == "win32":
+    _moved_attributes += [
+        MovedModule("winreg", "_winreg"),
+    ]
+
+for attr in _moved_attributes:
+    setattr(_MovedItems, attr.name, attr)
+    if isinstance(attr, MovedModule):
+        _importer._add_module(attr, "moves." + attr.name)
+del attr
+
+_MovedItems._moved_attributes = _moved_attributes
+
+moves = _MovedItems(__name__ + ".moves")
+_importer._add_module(moves, "moves")
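
> With the registry installed, the `six.moves` namespace resolves to the right stdlib location on either major version:

```python
from six.moves import range, zip                 # builtins-style iterators
from six.moves.urllib.parse import urlparse      # urlparse on Py2, urllib.parse on Py3

print(urlparse("https://pypi.org/simple/").netloc)  # pypi.org
print(list(zip(range(3), "abc")))                   # [(0, 'a'), (1, 'b'), (2, 'c')]
```
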
+
+
+class Module_six_moves_urllib_parse(_LazyModule):
+
+    """Lazy loading of moved objects in six.moves.urllib_parse"""
+
+
+_urllib_parse_moved_attributes = [
+    MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
+    MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
+    MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
+    MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
+    MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
+    MovedAttribute("urljoin", "urlparse", "urllib.parse"),
+    MovedAttribute("urlparse", "urlparse", "urllib.parse"),
+    MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
+    MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
+    MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
+    MovedAttribute("quote", "urllib", "urllib.parse"),
+    MovedAttribute("quote_plus", "urllib", "urllib.parse"),
+    MovedAttribute("unquote", "urllib", "urllib.parse"),
+    MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
+    MovedAttribute("unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes"),
+    MovedAttribute("urlencode", "urllib", "urllib.parse"),
+    MovedAttribute("splitquery", "urllib", "urllib.parse"),
+    MovedAttribute("splittag", "urllib", "urllib.parse"),
+    MovedAttribute("splituser", "urllib", "urllib.parse"),
+    MovedAttribute("splitvalue", "urllib", "urllib.parse"),
+    MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
+    MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
+    MovedAttribute("uses_params", "urlparse", "urllib.parse"),
+    MovedAttribute("uses_query", "urlparse", "urllib.parse"),
+    MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
+]
+for attr in _urllib_parse_moved_attributes:
+    setattr(Module_six_moves_urllib_parse, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
+                      "moves.urllib_parse", "moves.urllib.parse")
+
+
+class Module_six_moves_urllib_error(_LazyModule):
+
+    """Lazy loading of moved objects in six.moves.urllib_error"""
+
+
+_urllib_error_moved_attributes = [
+    MovedAttribute("URLError", "urllib2", "urllib.error"),
+    MovedAttribute("HTTPError", "urllib2", "urllib.error"),
+    MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
+]
+for attr in _urllib_error_moved_attributes:
+    setattr(Module_six_moves_urllib_error, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
+                      "moves.urllib_error", "moves.urllib.error")
+
+
+class Module_six_moves_urllib_request(_LazyModule):
+
+    """Lazy loading of moved objects in six.moves.urllib_request"""
+
+
+_urllib_request_moved_attributes = [
+    MovedAttribute("urlopen", "urllib2", "urllib.request"),
+    MovedAttribute("install_opener", "urllib2", "urllib.request"),
+    MovedAttribute("build_opener", "urllib2", "urllib.request"),
+    MovedAttribute("pathname2url", "urllib", "urllib.request"),
+    MovedAttribute("url2pathname", "urllib", "urllib.request"),
+    MovedAttribute("getproxies", "urllib", "urllib.request"),
+    MovedAttribute("Request", "urllib2", "urllib.request"),
+    MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
+    MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
+    MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
+    MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
+    MovedAttribute("FileHandler", "urllib2", "urllib.request"),
+    MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
+    MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
+    MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
+    MovedAttribute("urlretrieve", "urllib", "urllib.request"),
+    MovedAttribute("urlcleanup", "urllib", "urllib.request"),
+    MovedAttribute("URLopener", "urllib", "urllib.request"),
+    MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
+    MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
+    MovedAttribute("parse_http_list", "urllib2", "urllib.request"),
+    MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"),
+]
+for attr in _urllib_request_moved_attributes:
+    setattr(Module_six_moves_urllib_request, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
+                      "moves.urllib_request", "moves.urllib.request")
+
+
+class Module_six_moves_urllib_response(_LazyModule):
+
+    """Lazy loading of moved objects in six.moves.urllib_response"""
+
+
+_urllib_response_moved_attributes = [
+    MovedAttribute("addbase", "urllib", "urllib.response"),
+    MovedAttribute("addclosehook", "urllib", "urllib.response"),
+    MovedAttribute("addinfo", "urllib", "urllib.response"),
+    MovedAttribute("addinfourl", "urllib", "urllib.response"),
+]
+for attr in _urllib_response_moved_attributes:
+    setattr(Module_six_moves_urllib_response, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
+                      "moves.urllib_response", "moves.urllib.response")
+
+
+class Module_six_moves_urllib_robotparser(_LazyModule):
+
+    """Lazy loading of moved objects in six.moves.urllib_robotparser"""
+
+
+_urllib_robotparser_moved_attributes = [
+    MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
+]
+for attr in _urllib_robotparser_moved_attributes:
+    setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
+                      "moves.urllib_robotparser", "moves.urllib.robotparser")
+
+
+class Module_six_moves_urllib(types.ModuleType):
+
+    """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
+    __path__ = []  # mark as package
+    parse = _importer._get_module("moves.urllib_parse")
+    error = _importer._get_module("moves.urllib_error")
+    request = _importer._get_module("moves.urllib_request")
+    response = _importer._get_module("moves.urllib_response")
+    robotparser = _importer._get_module("moves.urllib_robotparser")
+
+    def __dir__(self):
+        return ['parse', 'error', 'request', 'response', 'robotparser']
+
+_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
+                      "moves.urllib")
+
+
+def add_move(move):
+    """Add an item to six.moves."""
+    setattr(_MovedItems, move.name, move)
+
+
+def remove_move(name):
+    """Remove item from six.moves."""
+    try:
+        delattr(_MovedItems, name)
+    except AttributeError:
+        try:
+            del moves.__dict__[name]
+        except KeyError:
+            raise AttributeError("no such move, %r" % (name,))
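
> `add_move`/`remove_move` let a project extend this registry at runtime. For example (the move name `ordered_dict` is arbitrary, chosen here for illustration):

```python
import six

six.add_move(six.MovedAttribute("ordered_dict", "collections", "collections",
                                "OrderedDict", "OrderedDict"))
from six.moves import ordered_dict
print(ordered_dict())        # OrderedDict()

six.remove_move("ordered_dict")
```
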
+
+
+if PY3:
+    _meth_func = "__func__"
+    _meth_self = "__self__"
+
+    _func_closure = "__closure__"
+    _func_code = "__code__"
+    _func_defaults = "__defaults__"
+    _func_globals = "__globals__"
+else:
+    _meth_func = "im_func"
+    _meth_self = "im_self"
+
+    _func_closure = "func_closure"
+    _func_code = "func_code"
+    _func_defaults = "func_defaults"
+    _func_globals = "func_globals"
+
+
+try:
+    advance_iterator = next
+except NameError:
+    def advance_iterator(it):
+        return it.next()
+next = advance_iterator
+
+
+try:
+    callable = callable
+except NameError:
+    def callable(obj):
+        return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
+
+
+if PY3:
+    def get_unbound_function(unbound):
+        return unbound
+
+    create_bound_method = types.MethodType
+
+    def create_unbound_method(func, cls):
+        return func
+
+    Iterator = object
+else:
+    def get_unbound_function(unbound):
+        return unbound.im_func
+
+    def create_bound_method(func, obj):
+        return types.MethodType(func, obj, obj.__class__)
+
+    def create_unbound_method(func, cls):
+        return types.MethodType(func, None, cls)
+
+    class Iterator(object):
+
+        def next(self):
+            return type(self).__next__(self)
+
+    callable = callable
+_add_doc(get_unbound_function,
+         """Get the function out of a possibly unbound function""")
+
+
+get_method_function = operator.attrgetter(_meth_func)
+get_method_self = operator.attrgetter(_meth_self)
+get_function_closure = operator.attrgetter(_func_closure)
+get_function_code = operator.attrgetter(_func_code)
+get_function_defaults = operator.attrgetter(_func_defaults)
+get_function_globals = operator.attrgetter(_func_globals)
+
+
+if PY3:
+    def iterkeys(d, **kw):
+        return iter(d.keys(**kw))
+
+    def itervalues(d, **kw):
+        return iter(d.values(**kw))
+
+    def iteritems(d, **kw):
+        return iter(d.items(**kw))
+
+    def iterlists(d, **kw):
+        return iter(d.lists(**kw))
+
+    viewkeys = operator.methodcaller("keys")
+
+    viewvalues = operator.methodcaller("values")
+
+    viewitems = operator.methodcaller("items")
+else:
+    def iterkeys(d, **kw):
+        return d.iterkeys(**kw)
+
+    def itervalues(d, **kw):
+        return d.itervalues(**kw)
+
+    def iteritems(d, **kw):
+        return d.iteritems(**kw)
+
+    def iterlists(d, **kw):
+        return d.iterlists(**kw)
+
+    viewkeys = operator.methodcaller("viewkeys")
+
+    viewvalues = operator.methodcaller("viewvalues")
+
+    viewitems = operator.methodcaller("viewitems")
+
+_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
+_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
+_add_doc(iteritems,
+         "Return an iterator over the (key, value) pairs of a dictionary.")
+_add_doc(iterlists,
+         "Return an iterator over the (key, [values]) pairs of a dictionary.")
+
+
+if PY3:
+    def b(s):
+        return s.encode("latin-1")
+
+    def u(s):
+        return s
+    unichr = chr
+    import struct
+    int2byte = struct.Struct(">B").pack
+    del struct
+    byte2int = operator.itemgetter(0)
+    indexbytes = operator.getitem
+    iterbytes = iter
+    import io
+    StringIO = io.StringIO
+    BytesIO = io.BytesIO
+    del io
+    _assertCountEqual = "assertCountEqual"
+    if sys.version_info[1] <= 1:
+        _assertRaisesRegex = "assertRaisesRegexp"
+        _assertRegex = "assertRegexpMatches"
+        _assertNotRegex = "assertNotRegexpMatches"
+    else:
+        _assertRaisesRegex = "assertRaisesRegex"
+        _assertRegex = "assertRegex"
+        _assertNotRegex = "assertNotRegex"
+else:
+    def b(s):
+        return s
+    # Workaround for standalone backslash
+
+    def u(s):
+        return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
+    unichr = unichr
+    int2byte = chr
+
+    def byte2int(bs):
+        return ord(bs[0])
+
+    def indexbytes(buf, i):
+        return ord(buf[i])
+    iterbytes = functools.partial(itertools.imap, ord)
+    import StringIO
+    StringIO = BytesIO = StringIO.StringIO
+    _assertCountEqual = "assertItemsEqual"
+    _assertRaisesRegex = "assertRaisesRegexp"
+    _assertRegex = "assertRegexpMatches"
+    _assertNotRegex = "assertNotRegexpMatches"
+_add_doc(b, """Byte literal""")
+_add_doc(u, """Text literal""")
+
+
+def assertCountEqual(self, *args, **kwargs):
+    return getattr(self, _assertCountEqual)(*args, **kwargs)
+
+
+def assertRaisesRegex(self, *args, **kwargs):
+    return getattr(self, _assertRaisesRegex)(*args, **kwargs)
+
+
+def assertRegex(self, *args, **kwargs):
+    return getattr(self, _assertRegex)(*args, **kwargs)
+
+
+def assertNotRegex(self, *args, **kwargs):
+    return getattr(self, _assertNotRegex)(*args, **kwargs)
+
+
+if PY3:
+    exec_ = getattr(moves.builtins, "exec")
+
+    def reraise(tp, value, tb=None):
+        try:
+            if value is None:
+                value = tp()
+            if value.__traceback__ is not tb:
+                raise value.with_traceback(tb)
+            raise value
+        finally:
+            value = None
+            tb = None
+
+else:
+    def exec_(_code_, _globs_=None, _locs_=None):
+        """Execute code in a namespace."""
+        if _globs_ is None:
+            frame = sys._getframe(1)
+            _globs_ = frame.f_globals
+            if _locs_ is None:
+                _locs_ = frame.f_locals
+            del frame
+        elif _locs_ is None:
+            _locs_ = _globs_
+        exec("""exec _code_ in _globs_, _locs_""")
+
+    exec_("""def reraise(tp, value, tb=None):
+    try:
+        raise tp, value, tb
+    finally:
+        tb = None
+""")
+
+
+if sys.version_info[:2] > (3,):
+    exec_("""def raise_from(value, from_value):
+    try:
+        raise value from from_value
+    finally:
+        value = None
+""")
+else:
+    def raise_from(value, from_value):
+        raise value
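+
+# Illustrative (not part of six): reraise() re-raises a captured exception
+# with its original traceback on either major version:
+#
+#     try:
+#         1 / 0
+#     except ZeroDivisionError:
+#         reraise(*sys.exc_info())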
+
+
+print_ = getattr(moves.builtins, "print", None)
+if print_ is None:
+    def print_(*args, **kwargs):
+        """The new-style print function for Python 2.4 and 2.5."""
+        fp = kwargs.pop("file", sys.stdout)
+        if fp is None:
+            return
+
+        def write(data):
+            if not isinstance(data, basestring):
+                data = str(data)
+            # If the file has an encoding, encode unicode with it.
+            if (isinstance(fp, file) and
+                    isinstance(data, unicode) and
+                    fp.encoding is not None):
+                errors = getattr(fp, "errors", None)
+                if errors is None:
+                    errors = "strict"
+                data = data.encode(fp.encoding, errors)
+            fp.write(data)
+        want_unicode = False
+        sep = kwargs.pop("sep", None)
+        if sep is not None:
+            if isinstance(sep, unicode):
+                want_unicode = True
+            elif not isinstance(sep, str):
+                raise TypeError("sep must be None or a string")
+        end = kwargs.pop("end", None)
+        if end is not None:
+            if isinstance(end, unicode):
+                want_unicode = True
+            elif not isinstance(end, str):
+                raise TypeError("end must be None or a string")
+        if kwargs:
+            raise TypeError("invalid keyword arguments to print()")
+        if not want_unicode:
+            for arg in args:
+                if isinstance(arg, unicode):
+                    want_unicode = True
+                    break
+        if want_unicode:
+            newline = unicode("\n")
+            space = unicode(" ")
+        else:
+            newline = "\n"
+            space = " "
+        if sep is None:
+            sep = space
+        if end is None:
+            end = newline
+        for i, arg in enumerate(args):
+            if i:
+                write(sep)
+            write(arg)
+        write(end)
+if sys.version_info[:2] < (3, 3):
+    _print = print_
+
+    def print_(*args, **kwargs):
+        fp = kwargs.get("file", sys.stdout)
+        flush = kwargs.pop("flush", False)
+        _print(*args, **kwargs)
+        if flush and fp is not None:
+            fp.flush()
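+
+# Illustrative (not part of six): print_("a", "b", sep="-", end="!\n") behaves
+# like Python 3's print() even where the builtin lacks sep/end/flush support.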
+
+_add_doc(reraise, """Reraise an exception.""")
+
+if sys.version_info[0:2] < (3, 4):
+    # This does exactly what the :func:`py3:functools.update_wrapper`
+    # function does on Python versions after 3.2. It sets the ``__wrapped__``
+    # attribute on ``wrapper`` object and it doesn't raise an error if any of
+    # the attributes mentioned in ``assigned`` and ``updated`` are missing on
+    # ``wrapped`` object.
+    def _update_wrapper(wrapper, wrapped,
+                        assigned=functools.WRAPPER_ASSIGNMENTS,
+                        updated=functools.WRAPPER_UPDATES):
+        for attr in assigned:
+            try:
+                value = getattr(wrapped, attr)
+            except AttributeError:
+                continue
+            else:
+                setattr(wrapper, attr, value)
+        for attr in updated:
+            getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
+        wrapper.__wrapped__ = wrapped
+        return wrapper
+    _update_wrapper.__doc__ = functools.update_wrapper.__doc__
+
+    def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
+              updated=functools.WRAPPER_UPDATES):
+        return functools.partial(_update_wrapper, wrapped=wrapped,
+                                 assigned=assigned, updated=updated)
+    wraps.__doc__ = functools.wraps.__doc__
+
+else:
+    wraps = functools.wraps
+
+
+def with_metaclass(meta, *bases):
+    """Create a base class with a metaclass."""
+    # This requires a bit of explanation: the basic idea is to make a dummy
+    # metaclass for one level of class instantiation that replaces itself with
+    # the actual metaclass.
+    class metaclass(type):
+
+        def __new__(cls, name, this_bases, d):
+            if sys.version_info[:2] >= (3, 7):
+                # This version introduced PEP 560 that requires a bit
+                # of extra care (we mimic what is done by __build_class__).
+                resolved_bases = types.resolve_bases(bases)
+                if resolved_bases is not bases:
+                    d['__orig_bases__'] = bases
+            else:
+                resolved_bases = bases
+            return meta(name, resolved_bases, d)
+
+        @classmethod
+        def __prepare__(cls, name, this_bases):
+            return meta.__prepare__(name, bases)
+    return type.__new__(metaclass, 'temporary_class', (), {})
+
+
+def add_metaclass(metaclass):
+    """Class decorator for creating a class with a metaclass."""
+    def wrapper(cls):
+        orig_vars = cls.__dict__.copy()
+        slots = orig_vars.get('__slots__')
+        if slots is not None:
+            if isinstance(slots, str):
+                slots = [slots]
+            for slots_var in slots:
+                orig_vars.pop(slots_var)
+        orig_vars.pop('__dict__', None)
+        orig_vars.pop('__weakref__', None)
+        if hasattr(cls, '__qualname__'):
+            orig_vars['__qualname__'] = cls.__qualname__
+        return metaclass(cls.__name__, cls.__bases__, orig_vars)
+    return wrapper
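+
+# Illustrative (not part of six): the two helpers above are equivalent,
+# portable ways to attach a metaclass:
+#
+#     class Meta(type):
+#         pass
+#
+#     @add_metaclass(Meta)
+#     class A(object):
+#         pass
+#
+#     class B(with_metaclass(Meta, object)):
+#         pass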
+
+
+def ensure_binary(s, encoding='utf-8', errors='strict'):
+    """Coerce **s** to six.binary_type.
+
+    For Python 2:
+      - `unicode` -> encoded to `str`
+      - `str` -> `str`
+
+    For Python 3:
+      - `str` -> encoded to `bytes`
+      - `bytes` -> `bytes`
+    """
+    if isinstance(s, binary_type):
+        return s
+    if isinstance(s, text_type):
+        return s.encode(encoding, errors)
+    raise TypeError("not expecting type '%s'" % type(s))
+
+
+def ensure_str(s, encoding='utf-8', errors='strict'):
+    """Coerce *s* to `str`.
+
+    For Python 2:
+      - `unicode` -> encoded to `str`
+      - `str` -> `str`
+
+    For Python 3:
+      - `str` -> `str`
+      - `bytes` -> decoded to `str`
+    """
+    # Optimization: Fast return for the common case.
+    if type(s) is str:
+        return s
+    if PY2 and isinstance(s, text_type):
+        return s.encode(encoding, errors)
+    elif PY3 and isinstance(s, binary_type):
+        return s.decode(encoding, errors)
+    elif not isinstance(s, (text_type, binary_type)):
+        raise TypeError("not expecting type '%s'" % type(s))
+    return s
+
+
+def ensure_text(s, encoding='utf-8', errors='strict'):
+    """Coerce *s* to six.text_type.
+
+    For Python 2:
+      - `unicode` -> `unicode`
+      - `str` -> `unicode`
+
+    For Python 3:
+      - `str` -> `str`
+      - `bytes` -> decoded to `str`
+    """
+    if isinstance(s, binary_type):
+        return s.decode(encoding, errors)
+    elif isinstance(s, text_type):
+        return s
+    else:
+        raise TypeError("not expecting type '%s'" % type(s))
+
+
+def python_2_unicode_compatible(klass):
+    """
+    A class decorator that defines __unicode__ and __str__ methods under Python 2.
+    Under Python 3 it does nothing.
+
+    To support Python 2 and 3 with a single code base, define a __str__ method
+    returning text and apply this decorator to the class.
+    """
+    if PY2:
+        if '__str__' not in klass.__dict__:
+            raise ValueError("@python_2_unicode_compatible cannot be applied "
+                             "to %s because it doesn't define __str__()." %
+                             klass.__name__)
+        klass.__unicode__ = klass.__str__
+        klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
+    return klass
+
+
+# Complete the moves implementation.
+# This code is at the end of this module to speed up module loading.
+# Turn this module into a package.
+__path__ = []  # required for PEP 302 and PEP 451
+__package__ = __name__  # see PEP 366 @ReservedAssignment
+if globals().get("__spec__") is not None:
+    __spec__.submodule_search_locations = []  # PEP 451 @UndefinedVariable
+# Remove other six meta path importers, since they cause problems. This can
+# happen if six is removed from sys.modules and then reloaded. (Setuptools does
+# this for some reason.)
+if sys.meta_path:
+    for i, importer in enumerate(sys.meta_path):
+        # Here's some real nastiness: Another "instance" of the six module might
+        # be floating around. Therefore, we can't use isinstance() to check for
+        # the six meta path importer, since the other six instance will have
+        # inserted an importer with different class.
+        if (type(importer).__name__ == "_SixMetaPathImporter" and
+                importer.name == __name__):
+            del sys.meta_path[i]
+            break
+    del i, importer
+# Finally, add the importer to the meta path import hook.
+sys.meta_path.append(_importer)
diff --git a/venv/lib/python3.10/site-packages/toml-0.10.2.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/toml-0.10.2.dist-info/INSTALLER
new file mode 100644
index 0000000..a1b589e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/toml-0.10.2.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/venv/lib/python3.10/site-packages/toml-0.10.2.dist-info/LICENSE b/venv/lib/python3.10/site-packages/toml-0.10.2.dist-info/LICENSE
new file mode 100644
index 0000000..5010e30
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/toml-0.10.2.dist-info/LICENSE
@@ -0,0 +1,27 @@
+The MIT License
+
+Copyright 2013-2019 William Pearson
+Copyright 2015-2016 Julien Enselme
+Copyright 2016 Google Inc.
+Copyright 2017 Samuel Vasko
+Copyright 2017 Nate Prewitt
+Copyright 2017 Jack Evans
+Copyright 2019 Filippo Broggini
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
\ No newline at end of file
diff --git a/venv/lib/python3.10/site-packages/toml-0.10.2.dist-info/METADATA b/venv/lib/python3.10/site-packages/toml-0.10.2.dist-info/METADATA
new file mode 100644
index 0000000..6f2635c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/toml-0.10.2.dist-info/METADATA
@@ -0,0 +1,255 @@
+Metadata-Version: 2.1
+Name: toml
+Version: 0.10.2
+Summary: Python Library for Tom's Obvious, Minimal Language
+Home-page: https://github.com/uiri/toml
+Author: William Pearson
+Author-email: uiri@xqz.ca
+License: MIT
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Requires-Python: >=2.6, !=3.0.*, !=3.1.*, !=3.2.*
+
+****
+TOML
+****
+
+.. image:: https://img.shields.io/pypi/v/toml
+    :target: https://pypi.org/project/toml/
+
+.. image:: https://travis-ci.org/uiri/toml.svg?branch=master
+    :target: https://travis-ci.org/uiri/toml
+
+.. image:: https://img.shields.io/pypi/pyversions/toml.svg
+    :target: https://pypi.org/project/toml/
+
+
+A Python library for parsing and creating `TOML <https://github.com/toml-lang/toml>`_.
+
+The module passes `the TOML test suite <https://github.com/BurntSushi/toml-test>`_.
+
+See also:
+
+* `The TOML Standard <https://github.com/toml-lang/toml>`_
+* `The currently supported TOML specification <https://github.com/toml-lang/toml/blob/v0.5.0/README.md>`_
+
+Installation
+============
+
+To install the latest release on `PyPI <https://pypi.org/project/toml/>`_,
+simply run:
+
+::
+
+  pip install toml
+
+Or to install the latest development version, run:
+
+::
+
+  git clone https://github.com/uiri/toml.git
+  cd toml
+  python setup.py install
+
+Quick Tutorial
+==============
+
+*toml.loads* takes in a string containing standard TOML-formatted data and
+returns a dictionary containing the parsed data.
+
+.. code:: pycon
+
+  >>> import toml
+  >>> toml_string = """
+  ... # This is a TOML document.
+  ...
+  ... title = "TOML Example"
+  ...
+  ... [owner]
+  ... name = "Tom Preston-Werner"
+  ... dob = 1979-05-27T07:32:00-08:00 # First class dates
+  ...
+  ... [database]
+  ... server = "192.168.1.1"
+  ... ports = [ 8001, 8001, 8002 ]
+  ... connection_max = 5000
+  ... enabled = true
+  ...
+  ... [servers]
+  ...
+  ...   # Indentation (tabs and/or spaces) is allowed but not required
+  ...   [servers.alpha]
+  ...   ip = "10.0.0.1"
+  ...   dc = "eqdc10"
+  ...
+  ...   [servers.beta]
+  ...   ip = "10.0.0.2"
+  ...   dc = "eqdc10"
+  ...
+  ... [clients]
+  ... data = [ ["gamma", "delta"], [1, 2] ]
+  ...
+  ... # Line breaks are OK when inside arrays
+  ... hosts = [
+  ...   "alpha",
+  ...   "omega"
+  ... ]
+  ... """
+  >>> parsed_toml = toml.loads(toml_string)
+
+
+*toml.dumps* takes a dictionary and returns a string containing the
+corresponding TOML-formatted data.
+
+.. code:: pycon
+
+  >>> new_toml_string = toml.dumps(parsed_toml)
+  >>> print(new_toml_string)
+  title = "TOML Example"
+  [owner]
+  name = "Tom Preston-Werner"
+  dob = 1979-05-27T07:32:00Z
+  [database]
+  server = "192.168.1.1"
+  ports = [ 8001, 8001, 8002,]
+  connection_max = 5000
+  enabled = true
+  [clients]
+  data = [ [ "gamma", "delta",], [ 1, 2,],]
+  hosts = [ "alpha", "omega",]
+  [servers.alpha]
+  ip = "10.0.0.1"
+  dc = "eqdc10"
+  [servers.beta]
+  ip = "10.0.0.2"
+  dc = "eqdc10"
+
+*toml.dump* takes a dictionary and a file descriptor and returns a string containing the
+corresponding TOML-formatted data.
+
+.. code:: pycon
+
+  >>> with open('new_toml_file.toml', 'w') as f:
+  ...     new_toml_string = toml.dump(parsed_toml, f)
+  >>> print(new_toml_string)
+  title = "TOML Example"
+  [owner]
+  name = "Tom Preston-Werner"
+  dob = 1979-05-27T07:32:00Z
+  [database]
+  server = "192.168.1.1"
+  ports = [ 8001, 8001, 8002,]
+  connection_max = 5000
+  enabled = true
+  [clients]
+  data = [ [ "gamma", "delta",], [ 1, 2,],]
+  hosts = [ "alpha", "omega",]
+  [servers.alpha]
+  ip = "10.0.0.1"
+  dc = "eqdc10"
+  [servers.beta]
+  ip = "10.0.0.2"
+  dc = "eqdc10"
+
+For more functions, view the API Reference below.
+
+Note
+----
+
+For Numpy users, by default the data types ``np.floatX`` will not be translated to floats by toml, but will instead be encoded as strings. To get around this, specify the ``TomlNumpyEncoder`` when saving your data.
+
+.. code:: pycon
+
+  >>> import toml
+  >>> import numpy as np
+  >>> a = np.arange(0, 10, dtype=np.double)
+  >>> output = {'a': a}
+  >>> toml.dumps(output)
+  'a = [ "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "6.0", "7.0", "8.0", "9.0",]\n'
+  >>> toml.dumps(output, encoder=toml.TomlNumpyEncoder())
+  'a = [ 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0,]\n'
+
+API Reference
+=============
+
+``toml.load(f, _dict=dict)``
+  Parse a file or a list of files as TOML and return a dictionary.
+
+  :Args:
+    * ``f``: A path to a file, a list of filepaths (to be read into a single
+      object), or a file descriptor
+    * ``_dict``: The class of the dictionary object to be returned
+
+  :Returns:
+    A dictionary (or object ``_dict``) containing parsed TOML data
+
+  :Raises:
+    * ``TypeError``: When ``f`` is an invalid type or is a list containing
+      invalid types
+    * ``TomlDecodeError``: When an error occurs while decoding the file(s)
+
+``toml.loads(s, _dict=dict)``
+  Parse a TOML-formatted string to a dictionary.
+
+  :Args:
+    * ``s``: The TOML-formatted string to be parsed
+    * ``_dict``: Specifies the class of the returned toml dictionary
+
+  :Returns:
+    A dictionary (or object ``_dict``) containing parsed TOML data
+
+  :Raises:
+    * ``TypeError``: When a non-string object is passed
+    * ``TomlDecodeError``: When an error occurs while decoding the
+      TOML-formatted string
+
+``toml.dump(o, f, encoder=None)``
+  Write a dictionary to a file as TOML-formatted data
+
+  :Args:
+    * ``o``: An object to be converted into TOML
+    * ``f``: A file descriptor where the TOML-formatted output should be stored
+    * ``encoder``: An instance of ``TomlEncoder`` (or subclass) for encoding the object. If ``None``, will default to ``TomlEncoder``
+
+  :Returns:
+    A string containing the TOML-formatted data corresponding to object ``o``
+
+  :Raises:
+    * ``TypeError``: When anything other than a file descriptor is passed
+
+``toml.dumps(o, encoder=None)``
+  Create a TOML-formatted string from an input object
+
+  :Args:
+    * ``o``: An object to be converted into TOML
+    * ``encoder``: An instance of ``TomlEncoder`` (or subclass) for encoding the object. If ``None``, will default to ``TomlEncoder``
+
+  :Returns:
+    A string containing the TOML-formatted data corresponding to object ``o``
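+
+A minimal round trip combining ``toml.loads`` and ``toml.dumps`` (an
+illustrative sketch; the value is arbitrary):
+
+.. code:: pycon
+
+  >>> toml.dumps(toml.loads('x = 1'))
+  'x = 1\n'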
+
+
+
+Licensing
+=========
+
+This project is released under the terms of the MIT Open Source License. View
+*LICENSE.txt* for more information.
+
+
diff --git a/venv/lib/python3.10/site-packages/toml-0.10.2.dist-info/RECORD b/venv/lib/python3.10/site-packages/toml-0.10.2.dist-info/RECORD
new file mode 100644
index 0000000..b81f899
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/toml-0.10.2.dist-info/RECORD
@@ -0,0 +1,16 @@
+toml-0.10.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+toml-0.10.2.dist-info/LICENSE,sha256=LZKUgj32yJNXyL5JJ_znk2HWVh5e51MtWSbmOTmqpTY,1252
+toml-0.10.2.dist-info/METADATA,sha256=n_YkspvEihd_QXLIZZ50WVSFz3rZ_k7jQP-OU1WUpWY,7142
+toml-0.10.2.dist-info/RECORD,,
+toml-0.10.2.dist-info/WHEEL,sha256=ADKeyaGyKF5DwBNE0sRE5pvW-bSkFMJfBuhzZ3rceP4,110
+toml-0.10.2.dist-info/top_level.txt,sha256=2BO8ZRNnvJWgXyiQv66LBb_v87qBzcoUtEBefA75Ouk,5
+toml/__init__.py,sha256=Au3kqCwKD0cjbf4yJGOpUFwpsY0WHsC1ZRGvWgIKmpc,723
+toml/__pycache__/__init__.cpython-310.pyc,,
+toml/__pycache__/decoder.cpython-310.pyc,,
+toml/__pycache__/encoder.cpython-310.pyc,,
+toml/__pycache__/ordered.cpython-310.pyc,,
+toml/__pycache__/tz.cpython-310.pyc,,
+toml/decoder.py,sha256=hSGTLf-2WBDZ_ddoCHWFy6N647XyMSh1o3rN2o4dEFg,38942
+toml/encoder.py,sha256=XjBc8ayvvlsLyd_qDA4tMWDNmMFRS4DpwtuDSWBq7zo,9940
+toml/ordered.py,sha256=mz03lZmV0bmc9lsYRIUOuj7Dsu5Ptwq-UtGVq5FdVZ4,354
+toml/tz.py,sha256=-5vg8wkg_atnVi2TnEveexIVE7T_FxBVr_-2WVfO1oA,701
diff --git a/venv/lib/python3.10/site-packages/toml-0.10.2.dist-info/WHEEL b/venv/lib/python3.10/site-packages/toml-0.10.2.dist-info/WHEEL
new file mode 100644
index 0000000..6d38aa0
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/toml-0.10.2.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.35.1)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/venv/lib/python3.10/site-packages/toml-0.10.2.dist-info/top_level.txt b/venv/lib/python3.10/site-packages/toml-0.10.2.dist-info/top_level.txt
new file mode 100644
index 0000000..bd79a65
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/toml-0.10.2.dist-info/top_level.txt
@@ -0,0 +1 @@
+toml
diff --git a/venv/lib/python3.10/site-packages/toml/__init__.py b/venv/lib/python3.10/site-packages/toml/__init__.py
new file mode 100644
index 0000000..7719ac2
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/toml/__init__.py
@@ -0,0 +1,25 @@
+"""Python module which parses and emits TOML.
+
+Released under the MIT license.
+"""
+
+from toml import encoder
+from toml import decoder
+
+__version__ = "0.10.2"
+_spec_ = "0.5.0"
+
+load = decoder.load
+loads = decoder.loads
+TomlDecoder = decoder.TomlDecoder
+TomlDecodeError = decoder.TomlDecodeError
+TomlPreserveCommentDecoder = decoder.TomlPreserveCommentDecoder
+
+dump = encoder.dump
+dumps = encoder.dumps
+TomlEncoder = encoder.TomlEncoder
+TomlArraySeparatorEncoder = encoder.TomlArraySeparatorEncoder
+TomlPreserveInlineDictEncoder = encoder.TomlPreserveInlineDictEncoder
+TomlNumpyEncoder = encoder.TomlNumpyEncoder
+TomlPreserveCommentEncoder = encoder.TomlPreserveCommentEncoder
+TomlPathlibEncoder = encoder.TomlPathlibEncoder
diff --git a/venv/lib/python3.10/site-packages/toml/decoder.py b/venv/lib/python3.10/site-packages/toml/decoder.py
new file mode 100644
index 0000000..bf400e9
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/toml/decoder.py
@@ -0,0 +1,1057 @@
+import datetime
+import io
+from os import linesep
+import re
+import sys
+
+from toml.tz import TomlTz
+
+if sys.version_info < (3,):
+    _range = xrange  # noqa: F821
+else:
+    unicode = str
+    _range = range
+    basestring = str
+    unichr = chr
+
+
+def _detect_pathlib_path(p):
+    if (3, 4) <= sys.version_info:
+        import pathlib
+        if isinstance(p, pathlib.PurePath):
+            return True
+    return False
+
+
+def _ispath(p):
+    if isinstance(p, (bytes, basestring)):
+        return True
+    return _detect_pathlib_path(p)
+
+
+def _getpath(p):
+    if (3, 6) <= sys.version_info:
+        import os
+        return os.fspath(p)
+    if _detect_pathlib_path(p):
+        return str(p)
+    return p
+
+
+try:
+    FNFError = FileNotFoundError
+except NameError:
+    FNFError = IOError
+
+
+TIME_RE = re.compile(r"([0-9]{2}):([0-9]{2}):([0-9]{2})(\.([0-9]{3,6}))?")
+
+
+class TomlDecodeError(ValueError):
+    """Base toml Exception / Error."""
+
+    def __init__(self, msg, doc, pos):
+        lineno = doc.count('\n', 0, pos) + 1
+        colno = pos - doc.rfind('\n', 0, pos)
+        emsg = '{} (line {} column {} char {})'.format(msg, lineno, colno, pos)
+        ValueError.__init__(self, emsg)
+        self.msg = msg
+        self.doc = doc
+        self.pos = pos
+        self.lineno = lineno
+        self.colno = colno
+
+
+# Matches a TOML number, which allows underscores for readability
+_number_with_underscores = re.compile('([0-9])(_([0-9]))*')
+
+
+class CommentValue(object):
+    def __init__(self, val, comment, beginline, _dict):
+        self.val = val
+        separator = "\n" if beginline else " "
+        self.comment = separator + comment
+        self._dict = _dict
+
+    def __getitem__(self, key):
+        return self.val[key]
+
+    def __setitem__(self, key, value):
+        self.val[key] = value
+
+    def dump(self, dump_value_func):
+        retstr = dump_value_func(self.val)
+        if isinstance(self.val, self._dict):
+            return self.comment + "\n" + unicode(retstr)
+        else:
+            return unicode(retstr) + self.comment
+
+
+def _strictly_valid_num(n):
+    n = n.strip()
+    if not n:
+        return False
+    if n[0] == '_':
+        return False
+    if n[-1] == '_':
+        return False
+    if "_." in n or "._" in n:
+        return False
+    if len(n) == 1:
+        return True
+    if n[0] == '0' and n[1] not in ['.', 'o', 'b', 'x']:
+        return False
+    if n[0] == '+' or n[0] == '-':
+        n = n[1:]
+        if len(n) > 1 and n[0] == '0' and n[1] != '.':
+            return False
+    if '__' in n:
+        return False
+    return True
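+
+# Illustrative: _strictly_valid_num("1_000") -> True, while "_1", "1_",
+# "1__0" and "01" are all rejected.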
+
+
+def load(f, _dict=dict, decoder=None):
+    """Parses named file or files as toml and returns a dictionary
+
+    Args:
+        f: Path to the file to open, array of files to read into single dict
+           or a file descriptor
+        _dict: (optional) Specifies the class of the returned toml dictionary
+        decoder: The decoder to use
+
+    Returns:
+        Parsed toml file represented as a dictionary
+
+    Raises:
+        TypeError -- When f is invalid type
+        TomlDecodeError: Error while decoding toml
+        IOError / FileNotFoundError -- When an array with no valid (existing)
+        (Python 2 / Python 3)          file paths is passed
+    """
+
+    if _ispath(f):
+        with io.open(_getpath(f), encoding='utf-8') as ffile:
+            return loads(ffile.read(), _dict, decoder)
+    elif isinstance(f, list):
+        from os import path as op
+        from warnings import warn
+        if not [path for path in f if op.exists(path)]:
+            error_msg = "Load expects a list to contain filenames only."
+            error_msg += linesep
+            error_msg += ("The list needs to contain the path of at least one "
+                          "existing file.")
+            raise FNFError(error_msg)
+        if decoder is None:
+            decoder = TomlDecoder(_dict)
+        d = decoder.get_empty_table()
+        for l in f:  # noqa: E741
+            if op.exists(l):
+                d.update(load(l, _dict, decoder))
+            else:
+                warn("Non-existent filename in list with at least one valid "
+                     "filename")
+        return d
+    else:
+        try:
+            return loads(f.read(), _dict, decoder)
+        except AttributeError:
+            raise TypeError("You can only load a file descriptor, filename or "
+                            "list")
+
+
+_groupname_re = re.compile(r'^[A-Za-z0-9_-]+$')
+
+
+def loads(s, _dict=dict, decoder=None):
+    """Parses string as toml
+
+    Args:
+        s: String to be parsed
+        _dict: (optional) Specifies the class of the returned toml dictionary
+
+    Returns:
+        Parsed toml file represented as a dictionary
+
+    Raises:
+        TypeError: When a non-string is passed
+        TomlDecodeError: Error while decoding toml
+    """
+
+    implicitgroups = []
+    if decoder is None:
+        decoder = TomlDecoder(_dict)
+    retval = decoder.get_empty_table()
+    currentlevel = retval
+    if not isinstance(s, basestring):
+        raise TypeError("Expecting something like a string")
+
+    if not isinstance(s, unicode):
+        s = s.decode('utf8')
+
+    original = s
+    sl = list(s)
+    openarr = 0
+    openstring = False
+    openstrchar = ""
+    multilinestr = False
+    arrayoftables = False
+    beginline = True
+    keygroup = False
+    dottedkey = False
+    keyname = 0
+    key = ''
+    prev_key = ''
+    line_no = 1
+
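+    # First pass: scan the raw character stream, blanking out comments,
+    # tracking open strings, arrays and table headers, and normalizing
+    # CRLF line endings ahead of the line-oriented second pass below.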
+    for i, item in enumerate(sl):
+        if item == '\r' and sl[i + 1] == '\n':
+            sl[i] = ' '
+            continue
+        if keyname:
+            key += item
+            if item == '\n':
+                raise TomlDecodeError("Key name found without value."
+                                      " Reached end of line.", original, i)
+            if openstring:
+                if item == openstrchar:
+                    oddbackslash = False
+                    k = 1
+                    while i >= k and sl[i - k] == '\\':
+                        oddbackslash = not oddbackslash
+                        k += 1
+                    if not oddbackslash:
+                        keyname = 2
+                        openstring = False
+                        openstrchar = ""
+                continue
+            elif keyname == 1:
+                if item.isspace():
+                    keyname = 2
+                    continue
+                elif item == '.':
+                    dottedkey = True
+                    continue
+                elif item.isalnum() or item == '_' or item == '-':
+                    continue
+                elif (dottedkey and sl[i - 1] == '.' and
+                      (item == '"' or item == "'")):
+                    openstring = True
+                    openstrchar = item
+                    continue
+            elif keyname == 2:
+                if item.isspace():
+                    if dottedkey:
+                        nextitem = sl[i + 1]
+                        if not nextitem.isspace() and nextitem != '.':
+                            keyname = 1
+                    continue
+                if item == '.':
+                    dottedkey = True
+                    nextitem = sl[i + 1]
+                    if not nextitem.isspace() and nextitem != '.':
+                        keyname = 1
+                    continue
+            if item == '=':
+                keyname = 0
+                prev_key = key[:-1].rstrip()
+                key = ''
+                dottedkey = False
+            else:
+                raise TomlDecodeError("Found invalid character in key name: '" +
+                                      item + "'. Try quoting the key name.",
+                                      original, i)
+        if item == "'" and openstrchar != '"':
+            k = 1
+            try:
+                while sl[i - k] == "'":
+                    k += 1
+                    if k == 3:
+                        break
+            except IndexError:
+                pass
+            if k == 3:
+                multilinestr = not multilinestr
+                openstring = multilinestr
+            else:
+                openstring = not openstring
+            if openstring:
+                openstrchar = "'"
+            else:
+                openstrchar = ""
+        if item == '"' and openstrchar != "'":
+            oddbackslash = False
+            k = 1
+            tripquote = False
+            try:
+                while sl[i - k] == '"':
+                    k += 1
+                    if k == 3:
+                        tripquote = True
+                        break
+                if k == 1 or (k == 3 and tripquote):
+                    while sl[i - k] == '\\':
+                        oddbackslash = not oddbackslash
+                        k += 1
+            except IndexError:
+                pass
+            if not oddbackslash:
+                if tripquote:
+                    multilinestr = not multilinestr
+                    openstring = multilinestr
+                else:
+                    openstring = not openstring
+            if openstring:
+                openstrchar = '"'
+            else:
+                openstrchar = ""
+        if item == '#' and (not openstring and not keygroup and
+                            not arrayoftables):
+            j = i
+            comment = ""
+            try:
+                while sl[j] != '\n':
+                    comment += s[j]
+                    sl[j] = ' '
+                    j += 1
+            except IndexError:
+                break
+            if not openarr:
+                decoder.preserve_comment(line_no, prev_key, comment, beginline)
+        if item == '[' and (not openstring and not keygroup and
+                            not arrayoftables):
+            if beginline:
+                if len(sl) > i + 1 and sl[i + 1] == '[':
+                    arrayoftables = True
+                else:
+                    keygroup = True
+            else:
+                openarr += 1
+        if item == ']' and not openstring:
+            if keygroup:
+                keygroup = False
+            elif arrayoftables:
+                if sl[i - 1] == ']':
+                    arrayoftables = False
+            else:
+                openarr -= 1
+        if item == '\n':
+            if openstring or multilinestr:
+                if not multilinestr:
+                    raise TomlDecodeError("Unbalanced quotes", original, i)
+                if ((sl[i - 1] == "'" or sl[i - 1] == '"') and (
+                        sl[i - 2] == sl[i - 1])):
+                    sl[i] = sl[i - 1]
+                    if sl[i - 3] == sl[i - 1]:
+                        sl[i - 3] = ' '
+            elif openarr:
+                sl[i] = ' '
+            else:
+                beginline = True
+            line_no += 1
+        elif beginline and sl[i] != ' ' and sl[i] != '\t':
+            beginline = False
+            if not keygroup and not arrayoftables:
+                if sl[i] == '=':
+                    raise TomlDecodeError("Found empty keyname. ", original, i)
+                keyname = 1
+                key += item
+    if keyname:
+        raise TomlDecodeError("Key name found without value."
+                              " Reached end of file.", original, len(s))
+    if openstring:  # reached EOF and have an unterminated string
+        raise TomlDecodeError("Unterminated string found."
+                              " Reached end of file.", original, len(s))
+    s = ''.join(sl)
+    s = s.split('\n')
+    multikey = None
+    multilinestr = ""
+    multibackslash = False
+    pos = 0
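+    # Second pass: walk the pre-scanned text line by line, accumulating
+    # multiline strings and dispatching table headers, inline tables and
+    # key/value pairs.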
+    for idx, line in enumerate(s):
+        if idx > 0:
+            pos += len(s[idx - 1]) + 1
+
+        decoder.embed_comments(idx, currentlevel)
+
+        if not multilinestr or multibackslash or '\n' not in multilinestr:
+            line = line.strip()
+        if line == "" and (not multikey or multibackslash):
+            continue
+        if multikey:
+            multilinestr += line
+            multibackslash = False
+            closed = False
+            if multilinestr[0] == '[':
+                closed = line[-1] == ']'
+            elif len(line) > 2:
+                closed = (line[-1] == multilinestr[0] and
+                          line[-2] == multilinestr[0] and
+                          line[-3] == multilinestr[0])
+            if closed:
+                try:
+                    value, vtype = decoder.load_value(multilinestr)
+                except ValueError as err:
+                    raise TomlDecodeError(str(err), original, pos)
+                currentlevel[multikey] = value
+                multikey = None
+                multilinestr = ""
+            else:
+                k = len(multilinestr) - 1
+                while k > -1 and multilinestr[k] == '\\':
+                    multibackslash = not multibackslash
+                    k -= 1
+                if multibackslash:
+                    multilinestr = multilinestr[:-1]
+                else:
+                    multilinestr += "\n"
+            continue
+        if line[0] == '[':
+            arrayoftables = False
+            if len(line) == 1:
+                raise TomlDecodeError("Opening key group bracket on line by "
+                                      "itself.", original, pos)
+            if line[1] == '[':
+                arrayoftables = True
+                line = line[2:]
+                splitstr = ']]'
+            else:
+                line = line[1:]
+                splitstr = ']'
+            i = 1
+            quotesplits = decoder._get_split_on_quotes(line)
+            quoted = False
+            for quotesplit in quotesplits:
+                if not quoted and splitstr in quotesplit:
+                    break
+                i += quotesplit.count(splitstr)
+                quoted = not quoted
+            line = line.split(splitstr, i)
+            if len(line) < i + 1 or line[-1].strip() != "":
+                raise TomlDecodeError("Key group not on a line by itself.",
+                                      original, pos)
+            groups = splitstr.join(line[:-1]).split('.')
+            i = 0
+            while i < len(groups):
+                groups[i] = groups[i].strip()
+                if len(groups[i]) > 0 and (groups[i][0] == '"' or
+                                           groups[i][0] == "'"):
+                    groupstr = groups[i]
+                    j = i + 1
+                    while ((not groupstr[0] == groupstr[-1]) or
+                           len(groupstr) == 1):
+                        j += 1
+                        if j > len(groups) + 2:
+                            raise TomlDecodeError("Invalid group name '" +
+                                                  groupstr + "' Something " +
+                                                  "went wrong.", original, pos)
+                        groupstr = '.'.join(groups[i:j]).strip()
+                    groups[i] = groupstr[1:-1]
+                    groups[i + 1:j] = []
+                else:
+                    if not _groupname_re.match(groups[i]):
+                        raise TomlDecodeError("Invalid group name '" +
+                                              groups[i] + "'. Try quoting it.",
+                                              original, pos)
+                i += 1
+            currentlevel = retval
+            for i in _range(len(groups)):
+                group = groups[i]
+                if group == "":
+                    raise TomlDecodeError("Can't have a keygroup with an empty "
+                                          "name", original, pos)
+                try:
+                    currentlevel[group]
+                    if i == len(groups) - 1:
+                        if group in implicitgroups:
+                            implicitgroups.remove(group)
+                            if arrayoftables:
+                                raise TomlDecodeError("An implicitly defined "
+                                                      "table can't be an array",
+                                                      original, pos)
+                        elif arrayoftables:
+                            currentlevel[group].append(decoder.get_empty_table()
+                                                       )
+                        else:
+                            raise TomlDecodeError("What? " + group +
+                                                  " already exists?" +
+                                                  str(currentlevel),
+                                                  original, pos)
+                except TypeError:
+                    currentlevel = currentlevel[-1]
+                    if group not in currentlevel:
+                        currentlevel[group] = decoder.get_empty_table()
+                        if i == len(groups) - 1 and arrayoftables:
+                            currentlevel[group] = [decoder.get_empty_table()]
+                except KeyError:
+                    if i != len(groups) - 1:
+                        implicitgroups.append(group)
+                    currentlevel[group] = decoder.get_empty_table()
+                    if i == len(groups) - 1 and arrayoftables:
+                        currentlevel[group] = [decoder.get_empty_table()]
+                currentlevel = currentlevel[group]
+                if arrayoftables:
+                    try:
+                        currentlevel = currentlevel[-1]
+                    except KeyError:
+                        pass
+        elif line[0] == "{":
+            if line[-1] != "}":
+                raise TomlDecodeError("Line breaks are not allowed in inline"
+                                      "objects", original, pos)
+            try:
+                decoder.load_inline_object(line, currentlevel, multikey,
+                                           multibackslash)
+            except ValueError as err:
+                raise TomlDecodeError(str(err), original, pos)
+        elif "=" in line:
+            try:
+                ret = decoder.load_line(line, currentlevel, multikey,
+                                        multibackslash)
+            except ValueError as err:
+                raise TomlDecodeError(str(err), original, pos)
+            if ret is not None:
+                multikey, multilinestr, multibackslash = ret
+    return retval
+
+
+def _load_date(val):
+    microsecond = 0
+    tz = None
+    try:
+        if len(val) > 19:
+            if val[19] == '.':
+                if val[-1].upper() == 'Z':
+                    subsecondval = val[20:-1]
+                    tzval = "Z"
+                else:
+                    subsecondvalandtz = val[20:]
+                    if '+' in subsecondvalandtz:
+                        splitpoint = subsecondvalandtz.index('+')
+                        subsecondval = subsecondvalandtz[:splitpoint]
+                        tzval = subsecondvalandtz[splitpoint:]
+                    elif '-' in subsecondvalandtz:
+                        splitpoint = subsecondvalandtz.index('-')
+                        subsecondval = subsecondvalandtz[:splitpoint]
+                        tzval = subsecondvalandtz[splitpoint:]
+                    else:
+                        tzval = None
+                        subsecondval = subsecondvalandtz
+                if tzval is not None:
+                    tz = TomlTz(tzval)
+                microsecond = int(int(subsecondval) *
+                                  (10 ** (6 - len(subsecondval))))
+            else:
+                tz = TomlTz(val[19:])
+    except ValueError:
+        tz = None
+    if "-" not in val[1:]:
+        return None
+    try:
+        if len(val) == 10:
+            d = datetime.date(
+                int(val[:4]), int(val[5:7]),
+                int(val[8:10]))
+        else:
+            d = datetime.datetime(
+                int(val[:4]), int(val[5:7]),
+                int(val[8:10]), int(val[11:13]),
+                int(val[14:16]), int(val[17:19]), microsecond, tz)
+    except ValueError:
+        return None
+    return d
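+
+# Illustrative: _load_date("1979-05-27") -> datetime.date(1979, 5, 27); a full
+# RFC 3339 timestamp yields a datetime, and non-date values return None.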
+
+
+def _load_unicode_escapes(v, hexbytes, prefix):
+    skip = False
+    i = len(v) - 1
+    while i > -1 and v[i] == '\\':
+        skip = not skip
+        i -= 1
+    for hx in hexbytes:
+        if skip:
+            skip = False
+            i = len(hx) - 1
+            while i > -1 and hx[i] == '\\':
+                skip = not skip
+                i -= 1
+            v += prefix
+            v += hx
+            continue
+        hxb = ""
+        i = 0
+        hxblen = 4
+        if prefix == "\\U":
+            hxblen = 8
+        hxb = ''.join(hx[i:i + hxblen]).lower()
+        if hxb.strip('0123456789abcdef'):
+            raise ValueError("Invalid escape sequence: " + hxb)
+        if hxb[0] == "d" and hxb[1].strip('01234567'):
+            raise ValueError("Invalid escape sequence: " + hxb +
+                             ". Only scalar unicode points are allowed.")
+        v += unichr(int(hxb, 16))
+        v += unicode(hx[len(hxb):])
+    return v
+
+
+# Unescape TOML string values.
+
+# content after the \
+_escapes = ['0', 'b', 'f', 'n', 'r', 't', '"']
+# What it should be replaced by
+_escapedchars = ['\0', '\b', '\f', '\n', '\r', '\t', '\"']
+# Used for substitution
+_escape_to_escapedchars = dict(zip(_escapes, _escapedchars))
+
+
+def _unescape(v):
+    """Unescape characters in a TOML string."""
+    i = 0
+    backslash = False
+    while i < len(v):
+        if backslash:
+            backslash = False
+            if v[i] in _escapes:
+                v = v[:i - 1] + _escape_to_escapedchars[v[i]] + v[i + 1:]
+            elif v[i] == '\\':
+                v = v[:i - 1] + v[i:]
+            elif v[i] == 'u' or v[i] == 'U':
+                i += 1
+            else:
+                raise ValueError("Reserved escape sequence used")
+            continue
+        elif v[i] == '\\':
+            backslash = True
+        i += 1
+    return v
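+
+# Illustrative: _unescape("a\\tb") returns "a\tb" (with a real tab), while a
+# reserved sequence such as "\\q" raises ValueError.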
+
+
+class InlineTableDict(object):
+    """Sentinel subclass of dict for inline tables."""
+
+
+class TomlDecoder(object):
+
+    def __init__(self, _dict=dict):
+        self._dict = _dict
+
+    def get_empty_table(self):
+        return self._dict()
+
+    def get_empty_inline_table(self):
+        class DynamicInlineTableDict(self._dict, InlineTableDict):
+            """Concrete sentinel subclass for inline tables.
+            It is a subclass of _dict which is passed in dynamically at load
+            time
+
+            It is also a subclass of InlineTableDict
+            """
+
+        return DynamicInlineTableDict()
+
+    def load_inline_object(self, line, currentlevel, multikey=False,
+                           multibackslash=False):
+        candidate_groups = line[1:-1].split(",")
+        groups = []
+        if len(candidate_groups) == 1 and not candidate_groups[0].strip():
+            candidate_groups.pop()
+        while len(candidate_groups) > 0:
+            candidate_group = candidate_groups.pop(0)
+            try:
+                _, value = candidate_group.split('=', 1)
+            except ValueError:
+                raise ValueError("Invalid inline table encountered")
+            value = value.strip()
+            if ((value[0] == value[-1] and value[0] in ('"', "'")) or (
+                    value[0] in '-0123456789' or
+                    value in ('true', 'false') or
+                    (value[0] == "[" and value[-1] == "]") or
+                    (value[0] == '{' and value[-1] == '}'))):
+                groups.append(candidate_group)
+            elif len(candidate_groups) > 0:
+                candidate_groups[0] = (candidate_group + "," +
+                                       candidate_groups[0])
+            else:
+                raise ValueError("Invalid inline table value encountered")
+        for group in groups:
+            status = self.load_line(group, currentlevel, multikey,
+                                    multibackslash)
+            if status is not None:
+                break
+
+    def _get_split_on_quotes(self, line):
+        doublequotesplits = line.split('"')
+        quoted = False
+        quotesplits = []
+        if len(doublequotesplits) > 1 and "'" in doublequotesplits[0]:
+            singlequotesplits = doublequotesplits[0].split("'")
+            doublequotesplits = doublequotesplits[1:]
+            while len(singlequotesplits) % 2 == 0 and len(doublequotesplits):
+                singlequotesplits[-1] += '"' + doublequotesplits[0]
+                doublequotesplits = doublequotesplits[1:]
+                if "'" in singlequotesplits[-1]:
+                    singlequotesplits = (singlequotesplits[:-1] +
+                                         singlequotesplits[-1].split("'"))
+            quotesplits += singlequotesplits
+        for doublequotesplit in doublequotesplits:
+            if quoted:
+                quotesplits.append(doublequotesplit)
+            else:
+                quotesplits += doublequotesplit.split("'")
+                quoted = not quoted
+        return quotesplits
+
+    def load_line(self, line, currentlevel, multikey, multibackslash):
+        i = 1
+        quotesplits = self._get_split_on_quotes(line)
+        quoted = False
+        for quotesplit in quotesplits:
+            if not quoted and '=' in quotesplit:
+                break
+            i += quotesplit.count('=')
+            quoted = not quoted
+        pair = line.split('=', i)
+        strictly_valid = _strictly_valid_num(pair[-1])
+        if _number_with_underscores.match(pair[-1]):
+            pair[-1] = pair[-1].replace('_', '')
+        while len(pair[-1]) and (pair[-1][0] != ' ' and pair[-1][0] != '\t' and
+                                 pair[-1][0] != "'" and pair[-1][0] != '"' and
+                                 pair[-1][0] != '[' and pair[-1][0] != '{' and
+                                 pair[-1].strip() != 'true' and
+                                 pair[-1].strip() != 'false'):
+            try:
+                float(pair[-1])
+                break
+            except ValueError:
+                pass
+            if _load_date(pair[-1]) is not None:
+                break
+            if TIME_RE.match(pair[-1]):
+                break
+            i += 1
+            prev_val = pair[-1]
+            pair = line.split('=', i)
+            if prev_val == pair[-1]:
+                raise ValueError("Invalid date or number")
+            if strictly_valid:
+                strictly_valid = _strictly_valid_num(pair[-1])
+        pair = ['='.join(pair[:-1]).strip(), pair[-1].strip()]
+        if '.' in pair[0]:
+            if '"' in pair[0] or "'" in pair[0]:
+                quotesplits = self._get_split_on_quotes(pair[0])
+                quoted = False
+                levels = []
+                for quotesplit in quotesplits:
+                    if quoted:
+                        levels.append(quotesplit)
+                    else:
+                        levels += [level.strip() for level in
+                                   quotesplit.split('.')]
+                    quoted = not quoted
+            else:
+                levels = pair[0].split('.')
+            while levels[-1] == "":
+                levels = levels[:-1]
+            for level in levels[:-1]:
+                if level == "":
+                    continue
+                if level not in currentlevel:
+                    currentlevel[level] = self.get_empty_table()
+                currentlevel = currentlevel[level]
+            pair[0] = levels[-1].strip()
+        elif (pair[0][0] == '"' or pair[0][0] == "'") and \
+                (pair[0][-1] == pair[0][0]):
+            pair[0] = _unescape(pair[0][1:-1])
+        k, koffset = self._load_line_multiline_str(pair[1])
+        if k > -1:
+            while k > -1 and pair[1][k + koffset] == '\\':
+                multibackslash = not multibackslash
+                k -= 1
+            if multibackslash:
+                multilinestr = pair[1][:-1]
+            else:
+                multilinestr = pair[1] + "\n"
+            multikey = pair[0]
+        else:
+            value, vtype = self.load_value(pair[1], strictly_valid)
+        try:
+            currentlevel[pair[0]]
+            raise ValueError("Duplicate keys!")
+        except TypeError:
+            raise ValueError("Duplicate keys!")
+        except KeyError:
+            if multikey:
+                return multikey, multilinestr, multibackslash
+            else:
+                currentlevel[pair[0]] = value
+
+    def _load_line_multiline_str(self, p):
+        poffset = 0
+        if len(p) < 3:
+            return -1, poffset
+        if p[0] == '[' and (p.strip()[-1] != ']' and
+                            self._load_array_isstrarray(p)):
+            newp = p[1:].strip().split(',')
+            while len(newp) > 1 and newp[-1][0] != '"' and newp[-1][0] != "'":
+                newp = newp[:-2] + [newp[-2] + ',' + newp[-1]]
+            newp = newp[-1]
+            poffset = len(p) - len(newp)
+            p = newp
+        if p[0] != '"' and p[0] != "'":
+            return -1, poffset
+        if p[1] != p[0] or p[2] != p[0]:
+            return -1, poffset
+        if len(p) > 5 and p[-1] == p[0] and p[-2] == p[0] and p[-3] == p[0]:
+            return -1, poffset
+        return len(p) - 1, poffset
+
+    def load_value(self, v, strictly_valid=True):
+        if not v:
+            raise ValueError("Empty value is invalid")
+        if v == 'true':
+            return (True, "bool")
+        elif v.lower() == 'true':
+            raise ValueError("Only all lowercase booleans allowed")
+        elif v == 'false':
+            return (False, "bool")
+        elif v.lower() == 'false':
+            raise ValueError("Only all lowercase booleans allowed")
+        elif v[0] == '"' or v[0] == "'":
+            quotechar = v[0]
+            testv = v[1:].split(quotechar)
+            triplequote = False
+            triplequotecount = 0
+            if len(testv) > 1 and testv[0] == '' and testv[1] == '':
+                testv = testv[2:]
+                triplequote = True
+            closed = False
+            for tv in testv:
+                if tv == '':
+                    if triplequote:
+                        triplequotecount += 1
+                    else:
+                        closed = True
+                else:
+                    oddbackslash = False
+                    try:
+                        i = -1
+                        j = tv[i]
+                        while j == '\\':
+                            oddbackslash = not oddbackslash
+                            i -= 1
+                            j = tv[i]
+                    except IndexError:
+                        pass
+                    if not oddbackslash:
+                        if closed:
+                            raise ValueError("Found tokens after a closed " +
+                                             "string. Invalid TOML.")
+                        else:
+                            if not triplequote or triplequotecount > 1:
+                                closed = True
+                            else:
+                                triplequotecount = 0
+            if quotechar == '"':
+                escapeseqs = v.split('\\')[1:]
+                backslash = False
+                for i in escapeseqs:
+                    if i == '':
+                        backslash = not backslash
+                    else:
+                        if i[0] not in _escapes and (i[0] != 'u' and
+                                                     i[0] != 'U' and
+                                                     not backslash):
+                            raise ValueError("Reserved escape sequence used")
+                        if backslash:
+                            backslash = False
+                for prefix in ["\\u", "\\U"]:
+                    if prefix in v:
+                        hexbytes = v.split(prefix)
+                        v = _load_unicode_escapes(hexbytes[0], hexbytes[1:],
+                                                  prefix)
+                v = _unescape(v)
+            if len(v) > 1 and v[1] == quotechar and (len(v) < 3 or
+                                                     v[1] == v[2]):
+                v = v[2:-2]
+            return (v[1:-1], "str")
+        elif v[0] == '[':
+            return (self.load_array(v), "array")
+        elif v[0] == '{':
+            inline_object = self.get_empty_inline_table()
+            self.load_inline_object(v, inline_object)
+            return (inline_object, "inline_object")
+        elif TIME_RE.match(v):
+            h, m, s, _, ms = TIME_RE.match(v).groups()
+            time = datetime.time(int(h), int(m), int(s), int(ms) if ms else 0)
+            return (time, "time")
+        else:
+            parsed_date = _load_date(v)
+            if parsed_date is not None:
+                return (parsed_date, "date")
+            if not strictly_valid:
+                raise ValueError("Weirdness with leading zeroes or "
+                                 "underscores in your number.")
+            itype = "int"
+            neg = False
+            if v[0] == '-':
+                neg = True
+                v = v[1:]
+            elif v[0] == '+':
+                v = v[1:]
+            v = v.replace('_', '')
+            lowerv = v.lower()
+            if '.' in v or ('x' not in v and ('e' in v or 'E' in v)):
+                if '.' in v and v.split('.', 1)[1] == '':
+                    raise ValueError("This float is missing digits after "
+                                     "the point")
+                if v[0] not in '0123456789':
+                    raise ValueError("This float doesn't have a leading "
+                                     "digit")
+                v = float(v)
+                itype = "float"
+            elif len(lowerv) == 3 and (lowerv == 'inf' or lowerv == 'nan'):
+                v = float(v)
+                itype = "float"
+            if itype == "int":
+                v = int(v, 0)
+            if neg:
+                return (0 - v, itype)
+            return (v, itype)
+
+    def bounded_string(self, s):
+        # True when s is empty or ends with the same quote character it
+        # starts with and that closing quote is not escaped by an odd
+        # run of backslashes.
+        if len(s) == 0:
+            return True
+        if s[-1] != s[0]:
+            return False
+        i = -2
+        backslash = False
+        while len(s) + i > 0:
+            if s[i] == "\\":
+                backslash = not backslash
+                i -= 1
+            else:
+                break
+        return not backslash
+
+    def _load_array_isstrarray(self, a):
+        a = a[1:-1].strip()
+        if a != '' and (a[0] == '"' or a[0] == "'"):
+            return True
+        return False
+
+    def load_array(self, a):
+        atype = None
+        retval = []
+        a = a.strip()
+        if '[' not in a[1:-1] or "" != a[1:-1].split('[')[0].strip():
+            strarray = self._load_array_isstrarray(a)
+            if not a[1:-1].strip().startswith('{'):
+                a = a[1:-1].split(',')
+            else:
+                # a is an inline object; we must find the matching closing
+                # brace to delimit each group
+                new_a = []
+                start_group_index = 1
+                end_group_index = 2
+                open_bracket_count = 1 if a[start_group_index] == '{' else 0
+                in_str = False
+                while end_group_index < len(a[1:]):
+                    if a[end_group_index] == '"' or a[end_group_index] == "'":
+                        if in_str:
+                            backslash_index = end_group_index - 1
+                            while (backslash_index > -1 and
+                                   a[backslash_index] == '\\'):
+                                in_str = not in_str
+                                backslash_index -= 1
+                        in_str = not in_str
+                    if not in_str and a[end_group_index] == '{':
+                        open_bracket_count += 1
+                    if in_str or a[end_group_index] != '}':
+                        end_group_index += 1
+                        continue
+                    elif a[end_group_index] == '}' and open_bracket_count > 1:
+                        open_bracket_count -= 1
+                        end_group_index += 1
+                        continue
+
+                    # Increase end_group_index by 1 to get the closing bracket
+                    end_group_index += 1
+
+                    new_a.append(a[start_group_index:end_group_index])
+
+                    # The next start index is at least after the closing
+                    # bracket, a closing bracket can be followed by a comma
+                    # since we are in an array.
+                    start_group_index = end_group_index + 1
+                    while (start_group_index < len(a[1:]) and
+                           a[start_group_index] != '{'):
+                        start_group_index += 1
+                    end_group_index = start_group_index + 1
+                a = new_a
+            b = 0
+            if strarray:
+                while b < len(a) - 1:
+                    ab = a[b].strip()
+                    while (not self.bounded_string(ab) or
+                           (len(ab) > 2 and
+                            ab[0] == ab[1] == ab[2] and
+                            ab[-2] != ab[0] and
+                            ab[-3] != ab[0])):
+                        a[b] = a[b] + ',' + a[b + 1]
+                        ab = a[b].strip()
+                        if b < len(a) - 2:
+                            a = a[:b + 1] + a[b + 2:]
+                        else:
+                            a = a[:b + 1]
+                    b += 1
+        else:
+            al = list(a[1:-1])
+            a = []
+            openarr = 0
+            j = 0
+            for i in _range(len(al)):
+                if al[i] == '[':
+                    openarr += 1
+                elif al[i] == ']':
+                    openarr -= 1
+                elif al[i] == ',' and not openarr:
+                    a.append(''.join(al[j:i]))
+                    j = i + 1
+            a.append(''.join(al[j:]))
+        for i in _range(len(a)):
+            a[i] = a[i].strip()
+            if a[i] != '':
+                nval, ntype = self.load_value(a[i])
+                if atype:
+                    if ntype != atype:
+                        raise ValueError("Not a homogeneous array")
+                else:
+                    atype = ntype
+                retval.append(nval)
+        return retval
+
+    def preserve_comment(self, line_no, key, comment, beginline):
+        pass
+
+    def embed_comments(self, idx, currentlevel):
+        pass
+
+
+class TomlPreserveCommentDecoder(TomlDecoder):
+
+    def __init__(self, _dict=dict):
+        self.saved_comments = {}
+        super(TomlPreserveCommentDecoder, self).__init__(_dict)
+
+    def preserve_comment(self, line_no, key, comment, beginline):
+        self.saved_comments[line_no] = (key, comment, beginline)
+
+    def embed_comments(self, idx, currentlevel):
+        if idx not in self.saved_comments:
+            return
+
+        key, comment, beginline = self.saved_comments[idx]
+        currentlevel[key] = CommentValue(currentlevel[key], comment, beginline,
+                                         self._dict)
diff --git a/venv/lib/python3.10/site-packages/toml/encoder.py b/venv/lib/python3.10/site-packages/toml/encoder.py
new file mode 100644
index 0000000..bf17a72
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/toml/encoder.py
@@ -0,0 +1,304 @@
+import datetime
+import re
+import sys
+from decimal import Decimal
+
+from toml.decoder import InlineTableDict
+
+if sys.version_info >= (3,):
+    unicode = str
+
+
+def dump(o, f, encoder=None):
+    """Writes out dict as toml to a file
+
+    Args:
+        o: Object to dump into toml
+        f: File descriptor where the toml should be stored
+        encoder: The ``TomlEncoder`` to use for constructing the output string
+
+    Returns:
+        String containing the toml corresponding to the dictionary
+
+    Raises:
+        TypeError: When anything other than a file descriptor is passed
+    """
+
+    if not f.write:
+        raise TypeError("You can only dump an object to a file descriptor")
+    d = dumps(o, encoder=encoder)
+    f.write(d)
+    return d
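+
+# Illustrative usage sketch (hypothetical path; ``dump`` expects a writable
+# text-mode file object):
+#
+#     with open('config_out.toml', 'w') as f:
+#         toml.dump({'title': 'example'}, f)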
+
+
+def dumps(o, encoder=None):
+    """Stringifies input dict as toml
+
+    Args:
+        o: Object to dump into toml
+        encoder: The ``TomlEncoder`` to use for constructing the output string
+
+    Returns:
+        String containing the toml corresponding to the dict
+
+    Examples:
+        ```python
+        >>> import toml
+        >>> output = {
+        ... 'a': "I'm a string",
+        ... 'b': ["I'm", "a", "list"],
+        ... 'c': 2400
+        ... }
+        >>> toml.dumps(output)
+        'a = "I\'m a string"\nb = [ "I\'m", "a", "list",]\nc = 2400\n'
+        ```
+    """
+
+    retval = ""
+    if encoder is None:
+        encoder = TomlEncoder(o.__class__)
+    addtoretval, sections = encoder.dump_sections(o, "")
+    retval += addtoretval
+    outer_objs = [id(o)]
+    while sections:
+        section_ids = [id(section) for section in sections.values()]
+        for outer_obj in outer_objs:
+            if outer_obj in section_ids:
+                raise ValueError("Circular reference detected")
+        outer_objs += section_ids
+        newsections = encoder.get_empty_table()
+        for section in sections:
+            addtoretval, addtosections = encoder.dump_sections(
+                sections[section], section)
+
+            if addtoretval or not addtosections:
+                if retval and retval[-2:] != "\n\n":
+                    retval += "\n"
+                retval += "[" + section + "]\n"
+                if addtoretval:
+                    retval += addtoretval
+            for s in addtosections:
+                newsections[section + "." + s] = addtosections[s]
+        sections = newsections
+    return retval
+
+
+def _dump_str(v):
+    if sys.version_info < (3,) and hasattr(v, 'decode') and isinstance(v, str):
+        v = v.decode('utf-8')
+    v = "%r" % v
+    if v[0] == 'u':
+        v = v[1:]
+    singlequote = v.startswith("'")
+    if singlequote or v.startswith('"'):
+        v = v[1:-1]
+    if singlequote:
+        v = v.replace("\\'", "'")
+        v = v.replace('"', '\\"')
+    v = v.split("\\x")
+    while len(v) > 1:
+        i = -1
+        if not v[0]:
+            v = v[1:]
+        v[0] = v[0].replace("\\\\", "\\")
+        # No, I don't know why != works and == breaks
+        joinx = v[0][i] != "\\"
+        while v[0][:i] and v[0][i] == "\\":
+            joinx = not joinx
+            i -= 1
+        if joinx:
+            joiner = "x"
+        else:
+            joiner = "u00"
+        v = [v[0] + joiner + v[1]] + v[2:]
+    return unicode('"' + v[0] + '"')
+
+
+def _dump_float(v):
+    return "{}".format(v).replace("e+0", "e+").replace("e-0", "e-")
+
+
+def _dump_time(v):
+    utcoffset = v.utcoffset()
+    if utcoffset is None:
+        return v.isoformat()
+    # The TOML spec treats a bare time as local time, so we drop the offset
+    return v.isoformat()[:-6]
+
+
+class TomlEncoder(object):
+
+    def __init__(self, _dict=dict, preserve=False):
+        self._dict = _dict
+        self.preserve = preserve
+        self.dump_funcs = {
+            str: _dump_str,
+            unicode: _dump_str,
+            list: self.dump_list,
+            bool: lambda v: unicode(v).lower(),
+            int: lambda v: v,
+            float: _dump_float,
+            Decimal: _dump_float,
+            datetime.datetime: lambda v: v.isoformat().replace('+00:00', 'Z'),
+            datetime.time: _dump_time,
+            datetime.date: lambda v: v.isoformat()
+        }
+
+    def get_empty_table(self):
+        return self._dict()
+
+    def dump_list(self, v):
+        retval = "["
+        for u in v:
+            retval += " " + unicode(self.dump_value(u)) + ","
+        retval += "]"
+        return retval
+
+    def dump_inline_table(self, section):
+        """Preserve inline table in its compact syntax instead of expanding
+        into subsection.
+
+        https://github.com/toml-lang/toml#user-content-inline-table
+        """
+        retval = ""
+        if isinstance(section, dict):
+            val_list = []
+            for k, v in section.items():
+                val = self.dump_inline_table(v)
+                val_list.append(k + " = " + val)
+            retval += "{ " + ", ".join(val_list) + " }\n"
+            return retval
+        else:
+            return unicode(self.dump_value(section))
+
+    def dump_value(self, v):
+        # Lookup function corresponding to v's type
+        dump_fn = self.dump_funcs.get(type(v))
+        if dump_fn is None and hasattr(v, '__iter__'):
+            dump_fn = self.dump_funcs[list]
+        # Apply the matched dump function; fall back to the str dumper
+        return dump_fn(v) if dump_fn is not None else self.dump_funcs[str](v)
+
+    def dump_sections(self, o, sup):
+        retstr = ""
+        if sup != "" and sup[-1] != ".":
+            sup += '.'
+        retdict = self._dict()
+        arraystr = ""
+        for section in o:
+            section = unicode(section)
+            qsection = section
+            if not re.match(r'^[A-Za-z0-9_-]+$', section):
+                qsection = _dump_str(section)
+            if not isinstance(o[section], dict):
+                arrayoftables = False
+                if isinstance(o[section], list):
+                    for a in o[section]:
+                        if isinstance(a, dict):
+                            arrayoftables = True
+                if arrayoftables:
+                    for a in o[section]:
+                        arraytabstr = "\n"
+                        arraystr += "[[" + sup + qsection + "]]\n"
+                        s, d = self.dump_sections(a, sup + qsection)
+                        if s:
+                            if s[0] == "[":
+                                arraytabstr += s
+                            else:
+                                arraystr += s
+                        while d:
+                            newd = self._dict()
+                            for dsec in d:
+                                s1, d1 = self.dump_sections(d[dsec], sup +
+                                                            qsection + "." +
+                                                            dsec)
+                                if s1:
+                                    arraytabstr += ("[" + sup + qsection +
+                                                    "." + dsec + "]\n")
+                                    arraytabstr += s1
+                                for s1 in d1:
+                                    newd[dsec + "." + s1] = d1[s1]
+                            d = newd
+                        arraystr += arraytabstr
+                else:
+                    if o[section] is not None:
+                        retstr += (qsection + " = " +
+                                   unicode(self.dump_value(o[section])) + '\n')
+            elif self.preserve and isinstance(o[section], InlineTableDict):
+                retstr += (qsection + " = " +
+                           self.dump_inline_table(o[section]))
+            else:
+                retdict[qsection] = o[section]
+        retstr += arraystr
+        return (retstr, retdict)
+
+
+class TomlPreserveInlineDictEncoder(TomlEncoder):
+
+    def __init__(self, _dict=dict):
+        super(TomlPreserveInlineDictEncoder, self).__init__(_dict, True)
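+
+# Illustrative sketch: ``toml.loads`` marks inline tables internally, so this
+# encoder round-trips them in their compact form:
+#
+#     import toml
+#     doc = toml.loads('point = { x = 1, y = 2 }')
+#     toml.dumps(doc, encoder=toml.TomlPreserveInlineDictEncoder())
+#     # -> 'point = { x = 1, y = 2 }\n'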
+
+
+class TomlArraySeparatorEncoder(TomlEncoder):
+
+    def __init__(self, _dict=dict, preserve=False, separator=","):
+        super(TomlArraySeparatorEncoder, self).__init__(_dict, preserve)
+        if separator.strip() == "":
+            separator = "," + separator
+        elif separator.strip(' \t\n\r,'):
+            raise ValueError("Invalid separator for arrays")
+        self.separator = separator
+
+    def dump_list(self, v):
+        t = []
+        retval = "["
+        for u in v:
+            t.append(self.dump_value(u))
+        while t != []:
+            s = []
+            for u in t:
+                if isinstance(u, list):
+                    for r in u:
+                        s.append(r)
+                else:
+                    retval += " " + unicode(u) + self.separator
+            t = s
+        retval += "]"
+        return retval
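+
+# Illustrative sketch: a separator containing a newline spreads array items
+# over several lines (hypothetical values):
+#
+#     toml.dumps({'a': [1, 2, 3]},
+#                encoder=TomlArraySeparatorEncoder(separator=',\n'))
+#     # -> 'a = [ 1,\n 2,\n 3,\n]\n'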
+
+
+class TomlNumpyEncoder(TomlEncoder):
+
+    def __init__(self, _dict=dict, preserve=False):
+        import numpy as np
+        super(TomlNumpyEncoder, self).__init__(_dict, preserve)
+        self.dump_funcs[np.float16] = _dump_float
+        self.dump_funcs[np.float32] = _dump_float
+        self.dump_funcs[np.float64] = _dump_float
+        self.dump_funcs[np.int16] = self._dump_int
+        self.dump_funcs[np.int32] = self._dump_int
+        self.dump_funcs[np.int64] = self._dump_int
+
+    def _dump_int(self, v):
+        return "{}".format(int(v))
+
+
+class TomlPreserveCommentEncoder(TomlEncoder):
+
+    def __init__(self, _dict=dict, preserve=False):
+        from toml.decoder import CommentValue
+        super(TomlPreserveCommentEncoder, self).__init__(_dict, preserve)
+        self.dump_funcs[CommentValue] = lambda v: v.dump(self.dump_value)
+
+
+class TomlPathlibEncoder(TomlEncoder):
+
+    def _dump_pathlib_path(self, v):
+        return _dump_str(str(v))
+
+    def dump_value(self, v):
+        if (3, 4) <= sys.version_info:
+            import pathlib
+            if isinstance(v, pathlib.PurePath):
+                v = str(v)
+        return super(TomlPathlibEncoder, self).dump_value(v)
diff --git a/venv/lib/python3.10/site-packages/toml/ordered.py b/venv/lib/python3.10/site-packages/toml/ordered.py
new file mode 100644
index 0000000..9c20c41
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/toml/ordered.py
@@ -0,0 +1,15 @@
+from collections import OrderedDict
+from toml import TomlEncoder
+from toml import TomlDecoder
+
+
+class TomlOrderedDecoder(TomlDecoder):
+
+    def __init__(self):
+        super(self.__class__, self).__init__(_dict=OrderedDict)
+
+
+class TomlOrderedEncoder(TomlEncoder):
+
+    def __init__(self):
+        super(self.__class__, self).__init__(_dict=OrderedDict)
diff --git a/venv/lib/python3.10/site-packages/toml/tz.py b/venv/lib/python3.10/site-packages/toml/tz.py
new file mode 100644
index 0000000..bf20593
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/toml/tz.py
@@ -0,0 +1,24 @@
+from datetime import tzinfo, timedelta
+
+
+class TomlTz(tzinfo):
+    def __init__(self, toml_offset):
+        if toml_offset == "Z":
+            self._raw_offset = "+00:00"
+        else:
+            self._raw_offset = toml_offset
+        self._sign = -1 if self._raw_offset[0] == '-' else 1
+        self._hours = int(self._raw_offset[1:3])
+        self._minutes = int(self._raw_offset[4:6])
+
+    def __deepcopy__(self, memo):
+        return self.__class__(self._raw_offset)
+
+    def tzname(self, dt):
+        return "UTC" + self._raw_offset
+
+    def utcoffset(self, dt):
+        return self._sign * timedelta(hours=self._hours, minutes=self._minutes)
+
+    def dst(self, dt):
+        return timedelta(0)
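+
+# Illustrative usage (not part of the upstream module):
+#
+#     tz = TomlTz('+05:30')
+#     tz.utcoffset(None)   # -> timedelta(seconds=19800), i.e. +5h30m
+#     tz.tzname(None)      # -> 'UTC+05:30'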
diff --git a/venv/lib/python3.10/site-packages/tomli-2.0.1.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/tomli-2.0.1.dist-info/INSTALLER
new file mode 100644
index 0000000..a1b589e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tomli-2.0.1.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/venv/lib/python3.10/site-packages/tomli-2.0.1.dist-info/LICENSE b/venv/lib/python3.10/site-packages/tomli-2.0.1.dist-info/LICENSE
new file mode 100644
index 0000000..e859590
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tomli-2.0.1.dist-info/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2021 Taneli Hukkinen
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/venv/lib/python3.10/site-packages/tomli-2.0.1.dist-info/METADATA b/venv/lib/python3.10/site-packages/tomli-2.0.1.dist-info/METADATA
new file mode 100644
index 0000000..efd87ec
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tomli-2.0.1.dist-info/METADATA
@@ -0,0 +1,206 @@
+Metadata-Version: 2.1
+Name: tomli
+Version: 2.0.1
+Summary: A lil' TOML parser
+Keywords: toml
+Author-email: Taneli Hukkinen 
+Requires-Python: >=3.7
+Description-Content-Type: text/markdown
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: MacOS
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Operating System :: POSIX :: Linux
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Typing :: Typed
+Project-URL: Changelog, https://github.com/hukkin/tomli/blob/master/CHANGELOG.md
+Project-URL: Homepage, https://github.com/hukkin/tomli
+
+[![Build Status](https://github.com/hukkin/tomli/workflows/Tests/badge.svg?branch=master)](https://github.com/hukkin/tomli/actions?query=workflow%3ATests+branch%3Amaster+event%3Apush)
+[![codecov.io](https://codecov.io/gh/hukkin/tomli/branch/master/graph/badge.svg)](https://codecov.io/gh/hukkin/tomli)
+[![PyPI version](https://img.shields.io/pypi/v/tomli)](https://pypi.org/project/tomli)
+
+# Tomli
+
+> A lil' TOML parser
+
+**Table of Contents**  *generated with [mdformat-toc](https://github.com/hukkin/mdformat-toc)*
+
+
+
+- [Intro](#intro)
+- [Installation](#installation)
+- [Usage](#usage)
+  - [Parse a TOML string](#parse-a-toml-string)
+  - [Parse a TOML file](#parse-a-toml-file)
+  - [Handle invalid TOML](#handle-invalid-toml)
+  - [Construct `decimal.Decimal`s from TOML floats](#construct-decimaldecimals-from-toml-floats)
+- [FAQ](#faq)
+  - [Why this parser?](#why-this-parser)
+  - [Is comment preserving round-trip parsing supported?](#is-comment-preserving-round-trip-parsing-supported)
+  - [Is there a `dumps`, `write` or `encode` function?](#is-there-a-dumps-write-or-encode-function)
+  - [How do TOML types map into Python types?](#how-do-toml-types-map-into-python-types)
+- [Performance](#performance)
+
+
+
+## Intro
+
+Tomli is a Python library for parsing [TOML](https://toml.io).
+Tomli is fully compatible with [TOML v1.0.0](https://toml.io/en/v1.0.0).
+
+## Installation
+
+```bash
+pip install tomli
+```
+
+## Usage
+
+### Parse a TOML string
+
+```python
+import tomli
+
+toml_str = """
+           gretzky = 99
+
+           [kurri]
+           jari = 17
+           """
+
+toml_dict = tomli.loads(toml_str)
+assert toml_dict == {"gretzky": 99, "kurri": {"jari": 17}}
+```
+
+### Parse a TOML file
+
+```python
+import tomli
+
+with open("path_to_file/conf.toml", "rb") as f:
+    toml_dict = tomli.load(f)
+```
+
+The file must be opened in binary mode (with the `"rb"` flag).
+Binary mode will enforce decoding the file as UTF-8 with universal newlines disabled,
+both of which are required to correctly parse TOML.
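+
+As a quick sanity check (a sketch; the path is the same placeholder as above, and the `TypeError` behaviour is as of tomli 2.0), text mode is rejected:
+
+```python
+import tomli
+
+try:
+    with open("path_to_file/conf.toml", "r") as f:  # wrong: text mode
+        tomli.load(f)
+except TypeError:
+    print("Open the file with 'rb' instead.")
+```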
+
+### Handle invalid TOML
+
+```python
+import tomli
+
+try:
+    toml_dict = tomli.loads("]] this is invalid TOML [[")
+except tomli.TOMLDecodeError:
+    print("Yep, definitely not valid.")
+```
+
+Note that error messages are considered informational only.
+They should not be assumed to stay constant across Tomli versions.
+
+### Construct `decimal.Decimal`s from TOML floats
+
+```python
+from decimal import Decimal
+import tomli
+
+toml_dict = tomli.loads("precision-matters = 0.982492", parse_float=Decimal)
+assert toml_dict["precision-matters"] == Decimal("0.982492")
+```
+
+Note that `decimal.Decimal` can be replaced with another callable that converts a TOML float from string to a Python type.
+The `decimal.Decimal` is, however, a practical choice for use cases where float inaccuracies can not be tolerated.
+
+Illegal types are `dict` and `list`, and their subtypes.
+A `ValueError` will be raised if `parse_float` produces illegal types.
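+
+For example, the standard library's `fractions.Fraction` also accepts decimal strings, so it is a legal drop-in (a small sketch):
+
+```python
+from fractions import Fraction
+import tomli
+
+toml_dict = tomli.loads("ratio = 0.5", parse_float=Fraction)
+assert toml_dict["ratio"] == Fraction(1, 2)
+```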
+
+## FAQ
+
+### Why this parser?
+
+- it's lil'
+- pure Python with zero dependencies
+- the fastest pure Python parser [\*](#performance):
+  15x as fast as [tomlkit](https://pypi.org/project/tomlkit/),
+  2.4x as fast as [toml](https://pypi.org/project/toml/)
+- outputs [basic data types](#how-do-toml-types-map-into-python-types) only
+- 100% spec compliant: passes all tests in
+  [a test set](https://github.com/toml-lang/compliance/pull/8)
+  soon to be merged to the official
+  [compliance tests for TOML](https://github.com/toml-lang/compliance)
+  repository
+- thoroughly tested: 100% branch coverage
+
+### Is comment preserving round-trip parsing supported?
+
+No.
+
+The `tomli.loads` function returns a plain `dict` that is populated with builtin types and types from the standard library only.
+Preserving comments requires a custom type to be returned, so it will not be supported,
+at least not by the `tomli.loads` and `tomli.load` functions.
+
+Look into [TOML Kit](https://github.com/sdispater/tomlkit) if preservation of style is what you need.
+
+### Is there a `dumps`, `write` or `encode` function?
+
+[Tomli-W](https://github.com/hukkin/tomli-w) is the write-only counterpart of Tomli, providing `dump` and `dumps` functions.
+
+The core library does not include write capability, as most TOML use cases are read-only, and Tomli intends to be minimal.
+
+### How do TOML types map into Python types?
+
+| TOML type        | Python type         | Details                                                      |
+| ---------------- | ------------------- | ------------------------------------------------------------ |
+| Document Root    | `dict`              |                                                              |
+| Key              | `str`               |                                                              |
+| String           | `str`               |                                                              |
+| Integer          | `int`               |                                                              |
+| Float            | `float`             |                                                              |
+| Boolean          | `bool`              |                                                              |
+| Offset Date-Time | `datetime.datetime` | `tzinfo` attribute set to an instance of `datetime.timezone` |
+| Local Date-Time  | `datetime.datetime` | `tzinfo` attribute set to `None`                             |
+| Local Date       | `datetime.date`     |                                                              |
+| Local Time       | `datetime.time`     |                                                              |
+| Array            | `list`              |                                                              |
+| Table            | `dict`              |                                                              |
+| Inline Table     | `dict`              |                                                              |
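+
+A short sketch of the date-time rows in practice:
+
+```python
+import datetime
+import tomli
+
+doc = tomli.loads("odt = 1979-05-27T07:32:00Z\nld = 1979-05-27")
+assert isinstance(doc["odt"], datetime.datetime)        # offset date-time
+assert doc["odt"].utcoffset() == datetime.timedelta(0)  # Z means UTC
+assert type(doc["ld"]) is datetime.date                 # local date
+```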
+
+## Performance
+
+The `benchmark/` folder in this repository contains a performance benchmark for comparing the various Python TOML parsers.
+The benchmark can be run with `tox -e benchmark-pypi`.
+Running the benchmark on my personal computer produced the following output:
+
+```console
+foo@bar:~/dev/tomli$ tox -e benchmark-pypi
+benchmark-pypi installed: attrs==19.3.0,click==7.1.2,pytomlpp==1.0.2,qtoml==0.3.0,rtoml==0.7.0,toml==0.10.2,tomli==1.1.0,tomlkit==0.7.2
+benchmark-pypi run-test-pre: PYTHONHASHSEED='2658546909'
+benchmark-pypi run-test: commands[0] | python -c 'import datetime; print(datetime.date.today())'
+2021-07-23
+benchmark-pypi run-test: commands[1] | python --version
+Python 3.8.10
+benchmark-pypi run-test: commands[2] | python benchmark/run.py
+Parsing data.toml 5000 times:
+------------------------------------------------------
+    parser |  exec time | performance (more is better)
+-----------+------------+-----------------------------
+     rtoml |    0.901 s | baseline (100%)
+  pytomlpp |     1.08 s | 83.15%
+     tomli |     3.89 s | 23.15%
+      toml |     9.36 s | 9.63%
+     qtoml |     11.5 s | 7.82%
+   tomlkit |     56.8 s | 1.59%
+```
+
+The parsers are ordered from fastest to slowest, using the fastest parser as baseline.
+Tomli performed the best out of all pure Python TOML parsers,
+losing only to pytomlpp (wraps C++) and rtoml (wraps Rust).
+
diff --git a/venv/lib/python3.10/site-packages/tomli-2.0.1.dist-info/RECORD b/venv/lib/python3.10/site-packages/tomli-2.0.1.dist-info/RECORD
new file mode 100644
index 0000000..0bda10c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tomli-2.0.1.dist-info/RECORD
@@ -0,0 +1,14 @@
+tomli-2.0.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+tomli-2.0.1.dist-info/LICENSE,sha256=uAgWsNUwuKzLTCIReDeQmEpuO2GSLCte6S8zcqsnQv4,1072
+tomli-2.0.1.dist-info/METADATA,sha256=zPDceKmPwJGLWtZykrHixL7WVXWmJGzZ1jyRT5lCoPI,8875
+tomli-2.0.1.dist-info/RECORD,,
+tomli-2.0.1.dist-info/WHEEL,sha256=jPMR_Dzkc4X4icQtmz81lnNY_kAsfog7ry7qoRvYLXw,81
+tomli/__init__.py,sha256=JhUwV66DB1g4Hvt1UQCVMdfCu-IgAV8FXmvDU9onxd4,396
+tomli/__pycache__/__init__.cpython-310.pyc,,
+tomli/__pycache__/_parser.cpython-310.pyc,,
+tomli/__pycache__/_re.cpython-310.pyc,,
+tomli/__pycache__/_types.cpython-310.pyc,,
+tomli/_parser.py,sha256=g9-ENaALS-B8dokYpCuzUFalWlog7T-SIYMjLZSWrtM,22633
+tomli/_re.py,sha256=dbjg5ChZT23Ka9z9DHOXfdtSpPwUfdgMXnj8NOoly-w,2943
+tomli/_types.py,sha256=-GTG2VUqkpxwMqzmVO4F7ybKddIbAnuAHXfmWQcTi3Q,254
+tomli/py.typed,sha256=8PjyZ1aVoQpRVvt71muvuq5qE-jTFZkK-GLHkhdebmc,26
diff --git a/venv/lib/python3.10/site-packages/tomli-2.0.1.dist-info/WHEEL b/venv/lib/python3.10/site-packages/tomli-2.0.1.dist-info/WHEEL
new file mode 100644
index 0000000..c727d14
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tomli-2.0.1.dist-info/WHEEL
@@ -0,0 +1,4 @@
+Wheel-Version: 1.0
+Generator: flit 3.6.0
+Root-Is-Purelib: true
+Tag: py3-none-any
diff --git a/venv/lib/python3.10/site-packages/setuptools/_vendor/tomli/__init__.py b/venv/lib/python3.10/site-packages/tomli/__init__.py
similarity index 100%
rename from venv/lib/python3.10/site-packages/setuptools/_vendor/tomli/__init__.py
rename to venv/lib/python3.10/site-packages/tomli/__init__.py
diff --git a/venv/lib/python3.10/site-packages/setuptools/_vendor/tomli/_parser.py b/venv/lib/python3.10/site-packages/tomli/_parser.py
similarity index 100%
rename from venv/lib/python3.10/site-packages/setuptools/_vendor/tomli/_parser.py
rename to venv/lib/python3.10/site-packages/tomli/_parser.py
diff --git a/venv/lib/python3.10/site-packages/setuptools/_vendor/tomli/_re.py b/venv/lib/python3.10/site-packages/tomli/_re.py
similarity index 100%
rename from venv/lib/python3.10/site-packages/setuptools/_vendor/tomli/_re.py
rename to venv/lib/python3.10/site-packages/tomli/_re.py
diff --git a/venv/lib/python3.10/site-packages/pip/_vendor/tomli/_types.py b/venv/lib/python3.10/site-packages/tomli/_types.py
similarity index 100%
rename from venv/lib/python3.10/site-packages/pip/_vendor/tomli/_types.py
rename to venv/lib/python3.10/site-packages/tomli/_types.py
diff --git a/venv/lib/python3.10/site-packages/tomli/py.typed b/venv/lib/python3.10/site-packages/tomli/py.typed
new file mode 100644
index 0000000..7632ecf
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tomli/py.typed
@@ -0,0 +1 @@
+# Marker file for PEP 561
diff --git a/venv/lib/python3.10/site-packages/tox-3.25.0.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/tox-3.25.0.dist-info/INSTALLER
new file mode 100644
index 0000000..a1b589e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox-3.25.0.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/venv/lib/python3.10/site-packages/tox-3.25.0.dist-info/LICENSE b/venv/lib/python3.10/site-packages/tox-3.25.0.dist-info/LICENSE
new file mode 100644
index 0000000..8babb03
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox-3.25.0.dist-info/LICENSE
@@ -0,0 +1,19 @@
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be included
+in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/venv/lib/python3.10/site-packages/tox-3.25.0.dist-info/METADATA b/venv/lib/python3.10/site-packages/tox-3.25.0.dist-info/METADATA
new file mode 100644
index 0000000..fce85a7
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox-3.25.0.dist-info/METADATA
@@ -0,0 +1,172 @@
+Metadata-Version: 2.1
+Name: tox
+Version: 3.25.0
+Summary: tox is a generic virtualenv management and test command line tool
+Home-page: https://tox.readthedocs.io
+Author: Holger Krekel, Oliver Bestwalter, Bernát Gábor and others
+Maintainer: Bernát Gábor, Oliver Bestwalter, Anthony Sottile, Jürgen Gmach
+Maintainer-email: gaborjbernat@gmail.com
+License: MIT
+Project-URL: Source, https://github.com/tox-dev/tox
+Project-URL: Tracker, https://github.com/tox-dev/tox/issues
+Project-URL: Changelog, https://tox.readthedocs.io/en/latest/changelog.html
+Keywords: virtual,environments,isolated,testing
+Platform: any
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Framework :: tox
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: MacOS :: MacOS X
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Operating System :: POSIX
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Topic :: Software Development :: Libraries
+Classifier: Topic :: Software Development :: Testing
+Classifier: Topic :: Utilities
+Requires-Python: !=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Requires-Dist: filelock (>=3.0.0)
+Requires-Dist: packaging (>=14)
+Requires-Dist: pluggy (>=0.12.0)
+Requires-Dist: py (>=1.4.17)
+Requires-Dist: six (>=1.14.0)
+Requires-Dist: toml (>=0.9.4)
+Requires-Dist: virtualenv (!=20.0.0,!=20.0.1,!=20.0.2,!=20.0.3,!=20.0.4,!=20.0.5,!=20.0.6,!=20.0.7,>=16.0.0)
+Requires-Dist: colorama (>=0.4.1) ; platform_system == "Windows"
+Requires-Dist: importlib-metadata (>=0.12) ; python_version < "3.8"
+Provides-Extra: docs
+Requires-Dist: pygments-github-lexers (>=0.0.5) ; extra == 'docs'
+Requires-Dist: sphinx (>=2.0.0) ; extra == 'docs'
+Requires-Dist: sphinxcontrib-autoprogram (>=0.1.5) ; extra == 'docs'
+Requires-Dist: towncrier (>=18.5.0) ; extra == 'docs'
+Provides-Extra: testing
+Requires-Dist: flaky (>=3.4.0) ; extra == 'testing'
+Requires-Dist: freezegun (>=0.3.11) ; extra == 'testing'
+Requires-Dist: pytest (>=4.0.0) ; extra == 'testing'
+Requires-Dist: pytest-cov (>=2.5.1) ; extra == 'testing'
+Requires-Dist: pytest-mock (>=1.10.0) ; extra == 'testing'
+Requires-Dist: pytest-randomly (>=1.0.0) ; extra == 'testing'
+Requires-Dist: psutil (>=5.6.1) ; (platform_python_implementation == "cpython") and extra == 'testing'
+Requires-Dist: pathlib2 (>=2.3.3) ; (python_version < "3.4") and extra == 'testing'
+
+[![PyPI](https://img.shields.io/pypi/v/tox?style=flat-square)](https://pypi.org/project/tox/)
+[![Supported Python
+versions](https://img.shields.io/pypi/pyversions/tox.svg)](https://pypi.org/project/tox/)
+[![check](https://github.com/tox-dev/tox/actions/workflows/check.yml/badge.svg)](https://github.com/tox-dev/tox/actions/workflows/check.yml)
+[![Documentation
+status](https://readthedocs.org/projects/tox/badge/?version=latest&style=flat-square)](https://tox.readthedocs.io/en/latest/?badge=latest)
+[![Code style:
+black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black)
+[![Downloads](https://pepy.tech/badge/tox/month)](https://pepy.tech/project/tox/month)
+
+
+*(image: tox logo)*
+
+
+# tox automation project
+
+**Command line driven CI frontend and development task automation tool**
+
+At its core tox provides a convenient way to run arbitrary commands in isolated environments to serve as a single entry
+point for build, test and release activities.
+
+tox is highly [configurable](https://tox.readthedocs.io/en/latest/config.html) and
+[pluggable](https://tox.readthedocs.io/en/latest/plugins.html).
+
+## Example: run tests with Python 3.7 and Python 3.8
+
+tox is mainly used as a command line tool and needs a `tox.ini` or a `tool.tox` section in `pyproject.toml` containing
+the configuration.
+
+To test a simple project that has some tests, here is an example with a `tox.ini` in the root of the project:
+
+```ini
+[tox]
+envlist = py37,py38
+
+[testenv]
+deps = pytest
+commands = pytest
+```
+
+```console
+$ tox
+
+[lots of output from what tox does]
+[lots of output from commands that were run]
+
+__________________ summary _________________
+  py37: commands succeeded
+  py38: commands succeeded
+  congratulations :)
+```
+
+tox created two `testenvs` - one based on Python 3.7 and one based on Python 3.8 - then installed pytest in them and ran the
+tests. The report at the end summarizes which `testenvs` have failed and which have succeeded.
+
+**Note:** To learn more about what you can do with tox, have a look at
+[the collection of examples in the documentation](https://tox.readthedocs.io/en/latest/examples.html) or
+[existing projects using tox](https://github.com/search?l=INI&q=tox.ini+in%3Apath&type=Code).
+
+### How it works
+
+tox creates virtual environments for all configured so-called `testenvs`, it then installs the project and other
+necessary dependencies and runs the configured set of commands. See
+[system overview](https://tox.readthedocs.io/en/latest/#system-overview) for more details.
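+
+tox can also be driven programmatically through the same entry point the command line uses (a sketch; the env name is hypothetical):
+
+```python
+import tox
+
+# Equivalent to running `tox -e py38`; exits via SystemExit when done.
+tox.cmdline(["-e", "py38"])
+```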
+
+
+*(diagram: tox flow)*
+
+
+### tox can be used for ...
+
+- creating development environments
+- running static code analysis and test tools
+- automating package builds
+- running tests against the package built by tox
+- checking that packages install correctly with different Python versions/interpreters
+- unifying Continuous Integration and command line based testing
+- building and deploying project documentation
+- releasing a package to PyPI or any other platform
+- limit: your imagination
+
+### Documentation
+
+Documentation for tox can be found at [Read The Docs](https://tox.readthedocs.org).
+
+### Communication and questions
+
+For the fastest and most interactive feedback, please join our
+[![Discord](https://img.shields.io/discord/802911963368783933?style=flat-square)](https://discord.gg/edtj86wzBX) server.
+If you have questions or suggestions, you can first check whether they have already been answered or discussed on our
+[issue tracker](https://github.com/tox-dev/tox/issues?utf8=%E2%9C%93&q=is%3Aissue+sort%3Aupdated-desc+label%3A%22type%3Aquestion+%3Agrey_question%3A%22+).
+You can also ask on [Stack Overflow (tagged with `tox`)](https://stackoverflow.com/questions/tagged/tox).
+
+### Contributing
+
+Contributions are welcome. See [contributing](https://github.com/tox-dev/tox/blob/master/CONTRIBUTING.rst) and our
+[Contributor Covenant Code of Conduct](https://github.com/tox-dev/tox/blob/master/CODE_OF_CONDUCT.md).
+
+Currently, the [code](https://github.com/tox-dev/tox) and the [issues](https://github.com/tox-dev/tox/issues) are hosted
+on GitHub.
+
+The project is licensed under [MIT](https://github.com/tox-dev/tox/blob/master/LICENSE).
+
+
diff --git a/venv/lib/python3.10/site-packages/tox-3.25.0.dist-info/RECORD b/venv/lib/python3.10/site-packages/tox-3.25.0.dist-info/RECORD
new file mode 100644
index 0000000..6159279
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox-3.25.0.dist-info/RECORD
@@ -0,0 +1,118 @@
+../../../bin/tox,sha256=MUALcfWqZVtdmieRDlwSHvUm9EPFWDA2qUFAd9C-uSI,249
+../../../bin/tox-quickstart,sha256=YGouBGYVoGVKp-B8-tGAPWa5TlhlnRygdOO0dEEyHxM,255
+tox-3.25.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+tox-3.25.0.dist-info/LICENSE,sha256=dOVYRlVEF1M1gPTeeJ3MBJMzjokbbMQLjw8rLuZ2FBs,1024
+tox-3.25.0.dist-info/METADATA,sha256=QmXagIf-949vFmrBI1OelJfDNG94PMNf-jXSOZFyzE0,7573
+tox-3.25.0.dist-info/RECORD,,
+tox-3.25.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+tox-3.25.0.dist-info/WHEEL,sha256=z9j0xAa_JmUKMpmz72K0ZGALSM_n-wQVmGbleXx2VHg,110
+tox-3.25.0.dist-info/entry_points.txt,sha256=4n8CmpW2xyUiFpntvWs7purct_WEyJVNlepf_ryaDcg,74
+tox-3.25.0.dist-info/top_level.txt,sha256=j-NhlvLEu1pj85DD4uEVKjq6vNP0HBKVuG93PJxTxjI,4
+tox/__init__.py,sha256=_jMg9VLz7TsXlGkI7TQAMVPdLYoNUpbKk7vjqN2pFNY,1040
+tox/__main__.py,sha256=JLiMXOWktPvsQILXuwR31NTqUdx0Cx03CWAIExZLT-s,57
+tox/__pycache__/__init__.cpython-310.pyc,,
+tox/__pycache__/__main__.cpython-310.pyc,,
+tox/__pycache__/_pytestplugin.cpython-310.pyc,,
+tox/__pycache__/_quickstart.cpython-310.pyc,,
+tox/__pycache__/action.cpython-310.pyc,,
+tox/__pycache__/cli.cpython-310.pyc,,
+tox/__pycache__/constants.cpython-310.pyc,,
+tox/__pycache__/exception.cpython-310.pyc,,
+tox/__pycache__/hookspecs.cpython-310.pyc,,
+tox/__pycache__/reporter.cpython-310.pyc,,
+tox/__pycache__/venv.cpython-310.pyc,,
+tox/__pycache__/version.cpython-310.pyc,,
+tox/_pytestplugin.py,sha256=pNwVRAf0BId-AwUFCyvDFwIMJKIXl5l1TBRWYaVMmkc,19321
+tox/_quickstart.py,sha256=LneJ29mRScDLTzXE-KgdO5yGcpirwXJX0b685t2SJk0,9381
+tox/action.py,sha256=aOqxUvZanEW3q5ziO7djI75qDvcyDbucJCe7zimvM9M,11671
+tox/cli.py,sha256=rTThNwHslVD-UXuK4Ow3qVhtlFO1IiTbzgfgszHUXJ4,219
+tox/config/__init__.py,sha256=tJ2g_0jJ7sbJtDXlqiRoPxjqjnxKPS0pUsR_1LwCqKk,75588
+tox/config/__pycache__/__init__.cpython-310.pyc,,
+tox/config/__pycache__/parallel.cpython-310.pyc,,
+tox/config/__pycache__/reporter.cpython-310.pyc,,
+tox/config/parallel.py,sha256=MFe62P7rWI1_Eze9KzzVpUqZUHZGPlTupxgBUbFrnbo,2062
+tox/config/reporter.py,sha256=wwFiWyi-jtj6sbBcm0jTZJ1XK_ziXdj9ziKS5hoot_w,646
+tox/constants.py,sha256=oXkMIA8Olqvuv0H-PaT8Pe7nx8h80u2aWLUxnciH9_o,1972
+tox/exception.py,sha256=wSY-3jSzyr2E1UpOphd_XXS_mjDBh5VJ9SbS0laZVRI,3312
+tox/helper/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+tox/helper/__pycache__/__init__.cpython-310.pyc,,
+tox/helper/__pycache__/build_isolated.cpython-310.pyc,,
+tox/helper/__pycache__/build_requires.cpython-310.pyc,,
+tox/helper/__pycache__/get_site_package_dir.cpython-310.pyc,,
+tox/helper/__pycache__/get_version.cpython-310.pyc,,
+tox/helper/build_isolated.py,sha256=7em7tB9PjT55xMZAqxKxjiO_Gj6f_RMYT-S_NBCkQd8,1289
+tox/helper/build_requires.py,sha256=06N05UHHKZx-DdirVVDUJ6uFTrPXup8wz62bpGduehs,727
+tox/helper/get_site_package_dir.py,sha256=isOluiM6IA3o9ZRX0wkrOsJYCDrusqYueAaxzUCf8ww,679
+tox/helper/get_version.py,sha256=7L4BCktGEDT23Tz2gRL66Np2Kcic85Q46CNXvrYomG0,472
+tox/hookspecs.py,sha256=DF9W3c-9eCuuoSlXGEz3tOHTzuf1aTMVrFFcnyKdSAU,3903
+tox/interpreters/__init__.py,sha256=io_S2hKcQzQ60uRoqlYr4aw_1HagNYGasns926pFsfo,4379
+tox/interpreters/__pycache__/__init__.cpython-310.pyc,,
+tox/interpreters/__pycache__/common.cpython-310.pyc,,
+tox/interpreters/__pycache__/py_spec.cpython-310.pyc,,
+tox/interpreters/__pycache__/unix.cpython-310.pyc,,
+tox/interpreters/__pycache__/via_path.cpython-310.pyc,,
+tox/interpreters/common.py,sha256=dZLyl9x8-Zrag8pehQ6oAC_F7b6MS4OTp9T7HmB9nKk,804
+tox/interpreters/py_spec.py,sha256=-7fqs-0t8MgpALoP0maJVG46ODPDEyN2Ss6zoQX4mU0,2448
+tox/interpreters/unix.py,sha256=L4RrRvD8V6BpNYPLTjbH2CNxEP1u1ECYH8Xo-BYTO8I,550
+tox/interpreters/via_path.py,sha256=im82GiHFqTEQV4s_SGVPaM0HarnhbRmzKxRdRumFMDc,2231
+tox/interpreters/windows/__init__.py,sha256=XcwNxk0fW9s-b_Zi-4B6MHiJKdt-ubwjoBl3p5a7MNU,1442
+tox/interpreters/windows/__pycache__/__init__.cpython-310.pyc,,
+tox/interpreters/windows/__pycache__/pep514.cpython-310.pyc,,
+tox/interpreters/windows/pep514.py,sha256=U2Cx2SzcK9t0Lp8sHl9UrH38X_TjavNjbhbVCNjEFIA,5335
+tox/logs/__init__.py,sha256=AXr_s2W9-JLV4hT8_TGr8nlL24otk6avSIgYvBUxkyE,137
+tox/logs/__pycache__/__init__.cpython-310.pyc,,
+tox/logs/__pycache__/command.cpython-310.pyc,,
+tox/logs/__pycache__/env.cpython-310.pyc,,
+tox/logs/__pycache__/result.cpython-310.pyc,,
+tox/logs/command.py,sha256=v_NeOHc45Ajxj8KSCSyDUjFBV6U98gSP34ItQLWt7K8,414
+tox/logs/env.py,sha256=vteZtorNOaS3De74treMjxa7mouFpMMAgpg0CvWjRcQ,1088
+tox/logs/result.py,sha256=KOCmIu78K0Wri8JpoPGZl3aLf3d1ZNqGppTg2QOaDck,1260
+tox/package/__init__.py,sha256=mNaarIFMUIJuq7rgeXfhacbZITX7SS7kZ8h5dDrNNqg,2563
+tox/package/__pycache__/__init__.cpython-310.pyc,,
+tox/package/__pycache__/local.cpython-310.pyc,,
+tox/package/__pycache__/view.cpython-310.pyc,,
+tox/package/builder/__init__.py,sha256=tL7IC2r7xIcG77X8zsjr6ofyk3iZBeUUqj8REMcvG4Q,222
+tox/package/builder/__pycache__/__init__.cpython-310.pyc,,
+tox/package/builder/__pycache__/isolated.cpython-310.pyc,,
+tox/package/builder/__pycache__/legacy.cpython-310.pyc,,
+tox/package/builder/isolated.py,sha256=bR0sTxH2SgRIPtx1KHaxGZhxIgfOitpK2FvCCu8He_8,5434
+tox/package/builder/legacy.py,sha256=4lC483-8dG9-VXJ0nz4AGqXF0_RsWQtoiDdRkw5qpk0,2261
+tox/package/local.py,sha256=npkFlYp-lk2aa7rpEASBAJV-oVlVYi1qFob7O8f_B3s,1865
+tox/package/view.py,sha256=tW4Ske4XNEbemipJrliIkeh87aXFgEV2gVxfzTxah_Y,1687
+tox/reporter.py,sha256=ruS0F6qKxuqwCUebDTgj4w-KDAR2AiNIxA-EQrFsZdc,4617
+tox/session/__init__.py,sha256=lsyoO70yMMhugAgQpbZI5Jxj7EAfsmm0KD-DiVah_dU,10985
+tox/session/__pycache__/__init__.cpython-310.pyc,,
+tox/session/commands/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+tox/session/commands/__pycache__/__init__.cpython-310.pyc,,
+tox/session/commands/__pycache__/help.cpython-310.pyc,,
+tox/session/commands/__pycache__/help_ini.cpython-310.pyc,,
+tox/session/commands/__pycache__/provision.cpython-310.pyc,,
+tox/session/commands/__pycache__/show_config.cpython-310.pyc,,
+tox/session/commands/__pycache__/show_env.cpython-310.pyc,,
+tox/session/commands/help.py,sha256=W--C3bU0KW4pfeBA2TfbJdaW_IM4fRGnP0jabBmy4kE,674
+tox/session/commands/help_ini.py,sha256=CmG7yHmJknxLACTPeS_MPGzeuNeNJlkAMQjFVoVwKhI,467
+tox/session/commands/provision.py,sha256=APG5UOfIY_sbTyilXkV9kQBQm1pKt2ouDw29LSPKpy4,897
+tox/session/commands/run/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+tox/session/commands/run/__pycache__/__init__.cpython-310.pyc,,
+tox/session/commands/run/__pycache__/parallel.cpython-310.pyc,,
+tox/session/commands/run/__pycache__/sequential.cpython-310.pyc,,
+tox/session/commands/run/parallel.py,sha256=oGVQmCxAhYVePzj7TSyr_dobhfkSDJ1VswKCR1_0nJw,5614
+tox/session/commands/run/sequential.py,sha256=PoYzagF5IMQ6Btmg_sOODhKcYJAQOVqJ6pjkOOThcaU,2431
+tox/session/commands/show_config.py,sha256=sXJ_0HT_EB_Qv0bkTFyTCQSQ1fpWbJ7ekFyp-3R0D4s,2419
+tox/session/commands/show_env.py,sha256=cidk7WJB6u5J2foSu-upkyOSmeVVxuMWMfxOYNdEmsg,1081
+tox/util/__init__.py,sha256=WrGdI6FHghi8LCqntrnl0s9IzibhS6OoI146bvzi73Q,506
+tox/util/__pycache__/__init__.cpython-310.pyc,,
+tox/util/__pycache__/graph.cpython-310.pyc,,
+tox/util/__pycache__/lock.cpython-310.pyc,,
+tox/util/__pycache__/main.cpython-310.pyc,,
+tox/util/__pycache__/path.cpython-310.pyc,,
+tox/util/__pycache__/spinner.cpython-310.pyc,,
+tox/util/__pycache__/stdlib.cpython-310.pyc,,
+tox/util/graph.py,sha256=VgO0xYRCSuRtJzt3kGni8z_5_Nu87aVrnPvktbJIDc4,2199
+tox/util/lock.py,sha256=IqEH0K3m9Q6hP6g_8QPVUyqkjUYCQN5sOvKXotFrb54,1295
+tox/util/main.py,sha256=IbjLm2rzgvqVtzH1KQL2WX5KfDI0PpskJS1Ah_Su7bY,117
+tox/util/path.py,sha256=r1rHAuTzxFM7wB4o8nEyDOdkDSJB0Y46NQKtPqLIApc,223
+tox/util/spinner.py,sha256=dyD3VGAnCWz2T-yvMBhx7q0CNZgj8k3KvjPLWwY1pkk,5688
+tox/util/stdlib.py,sha256=8vqdprYGVb3OfkXHXa4pZ9yCZw6EV2MUH1VtHtABZys,1624
+tox/venv.py,sha256=mmuVIrpDB5PkkGuYYkYafRErUrfquJM3unfU1ba3Meg,30643
+tox/version.py,sha256=Hq7kBr7cTKqN6TXmdsMwVPkmrrTyeS0DCJAtpBGNAks,80
diff --git a/venv/lib/python3.10/site-packages/tox-3.25.0.dist-info/REQUESTED b/venv/lib/python3.10/site-packages/tox-3.25.0.dist-info/REQUESTED
new file mode 100644
index 0000000..e69de29
diff --git a/venv/lib/python3.10/site-packages/tox-3.25.0.dist-info/WHEEL b/venv/lib/python3.10/site-packages/tox-3.25.0.dist-info/WHEEL
new file mode 100644
index 0000000..0b18a28
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox-3.25.0.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.37.1)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/venv/lib/python3.10/site-packages/tox-3.25.0.dist-info/entry_points.txt b/venv/lib/python3.10/site-packages/tox-3.25.0.dist-info/entry_points.txt
new file mode 100644
index 0000000..c443729
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox-3.25.0.dist-info/entry_points.txt
@@ -0,0 +1,3 @@
+[console_scripts]
+tox = tox:cmdline
+tox-quickstart = tox._quickstart:main
diff --git a/venv/lib/python3.10/site-packages/tox-3.25.0.dist-info/top_level.txt b/venv/lib/python3.10/site-packages/tox-3.25.0.dist-info/top_level.txt
new file mode 100644
index 0000000..053148f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox-3.25.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+tox
diff --git a/venv/lib/python3.10/site-packages/tox/__init__.py b/venv/lib/python3.10/site-packages/tox/__init__.py
new file mode 100644
index 0000000..b3df3d5
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox/__init__.py
@@ -0,0 +1,32 @@
+"""Everything made explicitly available via `__all__` can be considered as part of the tox API.
+
+We will emit deprecation warnings for one minor release before making changes to these objects.
+
+If objects are marked experimental they might change between minor versions.
+
+To override/modify tox behaviour via plugins see `tox.hookspec` and its use with pluggy.
+"""
+import pluggy
+
+from . import exception
+from .constants import INFO, PIP, PYTHON
+from .hookspecs import hookspec
+from .version import __version__
+
+__all__ = (
+    "__version__",  # tox version
+    "cmdline",  # run tox as part of another program/IDE (same behaviour as called standalone)
+    "hookimpl",  # Hook implementation marker to be imported by plugins
+    "exception",  # tox specific exceptions
+    # EXPERIMENTAL CONSTANTS API
+    "PYTHON",
+    "INFO",
+    "PIP",
+    # DEPRECATED - will be removed from API in tox 4
+    "hookspec",
+)
+
+hookimpl = pluggy.HookimplMarker("tox")
+
+# NOTE: must come last due to circular import
+from .session import cmdline  # isort:skip
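+
+# Illustrative plugin sketch (not part of this file): plugins implement hooks
+# declared in ``tox.hookspecs`` and mark them with ``tox.hookimpl``, e.g.:
+#
+#     import tox
+#
+#     @tox.hookimpl
+#     def tox_configure(config):
+#         ...  # inspect or tweak the parsed config before environments run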
diff --git a/venv/lib/python3.10/site-packages/tox/__main__.py b/venv/lib/python3.10/site-packages/tox/__main__.py
new file mode 100644
index 0000000..821fa48
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox/__main__.py
@@ -0,0 +1,4 @@
+import tox
+
+if __name__ == "__main__":
+    tox.cmdline()
diff --git a/venv/lib/python3.10/site-packages/tox/_pytestplugin.py b/venv/lib/python3.10/site-packages/tox/_pytestplugin.py
new file mode 100644
index 0000000..d0c8703
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox/_pytestplugin.py
@@ -0,0 +1,619 @@
+from __future__ import print_function, unicode_literals
+
+import os
+import subprocess
+import sys
+import textwrap
+import time
+import traceback
+from collections import OrderedDict
+from fnmatch import fnmatch
+
+import py
+import pytest
+import six
+
+import tox
+import tox.session
+from tox import venv
+from tox.config import parseconfig
+from tox.config.parallel import ENV_VAR_KEY_PRIVATE as PARALLEL_ENV_VAR_KEY_PRIVATE
+from tox.config.parallel import ENV_VAR_KEY_PUBLIC as PARALLEL_ENV_VAR_KEY_PUBLIC
+from tox.reporter import update_default_reporter
+from tox.venv import CreationConfig, VirtualEnv, getdigest
+
+mark_dont_run_on_windows = pytest.mark.skipif(os.name == "nt", reason="non windows test")
+mark_dont_run_on_posix = pytest.mark.skipif(os.name == "posix", reason="non posix test")
+
+
+def pytest_configure():
+    if "TOXENV" in os.environ:
+        del os.environ["TOXENV"]
+    if "HUDSON_URL" in os.environ:
+        del os.environ["HUDSON_URL"]
+
+
+def pytest_addoption(parser):
+    parser.addoption(
+        "--no-network",
+        action="store_true",
+        dest="no_network",
+        help="don't run tests requiring network",
+    )
+
+
+def pytest_report_header():
+    return "tox comes from: {!r}".format(tox.__file__)
+
+
+@pytest.fixture
+def work_in_clean_dir(tmpdir):
+    with tmpdir.as_cwd():
+        yield
+
+
+@pytest.fixture(autouse=True)
+def check_cwd_not_changed_by_test():
+    old = os.getcwd()
+    yield
+    new = os.getcwd()
+    if old != new:
+        pytest.fail("test changed cwd: {!r} => {!r}".format(old, new))
+
+
+@pytest.fixture(autouse=True)
+def check_os_environ_stable():
+    old = os.environ.copy()
+
+    to_clean = {
+        k: os.environ.pop(k, None)
+        for k in {
+            PARALLEL_ENV_VAR_KEY_PRIVATE,
+            PARALLEL_ENV_VAR_KEY_PUBLIC,
+            str("TOX_WORK_DIR"),
+            str("PYTHONPATH"),
+        }
+    }
+
+    yield
+
+    for key, value in to_clean.items():
+        if value is not None:
+            os.environ[key] = value
+
+    new = os.environ
+    extra = {k: new[k] for k in set(new) - set(old)}
+    miss = {k: old[k] for k in set(old) - set(new)}
+    diff = {
+        "{} = {} vs {}".format(k, old[k], new[k])
+        for k in set(old) & set(new)
+        if old[k] != new[k] and not (k.startswith("PYTEST_") or k.startswith("COV_"))
+    }
+    if extra or miss or diff:
+        msg = "test changed environ"
+        if extra:
+            msg += " extra {}".format(extra)
+        if miss:
+            msg += " miss {}".format(miss)
+        if diff:
+            msg += " diff {}".format(diff)
+        pytest.fail(msg)
+
+
+@pytest.fixture(name="newconfig")
+def create_new_config_file(tmpdir):
+    def create_new_config_file_(args, source=None, plugins=(), filename="tox.ini"):
+        if source is None:
+            source = args
+            args = []
+        s = textwrap.dedent(source)
+        p = tmpdir.join(filename)
+        p.write(s)
+        tox.session.setup_reporter(args)
+        with tmpdir.as_cwd():
+            return parseconfig(args, plugins=plugins)
+
+    return create_new_config_file_
+
+
+@pytest.fixture
+def cmd(request, monkeypatch, capfd):
+    if request.config.option.no_network:
+        pytest.skip("--no-network was specified, test cannot run")
+    request.addfinalizer(py.path.local().chdir)
+
+    def run(*argv):
+        reset_report()
+        with RunResult(argv, capfd) as result:
+            _collect_session(result)
+
+            # noinspection PyBroadException
+            try:
+                tox.session.main([str(x) for x in argv])
+                assert False  # this should always exit with SystemExit
+            except SystemExit as exception:
+                result.ret = exception.code
+            except OSError as e:
+                traceback.print_exc()
+                result.ret = e.errno
+            except Exception:
+                traceback.print_exc()
+                result.ret = 1
+        return result
+
+    def _collect_session(result):
+        prev_build = tox.session.build_session
+
+        def build_session(config):
+            result.session = prev_build(config)
+            return result.session
+
+        monkeypatch.setattr(tox.session, "build_session", build_session)
+
+    yield run
+
+
+class RunResult:
+    def __init__(self, args, capfd):
+        self.args = args
+        self.ret = None
+        self.duration = None
+        self.out = None
+        self.err = None
+        self.session = None
+        self.capfd = capfd
+
+    def __enter__(self):
+        self._start = time.time()
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self.duration = time.time() - self._start
+        self.out, self.err = self.capfd.readouterr()
+
+    def _read(self, out, pos):
+        out.buffer.seek(pos)
+        return out.buffer.read().decode(out.encoding, errors=out.errors)
+
+    @property
+    def outlines(self):
+        out = [] if self.out is None else self.out.splitlines()
+        err = [] if self.err is None else self.err.splitlines()
+        return err + out
+
+    def __repr__(self):
+        res = "RunResult(ret={}, args={!r}, out=\n{}\n, err=\n{})".format(
+            self.ret,
+            self.args,
+            self.out,
+            self.err,
+        )
+        if six.PY2:
+            return res.encode("UTF-8")
+        else:
+            return res
+
+    def output(self):
+        return "{}\n{}\n{}".format(self.ret, self.err, self.out)
+
+    def assert_success(self, is_run_test_env=True):
+        msg = self.output()
+        assert self.ret == 0, msg
+        if is_run_test_env:
+            assert any("  congratulations :)" == line for line in reversed(self.outlines)), msg
+
+    def assert_fail(self, is_run_test_env=True):
+        msg = self.output()
+        assert self.ret, msg
+        if is_run_test_env:
+            assert not any("  congratulations :)" == line for line in reversed(self.outlines)), msg
+
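+# Illustrative only (an assumed sketch, not part of this diff's logic): tests
+# typically combine the `cmd` fixture above with RunResult's helpers, e.g.
+#
+#     def test_runs(cmd, initproj):
+#         initproj("pkg123-0.7")
+#         result = cmd("-e", "py", "--notest")
+#         result.assert_success(is_run_test_env=False)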
+
+class ReportExpectMock:
+    def __init__(self):
+        from tox import reporter
+
+        self.instance = reporter._INSTANCE
+        self.clear()
+        self._index = -1
+
+    def clear(self):
+        self._index = -1
+        if not six.PY2:
+            self.instance.reported_lines.clear()
+        else:
+            del self.instance.reported_lines[:]
+
+    def getnext(self, cat):
+        __tracebackhide__ = True
+        newindex = self._index + 1
+        while newindex < len(self.instance.reported_lines):
+            call = self.instance.reported_lines[newindex]
+            lcat = call[0]
+            if fnmatch(lcat, cat):
+                self._index = newindex
+                return call
+            newindex += 1
+        raise LookupError(
+            "looking for {!r}, no reports found at >={:d} in {!r}".format(
+                cat,
+                self._index + 1,
+                self.instance.reported_lines,
+            ),
+        )
+
+    def expect(self, cat, messagepattern="*", invert=False):
+        __tracebackhide__ = True
+        if not messagepattern.startswith("*"):
+            messagepattern = "*{}".format(messagepattern)
+        while self._index < len(self.instance.reported_lines):
+            try:
+                call = self.getnext(cat)
+            except LookupError:
+                break
+            for lmsg in call[1:]:
+                lmsg = str(lmsg).replace("\n", " ")
+                if fnmatch(lmsg, messagepattern):
+                    if invert:
+                        raise AssertionError(
+                            "found {}({!r}), didn't expect it".format(cat, messagepattern),
+                        )
+                    return
+        if not invert:
+            raise AssertionError(
+                "looking for {}({!r}), no reports found at >={:d} in {!r}".format(
+                    cat,
+                    messagepattern,
+                    self._index + 1,
+                    self.instance.reported_lines,
+                ),
+            )
+
+    def not_expect(self, cat, messagepattern="*"):
+        return self.expect(cat, messagepattern, invert=True)
+
+
+class pcallMock:
+    def __init__(self, args, cwd, env, stdout, stderr, shell):
+        self.arg0 = args[0]
+        self.args = args
+        self.cwd = cwd
+        self.env = env
+        self.stdout = stdout
+        self.stderr = stderr
+        self.shell = shell
+        self.pid = os.getpid()
+        self.returncode = 0
+
+    @staticmethod
+    def communicate():
+        return "", ""
+
+    def wait(self):
+        pass
+
+
+@pytest.fixture(name="mocksession")
+def create_mocksession(request):
+    config = request.getfixturevalue("newconfig")([], "")
+
+    class MockSession(tox.session.Session):
+        def __init__(self, config):
+            self.logging_levels(config.option.quiet_level, config.option.verbose_level)
+            super(MockSession, self).__init__(config, popen=self.popen)
+            self._pcalls = []
+            self.report = ReportExpectMock()
+
+        def _clearmocks(self):
+            if not six.PY2:
+                self._pcalls.clear()
+            else:
+                del self._pcalls[:]
+            self.report.clear()
+
+        def popen(self, args, cwd, shell=None, stdout=None, stderr=None, env=None, **_):
+            process_call_mock = pcallMock(args, cwd, env, stdout, stderr, shell)
+            self._pcalls.append(process_call_mock)
+            return process_call_mock
+
+        def new_config(self, config):
+            self.logging_levels(config.option.quiet_level, config.option.verbose_level)
+            self.config = config
+            self.venv_dict.clear()
+            self.existing_venvs.clear()
+
+        def logging_levels(self, quiet, verbose):
+            update_default_reporter(quiet, verbose)
+            if hasattr(self, "config"):
+                self.config.option.quiet_level = quiet
+                self.config.option.verbose_level = verbose
+
+    return MockSession(config)
+
+
+@pytest.fixture
+def newmocksession(mocksession, newconfig):
+    def newmocksession_(args, source, plugins=()):
+        config = newconfig(args, source, plugins=plugins)
+        mocksession._reset(config, mocksession.popen)
+        return mocksession
+
+    return newmocksession_
+
+
+def getdecoded(out):
+    try:
+        return out.decode("utf-8")
+    except UnicodeDecodeError:
+        return "INTERNAL not-utf8-decodeable, truncated string:\n{}".format(py.io.saferepr(out))
+
+
+@pytest.fixture
+def initproj(tmpdir):
+    """Create a factory function for creating example projects.
+
+    Constructed folder/file hierarchy examples:
+
+    with `src_root` other than `.`:
+
+      tmpdir/
+          name/                  # base
+            src_root/            # src_root
+                name/            # package_dir
+                    __init__.py
+                name.egg-info/   # created later on package build
+            setup.py
+
+    with `src_root` given as `.`:
+
+      tmpdir/
+          name/                  # base, src_root
+            name/                # package_dir
+                __init__.py
+            name.egg-info/       # created later on package build
+            setup.py
+    """
+
+    def initproj_(nameversion, filedefs=None, src_root=".", add_missing_setup_py=True):
+        if filedefs is None:
+            filedefs = {}
+        if not src_root:
+            src_root = "."
+        if isinstance(nameversion, six.string_types):
+            parts = nameversion.rsplit(str("-"), 1)
+            if len(parts) == 1:
+                parts.append("0.1")
+            name, version = parts
+        else:
+            name, version = nameversion
+        base = tmpdir.join(name)
+        src_root_path = _path_join(base, src_root)
+        assert base == src_root_path or src_root_path.relto(
+            base,
+        ), "`src_root` must be the constructed project folder or its direct or indirect subfolder"
+
+        base.ensure(dir=1)
+        create_files(base, filedefs)
+        if not _filedefs_contains(base, filedefs, "setup.py") and add_missing_setup_py:
+            create_files(
+                base,
+                {
+                    "setup.py": """
+                from setuptools import setup, find_packages
+                setup(
+                    name='{name}',
+                    description='{name} project',
+                    version='{version}',
+                    license='MIT',
+                    platforms=['unix', 'win32'],
+                    packages=find_packages('{src_root}'),
+                    package_dir={{'':'{src_root}'}},
+                )
+            """.format(
+                        **locals()
+                    ),
+                },
+            )
+        if not _filedefs_contains(base, filedefs, src_root_path.join(name)):
+            create_files(
+                src_root_path,
+                {
+                    name: {
+                        "__init__.py": textwrap.dedent(
+                            '''
+                """ module {} """
+                __version__ = {!r}''',
+                        )
+                        .strip()
+                        .format(name, version),
+                    },
+                },
+            )
+        manifestlines = [
+            "include {}".format(p.relto(base)) for p in base.visit(lambda x: x.check(file=1))
+        ]
+        create_files(base, {"MANIFEST.in": "\n".join(manifestlines)})
+        base.chdir()
+        return base
+
+    with py.path.local().as_cwd():
+        yield initproj_
+
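+# A hedged usage sketch for `initproj` (values are illustrative): the
+# "name-version" string is split on its last "-", a missing setup.py is
+# generated, and `filedefs` describes the project tree.
+#
+#     def test_project(initproj):
+#         base = initproj(
+#             "example123-0.5",
+#             filedefs={"tests": {"test_ok.py": "def test_ok(): pass"}},
+#         )
+#         assert base.join("setup.py").check(file=1)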
+
+def _path_parts(path):
+    path = path and str(path)  # py.path.local support
+    parts = []
+    while path:
+        folder, name = os.path.split(path)
+        if folder == path:  # root folder
+            folder, name = name, folder
+        if name:
+            parts.append(name)
+        path = folder
+    parts.reverse()
+    return parts
+
+
+def _path_join(base, *args):
+    # workaround for a py.path.local bug on Windows (`path.join('/x', abs=1)`
+    # should be py.path.local('X:\\x') where `X` is the current drive, when in
+    # fact it comes out as py.path.local('\\x'))
+    return py.path.local(base.join(*args, abs=1))
+
+
+def _filedefs_contains(base, filedefs, path):
+    """
+    whether `filedefs` defines a file/folder with the given `path`
+
+    `path`, if relative, will be interpreted relative to the `base` folder, and
+    whether relative or not, must refer to either the `base` folder or one of
+    its direct or indirect children. The base folder itself is considered
+    created if the filedefs structure is not empty.
+
+    """
+    unknown = object()
+    base = py.path.local(base)
+    path = _path_join(base, path)
+
+    path_rel_parts = _path_parts(path.relto(base))
+    for part in path_rel_parts:
+        if not isinstance(filedefs, dict):
+            return False
+        filedefs = filedefs.get(part, unknown)
+        if filedefs is unknown:
+            return False
+    return path_rel_parts or path == base and filedefs
+
+
+def create_files(base, filedefs):
+    for key, value in filedefs.items():
+        if isinstance(value, dict):
+            create_files(base.ensure(key, dir=1), value)
+        elif isinstance(value, six.string_types):
+            s = textwrap.dedent(value)
+
+            if not isinstance(s, six.text_type):
+                if not isinstance(s, six.binary_type):
+                    s = str(s)
+                else:
+                    s = six.ensure_text(s)
+
+            base.join(key).write_text(s, encoding="UTF-8")
+
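+# For reference, a small (assumed) example of the structure `create_files`
+# consumes: nested dicts become directories, strings become dedented file
+# contents written as UTF-8.
+#
+#     create_files(tmpdir, {
+#         "src": {"pkg": {"__init__.py": ""}},
+#         "README.rst": "example project",
+#     })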
+
+@pytest.fixture()
+def mock_venv(monkeypatch):
+    """This creates a mock virtual environment (e.g. will inherit the current interpreter).
+    Note: because we inherit, to keep things sane you must call the py environment and only that;
+    and cannot install any packages."""
+
+    # first ensure we have a clean python path
+    monkeypatch.delenv(str("PYTHONPATH"), raising=False)
+
+    # object to collect some data during the execution
+    class Result(object):
+        def __init__(self, session):
+            self.popens = popen_list
+            self.session = session
+
+    res = OrderedDict()
+
+    # convince tox that the current running virtual environment is already the env we would create
+    class ProxyCurrentPython:
+        @classmethod
+        def readconfig(cls, path):
+            if path.dirname.endswith("{}py".format(os.sep)):
+                return CreationConfig(
+                    base_resolved_python_sha256=getdigest(sys.executable),
+                    base_resolved_python_path=sys.executable,
+                    tox_version=tox.__version__,
+                    sitepackages=False,
+                    usedevelop=False,
+                    deps=[],
+                    alwayscopy=False,
+                )
+            elif path.dirname.endswith("{}.package".format(os.sep)):
+                return CreationConfig(
+                    base_resolved_python_sha256=getdigest(sys.executable),
+                    base_resolved_python_path=sys.executable,
+                    tox_version=tox.__version__,
+                    sitepackages=False,
+                    usedevelop=False,
+                    deps=[(getdigest(""), "setuptools >= 35.0.2"), (getdigest(""), "wheel")],
+                    alwayscopy=False,
+                )
+            assert False  # pragma: no cover
+
+    monkeypatch.setattr(CreationConfig, "readconfig", ProxyCurrentPython.readconfig)
+
+    # provide as Python the current python executable
+    def venv_lookup(venv, name):
+        assert name == "python"
+        venv.envconfig.envdir = py.path.local(sys.executable).join("..", "..")
+        return sys.executable
+
+    monkeypatch.setattr(VirtualEnv, "_venv_lookup", venv_lookup)
+
+    # don't allow overriding the tox config data for the host Python
+    def finish_venv(self):
+        return
+
+    monkeypatch.setattr(VirtualEnv, "finish", finish_venv)
+
+    # we lie that it's an environment with no packages in it
+    @tox.hookimpl
+    def tox_runenvreport(venv, action):
+        return []
+
+    monkeypatch.setattr(venv, "tox_runenvreport", tox_runenvreport)
+
+    # intercept the build session to save it, and intercept the popen
+    # invocations to collect all popen calls
+    popen_list = []
+
+    def popen(cmd, **kwargs):
+        # we don't want to perform installation of new packages,
+        # just replace with an always ok cmd
+        if "pip" in cmd and "install" in cmd:
+            cmd = ["python", "-c", "print({!r})".format(cmd)]
+        ret = None
+        try:
+            ret = subprocess.Popen(cmd, **kwargs)
+        except tox.exception.InvocationError as exception:  # pragma: no cover
+            ret = exception  # pragma: no cover
+        finally:
+            popen_list.append((kwargs.get("env"), ret, cmd))
+        return ret
+
+    def build_session(config):
+        session = tox.session.Session(config, popen=popen)
+        res[id(session)] = Result(session)
+        return session
+
+    monkeypatch.setattr(tox.session, "build_session", build_session)
+    return res
+
+
+@pytest.fixture(scope="session")
+def current_tox_py():
+    """generate the current (test runners) python versions key
+    e.g. py37 when running under Python 3.7"""
+    return "{}{}{}".format("pypy" if tox.INFO.IS_PYPY else "py", *sys.version_info)
+
+
+def pytest_runtest_setup(item):
+    reset_report()
+
+
+def pytest_runtest_teardown(item):
+    reset_report()
+
+
+def pytest_pyfunc_call(pyfuncitem):
+    reset_report()
+
+
+def reset_report(quiet=0, verbose=0):
+    from tox.reporter import _INSTANCE
+
+    _INSTANCE._reset(quiet_level=quiet, verbose_level=verbose)
diff --git a/venv/lib/python3.10/site-packages/tox/_quickstart.py b/venv/lib/python3.10/site-packages/tox/_quickstart.py
new file mode 100644
index 0000000..175d970
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox/_quickstart.py
@@ -0,0 +1,285 @@
+# -*- coding: utf-8 -*-
+"""
+    tox._quickstart
+    ~~~~~~~~~~~~~~~~~
+
+    Command-line script to quickly set up a configuration for a Python project
+
+    This file was heavily inspired by and uses code from ``sphinx-quickstart``
+    in the BSD-licensed `Sphinx project`_.
+
+    .. _Sphinx project: http://sphinx.pocoo.org/
+
+    License for Sphinx
+    ==================
+
+    Copyright (c) 2007-2011 by the Sphinx team (see AUTHORS file).
+    All rights reserved.
+
+    Redistribution and use in source and binary forms, with or without
+    modification, are permitted provided that the following conditions are
+    met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in the
+      documentation and/or other materials provided with the distribution.
+
+    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+"""
+import argparse
+import codecs
+import os
+import sys
+import textwrap
+
+import six
+
+import tox
+
+ALTERNATIVE_CONFIG_NAME = "tox-generated.ini"
+QUICKSTART_CONF = """\
+# tox (https://tox.readthedocs.io/) is a tool for running tests
+# in multiple virtualenvs. This configuration file will run the
+# test suite on all supported python versions. To use it, "pip install tox"
+# and then run "tox" from this directory.
+
+[tox]
+envlist = {envlist}
+
+[testenv]
+deps =
+    {deps}
+commands =
+    {commands}
+"""
+
+
+class ValidationError(Exception):
+    """Raised for validation errors."""
+
+
+def nonempty(x):
+    if not x:
+        raise ValidationError("Please enter some text.")
+    return x
+
+
+def choice(*line):
+    def val(x):
+        if x not in line:
+            raise ValidationError("Please enter one of {}.".format(", ".join(line)))
+        return x
+
+    return val
+
+
+def boolean(x):
+    if x.upper() not in ("Y", "YES", "N", "NO"):
+        raise ValidationError("Please enter either 'y' or 'n'.")
+    return x.upper() in ("Y", "YES")
+
+
+def list_modificator(answer, existing=None):
+    if not existing:
+        existing = []
+    if not isinstance(existing, list):
+        existing = [existing]
+    if not answer:
+        return existing
+    existing.extend([t.strip() for t in answer.split(",") if t.strip()])
+    return existing
+
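+# Hedged examples of the accumulation semantics above (not executed here):
+#     list_modificator("pytest, coverage", ["tox"]) == ["tox", "pytest", "coverage"]
+#     list_modificator("", ["tox"]) == ["tox"]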
+
+def do_prompt(map_, key, text, default=None, validator=nonempty, modificator=None):
+    while True:
+        prompt = "> {} [{}]: ".format(text, default) if default else "> {}: ".format(text)
+        answer = six.moves.input(prompt)
+        if default and not answer:
+            answer = default
+        # FIXME: use six instead of this home-grown solution
+        # noinspection PyUnresolvedReferences
+        if sys.version_info < (3,) and not isinstance(answer, unicode):  # noqa
+            # for Python 2.x, try to get a Unicode string out of it
+            if answer.decode("ascii", "replace").encode("ascii", "replace") != answer:
+                term_encoding = getattr(sys.stdin, "encoding", None)
+                if term_encoding:
+                    answer = answer.decode(term_encoding)
+                else:
+                    print(
+                        "* Note: non-ASCII characters entered but terminal encoding unknown"
+                        " -> assuming UTF-8 or Latin-1.",
+                    )
+                    try:
+                        answer = answer.decode("utf-8")
+                    except UnicodeDecodeError:
+                        answer = answer.decode("latin1")
+        if validator:
+            try:
+                answer = validator(answer)
+            except ValidationError as exception:
+                print("* {}".format(exception))
+                continue
+        break
+    map_[key] = modificator(answer, map_.get(key)) if modificator else answer
+
+
+def ask_user(map_):
+    """modify *map_* in place by getting info from the user."""
+    print("Welcome to the tox {} quickstart utility.".format(tox.__version__))
+    print(
+        "This utility will ask you a few questions and then generate a simple configuration "
+        "file to help get you started using tox.\n"
+        "Please enter values for the following settings (just press Enter to accept a "
+        "default value, if one is given in brackets).\n",
+    )
+    print(
+        textwrap.dedent(
+            """What Python versions do you want to test against?
+            [1] {}
+            [2] py27, {}
+            [3] (All versions) {}
+            [4] Choose each one-by-one""",
+        ).format(
+            tox.PYTHON.CURRENT_RELEASE_ENV,
+            tox.PYTHON.CURRENT_RELEASE_ENV,
+            ", ".join(tox.PYTHON.QUICKSTART_PY_ENVS),
+        ),
+    )
+    do_prompt(
+        map_,
+        "canned_pyenvs",
+        "Enter the number of your choice",
+        default="3",
+        validator=choice("1", "2", "3", "4"),
+    )
+    if map_["canned_pyenvs"] == "1":
+        map_[tox.PYTHON.CURRENT_RELEASE_ENV] = True
+    elif map_["canned_pyenvs"] == "2":
+        for pyenv in ("py27", tox.PYTHON.CURRENT_RELEASE_ENV):
+            map_[pyenv] = True
+    elif map_["canned_pyenvs"] == "3":
+        for pyenv in tox.PYTHON.QUICKSTART_PY_ENVS:
+            map_[pyenv] = True
+    elif map_["canned_pyenvs"] == "4":
+        for pyenv in tox.PYTHON.QUICKSTART_PY_ENVS:
+            if pyenv not in map_:
+                do_prompt(
+                    map_,
+                    pyenv,
+                    "Test your project with {} (Y/n)".format(pyenv),
+                    "Y",
+                    validator=boolean,
+                )
+    print(
+        textwrap.dedent(
+            """What command should be used to test your project? Examples:\
+            - pytest\n"
+            - python -m unittest discover
+            - python setup.py test
+            - trial package.module""",
+        ),
+    )
+    do_prompt(
+        map_,
+        "commands",
+        "Type the command to run your tests",
+        default="pytest",
+        modificator=list_modificator,
+    )
+    print("What extra dependencies do your tests have?")
+    map_["deps"] = get_default_deps(map_["commands"])
+    if map_["deps"]:
+        print("default dependencies are: {}".format(map_["deps"]))
+    do_prompt(
+        map_,
+        "deps",
+        "Comma-separated list of dependencies",
+        validator=None,
+        modificator=list_modificator,
+    )
+
+
+def get_default_deps(commands):
+    if commands and any(c in str(commands) for c in ["pytest", "py.test"]):
+        return ["pytest"]
+    if "trial" in commands:
+        return ["twisted"]
+    return []
+
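+# For example (following the logic above):
+#     get_default_deps(["pytest"]) == ["pytest"]   # matched via str(commands)
+#     get_default_deps(["trial"]) == ["twisted"]   # exact list-membership match
+#     get_default_deps([]) == []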
+
+def post_process_input(map_):
+    envlist = [env for env in tox.PYTHON.QUICKSTART_PY_ENVS if map_.get(env) is True]
+    map_["envlist"] = ", ".join(envlist)
+    map_["commands"] = "\n    ".join(cmd.strip() for cmd in map_["commands"])
+    map_["deps"] = "\n    ".join(dep.strip() for dep in set(map_["deps"]))
+
+
+def generate(map_):
+    """Generate project based on values in *d*."""
+    dpath = map_.get("path", os.getcwd())
+    altpath = os.path.join(dpath, ALTERNATIVE_CONFIG_NAME)
+    while True:
+        name = map_.get("name", tox.INFO.DEFAULT_CONFIG_NAME)
+        targetpath = os.path.join(dpath, name)
+        if not os.path.isfile(targetpath):
+            break
+        do_prompt(map_, "name", "{} exists - choose an alternative".format(targetpath), altpath)
+    with codecs.open(targetpath, "w", encoding="utf-8") as f:
+        f.write(prepare_content(QUICKSTART_CONF.format(**map_)))
+        print(
+            "Finished: {} has been created. For information on this file, "
+            "see https://tox.readthedocs.io/en/latest/config.html\n"
+            "Execute `tox` to test your project.".format(targetpath),
+        )
+
+
+def prepare_content(content):
+    return "\n".join(line.rstrip() for line in content.split("\n"))
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(
+        description="Command-line script to quickly tox config file for a Python project.",
+    )
+    parser.add_argument(
+        "root",
+        type=str,
+        nargs="?",
+        default=".",
+        help="Custom root directory to write config to. Defaults to current directory.",
+    )
+    parser.add_argument(
+        "--version",
+        action="version",
+        version="%(prog)s {}".format(tox.__version__),
+    )
+    return parser.parse_args()
+
+
+def main():
+    args = parse_args()
+    map_ = {"path": args.root}
+    try:
+        ask_user(map_)
+    except (KeyboardInterrupt, EOFError):
+        print("\n[Interrupted.]")
+        return 1
+    post_process_input(map_)
+    generate(map_)
+
+
+if __name__ == "__main__":
+    sys.exit(main())
diff --git a/venv/lib/python3.10/site-packages/tox/action.py b/venv/lib/python3.10/site-packages/tox/action.py
new file mode 100644
index 0000000..e7f9b77
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox/action.py
@@ -0,0 +1,293 @@
+from __future__ import absolute_import, unicode_literals
+
+import os
+import pipes
+import signal
+import subprocess
+import sys
+import time
+from contextlib import contextmanager
+from threading import Thread
+
+import py
+
+from tox import reporter
+from tox.constants import INFO
+from tox.exception import InvocationError
+from tox.reporter import Verbosity
+from tox.util.lock import get_unique_file
+from tox.util.stdlib import is_main_thread
+
+
+class Action(object):
+    """Action is an effort to group operations with the same goal (within reporting)"""
+
+    def __init__(
+        self,
+        name,
+        msg,
+        args,
+        log_dir,
+        generate_tox_log,
+        command_log,
+        popen,
+        python,
+        suicide_timeout,
+        interrupt_timeout,
+        terminate_timeout,
+    ):
+        self.name = name
+        self.args = args
+        self.msg = msg
+        self.activity = self.msg.split(" ", 1)[0]
+        self.log_dir = log_dir
+        self.generate_tox_log = generate_tox_log
+        self.via_popen = popen
+        self.command_log = command_log
+        self._timed_report = None
+        self.python = python
+        self.suicide_timeout = suicide_timeout
+        self.interrupt_timeout = interrupt_timeout
+        self.terminate_timeout = terminate_timeout
+        if is_main_thread():
+            # python allows only main thread to install signal handlers
+            # see https://docs.python.org/3/library/signal.html#signals-and-threads
+            self._install_sigterm_handler()
+
+    def __enter__(self):
+        msg = "{} {}".format(self.msg, " ".join(map(str, self.args)))
+        self._timed_report = reporter.timed_operation(self.name, msg)
+        self._timed_report.__enter__()
+
+        return self
+
+    def __exit__(self, type, value, traceback):
+        self._timed_report.__exit__(type, value, traceback)
+
+    def setactivity(self, name, msg):
+        self.activity = name
+        if msg:
+            reporter.verbosity0("{} {}: {}".format(self.name, name, msg), bold=True)
+        else:
+            reporter.verbosity1("{} {}: {}".format(self.name, name, msg), bold=True)
+
+    def info(self, name, msg):
+        reporter.verbosity1("{} {}: {}".format(self.name, name, msg), bold=True)
+
+    def popen(
+        self,
+        args,
+        cwd=None,
+        env=None,
+        redirect=True,
+        returnout=False,
+        ignore_ret=False,
+        capture_err=True,
+        callback=None,
+        report_fail=True,
+    ):
+        """this drives an interaction with a subprocess"""
+        cwd = py.path.local() if cwd is None else cwd
+        cmd_args = [str(x) for x in self._rewrite_args(cwd, args)]
+        cmd_args_shell = " ".join(pipes.quote(i) for i in cmd_args)
+        stream_getter = self._get_standard_streams(
+            capture_err,
+            cmd_args_shell,
+            redirect,
+            returnout,
+            cwd,
+        )
+        exit_code, output = None, None
+        with stream_getter as (fin, out_path, stderr, stdout):
+            try:
+                process = self.via_popen(
+                    cmd_args,
+                    stdout=stdout,
+                    stderr=stderr,
+                    cwd=str(cwd),
+                    env=os.environ.copy() if env is None else env,
+                    universal_newlines=True,
+                    shell=False,
+                    creationflags=(
+                        subprocess.CREATE_NEW_PROCESS_GROUP
+                        if sys.platform == "win32"
+                        else 0
+                        # needed for Windows signal send ability (CTRL+C)
+                    ),
+                )
+            except OSError as exception:
+                exit_code = exception.errno
+            else:
+                if callback is not None:
+                    callback(process)
+                reporter.log_popen(cwd, out_path, cmd_args_shell, process.pid)
+                output = self.evaluate_cmd(fin, process, redirect)
+                exit_code = process.returncode
+            finally:
+                if out_path is not None and out_path.exists():
+                    lines = out_path.read_text("UTF-8").split("\n")
+                    # the first three lines are the action, cwd, and cmd - remove them
+                    output = "\n".join(lines[3:])
+                try:
+                    if exit_code and not ignore_ret:
+                        if report_fail:
+                            msg = "invocation failed (exit code {:d})".format(exit_code)
+                            if out_path is not None:
+                                msg += ", logfile: {}".format(out_path)
+                                if not out_path.exists():
+                                    msg += " warning log file missing"
+                            reporter.error(msg)
+                            if out_path is not None and out_path.exists():
+                                reporter.separator("=", "log start", Verbosity.QUIET)
+                                reporter.quiet(output)
+                                reporter.separator("=", "log end", Verbosity.QUIET)
+                        raise InvocationError(cmd_args_shell, exit_code, output)
+                finally:
+                    self.command_log.add_command(cmd_args, output, exit_code)
+        return output
+
+    def evaluate_cmd(self, input_file_handler, process, redirect):
+        try:
+            if self.generate_tox_log and not redirect:
+                if process.stderr is not None:
+                    # prevent deadlock
+                    raise ValueError("stderr must not be piped here")
+                # we read binary from the process and must write using a binary stream
+                buf = getattr(sys.stdout, "buffer", sys.stdout)
+                last_time = time.time()
+                while True:
+                    # we have to read one byte at a time, otherwise there
+                    # might be no output for a long time with slow tests
+                    data = input_file_handler.read(1)
+                    if data:
+                        buf.write(data)
+                        if b"\n" in data or (time.time() - last_time) > 1:
+                            # we flush on newlines or after 1 second to
+                            # provide quick enough feedback to the user
+                            # when printing a dot per test
+                            buf.flush()
+                            last_time = time.time()
+                    elif process.poll() is not None:
+                        if process.stdout is not None:
+                            process.stdout.close()
+                        break
+                    else:
+                        time.sleep(0.1)
+                        # the seek updates internal read buffers
+                        input_file_handler.seek(0, 1)
+                input_file_handler.close()
+            out, _ = process.communicate()  # wait to finish
+        except KeyboardInterrupt as exception:
+            reporter.error("got KeyboardInterrupt signal")
+            main_thread = is_main_thread()
+            while True:
+                try:
+                    if main_thread:
+                        # spin up a new thread to disable further interrupt on main thread
+                        stopper = Thread(target=self.handle_interrupt, args=(process,))
+                        stopper.start()
+                        stopper.join()
+                    else:
+                        self.handle_interrupt(process)
+                except KeyboardInterrupt:
+                    continue
+                break
+            raise exception
+        return out
+
+    def handle_interrupt(self, process):
+        """A three level stop mechanism for children - INT -> TERM -> KILL"""
+        msg = "from {} {{}} pid {}".format(os.getpid(), process.pid)
+        if self._wait(process, self.suicide_timeout) is None:
+            self.info("KeyboardInterrupt", msg.format("SIGINT"))
+            process.send_signal(signal.CTRL_C_EVENT if sys.platform == "win32" else signal.SIGINT)
+            if self._wait(process, self.interrupt_timeout) is None:
+                self.info("KeyboardInterrupt", msg.format("SIGTERM"))
+                process.terminate()
+                if self._wait(process, self.terminate_timeout) is None:
+                    self.info("KeyboardInterrupt", msg.format("SIGKILL"))
+                    process.kill()
+                    process.communicate()
+
+    @staticmethod
+    def _wait(process, timeout):
+        if sys.version_info >= (3, 3):
+            # python 3 has timeout feature built-in
+            try:
+                process.communicate(timeout=timeout)
+            except subprocess.TimeoutExpired:
+                pass
+        else:
+            # on Python 2 we need to simulate it
+            delay = 0.01
+            while process.poll() is None and timeout > 0:
+                time.sleep(delay)
+                timeout -= delay
+        return process.poll()
+
+    @contextmanager
+    def _get_standard_streams(self, capture_err, cmd_args_shell, redirect, returnout, cwd):
+        stdout = out_path = input_file_handler = None
+        stderr = subprocess.STDOUT if capture_err else None
+
+        if self.generate_tox_log or redirect:
+            out_path = self.get_log_path(self.name)
+            with out_path.open("wt") as stdout, out_path.open("rb") as input_file_handler:
+                msg = "action: {}, msg: {}\ncwd: {}\ncmd: {}\n".format(
+                    self.name.replace("\n", " "),
+                    self.msg.replace("\n", " "),
+                    str(cwd).replace("\n", " "),
+                    cmd_args_shell.replace("\n", " "),
+                )
+                stdout.write(msg)
+                stdout.flush()
+                input_file_handler.read()  # read the header, so it won't be written to stdout
+                yield input_file_handler, out_path, stderr, stdout
+                return
+
+        if returnout:
+            stdout = subprocess.PIPE
+
+        yield input_file_handler, out_path, stderr, stdout
+
+    def get_log_path(self, actionid):
+        log_file = get_unique_file(self.log_dir, prefix=actionid, suffix=".log")
+        return log_file
+
+    def _rewrite_args(self, cwd, args):
+
+        executable = None
+        if INFO.IS_WIN:
+            # shebang lines are not honored on Windows, so if it's a python
+            # script, prepend the interpreter
+            ext = os.path.splitext(str(args[0]))[1].lower()
+            if ext == ".py":
+                executable = str(self.python)
+        if executable is None:
+            executable = args[0]
+            args = args[1:]
+
+        new_args = [executable]
+
+        # to make the command shorter, try to use relative paths for all subsequent arguments;
+        # note the executable cannot be relative, as Windows applies the cwd after invocation
+        for arg in args:
+            if arg and os.path.isabs(str(arg)):
+                arg_path = py.path.local(arg)
+                if arg_path.exists() and arg_path.common(cwd) is not None:
+                    potential_arg = cwd.bestrelpath(arg_path)
+                    if len(potential_arg.split("..")) < 2:
+                        # just one parent directory accepted as relative path
+                        arg = potential_arg
+            new_args.append(str(arg))
+
+        return new_args
+
+    def _install_sigterm_handler(self):
+        """Handle sigterm as if it were a keyboardinterrupt"""
+
+        def sigterm_handler(signum, frame):
+            reporter.error("Got SIGTERM, handling it as a KeyboardInterrupt")
+            raise KeyboardInterrupt()
+
+        signal.signal(signal.SIGTERM, sigterm_handler)
diff --git a/venv/lib/python3.10/site-packages/tox/cli.py b/venv/lib/python3.10/site-packages/tox/cli.py
new file mode 100644
index 0000000..2fe755c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox/cli.py
@@ -0,0 +1,11 @@
+from tox.config import Parser, get_plugin_manager
+
+
+def cli_parser():
+    parser = Parser()
+    pm = get_plugin_manager(tuple())
+    pm.hook.tox_addoption(parser=parser)
+    return parser.argparser
+
+
+cli = cli_parser()
diff --git a/venv/lib/python3.10/site-packages/tox/config/__init__.py b/venv/lib/python3.10/site-packages/tox/config/__init__.py
new file mode 100644
index 0000000..b49c06f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox/config/__init__.py
@@ -0,0 +1,2128 @@
+from __future__ import print_function
+
+import argparse
+import io
+import itertools
+import json
+import os
+import random
+import re
+import shlex
+import string
+import sys
+import traceback
+import warnings
+from collections import OrderedDict
+from fnmatch import fnmatchcase
+from subprocess import list2cmdline
+from threading import Thread
+
+import pluggy
+import py
+import six
+import toml
+from packaging import requirements
+from packaging.utils import canonicalize_name
+from packaging.version import Version
+
+import tox
+from tox.constants import INFO
+from tox.exception import MissingDependency
+from tox.interpreters import Interpreters, NoInterpreterInfo
+from tox.reporter import (
+    REPORTER_TIMESTAMP_ON_ENV,
+    error,
+    update_default_reporter,
+    using,
+    verbosity1,
+)
+from tox.util.path import ensure_empty_dir
+from tox.util.stdlib import importlib_metadata
+
+from .parallel import ENV_VAR_KEY_PRIVATE as PARALLEL_ENV_VAR_KEY_PRIVATE
+from .parallel import ENV_VAR_KEY_PUBLIC as PARALLEL_ENV_VAR_KEY_PUBLIC
+from .parallel import add_parallel_config, add_parallel_flags
+from .reporter import add_verbosity_commands
+
+try:
+    from shlex import quote as shlex_quote
+except ImportError:
+    from pipes import quote as shlex_quote
+
+
+hookimpl = tox.hookimpl
+# DEPRECATED - REMOVE - left for compatibility with plugins importing from here.
+# Import hookimpl directly from tox instead.
+
+
+WITHIN_PROVISION = os.environ.get(str("TOX_PROVISION")) == "1"
+
+SUICIDE_TIMEOUT = 0.0
+INTERRUPT_TIMEOUT = 0.3
+TERMINATE_TIMEOUT = 0.2
+
+_FACTOR_LINE_PATTERN = re.compile(r"^([\w{}.!,-]+):\s+(.+)")
+_ENVSTR_SPLIT_PATTERN = re.compile(r"((?:{[^}]+})+)|,")
+_ENVSTR_EXPAND_PATTERN = re.compile(r"{([^}]+)}")
+_WHITESPACE_PATTERN = re.compile(r"\s+")
+
+
+def get_plugin_manager(plugins=()):
+    # initialize plugin manager
+    import tox.venv
+
+    pm = pluggy.PluginManager("tox")
+    pm.add_hookspecs(tox.hookspecs)
+    pm.register(tox.config)
+    pm.register(tox.interpreters)
+    pm.register(tox.venv)
+    pm.register(tox.session)
+    from tox import package
+
+    pm.register(package)
+    pm.load_setuptools_entrypoints("tox")
+    for plugin in plugins:
+        pm.register(plugin)
+    pm.check_pending()
+    return pm
+
+
+class Parser:
+    """Command line and ini-parser control object."""
+
+    def __init__(self):
+        class HelpFormatter(argparse.ArgumentDefaultsHelpFormatter):
+            def __init__(self, prog):
+                super(HelpFormatter, self).__init__(prog, max_help_position=35, width=190)
+
+        self.argparser = argparse.ArgumentParser(
+            description="tox options",
+            add_help=False,
+            prog="tox",
+            formatter_class=HelpFormatter,
+        )
+        self._testenv_attr = []
+
+    def add_argument(self, *args, **kwargs):
+        """add argument to command line parser.  This takes the
+        same arguments that ``argparse.ArgumentParser.add_argument``.
+        """
+        return self.argparser.add_argument(*args, **kwargs)
+
+    def add_testenv_attribute(self, name, type, help, default=None, postprocess=None):
+        """add an ini-file variable for "testenv" section.
+
+        Types are specified as strings like "bool", "line-list", "string", "argv", "path",
+        "argvlist".
+
+        The ``postprocess`` function will be called for each testenv
+        like ``postprocess(testenv_config=testenv_config, value=value)``
+        where ``value`` is the value as read from the ini (or the default value)
+        and ``testenv_config`` is a :py:class:`tox.config.TestenvConfig` instance
+        which will receive all ini-variables as object attributes.
+
+        Any postprocess function must return a value which will then be set
+        as the final value in the testenv section.
+        """
+        self._testenv_attr.append(VenvAttribute(name, type, default, help, postprocess))
+
+    def add_testenv_attribute_obj(self, obj):
+        """add an ini-file variable as an object.
+
+        This works like the ``add_testenv_attribute`` function but expects
+        "name", "type", "help", and "postprocess" attributes on the object.
+        """
+        assert hasattr(obj, "name")
+        assert hasattr(obj, "type")
+        assert hasattr(obj, "help")
+        assert hasattr(obj, "postprocess")
+        self._testenv_attr.append(obj)
+
+    def parse_cli(self, args, strict=False):
+        args, argv = self.argparser.parse_known_args(args)
+        if argv and (strict or WITHIN_PROVISION):
+            self.argparser.error("unrecognized arguments: {}".format(" ".join(argv)))
+        return args
+
+    def _format_help(self):
+        return self.argparser.format_help()
+
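+# A hedged sketch of how a plugin typically extends this parser through the
+# tox_addoption hook (the option and attribute names here are illustrative):
+#
+#     @tox.hookimpl
+#     def tox_addoption(parser):
+#         parser.add_argument("--my-flag", action="store_true", help="example flag")
+#         parser.add_testenv_attribute(
+#             name="my_setting", type="string", default="", help="example ini setting",
+#         )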
+
+class VenvAttribute:
+    def __init__(self, name, type, default, help, postprocess):
+        self.name = name
+        self.type = type
+        self.default = default
+        self.help = help
+        self.postprocess = postprocess
+
+
+class DepOption:
+    name = "deps"
+    type = "line-list"
+    help = "each line specifies a dependency in pip/setuptools format."
+    default = ()
+
+    def postprocess(self, testenv_config, value):
+        deps = []
+        config = testenv_config.config
+        for depline in value:
+            m = re.match(r":(\w+):\s*(\S+)", depline)
+            if m:
+                iname, name = m.groups()
+                ixserver = config.indexserver[iname]
+            else:
+                name = depline.strip()
+                ixserver = None
+                # we need to process options, in case they contain a space,
+                # as the subprocess call to pip install will otherwise fail.
+                # in case of a short option, we remove the space
+                for option in tox.PIP.INSTALL_SHORT_OPTIONS_ARGUMENT:
+                    if name.startswith(option):
+                        name = "{}{}".format(option, name[len(option) :].strip())
+                # in case of a long option, we add an equal sign
+                for option in tox.PIP.INSTALL_LONG_OPTIONS_ARGUMENT:
+                    name_start = "{} ".format(option)
+                    if name.startswith(name_start):
+                        name = "{}={}".format(option, name[len(option) :].strip())
+            name = self._cut_off_dep_comment(name)
+            name = self._replace_forced_dep(name, config)
+            deps.append(DepConfig(name, ixserver))
+        return deps
+
+    def _replace_forced_dep(self, name, config):
+        """Override given dependency config name. Take ``--force-dep-version`` option into account.
+
+        :param name: a single dep config line, for example "pkg==1.0".
+        :param config: ``Config`` instance
+        :return: the new dependency that should be used for virtual environments
+        """
+        if not config.option.force_dep:
+            return name
+        for forced_dep in config.option.force_dep:
+            if self._is_same_dep(forced_dep, name):
+                return forced_dep
+        return name
+
+    @staticmethod
+    def _cut_off_dep_comment(name):
+        return re.sub(r"\s+#.*", "", name).strip()
+
+    @classmethod
+    def _is_same_dep(cls, dep1, dep2):
+        """Definitions are the same if they refer to the same package, even if versions differ."""
+        dep1_name = canonicalize_name(requirements.Requirement(dep1).name)
+        try:
+            dep2_name = canonicalize_name(requirements.Requirement(dep2).name)
+        except requirements.InvalidRequirement:
+            # we couldn't parse a version, probably a URL
+            return False
+        return dep1_name == dep2_name
+
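+# e.g. (illustrative): _is_same_dep("pkg==1.0", "Pkg>=2.0") is True since both
+# names canonicalize to "pkg"; an unparsable dep2 (such as a URL) yields False.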
+
+class PosargsOption:
+    name = "args_are_paths"
+    type = "bool"
+    default = True
+    help = "treat positional args in commands as paths"
+
+    def postprocess(self, testenv_config, value):
+        config = testenv_config.config
+        args = config.option.args
+        if args:
+            if value:
+                args = []
+                for arg in config.option.args:
+                    if arg and not os.path.isabs(arg):
+                        origpath = os.path.join(config.invocationcwd.strpath, arg)
+                        if os.path.exists(origpath):
+                            arg = os.path.relpath(origpath, testenv_config.changedir.strpath)
+                    args.append(arg)
+            testenv_config._reader.addsubstitutions(args)
+        return value
+
+
+class InstallcmdOption:
+    name = "install_command"
+    type = "argv_install_command"
+    default = r"python -m pip install \{opts\} \{packages\}"
+    help = "install command for dependencies and package under test."
+
+    def postprocess(self, testenv_config, value):
+        if "{packages}" not in value:
+            raise tox.exception.ConfigError(
+                "'install_command' must contain '{packages}' substitution",
+            )
+        return value
+
+
+def parseconfig(args, plugins=()):
+    """Parse the configuration file and create a Config object.
+
+    :param plugins: sequence of plugin objects to register.
+    :param list[str] args: list of arguments.
+    :rtype: :class:`Config`
+    :raise SystemExit: if no tox ini file is found
+    """
+    pm = get_plugin_manager(plugins)
+    config, option = parse_cli(args, pm)
+    update_default_reporter(config.option.quiet_level, config.option.verbose_level)
+
+    for config_file in propose_configs(option.configfile):
+        config_type = config_file.basename
+
+        content = None
+        if config_type == "pyproject.toml":
+            toml_content = get_py_project_toml(config_file)
+            try:
+                content = toml_content["tool"]["tox"]["legacy_tox_ini"]
+            except KeyError:
+                continue
+        try:
+            ParseIni(config, config_file, content)
+        except SkipThisIni:
+            continue
+        pm.hook.tox_configure(config=config)  # post process config object
+        break
+    else:
+        parser = Parser()
+        pm.hook.tox_addoption(parser=parser)
+        # if there is no tox config file, we now need to do a strict argument
+        # evaluation and raise on unknown args
+        parser.parse_cli(args, strict=True)
+        if option.help or option.helpini:
+            return config
+        if option.devenv:
+            # To load defaults, we parse an empty config
+            ParseIni(config, py.path.local(), "")
+            pm.hook.tox_configure(config=config)
+            return config
+        msg = "tox config file (either {}) not found"
+        candidates = ", ".join(INFO.CONFIG_CANDIDATES)
+        feedback(msg.format(candidates), sysexit=not (option.help or option.helpini))
+    return config
+
+
+def get_py_project_toml(path):
+    with io.open(str(path), encoding="UTF-8") as file_handler:
+        config_data = toml.load(file_handler)
+        return config_data
+
+
+def propose_configs(cli_config_file):
+    from_folder = py.path.local()
+    if cli_config_file is not None:
+        if os.path.isfile(cli_config_file):
+            yield py.path.local(cli_config_file)
+            return
+        if os.path.isdir(cli_config_file):
+            from_folder = py.path.local(cli_config_file)
+        else:
+            print(
+                "ERROR: {} is neither file or directory".format(cli_config_file),
+                file=sys.stderr,
+            )
+            return
+    for basename in INFO.CONFIG_CANDIDATES:
+        if from_folder.join(basename).isfile():
+            yield from_folder.join(basename)
+        for path in from_folder.parts(reverse=True):
+            ini_path = path.join(basename)
+            if ini_path.check():
+                yield ini_path
+
+
+def parse_cli(args, pm):
+    parser = Parser()
+    pm.hook.tox_addoption(parser=parser)
+    option = parser.parse_cli(args)
+    if option.version:
+        print(get_version_info(pm))
+        raise SystemExit(0)
+    interpreters = Interpreters(hook=pm.hook)
+    config = Config(
+        pluginmanager=pm,
+        option=option,
+        interpreters=interpreters,
+        parser=parser,
+        args=args,
+    )
+    return config, option
+
+
+def feedback(msg, sysexit=False):
+    print("ERROR: {}".format(msg), file=sys.stderr)
+    if sysexit:
+        raise SystemExit(1)
+
+
+def get_version_info(pm):
+    out = ["{} imported from {}".format(tox.__version__, tox.__file__)]
+    plugin_dist_info = pm.list_plugin_distinfo()
+    if plugin_dist_info:
+        out.append("registered plugins:")
+        for mod, egg_info in plugin_dist_info:
+            source = getattr(mod, "__file__", repr(mod))
+            out.append("    {}-{} at {}".format(egg_info.project_name, egg_info.version, source))
+    return "\n".join(out)
+
+
+class SetenvDict(object):
+    _DUMMY = object()
+
+    def __init__(self, definitions, reader):
+        self.definitions = definitions
+        self.reader = reader
+        self.resolved = {}
+        self._lookupstack = []
+
+    def __repr__(self):
+        return "{}: {}".format(self.__class__.__name__, self.definitions)
+
+    def __contains__(self, name):
+        return name in self.definitions
+
+    def get(self, name, default=None):
+        try:
+            return self.resolved[name]
+        except KeyError:
+            try:
+                if name in self._lookupstack:
+                    raise KeyError(name)
+                val = self.definitions[name]
+            except KeyError:
+                return os.environ.get(name, default)
+            self._lookupstack.append(name)
+            try:
+                self.resolved[name] = res = self.reader._replace(val, name="setenv")
+            finally:
+                self._lookupstack.pop()
+            return res
+
+    def __getitem__(self, name):
+        x = self.get(name, self._DUMMY)
+        if x is self._DUMMY:
+            raise KeyError(name)
+        return x
+
+    def keys(self):
+        return self.definitions.keys()
+
+    def __setitem__(self, name, value):
+        self.definitions[name] = value
+        self.resolved[name] = value
+
+    def items(self):
+        return ((name, self[name]) for name in self.definitions)
+
+    def export(self):
+        # post-process items to strip internal syntax/semantics (such as {}
+        # being escaped as \{\}) so the result is suitable for use with
+        # os.environ.
+        return {
+            name: Replacer._unescape(value)
+            for name, value in self.items()
+            if value is not self._DUMMY
+        }
+
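+# Hedged illustration of the resolution above: values are resolved lazily via
+# reader._replace and cached in self.resolved; the _lookupstack guard makes a
+# definition that (through substitution) refers back to itself fall back to
+# os.environ instead of recursing forever.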
+
+@tox.hookimpl
+def tox_addoption(parser):
+    parser.add_argument(
+        "--version",
+        action="store_true",
+        help="report version information to stdout.",
+    )
+    parser.add_argument("-h", "--help", action="store_true", help="show help about options")
+    parser.add_argument(
+        "--help-ini",
+        "--hi",
+        action="store_true",
+        dest="helpini",
+        help="show help about ini-names",
+    )
+    add_verbosity_commands(parser)
+    parser.add_argument(
+        "--showconfig",
+        action="store_true",
+        help="show live configuration (by default all env, with -l only default targets,"
+        " specific via TOXENV/-e)",
+    )
+    parser.add_argument(
+        "-l",
+        "--listenvs",
+        action="store_true",
+        help="show list of test environments (with description if verbose)",
+    )
+    parser.add_argument(
+        "-a",
+        "--listenvs-all",
+        action="store_true",
+        help="show list of all defined environments (with description if verbose)",
+    )
+    parser.add_argument(
+        "-c",
+        dest="configfile",
+        help="config file name or directory with 'tox.ini' file.",
+    )
+    parser.add_argument(
+        "-e",
+        action="append",
+        dest="env",
+        metavar="envlist",
+        help="work against specified environments (ALL selects all).",
+    )
+    parser.add_argument(
+        "--devenv",
+        metavar="ENVDIR",
+        help=(
+            "sets up a development environment at ENVDIR based on the env's tox "
+            "configuration specified by `-e` (-e defaults to py)."
+        ),
+    )
+    parser.add_argument("--notest", action="store_true", help="skip invoking test commands.")
+    parser.add_argument(
+        "--sdistonly",
+        action="store_true",
+        help="only perform the sdist packaging activity.",
+    )
+    parser.add_argument(
+        "--skip-pkg-install",
+        action="store_true",
+        help="skip package installation for this run",
+    )
+    add_parallel_flags(parser)
+    parser.add_argument(
+        "--parallel--safe-build",
+        action="store_true",
+        dest="parallel_safe_build",
+        help="(deprecated) ensure two tox builds can run in parallel "
+        "(uses a lock file in the tox workdir with .lock extension)",
+    )
+    parser.add_argument(
+        "--installpkg",
+        metavar="PATH",
+        help="use specified package for installation into venv, instead of creating an sdist.",
+    )
+    parser.add_argument(
+        "--develop",
+        action="store_true",
+        help="install package in the venv using 'setup.py develop' via 'pip -e .'",
+    )
+    parser.add_argument(
+        "-i",
+        "--index-url",
+        action="append",
+        dest="indexurl",
+        metavar="URL",
+        help="set indexserver url (if URL is of form name=url set the "
+        "url for the 'name' indexserver, specifically)",
+    )
+    parser.add_argument(
+        "--pre",
+        action="store_true",
+        help="install pre-releases and development versions of dependencies. "
+        "This will pass the --pre option to install_command "
+        "(pip by default).",
+    )
+    parser.add_argument(
+        "-r",
+        "--recreate",
+        action="store_true",
+        help="force recreation of virtual environments",
+    )
+    parser.add_argument(
+        "--result-json",
+        dest="resultjson",
+        metavar="PATH",
+        help="write a json file with detailed information "
+        "about all commands and results involved.",
+    )
+    parser.add_argument(
+        "--discover",
+        dest="discover",
+        nargs="+",
+        metavar="PATH",
+        help="for python discovery first try the python executables under these paths",
+        default=[],
+    )
+
+    # We choose 1 to 4294967295 because it is the range of PYTHONHASHSEED.
+    parser.add_argument(
+        "--hashseed",
+        metavar="SEED",
+        help="set PYTHONHASHSEED to SEED before running commands.  "
+        "Defaults to a random integer in the range [1, 4294967295] "
+        "([1, 1024] on Windows). "
+        "Passing 'noset' suppresses this behavior.",
+    )
+    parser.add_argument(
+        "--force-dep",
+        action="append",
+        metavar="REQ",
+        help="Forces a certain version of one of the dependencies "
+        "when configuring the virtual environment. REQ Examples "
+        "'pytest<2.7' or 'django>=1.6'.",
+    )
+    parser.add_argument(
+        "--sitepackages",
+        action="store_true",
+        help="override sitepackages setting to True in all envs",
+    )
+    parser.add_argument(
+        "--alwayscopy",
+        action="store_true",
+        help="override alwayscopy setting to True in all envs",
+    )
+    parser.add_argument(
+        "--no-provision",
+        action="store",
+        nargs="?",
+        default=False,
+        const=True,
+        metavar="REQUIRES_JSON",
+        help="do not perform provision, but fail and if a path was provided "
+        "write provision metadata as JSON to it",
+    )
+
+    cli_skip_missing_interpreter(parser)
+    parser.add_argument("--workdir", metavar="PATH", help="tox working directory")
+
+    parser.add_argument(
+        "args",
+        nargs="*",
+        help="additional arguments available to command positional substitution",
+    )
+
+    def _set_envdir_from_devenv(testenv_config, value):
+        if (
+            testenv_config.config.option.devenv is not None
+            and testenv_config.envname != testenv_config.config.provision_tox_env
+        ):
+            return py.path.local(testenv_config.config.option.devenv)
+        else:
+            return value
+
+    parser.add_testenv_attribute(
+        name="envdir",
+        type="path",
+        default="{toxworkdir}/{envname}",
+        help="set venv directory -- be very careful when changing this as tox "
+        "will remove this directory when recreating an environment",
+        postprocess=_set_envdir_from_devenv,
+    )
+
+    # add various core venv interpreter attributes
+    def setenv(testenv_config, value):
+        setenv = value
+        config = testenv_config.config
+        if "PYTHONHASHSEED" not in setenv and config.hashseed is not None:
+            setenv["PYTHONHASHSEED"] = config.hashseed
+
+        setenv["TOX_ENV_NAME"] = str(testenv_config.envname)
+        setenv["TOX_ENV_DIR"] = str(testenv_config.envdir)
+        return setenv
+
+    parser.add_testenv_attribute(
+        name="setenv",
+        type="dict_setenv",
+        postprocess=setenv,
+        help="list of X=Y lines with environment variable settings",
+    )
+
+    def basepython_default(testenv_config, value):
+        """either user set or proposed from the factor name
+
+        in both cases we check that the python version implied by the factor name and the
+        resolved python interpreter version match up; if they don't we warn, unless
+        ignore_basepython_conflict is set, in which case the factor-implied version is forced
+        """
+        for factor in testenv_config.factors:
+            match = tox.PYTHON.PY_FACTORS_RE.match(factor)
+            if match:
+                base_exe = {"py": "python"}.get(match.group(1), match.group(1))
+                version_s = match.group(2)
+                if not version_s:
+                    version_info = ()
+                elif len(version_s) == 1:
+                    version_info = (version_s,)
+                else:
+                    version_info = (version_s[0], version_s[1:])
+                implied_version = ".".join(version_info)
+                implied_python = "{}{}".format(base_exe, implied_version)
+                break
+        else:
+            implied_python, version_info, implied_version = None, (), ""
+
+        if testenv_config.config.ignore_basepython_conflict and implied_python is not None:
+            return implied_python
+
+        proposed_python = (implied_python or sys.executable) if value is None else str(value)
+        if implied_python is not None and implied_python != proposed_python:
+            testenv_config.basepython = proposed_python
+            python_info_for_proposed = testenv_config.python_info
+            if not isinstance(python_info_for_proposed, NoInterpreterInfo):
+                proposed_version = ".".join(
+                    str(x) for x in python_info_for_proposed.version_info[: len(version_info)]
+                )
+                if proposed_version != implied_version:
+                    # TODO(stephenfin): Raise an exception here in tox 4.0
+                    warnings.warn(
+                        "conflicting basepython version (set {}, should be {}) for env '{}';"
+                        "resolve conflict or set ignore_basepython_conflict".format(
+                            proposed_version,
+                            implied_version,
+                            testenv_config.envname,
+                        ),
+                    )
+
+        return proposed_python
+
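+    # Illustrative examples of the factor-to-basepython mapping performed above
+    # (env names assumed for illustration):
+    #   "py39-lint" -> implied basepython "python3.9"
+    #   "pypy3-cov" -> implied basepython "pypy3"
+    #   "py-docs"   -> implied basepython "python"
+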
+    parser.add_testenv_attribute(
+        name="basepython",
+        type="basepython",
+        default=None,
+        postprocess=basepython_default,
+        help="executable name or path of interpreter used to create a virtual test environment.",
+    )
+
+    def merge_description(testenv_config, value):
+        """the reader by default joins generated description with new line,
+        replace new line with space"""
+        return value.replace("\n", " ")
+
+    parser.add_testenv_attribute(
+        name="description",
+        type="string",
+        default="",
+        postprocess=merge_description,
+        help="short description of this environment",
+    )
+
+    parser.add_testenv_attribute(
+        name="envtmpdir",
+        type="path",
+        default="{envdir}/tmp",
+        help="venv temporary directory",
+    )
+
+    parser.add_testenv_attribute(
+        name="envlogdir",
+        type="path",
+        default="{envdir}/log",
+        help="venv log directory",
+    )
+
+    parser.add_testenv_attribute(
+        name="downloadcache",
+        type="string",
+        default=None,
+        help="(ignored) has no effect anymore, pip-8 uses local caching by default",
+    )
+
+    parser.add_testenv_attribute(
+        name="changedir",
+        type="path",
+        default="{toxinidir}",
+        help="directory to change to when running commands",
+    )
+
+    parser.add_testenv_attribute_obj(PosargsOption())
+
+    def skip_install_default(testenv_config, value):
+        return value is True or testenv_config.config.option.skip_pkg_install is True
+
+    parser.add_testenv_attribute(
+        name="skip_install",
+        type="bool",
+        default=False,
+        postprocess=skip_install_default,
+        help="Do not install the current package. This can be used when you need the virtualenv "
+        "management but do not want to install the current package",
+    )
+
+    parser.add_testenv_attribute(
+        name="ignore_errors",
+        type="bool",
+        default=False,
+        help="if set to True all commands will be executed irrespective of their result error "
+        "status.",
+    )
+
+    def recreate(testenv_config, value):
+        if testenv_config.config.option.recreate:
+            return True
+        return value
+
+    parser.add_testenv_attribute(
+        name="recreate",
+        type="bool",
+        default=False,
+        postprocess=recreate,
+        help="always recreate this test environment.",
+    )
+
+    def passenv(testenv_config, value):
+        # Flatten the list to deal with space-separated values.
+        value = list(itertools.chain.from_iterable([x.split(" ") for x in value]))
+
+        passenv = {
+            "CURL_CA_BUNDLE",
+            "LANG",
+            "LANGUAGE",
+            "LC_ALL",
+            "LD_LIBRARY_PATH",
+            "PATH",
+            "PIP_INDEX_URL",
+            "PIP_EXTRA_INDEX_URL",
+            "REQUESTS_CA_BUNDLE",
+            "SSL_CERT_FILE",
+            "TOX_WORK_DIR",
+            "HTTP_PROXY",
+            "HTTPS_PROXY",
+            "NO_PROXY",
+            str(REPORTER_TIMESTAMP_ON_ENV),
+            str(PARALLEL_ENV_VAR_KEY_PUBLIC),
+        }
+
+        # read in global passenv settings
+        p = os.environ.get("TOX_TESTENV_PASSENV", None)
+        if p is not None:
+            env_values = [x for x in p.split() if x]
+            value.extend(env_values)
+
+        # we ensure that tmp directory settings are passed on
+        # we could also set it to the per-venv "envtmpdir"
+        # but this leads to very long paths when run with jenkins
+        # so we just pass it on by default for now.
+        if tox.INFO.IS_WIN:
+            passenv.add("SYSTEMDRIVE")  # needed for pip6
+            passenv.add("SYSTEMROOT")  # needed for python's crypto module
+            passenv.add("PATHEXT")  # needed for discovering executables
+            passenv.add("COMSPEC")  # needed for distutils cygwincompiler
+            passenv.add("TEMP")
+            passenv.add("TMP")
+            # for `multiprocessing.cpu_count()` on Windows (prior to Python 3.4).
+            passenv.add("NUMBER_OF_PROCESSORS")
+            passenv.add("PROCESSOR_ARCHITECTURE")  # platform.machine()
+            passenv.add("USERPROFILE")  # needed for `os.path.expanduser()`
+            passenv.add("MSYSTEM")  # fixes #429
+            # PROGRAM* required for compiler tool discovery #2382
+            passenv.add("PROGRAMFILES")
+            passenv.add("PROGRAMFILES(X86)")
+            passenv.add("PROGRAMDATA")
+        else:
+            passenv.add("TMPDIR")
+
+            # add non-uppercased variables to passenv if present (only necessary for UNIX)
+            passenv.update(name for name in os.environ if name.upper() in passenv)
+
+        for spec in value:
+            for name in os.environ:
+                if fnmatchcase(name.upper(), spec.upper()):
+                    passenv.add(name)
+        return passenv
+
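+    # Illustrative: glob specs in passenv are matched case-insensitively against
+    # the invocation environment, e.g. "passenv = LC_*" picks up LC_ALL, LC_CTYPE, ...
+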
+    parser.add_testenv_attribute(
+        name="passenv",
+        type="line-list",
+        postprocess=passenv,
+        help="environment variables needed during executing test commands (taken from invocation "
+        "environment). Note that tox always  passes through some basic environment variables "
+        "which are needed for basic functioning of the Python system. See --showconfig for the "
+        "eventual passenv setting.",
+    )
+
+    parser.add_testenv_attribute(
+        name="whitelist_externals",
+        type="line-list",
+        help="DEPRECATED: use allowlist_externals",
+    )
+
+    parser.add_testenv_attribute(
+        name="allowlist_externals",
+        type="line-list",
+        help="each lines specifies a path or basename for which tox will not warn "
+        "about it coming from outside the test environment.",
+    )
+
+    parser.add_testenv_attribute(
+        name="platform",
+        type="string",
+        default=".*",
+        help="regular expression which must match against ``sys.platform``. "
+        "otherwise testenv will be skipped.",
+    )
+
+    def sitepackages(testenv_config, value):
+        return testenv_config.config.option.sitepackages or value
+
+    def alwayscopy(testenv_config, value):
+        return testenv_config.config.option.alwayscopy or value
+
+    parser.add_testenv_attribute(
+        name="sitepackages",
+        type="bool",
+        default=False,
+        postprocess=sitepackages,
+        help="Set to ``True`` if you want to create virtual environments that also "
+        "have access to globally installed packages.",
+    )
+
+    parser.add_testenv_attribute(
+        "download",
+        type="bool",
+        default=False,
+        help="download the latest pip, setuptools and wheel when creating the virtual"
+        "environment (default is to use the one bundled in virtualenv)",
+    )
+
+    parser.add_testenv_attribute(
+        name="alwayscopy",
+        type="bool",
+        default=False,
+        postprocess=alwayscopy,
+        help="Set to ``True`` if you want virtualenv to always copy files rather "
+        "than symlinking.",
+    )
+
+    def pip_pre(testenv_config, value):
+        return testenv_config.config.option.pre or value
+
+    parser.add_testenv_attribute(
+        name="pip_pre",
+        type="bool",
+        default=False,
+        postprocess=pip_pre,
+        help="If ``True``, adds ``--pre`` to the ``opts`` passed to the install command. ",
+    )
+
+    def develop(testenv_config, value):
+        option = testenv_config.config.option
+        return not option.installpkg and (value or option.develop or option.devenv is not None)
+
+    parser.add_testenv_attribute(
+        name="usedevelop",
+        type="bool",
+        postprocess=develop,
+        default=False,
+        help="install package in develop/editable mode",
+    )
+
+    parser.add_testenv_attribute_obj(InstallcmdOption())
+
+    parser.add_testenv_attribute(
+        name="list_dependencies_command",
+        type="argv",
+        default="python -m pip freeze",
+        help="list dependencies for a virtual environment",
+    )
+
+    parser.add_testenv_attribute_obj(DepOption())
+
+    parser.add_testenv_attribute(
+        name="suicide_timeout",
+        type="float",
+        default=SUICIDE_TIMEOUT,
+        help="timeout to allow process to exit before sending SIGINT",
+    )
+
+    parser.add_testenv_attribute(
+        name="interrupt_timeout",
+        type="float",
+        default=INTERRUPT_TIMEOUT,
+        help="timeout before sending SIGTERM after SIGINT",
+    )
+
+    parser.add_testenv_attribute(
+        name="terminate_timeout",
+        type="float",
+        default=TERMINATE_TIMEOUT,
+        help="timeout before sending SIGKILL after SIGTERM",
+    )
+
+    parser.add_testenv_attribute(
+        name="commands",
+        type="argvlist",
+        default="",
+        help="each line specifies a test command and can use substitution.",
+    )
+
+    parser.add_testenv_attribute(
+        name="commands_pre",
+        type="argvlist",
+        default="",
+        help="each line specifies a setup command action and can use substitution.",
+    )
+
+    parser.add_testenv_attribute(
+        name="commands_post",
+        type="argvlist",
+        default="",
+        help="each line specifies a teardown command and can use substitution.",
+    )
+
+    parser.add_testenv_attribute(
+        "ignore_outcome",
+        type="bool",
+        default=False,
+        help="if set to True a failing result of this testenv will not make "
+        "tox fail, only a warning will be produced",
+    )
+
+    parser.add_testenv_attribute(
+        "extras",
+        type="line-list",
+        help="list of extras to install with the source distribution or develop install",
+    )
+
+    add_parallel_config(parser)
+
+
+def cli_skip_missing_interpreter(parser):
+    class SkipMissingInterpreterAction(argparse.Action):
+        def __call__(self, parser, namespace, values, option_string=None):
+            value = "true" if values is None else values
+            if value not in ("config", "true", "false"):
+                raise argparse.ArgumentTypeError("value must be config, true or false")
+            setattr(namespace, self.dest, value)
+
+    parser.add_argument(
+        "-s",
+        "--skip-missing-interpreters",
+        default="config",
+        metavar="val",
+        nargs="?",
+        action=SkipMissingInterpreterAction,
+        help="don't fail tests for missing interpreters: {config,true,false} choice",
+    )
+
+
+class Config(object):
+    """Global Tox config object."""
+
+    def __init__(self, pluginmanager, option, interpreters, parser, args):
+        self.envconfigs = OrderedDict()
+        """Mapping envname -> envconfig"""
+        self.invocationcwd = py.path.local()
+        self.interpreters = interpreters
+        self.pluginmanager = pluginmanager
+        self.option = option
+        """option namespace containing all parsed command line options"""
+        self._parser = parser
+        self._testenv_attr = parser._testenv_attr
+        self.args = args
+
+    @property
+    def homedir(self):
+        homedir = get_homedir()
+        if homedir is None:
+            homedir = self.toxinidir  # FIXME XXX good idea?
+        return homedir
+
+
+class TestenvConfig:
+    """Testenv Configuration object.
+
+    In addition to some core attributes/properties this config object holds all
+    per-testenv ini attributes as attributes, see "tox --help-ini" for an overview.
+    """
+
+    def __init__(self, envname, config, factors, reader):
+        #: test environment name
+        self.envname = envname
+        #: global tox config object
+        self.config = config
+        #: set of factors
+        self.factors = factors
+        self._reader = reader
+        self._missing_subs = {}
+        """Holds substitutions that could not be resolved.
+
+        Pre 2.8.1 missing substitutions crashed with a ConfigError although this would not be a
+        problem if the env is not part of the current testrun. So we need to remember this and
+        check later when the testenv is actually run and crash only then.
+        """
+
+    # Python 3 only, as __getattribute__ is ignored for old-style types on Python 2
+    def __getattribute__(self, name):
+        rv = object.__getattribute__(self, name)
+        if isinstance(rv, Exception):
+            raise rv
+        return rv
+
+    if six.PY2:
+
+        def __getattr__(self, name):
+            if name in self._missing_subs:
+                raise self._missing_subs[name]
+            raise AttributeError(name)
+
+    def get_envbindir(self):
+        """Path to directory where scripts/binaries reside."""
+        is_bin = (
+            isinstance(self.python_info, NoInterpreterInfo)
+            or tox.INFO.IS_WIN is False
+            or self.python_info.implementation == "Jython"
+            or (
+                # this combination is MSYS2
+                tox.INFO.IS_WIN
+                and self.python_info.os_sep == "/"
+            )
+            or (
+                tox.INFO.IS_WIN
+                and self.python_info.implementation == "PyPy"
+                and self.python_info.extra_version_info < (7, 3, 1)
+            )
+        )
+        return self.envdir.join("bin" if is_bin else "Scripts")
+
+    @property
+    def envbindir(self):
+        return self.get_envbindir()
+
+    @property
+    def envpython(self):
+        """Path to python executable."""
+        return self.get_envpython()
+
+    def get_envpython(self):
+        """path to python/jython executable."""
+        if "jython" in str(self.basepython):
+            name = "jython"
+        else:
+            name = "python"
+        return self.envbindir.join(name)
+
+    def get_envsitepackagesdir(self):
+        """Return sitepackagesdir of the virtualenv environment.
+
+        NOTE: Only available during execution, not during parsing.
+        """
+        x = self.config.interpreters.get_sitepackagesdir(info=self.python_info, envdir=self.envdir)
+        return x
+
+    @property
+    def python_info(self):
+        """Return sitepackagesdir of the virtualenv environment."""
+        return self.config.interpreters.get_info(envconfig=self)
+
+    def getsupportedinterpreter(self):
+        if tox.INFO.IS_WIN and self.basepython and "jython" in self.basepython:
+            raise tox.exception.UnsupportedInterpreter(
+                "Jython/Windows does not support installing scripts",
+            )
+        info = self.config.interpreters.get_info(envconfig=self)
+        if not info.executable:
+            raise tox.exception.InterpreterNotFound(self.basepython)
+        if not info.version_info:
+            raise tox.exception.InvocationError(
+                "Failed to get version_info for {}: {}".format(info.name, info.err),
+            )
+        return info.executable
+
+
+testenvprefix = "testenv:"
+
+
+def get_homedir():
+    try:
+        return py.path.local._gethomedir()
+    except Exception:
+        return None
+
+
+def make_hashseed():
+    max_seed = 4294967295
+    if tox.INFO.IS_WIN:
+        max_seed = 1024
+    return str(random.randint(1, max_seed))
+
+
+class SkipThisIni(Exception):
+    """Internal exception to indicate the parsed ini file should be skipped"""
+
+
+class ParseIni(object):
+    def __init__(self, config, ini_path, ini_data):  # noqa
+        config.toxinipath = ini_path
+        using("tox.ini: {} (pid {})".format(config.toxinipath, os.getpid()))
+        config.toxinidir = config.toxinipath.dirpath() if ini_path.check(file=True) else ini_path
+
+        self._cfg = py.iniconfig.IniConfig(config.toxinipath, ini_data)
+
+        if ini_path.basename == "setup.cfg" and "tox:tox" not in self._cfg:
+            verbosity1("Found no [tox:tox] section in setup.cfg, skipping.")
+            raise SkipThisIni()
+
+        previous_line_of = self._cfg.lineof
+
+        self.expand_section_names(self._cfg)
+
+        def line_of_default_to_zero(section, name=None):
+            at = previous_line_of(section, name=name)
+            if at is None:
+                at = 0
+            return at
+
+        self._cfg.lineof = line_of_default_to_zero
+        config._cfg = self._cfg
+        self.config = config
+
+        prefix = "tox" if ini_path.basename == "setup.cfg" else None
+        fallbacksection = "tox:tox" if ini_path.basename == "setup.cfg" else "tox"
+
+        context_name = getcontextname()
+        if context_name == "jenkins":
+            reader = SectionReader(
+                "tox:jenkins",
+                self._cfg,
+                prefix=prefix,
+                fallbacksections=[fallbacksection],
+            )
+            dist_share_default = "{toxworkdir}/distshare"
+        elif not context_name:
+            reader = SectionReader("tox", self._cfg, prefix=prefix)
+            dist_share_default = "{homedir}/.tox/distshare"
+        else:
+            raise ValueError("invalid context")
+
+        if config.option.hashseed is None:
+            hash_seed = make_hashseed()
+        elif config.option.hashseed == "noset":
+            hash_seed = None
+        else:
+            hash_seed = config.option.hashseed
+        config.hashseed = hash_seed
+
+        reader.addsubstitutions(toxinidir=config.toxinidir, homedir=config.homedir)
+
+        if config.option.workdir is None:
+            config.toxworkdir = reader.getpath("toxworkdir", "{toxinidir}/.tox")
+        else:
+            config.toxworkdir = config.toxinidir.join(config.option.workdir, abs=True)
+
+        if os.path.exists(str(config.toxworkdir)):
+            config.toxworkdir = config.toxworkdir.realpath()
+
+        reader.addsubstitutions(toxworkdir=config.toxworkdir)
+        config.ignore_basepython_conflict = reader.getbool("ignore_basepython_conflict", False)
+
+        config.distdir = reader.getpath("distdir", "{toxworkdir}/dist")
+
+        reader.addsubstitutions(distdir=config.distdir)
+        config.distshare = reader.getpath("distshare", dist_share_default)
+        reader.addsubstitutions(distshare=config.distshare)
+        config.temp_dir = reader.getpath("temp_dir", "{toxworkdir}/.tmp")
+        reader.addsubstitutions(temp_dir=config.temp_dir)
+        config.sdistsrc = reader.getpath("sdistsrc", None)
+        config.setupdir = reader.getpath("setupdir", "{toxinidir}")
+        config.logdir = config.toxworkdir.join("log")
+        within_parallel = PARALLEL_ENV_VAR_KEY_PRIVATE in os.environ
+        if not within_parallel and not WITHIN_PROVISION:
+            ensure_empty_dir(config.logdir)
+
+        # determine indexserver dictionary
+        config.indexserver = {"default": IndexServerConfig("default")}
+        prefix = "indexserver"
+        for line in reader.getlist(prefix):
+            name, url = map(lambda x: x.strip(), line.split("=", 1))
+            config.indexserver[name] = IndexServerConfig(name, url)
+
+        if config.option.skip_missing_interpreters == "config":
+            val = reader.getbool("skip_missing_interpreters", False)
+            config.option.skip_missing_interpreters = "true" if val else "false"
+
+        override = False
+        if config.option.indexurl:
+            for url_def in config.option.indexurl:
+                m = re.match(r"\W*(\w+)=(\S+)", url_def)
+                if m is None:
+                    url = url_def
+                    name = "default"
+                else:
+                    name, url = m.groups()
+                    if not url:
+                        url = None
+                if name != "ALL":
+                    config.indexserver[name].url = url
+                else:
+                    override = url
+        # let ALL override all existing entries
+        if override:
+            for name in config.indexserver:
+                config.indexserver[name] = IndexServerConfig(name, override)
+
+        self.handle_provision(config, reader)
+
+        self.parse_build_isolation(config, reader)
+        res = self._getenvdata(reader, config)
+        config.envlist, all_envs, config.envlist_default, config.envlist_explicit = res
+
+        # factors used in config or predefined
+        known_factors = self._list_section_factors("testenv")
+        known_factors.update({"py", "python"})
+
+        # factors stated in config envlist
+        stated_envlist = reader.getstring("envlist", replace=False)
+        if stated_envlist:
+            for env in _split_env(stated_envlist):
+                known_factors.update(env.split("-"))
+
+        # configure testenvs
+        to_do = []
+        failures = OrderedDict()
+        results = {}
+        cur_self = self
+
+        def run(name, section, subs, config):
+            try:
+                results[name] = cur_self.make_envconfig(name, section, subs, config)
+            except Exception as exception:
+                failures[name] = (exception, traceback.format_exc())
+
+        order = []
+        for name in all_envs:
+            section = "{}{}".format(testenvprefix, name)
+            factors = set(name.split("-"))
+            if (
+                section in self._cfg
+                or factors <= known_factors
+                or all(
+                    tox.PYTHON.PY_FACTORS_RE.match(factor) for factor in factors - known_factors
+                )
+            ):
+                order.append(name)
+                thread = Thread(target=run, args=(name, section, reader._subs, config))
+                thread.daemon = True
+                thread.start()
+                to_do.append(thread)
+        for thread in to_do:
+            while thread.is_alive():
+                thread.join(timeout=20)
+        if failures:
+            raise tox.exception.ConfigError(
+                "\n".join(
+                    "{} failed with {} at {}".format(key, exc, trace)
+                    for key, (exc, trace) in failures.items()
+                ),
+            )
+        for name in order:
+            config.envconfigs[name] = results[name]
+        all_develop = all(
+            name in config.envconfigs and config.envconfigs[name].usedevelop
+            for name in config.envlist
+        )
+
+        config.skipsdist = reader.getbool("skipsdist", all_develop)
+
+        if config.option.devenv is not None:
+            config.option.notest = True
+
+        if config.option.devenv is not None and len(config.envlist) != 1:
+            feedback("--devenv requires only a single -e", sysexit=True)
+
+    def handle_provision(self, config, reader):
+        config.requires = reader.getlist("requires")
+        config.minversion = reader.getstring("minversion", None)
+        config.provision_tox_env = name = reader.getstring("provision_tox_env", ".tox")
+        min_version = "tox >= {}".format(config.minversion or Version(tox.__version__).public)
+        deps = self.ensure_requires_satisfied(config, config.requires, min_version)
+        if config.run_provision:
+            section_name = "testenv:{}".format(name)
+            if section_name not in self._cfg.sections:
+                self._cfg.sections[section_name] = {}
+            self._cfg.sections[section_name]["description"] = "meta tox"
+            env_config = self.make_envconfig(
+                name,
+                "{}{}".format(testenvprefix, name),
+                reader._subs,
+                config,
+            )
+            env_config.deps = deps
+            config.envconfigs[config.provision_tox_env] = env_config
+            raise tox.exception.MissingRequirement(config)
+        # if provisioning is not on, we now need to do a strict argument
+        # evaluation and raise on unknown args
+        self.config._parser.parse_cli(args=self.config.args, strict=True)
+
+    @classmethod
+    def ensure_requires_satisfied(cls, config, requires, min_version):
+        missing_requirements = []
+        failed_to_parse = False
+        deps = []
+        exists = set()
+        for require in requires + [min_version]:
+            # noinspection PyBroadException
+            try:
+                package = requirements.Requirement(require)
+                # check if the package even applies
+                if package.marker and not package.marker.evaluate({"extra": ""}):
+                    continue
+                package_name = canonicalize_name(package.name)
+                if package_name not in exists:
+                    deps.append(DepConfig(require, None))
+                    exists.add(package_name)
+                    dist = importlib_metadata.distribution(package.name)
+                    if not package.specifier.contains(dist.version, prereleases=True):
+                        raise MissingDependency(package)
+            except requirements.InvalidRequirement as exception:
+                failed_to_parse = True
+                error("failed to parse {!r}".format(exception))
+            except Exception as exception:
+                verbosity1("could not satisfy requires {!r}".format(exception))
+                missing_requirements.append(str(requirements.Requirement(require)))
+        if failed_to_parse:
+            raise tox.exception.BadRequirement()
+        if config.option.no_provision and missing_requirements:
+            msg = "provisioning explicitly disabled within {}, but missing {}"
+            if config.option.no_provision is not True:  # it's a path
+                msg += " and wrote to {}"
+                cls.write_requires_to_json_file(config)
+            raise tox.exception.Error(
+                msg.format(sys.executable, missing_requirements, config.option.no_provision)
+            )
+        if WITHIN_PROVISION and missing_requirements:
+            msg = "break infinite loop provisioning within {} missing {}"
+            raise tox.exception.Error(msg.format(sys.executable, missing_requirements))
+        config.run_provision = bool(len(missing_requirements))
+        return deps
+
+    @staticmethod
+    def write_requires_to_json_file(config):
+        requires_dict = {
+            "minversion": config.minversion,
+            "requires": config.requires,
+        }
+        try:
+            with open(config.option.no_provision, "w", encoding="utf-8") as outfile:
+                json.dump(requires_dict, outfile, indent=4)
+        except TypeError:  # Python 2
+            with open(config.option.no_provision, "w") as outfile:
+                json.dump(requires_dict, outfile, indent=4, encoding="utf-8")
+
+    def parse_build_isolation(self, config, reader):
+        config.isolated_build = reader.getbool("isolated_build", False)
+        config.isolated_build_env = reader.getstring("isolated_build_env", ".package")
+        if config.isolated_build is True:
+            name = config.isolated_build_env
+            section_name = "testenv:{}".format(name)
+            if section_name not in self._cfg.sections:
+                self._cfg.sections[section_name] = {}
+            self._cfg.sections[section_name]["deps"] = ""
+            self._cfg.sections[section_name]["sitepackages"] = "False"
+            self._cfg.sections[section_name]["description"] = "isolated packaging environment"
+            config.envconfigs[name] = self.make_envconfig(
+                name,
+                "{}{}".format(testenvprefix, name),
+                reader._subs,
+                config,
+            )
+
+    def _list_section_factors(self, section):
+        factors = set()
+        if section in self._cfg:
+            for _, value in self._cfg[section].items():
+                exprs = re.findall(r"^([\w{}.!,-]+):\s+", value, re.M)
+                factors.update(*mapcat(_split_factor_expr_all, exprs))
+        return factors
+
+    def make_envconfig(self, name, section, subs, config, replace=True):
+        factors = set(name.split("-"))
+        reader = SectionReader(section, self._cfg, fallbacksections=["testenv"], factors=factors)
+        tc = TestenvConfig(name, config, factors, reader)
+        reader.addsubstitutions(
+            envname=name,
+            envbindir=tc.get_envbindir,
+            envsitepackagesdir=tc.get_envsitepackagesdir,
+            envpython=tc.get_envpython,
+            **subs
+        )
+        for env_attr in config._testenv_attr:
+            atype = env_attr.type
+            try:
+                if atype in (
+                    "bool",
+                    "float",
+                    "path",
+                    "string",
+                    "dict",
+                    "dict_setenv",
+                    "argv",
+                    "argvlist",
+                    "argv_install_command",
+                ):
+                    meth = getattr(reader, "get{}".format(atype))
+                    res = meth(env_attr.name, env_attr.default, replace=replace)
+                elif atype == "basepython":
+                    no_fallback = name in (config.provision_tox_env,)
+                    res = reader.getstring(
+                        env_attr.name,
+                        env_attr.default,
+                        replace=replace,
+                        no_fallback=no_fallback,
+                    )
+                elif atype == "space-separated-list":
+                    res = reader.getlist(env_attr.name, sep=" ")
+                elif atype == "line-list":
+                    res = reader.getlist(env_attr.name, sep="\n")
+                elif atype == "env-list":
+                    res = reader.getstring(env_attr.name, replace=False)
+                    res = tuple(_split_env(res))
+                else:
+                    raise ValueError("unknown type {!r}".format(atype))
+                if env_attr.postprocess:
+                    res = env_attr.postprocess(testenv_config=tc, value=res)
+            except tox.exception.MissingSubstitution as e:
+                tc._missing_subs[env_attr.name] = res = e
+            # On Python 2, exceptions are handled in __getattr__
+            if not six.PY2 or not isinstance(res, Exception):
+                setattr(tc, env_attr.name, res)
+            if atype in ("path", "string", "basepython"):
+                reader.addsubstitutions(**{env_attr.name: res})
+        return tc
+
+    def _getallenvs(self, reader, extra_env_list=None):
+        extra_env_list = extra_env_list or []
+        env_str = reader.getstring("envlist", replace=False)
+        env_list = _split_env(env_str)
+        for env in extra_env_list:
+            if env not in env_list:
+                env_list.append(env)
+
+        all_envs = OrderedDict((i, None) for i in env_list)
+        for section in self._cfg:
+            if section.name.startswith(testenvprefix):
+                all_envs[section.name[len(testenvprefix) :]] = None
+        if not all_envs:
+            all_envs["python"] = None
+        return list(all_envs.keys())
+
+    def _getenvdata(self, reader, config):
+        from_option = self.config.option.env
+        from_environ = os.environ.get("TOXENV")
+        from_config = reader.getstring("envlist", replace=False)
+
+        env_list = []
+        envlist_explicit = False
+        if (
+            (from_option and "ALL" in from_option)
+            or (not from_option and from_environ and "ALL" in from_environ.split(","))
+        ) and PARALLEL_ENV_VAR_KEY_PRIVATE not in os.environ:
+            all_envs = self._getallenvs(reader)
+        else:
+            candidates = (
+                (os.environ.get(PARALLEL_ENV_VAR_KEY_PRIVATE), True),
+                (from_option, True),
+                (from_environ, True),
+                ("py" if self.config.option.devenv is not None else None, False),
+                (from_config, False),
+            )
+            env_str, envlist_explicit = next(((i, e) for i, e in candidates if i), ([], False))
+            env_list = _split_env(env_str)
+            all_envs = self._getallenvs(reader, env_list)
+
+        if not env_list:
+            env_list = all_envs
+
+        provision_tox_env = config.provision_tox_env
+        if config.provision_tox_env in env_list:
+            msg = "provision_tox_env {} cannot be part of envlist".format(provision_tox_env)
+            raise tox.exception.ConfigError(msg)
+
+        package_env = config.isolated_build_env
+        if config.isolated_build is True and package_env in all_envs:
+            all_envs.remove(package_env)
+
+        if config.isolated_build is True and package_env in env_list:
+            msg = "isolated_build_env {} cannot be part of envlist".format(package_env)
+            raise tox.exception.ConfigError(msg)
+
+        return env_list, all_envs, _split_env(from_config), envlist_explicit
+
+    @staticmethod
+    def expand_section_names(config):
+        """Generative section names.
+
+        Allow writing section as [testenv:py{36,37}-cov]
+        The parser will see it as two different sections: [testenv:py36-cov], [testenv:py37-cov]
+
+        """
+        factor_re = re.compile(r"{\s*([\w\s,-]+)\s*}")
+        split_re = re.compile(r"\s*,\s*")
+        to_remove = set()
+        for section in list(config.sections):
+            split_section = factor_re.split(section)
+            for parts in itertools.product(*map(split_re.split, split_section)):
+                section_name = "".join(parts)
+                if section_name not in config.sections:
+                    config.sections[section_name] = config.sections[section]
+                    to_remove.add(section)
+
+        for section in to_remove:
+            del config.sections[section]
+
+
+def _split_env(env):
+    """if handed a list, action="append" was used for -e"""
+    if env is None:
+        return []
+    if not isinstance(env, list):
+        env = [e.split("#", 1)[0].strip() for e in env.split("\n")]
+        env = ",".join(e for e in env if e)
+        env = [env]
+    return mapcat(_expand_envstr, env)
+
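+# Illustrative: comment lines are stripped before expansion, e.g.
+#   _split_env("py36\n# linting\npy37") == ["py36", "py37"]
+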
+
+def _is_negated_factor(factor):
+    return factor.startswith("!")
+
+
+def _base_factor_name(factor):
+    return factor[1:] if _is_negated_factor(factor) else factor
+
+
+def _split_factor_expr(expr):
+    def split_single(e):
+        raw = e.split("-")
+        included = {_base_factor_name(factor) for factor in raw if not _is_negated_factor(factor)}
+        excluded = {_base_factor_name(factor) for factor in raw if _is_negated_factor(factor)}
+        return included, excluded
+
+    partial_envs = _expand_envstr(expr)
+    return [split_single(e) for e in partial_envs]
+
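+# Illustrative: a "!"-negated factor lands in the excluded set, e.g.
+#   _split_factor_expr("py36-!cov") == [({"py36"}, {"cov"})]
+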
+
+def _split_factor_expr_all(expr):
+    partial_envs = _expand_envstr(expr)
+    return [{_base_factor_name(factor) for factor in e.split("-")} for e in partial_envs]
+
+
+def _expand_envstr(envstr):
+    # split by commas not in groups
+    tokens = _ENVSTR_SPLIT_PATTERN.split(envstr)
+    envlist = ["".join(g).strip() for k, g in itertools.groupby(tokens, key=bool) if k]
+
+    def expand(env):
+        tokens = _ENVSTR_EXPAND_PATTERN.split(env)
+        parts = [_WHITESPACE_PATTERN.sub("", token).split(",") for token in tokens]
+        return ["".join(variant) for variant in itertools.product(*parts)]
+
+    return mapcat(expand, envlist)
+
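+# Illustrative: _expand_envstr("py{36,37}-django{22,30}") ==
+#     ["py36-django22", "py36-django30", "py37-django22", "py37-django30"]
+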
+
+def mapcat(f, seq):
+    return list(itertools.chain.from_iterable(map(f, seq)))
+
+
+class DepConfig:
+    def __init__(self, name, indexserver=None):
+        self.name = name
+        self.indexserver = indexserver
+
+    def __repr__(self):
+        if self.indexserver:
+            if self.indexserver.name == "default":
+                return self.name
+            return ":{}:{}".format(self.indexserver.name, self.name)
+        return str(self.name)
+
+
+class IndexServerConfig:
+    def __init__(self, name, url=None):
+        self.name = name
+        self.url = url
+
+    def __repr__(self):
+        return "IndexServerConfig(name={}, url={})".format(self.name, self.url)
+
+
+is_section_substitution = re.compile(r"{\[[^{}\s]+\]\S+?}").match
+# Check value matches substitution form of referencing value from other section.
+# E.g. {[base]commands}
+
+
+class SectionReader:
+    def __init__(
+        self,
+        section_name,
+        cfgparser,
+        fallbacksections=None,
+        factors=(),
+        prefix=None,
+        posargs="",
+    ):
+        if prefix is None:
+            self.section_name = section_name
+        else:
+            self.section_name = "{}:{}".format(prefix, section_name)
+        self._cfg = cfgparser
+        self.fallbacksections = fallbacksections or []
+        self.factors = factors
+        self._subs = {}
+        self._subststack = []
+        self._setenv = None
+        self.posargs = posargs
+
+    def get_environ_value(self, name):
+        if self._setenv is None:
+            return os.environ.get(name)
+        return self._setenv.get(name)
+
+    def addsubstitutions(self, _posargs=None, **kw):
+        self._subs.update(kw)
+        if _posargs:
+            self.posargs = _posargs
+
+    def getpath(self, name, defaultpath, replace=True):
+        path = self.getstring(name, defaultpath, replace=replace)
+        if path is not None:
+            toxinidir = self._subs["toxinidir"]
+            return toxinidir.join(path, abs=True)
+
+    def getlist(self, name, sep="\n"):
+        s = self.getstring(name, None)
+        if s is None:
+            return []
+        return [x.strip() for x in s.split(sep) if x.strip()]
+
+    def getdict(self, name, default=None, sep="\n", replace=True):
+        value = self.getstring(name, None, replace=replace)
+        return self._getdict(value, default=default, sep=sep, replace=replace)
+
+    def getdict_setenv(self, name, default=None, sep="\n", replace=True):
+        value = self.getstring(name, None, replace=replace, crossonly=True)
+        definitions = self._getdict(value, default=default, sep=sep, replace=replace)
+        self._setenv = SetenvDict(definitions, reader=self)
+        return self._setenv
+
+    def _getdict(self, value, default, sep, replace=True):
+        if value is None or not replace:
+            return default or {}
+
+        env_values = {}
+        for line in value.split(sep):
+            if line.strip():
+                if line.startswith("#"):  # comment lines are ignored
+                    pass
+                elif line.startswith("file|"):  # file markers contain paths to env files
+                    file_path = line[5:].strip()
+                    if os.path.exists(file_path):
+                        with open(file_path, "rt") as file_handler:
+                            content = file_handler.read()
+                        env_values.update(self._getdict(content, "", sep, replace))
+                else:
+                    name, value = line.split("=", 1)
+                    env_values[name.strip()] = value.strip()
+        return env_values
+
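+    # Illustrative setenv block consumed by _getdict above; the "file|" marker
+    # pulls NAME=value pairs from an env file (path assumed for illustration):
+    #   setenv =
+    #       file|.env
+    #       PIP_DISABLE_PIP_VERSION_CHECK = 1
+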
+    def getfloat(self, name, default=None, replace=True):
+        s = self.getstring(name, default, replace=replace)
+        if not s or not replace:
+            s = default
+        if s is None:
+            raise KeyError("no config value [{}] {} found".format(self.section_name, name))
+
+        if not isinstance(s, float):
+            try:
+                s = float(s)
+            except ValueError:
+                raise tox.exception.ConfigError("{}: invalid float {!r}".format(name, s))
+        return s
+
+    def getbool(self, name, default=None, replace=True):
+        s = self.getstring(name, default, replace=replace)
+        if not s or not replace:
+            s = default
+        if s is None:
+            raise KeyError("no config value [{}] {} found".format(self.section_name, name))
+
+        if not isinstance(s, bool):
+            if s.lower() == "true":
+                s = True
+            elif s.lower() == "false":
+                s = False
+            else:
+                raise tox.exception.ConfigError(
+                    "{}: boolean value {!r} needs to be 'True' or 'False'".format(name, s),
+                )
+        return s
+
+    def getargvlist(self, name, default="", replace=True):
+        s = self.getstring(name, default, replace=False)
+        return _ArgvlistReader.getargvlist(self, s, replace=replace, name=name)
+
+    def getargv(self, name, default="", replace=True):
+        return self.getargvlist(name, default, replace=replace)[0]
+
+    def getargv_install_command(self, name, default="", replace=True):
+        s = self.getstring(name, default, replace=False)
+        if not s:
+            # This occurs when factors are used and a testenv has no
+            # factorised value for install_command, most commonly when the
+            # platform setting is also used. An empty value would trigger the
+            # "install_command must contain '{packages}'" error.
+            s = default
+
+        if "{packages}" in s:
+            s = s.replace("{packages}", r"\{packages\}")
+        if "{opts}" in s:
+            s = s.replace("{opts}", r"\{opts\}")
+
+        return _ArgvlistReader.getargvlist(self, s, replace=replace, name=name)[0]
+
+    def getstring(self, name, default=None, replace=True, crossonly=False, no_fallback=False):
+        x = None
+        sections = [self.section_name] + ([] if no_fallback else self.fallbacksections)
+        for s in sections:
+            try:
+                x = self._cfg[s][name]
+                break
+            except KeyError:
+                continue
+
+        if x is None:
+            x = default
+        else:
+            # It is needed to apply factors before unwrapping
+            # dependencies, otherwise it can break the substitution
+            # process. Once they are unwrapped, we call apply factors
+            # again for those new dependencies.
+            x = self._apply_factors(x)
+            x = self._replace_if_needed(x, name, replace, crossonly)
+            x = self._apply_factors(x)
+
+        x = self._replace_if_needed(x, name, replace, crossonly)
+        return x
+
+    def getposargs(self, default=None):
+        if self.posargs:
+            posargs = self.posargs
+            if sys.platform.startswith("win"):
+                posargs_string = list2cmdline([x for x in posargs if x])
+            else:
+                posargs_string = " ".join(shlex_quote(x) for x in posargs if x)
+            return posargs_string
+        else:
+            return default or ""
+
+    def _replace_if_needed(self, x, name, replace, crossonly):
+        if replace and x and hasattr(x, "replace"):
+            x = self._replace(x, name=name, crossonly=crossonly)
+        return x
+
+    def _apply_factors(self, s):
+        def factor_line(line):
+            m = _FACTOR_LINE_PATTERN.search(line)
+            if not m:
+                return line
+
+            expr, line = m.groups()
+            if any(
+                included <= self.factors and not any(x in self.factors for x in excluded)
+                for included, excluded in _split_factor_expr(expr)
+            ):
+                return line
+
+        lines = s.strip().splitlines()
+        return "\n".join(filter(None, map(factor_line, lines)))
+
+    def _replace(self, value, name=None, section_name=None, crossonly=False):
+        if "{" not in value:
+            return value
+
+        section_name = section_name if section_name else self.section_name
+        assert name
+        self._subststack.append((section_name, name))
+        try:
+            replaced = Replacer(self, crossonly=crossonly).do_replace(value)
+            assert self._subststack.pop() == (section_name, name)
+        except tox.exception.MissingSubstitution:
+            if not section_name.startswith(testenvprefix):
+                raise tox.exception.ConfigError(
+                    "substitution env:{!r}: unknown or recursive definition in"
+                    " section {!r}.".format(value, section_name),
+                )
+            raise
+        return replaced
+
+
+class Replacer:
+    RE_ITEM_REF = re.compile(
+        r"""
+        (?<![\\])[{]
+        (?:(?P<sub_type>[^[:{}]+):)?    # optional sub_type for special rules
+        (?P<substitution_value>(?:\[[^,{}]*\])?[^:,{}]*)  # substitution key
+        (?::(?P<default_value>([^{}]|\\{|\\})*))?   # default value
+        [}]
+        """,
+        re.VERBOSE,
+    )
+
+    def __init__(self, reader, crossonly=False):
+        self.reader = reader
+        self.crossonly = crossonly
+
+    def do_replace(self, value):
+        """
+        Recursively expand substitutions starting from the innermost expression
+        """
+
+        def substitute_once(x):
+            return self.RE_ITEM_REF.sub(self._replace_match, x)
+
+        expanded = substitute_once(value)
+
+        while expanded != value:  # substitution found
+            value = expanded
+            expanded = substitute_once(value)
+
+        return expanded
+
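+    # Illustrative: substitutions expand innermost-first, e.g. in
+    #   "{env:HOME:{toxinidir}}"
+    # {toxinidir} is expanded first and is used only if $HOME is unset.
+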
+    @staticmethod
+    def _unescape(s):
+        return s.replace("\\{", "{").replace("\\}", "}")
+
+    def _replace_match(self, match):
+        g = match.groupdict()
+        sub_value = g["substitution_value"]
+        if self.crossonly:
+            if sub_value.startswith("["):
+                return self._substitute_from_other_section(sub_value)
+            # in crossonly we return all other hits verbatim
+            start, end = match.span()
+            return match.string[start:end]
+
+        full_match = match.group(0)
+        # ":" is swallowed by the regex, so the raw matched string is checked
+        if full_match.startswith("{:"):
+            if full_match != "{:}":
+                raise tox.exception.ConfigError(
+                    "Malformed substitution with prefix ':': {}".format(full_match),
+                )
+
+            return os.pathsep
+
+        default_value = g["default_value"]
+        # special case: opts and packages. Leave {opts} and
+        # {packages} intact, they are replaced manually in
+        # _venv.VirtualEnv.run_install_command.
+        if sub_value in ("opts", "packages"):
+            return "{{{}}}".format(sub_value)
+
+        if sub_value == "posargs":
+            return self.reader.getposargs(default_value)
+
+        sub_type = g["sub_type"]
+        if sub_type == "posargs":
+            if default_value:
+                value = "{}:{}".format(sub_value, default_value)
+            else:
+                value = sub_value
+            return self.reader.getposargs(value)
+
+        if not sub_type and not sub_value:
+            raise tox.exception.ConfigError(
+                "Malformed substitution; no substitution type provided. "
+                "If you were using `{}` for `os.pathsep`, please use `{:}`.",
+            )
+
+        if not sub_type and not default_value and sub_value == "/":
+            return os.sep
+
+        if sub_type == "env":
+            return self._replace_env(sub_value, default_value)
+        if sub_type == "tty":
+            if is_interactive():
+                return match.group("substitution_value")
+            return match.group("default_value")
+        if sub_type == "posargs":
+            return self.reader.getposargs(sub_value)
+        if sub_type is not None:
+            raise tox.exception.ConfigError(
+                "No support for the {} substitution type".format(sub_type),
+            )
+        return self._replace_substitution(sub_value)
+
+    def _replace_env(self, key, default):
+        if not key:
+            raise tox.exception.ConfigError("env: requires an environment variable name")
+        value = self.reader.get_environ_value(key)
+        if value is not None:
+            return value
+        if default is not None:
+            return default
+        raise tox.exception.MissingSubstitution(key)
+
+    def _substitute_from_other_section(self, key):
+        if key.startswith("[") and "]" in key:
+            i = key.find("]")
+            section, item = key[1:i], key[i + 1 :]
+            cfg = self.reader._cfg
+            if section in cfg and item in cfg[section]:
+                if (section, item) in self.reader._subststack:
+                    raise tox.exception.SubstitutionStackError(
+                        "{} already in {}".format((section, item), self.reader._subststack),
+                    )
+                x = str(cfg[section][item])
+                return self.reader._replace(
+                    x,
+                    name=item,
+                    section_name=section,
+                    crossonly=self.crossonly,
+                )
+
+        raise tox.exception.ConfigError("substitution key {!r} not found".format(key))
+
+    def _replace_substitution(self, sub_key):
+        val = self.reader._subs.get(sub_key, None)
+        if val is None:
+            val = self._substitute_from_other_section(sub_key)
+        if callable(val):
+            val = val()
+        return str(val)
+
+
+def is_interactive():
+    return sys.stdin.isatty()
+
+
+class _ArgvlistReader:
+    @classmethod
+    def getargvlist(cls, reader, value, replace=True, name=None):
+        """Parse ``commands`` argvlist multiline string.
+
+        :param SectionReader reader: reader to be used.
+        :param str value: Content stored by key.
+
+        :rtype: list[list[str]]
+        :raise :class:`tox.exception.ConfigError`:
+            line-continuation ends nowhere while resolving for specified section
+        """
+        commands = []
+        current_command = ""
+        for line in value.splitlines():
+            line = line.rstrip()
+            if not line:
+                continue
+            if line.endswith("\\"):
+                current_command += " {}".format(line[:-1])
+                continue
+            current_command += line
+
+            if is_section_substitution(current_command):
+                replaced = reader._replace(current_command, crossonly=True, name=name)
+                commands.extend(cls.getargvlist(reader, replaced, name=name))
+            else:
+                commands.append(cls.processcommand(reader, current_command, replace, name=name))
+            current_command = ""
+        else:
+            if current_command:
+                raise tox.exception.ConfigError(
+                    "line-continuation ends nowhere while resolving for [{}] {}".format(
+                        reader.section_name,
+                        "commands",
+                    ),
+                )
+        return commands
+
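+    # Illustrative commands value parsed by getargvlist (a trailing backslash
+    # joins continuation lines into a single command):
+    #   commands =
+    #       pytest \
+    #           -ra tests
+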
+    @classmethod
+    def processcommand(cls, reader, command, replace=True, name=None):
+        # Iterate through each word of the command substituting as
+        # appropriate to construct the new command string. This
+        # string is then broken up into exec argv components using
+        # shlex.
+        if replace:
+            newcommand = ""
+            for word in CommandParser(command).words():
+                if word == "[]":
+                    newcommand += reader.getposargs()
+                    continue
+
+                new_arg = ""
+                new_word = reader._replace(word, name=name)
+                new_word = reader._replace(new_word, name=name)
+                new_word = Replacer._unescape(new_word)
+                new_arg += new_word
+                newcommand += new_arg
+        else:
+            newcommand = command
+
+        # Construct shlex object that will not escape any values,
+        # use all values as is in argv.
+        shlexer = shlex.shlex(newcommand, posix=True)
+        shlexer.whitespace_split = True
+        shlexer.escape = ""
+        return list(shlexer)
+
+
+class CommandParser(object):
+    class State(object):
+        def __init__(self):
+            self.word = ""
+            self.depth = 0
+            self.yield_words = []
+
+    def __init__(self, command):
+        self.command = command
+
+    def words(self):
+        ps = CommandParser.State()
+
+        def word_has_ended():
+            return (
+                (
+                    cur_char in string.whitespace
+                    and ps.word
+                    and ps.word[-1] not in string.whitespace
+                )
+                or (cur_char == "{" and ps.depth == 0 and not ps.word.endswith("\\"))
+                or (ps.depth == 0 and ps.word and ps.word[-1] == "}")
+                or (cur_char not in string.whitespace and ps.word and ps.word.strip() == "")
+            )
+
+        def yield_this_word():
+            yieldword = ps.word
+            ps.word = ""
+            if yieldword:
+                ps.yield_words.append(yieldword)
+
+        def yield_if_word_ended():
+            if word_has_ended():
+                yield_this_word()
+
+        def accumulate():
+            ps.word += cur_char
+
+        def push_substitution():
+            ps.depth += 1
+
+        def pop_substitution():
+            ps.depth -= 1
+
+        for cur_char in self.command:
+            if cur_char in string.whitespace:
+                if ps.depth == 0:
+                    yield_if_word_ended()
+                accumulate()
+            elif cur_char == "{":
+                yield_if_word_ended()
+                accumulate()
+                push_substitution()
+            elif cur_char == "}":
+                accumulate()
+                pop_substitution()
+            else:
+                yield_if_word_ended()
+                accumulate()
+
+        if ps.word.strip():
+            yield_this_word()
+        return ps.yield_words
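+
+
+def _example_command_parser():
+    # Editor's sketch (not part of tox): whitespace runs come back as their own
+    # tokens so processcommand can rebuild the command string verbatim, while a
+    # {substitution} always stays a single word.
+    words = CommandParser("pytest {posargs} -x").words()
+    assert words == ["pytest", " ", "{posargs}", " ", "-x"]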
+
+
+def getcontextname():
+    if any(env in os.environ for env in ["JENKINS_URL", "HUDSON_URL"]):
+        return "jenkins"
+    return None
diff --git a/venv/lib/python3.10/site-packages/tox/config/parallel.py b/venv/lib/python3.10/site-packages/tox/config/parallel.py
new file mode 100644
index 0000000..1519d58
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox/config/parallel.py
@@ -0,0 +1,80 @@
+from __future__ import absolute_import, unicode_literals
+
+from argparse import ArgumentTypeError
+
+ENV_VAR_KEY_PUBLIC = "TOX_PARALLEL_ENV"
+ENV_VAR_KEY_PRIVATE = "_TOX_PARALLEL_ENV"
+OFF_VALUE = 0
+DEFAULT_PARALLEL = OFF_VALUE
+
+
+def auto_detect_cpus():
+    try:
+        from os import sched_getaffinity  # python 3 only
+
+        def cpu_count():
+            return len(sched_getaffinity(0))
+
+    except ImportError:
+        # python 2 options
+        try:
+            from os import cpu_count
+        except ImportError:
+            from multiprocessing import cpu_count
+
+    try:
+        n = cpu_count()
+    except NotImplementedError:  # pragma: no cov
+        n = None  # pragma: no cov
+    return n if n else 1
+
+
+def parse_num_processes(s):
+    if s == "all":
+        return None
+    if s == "auto":
+        return auto_detect_cpus()
+    else:
+        value = int(s)
+        if value < 0:
+            raise ArgumentTypeError("value must be a non-negative number")
+        return value
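+
+
+def _example_parse_num_processes():
+    # Editor's sketch (not part of tox): the spellings accepted by -p/--parallel.
+    assert parse_num_processes("all") is None  # None means no limit
+    assert parse_num_processes("0") == 0  # 0 turns parallel mode off
+    assert parse_num_processes("4") == 4  # explicit worker count
+    assert parse_num_processes("auto") >= 1  # detected cpu count, at least 1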
+
+
+def add_parallel_flags(parser):
+    parser.add_argument(
+        "-p",
+        "--parallel",
+        nargs="?",
+        const="auto",
+        dest="parallel",
+        help="run tox environments in parallel, the argument controls limit: all,"
+        " auto or missing argument - cpu count, some positive number, 0 to turn off",
+        action="store",
+        type=parse_num_processes,
+        default=DEFAULT_PARALLEL,
+        metavar="VAL",
+    )
+    parser.add_argument(
+        "-o",
+        "--parallel-live",
+        action="store_true",
+        dest="parallel_live",
+        help="connect to stdout while running environments",
+    )
+
+
+def add_parallel_config(parser):
+    parser.add_testenv_attribute(
+        "depends",
+        type="env-list",
+        help="tox environments that this environment depends on (must be run after those)",
+    )
+
+    parser.add_testenv_attribute(
+        "parallel_show_output",
+        type="bool",
+        default=False,
+        help="if set to True the content of the output will always be shown "
+        "when running in parallel mode",
+    )
diff --git a/venv/lib/python3.10/site-packages/tox/config/reporter.py b/venv/lib/python3.10/site-packages/tox/config/reporter.py
new file mode 100644
index 0000000..9ada946
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox/config/reporter.py
@@ -0,0 +1,22 @@
+from __future__ import absolute_import, unicode_literals
+
+
+def add_verbosity_commands(parser):
+    parser.add_argument(
+        "-v",
+        "--verbose",
+        action="count",
+        dest="verbose_level",
+        default=0,
+        help="increase verbosity of reporting output. "
+        "-vv mode turns off output redirection for package installation; "
+        "above level two, verbosity flags are passed through to pip (reduced by two)",
+    )
+    parser.add_argument(
+        "-q",
+        "--quiet",
+        action="count",
+        dest="quiet_level",
+        default=0,
+        help="progressively silence reporting output.",
+    )
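+
+
+def _example_verbosity_flags():
+    # Editor's sketch (not part of tox): -v and -q accumulate independently; tox
+    # later derives the effective level as verbose_level - quiet_level.
+    from argparse import ArgumentParser
+
+    parser = ArgumentParser()
+    add_verbosity_commands(parser)
+    options = parser.parse_args(["-vv", "-q"])
+    assert options.verbose_level == 2
+    assert options.quiet_level == 1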
diff --git a/venv/lib/python3.10/site-packages/tox/constants.py b/venv/lib/python3.10/site-packages/tox/constants.py
new file mode 100644
index 0000000..c31f260
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox/constants.py
@@ -0,0 +1,65 @@
+"""All non-private names (no leading underscore) here are part of the tox API.
+
+They live in the tox namespace and can be accessed as tox.[NAMESPACE.]NAME
+"""
+import os
+import re
+import sys
+
+_THIS_FILE = os.path.realpath(os.path.abspath(__file__))
+
+
+class PYTHON:
+    PY_FACTORS_RE = re.compile("^(?!py$)(py|pypy|jython)([2-9][0-9]?[0-9]?)?$")
+    CURRENT_RELEASE_ENV = "py37"
+    """Should hold currently released py -> for easy updating"""
+    QUICKSTART_PY_ENVS = ["py27", "py35", "py36", CURRENT_RELEASE_ENV, "pypy", "jython"]
+    """For choices in tox-quickstart"""
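+
+
+def _example_py_factors():
+    # Editor's sketch (not part of tox): which env factors PY_FACTORS_RE accepts;
+    # the negative lookahead rejects a bare "py".
+    assert PYTHON.PY_FACTORS_RE.match("py37")
+    assert PYTHON.PY_FACTORS_RE.match("pypy3")
+    assert PYTHON.PY_FACTORS_RE.match("py") is None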
+
+
+class INFO:
+    DEFAULT_CONFIG_NAME = "tox.ini"
+    CONFIG_CANDIDATES = ("pyproject.toml", "tox.ini", "setup.cfg")
+    IS_WIN = sys.platform == "win32"
+    IS_PYPY = hasattr(sys, "pypy_version_info")
+
+
+class PIP:
+    SHORT_OPTIONS = ["c", "e", "r", "b", "t", "d"]
+    LONG_OPTIONS = [
+        "build",
+        "cache-dir",
+        "client-cert",
+        "constraint",
+        "download",
+        "editable",
+        "exists-action",
+        "extra-index-url",
+        "global-option",
+        "find-links",
+        "index-url",
+        "install-options",
+        "prefix",
+        "proxy",
+        "no-binary",
+        "only-binary",
+        "requirement",
+        "retries",
+        "root",
+        "src",
+        "target",
+        "timeout",
+        "trusted-host",
+        "upgrade-strategy",
+    ]
+    INSTALL_SHORT_OPTIONS_ARGUMENT = ["-{}".format(option) for option in SHORT_OPTIONS]
+    INSTALL_LONG_OPTIONS_ARGUMENT = ["--{}".format(option) for option in LONG_OPTIONS]
+
+
+_HELP_DIR = os.path.join(os.path.dirname(_THIS_FILE), "helper")
+VERSION_QUERY_SCRIPT = os.path.join(_HELP_DIR, "get_version.py")
+SITE_PACKAGE_QUERY_SCRIPT = os.path.join(_HELP_DIR, "get_site_package_dir.py")
+BUILD_REQUIRE_SCRIPT = os.path.join(_HELP_DIR, "build_requires.py")
+BUILD_ISOLATED = os.path.join(_HELP_DIR, "build_isolated.py")
+PARALLEL_RESULT_JSON_PREFIX = ".tox-result"
+PARALLEL_RESULT_JSON_SUFFIX = ".json"
diff --git a/venv/lib/python3.10/site-packages/tox/exception.py b/venv/lib/python3.10/site-packages/tox/exception.py
new file mode 100644
index 0000000..c5f842a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox/exception.py
@@ -0,0 +1,103 @@
+import os
+import pipes
+import signal
+
+
+def exit_code_str(exception_name, command, exit_code):
+    """String representation for an InvocationError, with exit code
+
+    NOTE: this might also be used by plugin tests (tox-venv at the time of writing),
+    so some coordination is needed if this is ever moved or a different solution for this hack
+    is found.
+
+    NOTE: this is a separate function because pytest-mock `spy` does not work on Exceptions.
+    We can use neither a class method nor a static method because of
+    https://bugs.python.org/issue23078.
+    Even a normal method failed with "TypeError: descriptor '__getattribute__' requires a
+    'BaseException' object but received a 'type'".
+    """
+    str_ = "{} for command {}".format(exception_name, command)
+    if exit_code is not None:
+        if exit_code < 0 or (os.name == "posix" and exit_code > 128):
+            signals = {
+                number: name for name, number in vars(signal).items() if name.startswith("SIG")
+            }
+            if exit_code < 0:
+                # Signal reported via subprocess.Popen.
+                sig_name = signals.get(-exit_code)
+                str_ += " (exited with code {:d} ({}))".format(exit_code, sig_name)
+            else:
+                str_ += " (exited with code {:d})".format(exit_code)
+                number = exit_code - 128
+                name = signals.get(number)
+                if name:
+                    str_ += (
+                        "\nNote: this might indicate a fatal error signal "
+                        "({:d} - 128 = {:d}: {})".format(exit_code, number, name)
+                    )
+        else:
+            str_ += " (exited with code {:d})".format(exit_code)
+    return str_
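+
+
+def _example_exit_code_str():
+    # Editor's sketch (not part of tox): on POSIX an exit code of 137 means the
+    # process died from signal 137 - 128 = 9 (SIGKILL) and a note is appended;
+    # other platforms just report the code itself.
+    assert "exited with code 137" in exit_code_str("InvocationError", "pytest", 137)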
+
+
+class Error(Exception):
+    def __str__(self):
+        return "{}: {}".format(self.__class__.__name__, self.args[0])
+
+
+class MissingSubstitution(Error):
+    FLAG = "TOX_MISSING_SUBSTITUTION"
+    """placeholder for debugging configurations"""
+
+    def __init__(self, name):
+        self.name = name
+        super(Error, self).__init__(name)
+
+
+class ConfigError(Error):
+    """Error in tox configuration."""
+
+
+class SubstitutionStackError(ConfigError, ValueError):
+    """Error in tox configuration recursive substitution."""
+
+
+class UnsupportedInterpreter(Error):
+    """Signals an unsupported Interpreter."""
+
+
+class InterpreterNotFound(Error):
+    """Signals that an interpreter could not be found."""
+
+
+class InvocationError(Error):
+    """An error while invoking a script."""
+
+    def __init__(self, command, exit_code=None, out=None):
+        super(Error, self).__init__(command, exit_code)
+        self.command = command
+        self.exit_code = exit_code
+        self.out = out
+
+    def __str__(self):
+        return exit_code_str(self.__class__.__name__, self.command, self.exit_code)
+
+
+class MissingDirectory(Error):
+    """A directory did not exist."""
+
+
+class MissingDependency(Error):
+    """A dependency could not be found or determined."""
+
+
+class MissingRequirement(Error):
+    """A requirement defined in :config:`require` is not met."""
+
+    def __init__(self, config):
+        self.config = config
+
+    def __str__(self):
+        return " ".join(pipes.quote(i) for i in self.config.requires)
+
+
+class BadRequirement(Error):
+    """A requirement defined in :config:`require` cannot be parsed."""
diff --git a/venv/lib/python3.10/site-packages/tox/helper/__init__.py b/venv/lib/python3.10/site-packages/tox/helper/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/venv/lib/python3.10/site-packages/tox/helper/build_isolated.py b/venv/lib/python3.10/site-packages/tox/helper/build_isolated.py
new file mode 100644
index 0000000..4c57c57
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox/helper/build_isolated.py
@@ -0,0 +1,42 @@
+"""PEP 517 build backend invocation script.
+
+It accepts externally parsed build configuration from `[build-system]`
+in `pyproject.toml` and invokes an API endpoint for building an sdist
+tarball.
+"""
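+
+# Editor's note - tox runs this helper inside the build environment roughly as
+# the following sketch (the backend shown is illustrative, not mandated):
+#
+#     python build_isolated.py dist_dir setuptools.build_meta
+#
+# argv[1] is the output folder and argv[2] the backend module; the optional
+# argv[3]/argv[4] carry the backend object name and backend-path entries.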
+
+import os
+import sys
+
+
+def _ensure_module_in_paths(module, paths):
+    """Verify that the imported backend belongs in-tree."""
+    if not paths:
+        return
+
+    module_path = os.path.normcase(os.path.abspath(module.__file__))
+    normalized_paths = (os.path.normcase(os.path.abspath(path)) for path in paths)
+
+    if any(os.path.commonprefix((module_path, path)) == path for path in normalized_paths):
+        return
+
+    raise SystemExit(
+        "build-backend ({!r}) must exist in one of the paths "
+        "specified by backend-path ({!r})".format(module, paths),
+    )
+
+
+dist_folder = sys.argv[1]
+backend_spec = sys.argv[2]
+backend_obj = sys.argv[3] if len(sys.argv) >= 4 else None
+backend_paths = sys.argv[4].split(os.path.pathsep) if (len(sys.argv) >= 5 and sys.argv[4]) else []
+
+sys.path[:0] = backend_paths
+
+backend = __import__(backend_spec, fromlist=["_trash"])
+_ensure_module_in_paths(backend, backend_paths)
+if backend_obj:
+    backend = getattr(backend, backend_obj)
+
+basename = backend.build_sdist(dist_folder, {"--global-option": ["--formats=gztar"]})
+print(basename)
diff --git a/venv/lib/python3.10/site-packages/tox/helper/build_requires.py b/venv/lib/python3.10/site-packages/tox/helper/build_requires.py
new file mode 100644
index 0000000..a91671c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox/helper/build_requires.py
@@ -0,0 +1,24 @@
+import json
+import os
+import sys
+
+backend_spec = sys.argv[1]
+backend_obj = sys.argv[2] if len(sys.argv) >= 3 else None
+backend_paths = sys.argv[3].split(os.path.pathsep) if len(sys.argv) >= 4 else []
+
+sys.path[:0] = backend_paths
+
+backend = __import__(backend_spec, fromlist=["_trash"])
+if backend_obj:
+    backend = getattr(backend, backend_obj)
+
+try:
+    for_build_requires = backend.get_requires_for_build_sdist(None)
+except AttributeError:
+    # PEP 517 states that get_requires_for_build_sdist is optional for a build
+    # backend object. When the backend object omits it, the default
+    # implementation must be equivalent to return []
+    for_build_requires = []
+
+output = json.dumps(for_build_requires)
+print(output)
diff --git a/venv/lib/python3.10/site-packages/tox/helper/get_site_package_dir.py b/venv/lib/python3.10/site-packages/tox/helper/get_site_package_dir.py
new file mode 100644
index 0000000..adb6ca1
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox/helper/get_site_package_dir.py
@@ -0,0 +1,22 @@
+from __future__ import unicode_literals
+
+import json
+import sys
+import sysconfig
+import warnings
+
+dest_prefix = sys.argv[1]
+with warnings.catch_warnings():  # disable warning for PEP-632
+    warnings.simplefilter("ignore")
+    try:
+        import distutils.sysconfig
+
+        data = distutils.sysconfig.get_python_lib(prefix=dest_prefix)
+    except ImportError:  # if removed or not installed ignore
+        config_vars = {
+            k: dest_prefix if any(v == p for p in (sys.prefix, sys.base_prefix)) else v
+            for k, v in sysconfig.get_config_vars().items()
+        }
+        data = sysconfig.get_path("purelib", vars=config_vars)
+
+print(json.dumps({"dir": data}))
diff --git a/venv/lib/python3.10/site-packages/tox/helper/get_version.py b/venv/lib/python3.10/site-packages/tox/helper/get_version.py
new file mode 100644
index 0000000..40acdc2
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox/helper/get_version.py
@@ -0,0 +1,19 @@
+from __future__ import unicode_literals
+
+import json
+import os
+import platform
+import sys
+
+info = {
+    "executable": sys.executable,
+    "implementation": platform.python_implementation(),
+    "version_info": list(sys.version_info),
+    "version": sys.version,
+    "is_64": sys.maxsize > 2**32,
+    "sysplatform": sys.platform,
+    "os_sep": os.sep,
+    "extra_version_info": getattr(sys, "pypy_version_info", None),
+}
+info_as_dump = json.dumps(info)
+print(info_as_dump)
diff --git a/venv/lib/python3.10/site-packages/tox/hookspecs.py b/venv/lib/python3.10/site-packages/tox/hookspecs.py
new file mode 100644
index 0000000..5ea9c07
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox/hookspecs.py
@@ -0,0 +1,122 @@
+"""Hook specifications for tox - see https://pluggy.readthedocs.io/"""
+import pluggy
+
+hookspec = pluggy.HookspecMarker("tox")
+
+
+@hookspec
+def tox_addoption(parser):
+    """add command line options to the argparse-style parser object."""
+
+
+@hookspec
+def tox_configure(config):
+    """Called after command line options are parsed and ini-file has been read.
+
+    Please be aware that the config object layout may change between major tox versions.
+    """
+
+
+@hookspec(firstresult=True)
+def tox_package(session, venv):
+    """Return the package to be installed for the given venv.
+
+    Called once for every environment."""
+
+
+@hookspec(firstresult=True)
+def tox_get_python_executable(envconfig):
+    """Return a python executable for the given python base name.
+
+    The first plugin/hook which returns an executable path will determine it.
+
+    ``envconfig`` is the testenv configuration which contains
+    per-testenv configuration, notably the ``.envname`` and ``.basepython``
+    setting.
+    """
+
+
+@hookspec(firstresult=True)
+def tox_testenv_create(venv, action):
+    """Perform creation action for this venv.
+
+    Some example usage:
+
+    - To *add* behavior but still use tox's implementation to set up a
+      virtualenv, implement this hook but do not return a value (or explicitly
+      return ``None``).
+    - To *override* tox's virtualenv creation, implement this hook and return
+      a non-``None`` value.
+
+    .. note:: This api is experimental due to the unstable api of
+        :class:`tox.venv.VirtualEnv`.
+
+    .. note:: This hook uses ``firstresult=True`` (see `pluggy first result only`_) -- hooks
+        implementing this will be run until one returns non-``None``.
+
+    .. _`pluggy first result only`: https://pluggy.readthedocs.io/en/latest/#first-result-only
+    """
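+
+# Editor's note - a minimal third-party implementation of the hook above might
+# look like this sketch (hypothetical plugin module, not shipped with tox):
+#
+#     import tox
+#
+#     @tox.hookimpl
+#     def tox_testenv_create(venv, action):
+#         if venv.envconfig.envname == "custom":
+#             ...  # create the environment yourself, then
+#             return True  # any non-None result makes tox skip its own creation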
+
+
+@hookspec(firstresult=True)
+def tox_testenv_install_deps(venv, action):
+    """Perform install dependencies action for this venv.
+
+    Some example usage:
+
+    - To *add* behavior but still use tox's implementation to install
+      dependencies, implement this hook but do not return a value (or
+      explicitly return ``None``).  One use-case may be to install (or ensure)
+      non-python dependencies such as debian packages.
+    - To *override* tox's installation of dependencies, implement this hook
+      and return a non-``None`` value.  One use-case may be to install via
+      a different installation tool such as `pip-accel`_ or `pip-faster`_.
+
+    .. note:: This api is experimental due to the unstable api of
+        :class:`tox.venv.VirtualEnv`.
+
+    .. note:: This hook uses ``firstresult=True`` (see `pluggy first result only`_) -- hooks
+        implementing this will be run until one returns non-``None``.
+
+    .. _pip-accel: https://github.com/paylogic/pip-accel
+    .. _pip-faster: https://github.com/Yelp/venv-update
+    """
+
+
+@hookspec
+def tox_runtest_pre(venv):
+    """Perform arbitrary action before running tests for this venv.
+
+    This could be used to indicate that tests for a given venv have started, for instance.
+    """
+
+
+@hookspec(firstresult=True)
+def tox_runtest(venv, redirect):
+    """Run the tests for this venv.
+
+    .. note:: This hook uses ``firstresult=True`` (see `pluggy first result only`_) -- hooks
+        implementing this will be run until one returns non-``None``.
+    """
+
+
+@hookspec
+def tox_runtest_post(venv):
+    """Perform arbitrary action after running tests for this venv.
+
+    This could be used to have per-venv test reporting of pass/fail status.
+    """
+
+
+@hookspec(firstresult=True)
+def tox_runenvreport(venv, action):
+    """Get the installed packages and versions in this venv.
+
+    This could be used for alternative (i.e. non-pip) package managers; the
+    plugin should return a ``list`` of type ``str``
+    """
+
+
+@hookspec
+def tox_cleanup(session):
+    """Called just before the session is destroyed, allowing any final cleanup operation"""
diff --git a/venv/lib/python3.10/site-packages/tox/interpreters/__init__.py b/venv/lib/python3.10/site-packages/tox/interpreters/__init__.py
new file mode 100644
index 0000000..7bc2fbd
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox/interpreters/__init__.py
@@ -0,0 +1,141 @@
+from __future__ import unicode_literals
+
+import json
+import sys
+
+import tox
+from tox import reporter
+from tox.constants import SITE_PACKAGE_QUERY_SCRIPT
+from tox.interpreters.via_path import get_python_info
+
+
+class Interpreters:
+    def __init__(self, hook):
+        self.name2executable = {}
+        self.executable2info = {}
+        self.hook = hook
+
+    def get_executable(self, envconfig):
+        """Return a path object to the executable for the given
+        name (e.g. python2.7, python3.6, python).
+        If the name is already an existing path, return it unchanged.
+        If an interpreter cannot be found, return None.
+        """
+        try:
+            return self.name2executable[envconfig.envname]
+        except KeyError:
+            exe = self.hook.tox_get_python_executable(envconfig=envconfig)
+            reporter.verbosity2("{} uses {}".format(envconfig.envname, exe))
+            self.name2executable[envconfig.envname] = exe
+            return exe
+
+    def get_info(self, envconfig):
+        executable = self.get_executable(envconfig)
+        name = envconfig.basepython
+        if not executable:
+            return NoInterpreterInfo(name=name)
+        try:
+            return self.executable2info[executable]
+        except KeyError:
+            info = run_and_get_interpreter_info(name, executable)
+            self.executable2info[executable] = info
+            return info
+
+    def get_sitepackagesdir(self, info, envdir):
+        if not info.executable:
+            return ""
+        envdir = str(envdir)
+        try:
+            res = exec_on_interpreter(str(info.executable), SITE_PACKAGE_QUERY_SCRIPT, str(envdir))
+        except ExecFailed as e:
+            reporter.verbosity1("execution failed: {} -- {}".format(e.out, e.err))
+            return ""
+        else:
+            return res["dir"]
+
+
+def run_and_get_interpreter_info(name, executable):
+    assert executable
+    try:
+        result = get_python_info(str(executable))
+        result["version_info"] = tuple(result["version_info"])  # fix json dump transformation
+        if result["extra_version_info"] is not None:
+            result["extra_version_info"] = tuple(
+                result["extra_version_info"],
+            )  # fix json dump transformation
+        del result["version"]
+        result["executable"] = str(executable)
+    except ExecFailed as e:
+        return NoInterpreterInfo(name, executable=e.executable, out=e.out, err=e.err)
+    else:
+        return InterpreterInfo(**result)
+
+
+def exec_on_interpreter(*args):
+    from subprocess import PIPE, Popen
+
+    popen = Popen(args, stdout=PIPE, stderr=PIPE, universal_newlines=True)
+    out, err = popen.communicate()
+    if popen.returncode:
+        raise ExecFailed(args[0], args[1:], out, err)
+    if err:
+        sys.stderr.write(err)
+    try:
+        result = json.loads(out)
+    except Exception:
+        raise ExecFailed(args[0], args[1:], out, "could not decode {!r}".format(out))
+    return result
+
+
+class ExecFailed(Exception):
+    def __init__(self, executable, source, out, err):
+        self.executable = executable
+        self.source = source
+        self.out = out
+        self.err = err
+
+
+class InterpreterInfo:
+    def __init__(
+        self,
+        implementation,
+        executable,
+        version_info,
+        sysplatform,
+        is_64,
+        os_sep,
+        extra_version_info,
+    ):
+        self.implementation = implementation
+        self.executable = executable
+
+        self.version_info = version_info
+        self.sysplatform = sysplatform
+        self.is_64 = is_64
+        self.os_sep = os_sep
+        self.extra_version_info = extra_version_info
+
+    def __str__(self):
+        return "<executable at {}, version_info {}>".format(self.executable, self.version_info)
+
+
+class NoInterpreterInfo:
+    def __init__(self, name, executable=None, out=None, err="not found"):
+        self.name = name
+        self.executable = executable
+        self.version_info = None
+        self.out = out
+        self.err = err
+
+    def __str__(self):
+        if self.executable:
+            return "<executable at {}, not working>".format(self.executable)
+        else:
+            return "<executable not found for: {}>".format(self.name)
+
+
+if tox.INFO.IS_WIN:
+    from .windows import tox_get_python_executable
+else:
+    from .unix import tox_get_python_executable
+assert tox_get_python_executable
diff --git a/venv/lib/python3.10/site-packages/tox/interpreters/common.py b/venv/lib/python3.10/site-packages/tox/interpreters/common.py
new file mode 100644
index 0000000..a1087fe
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox/interpreters/common.py
@@ -0,0 +1,25 @@
+import os
+
+from tox.interpreters.py_spec import CURRENT, PythonSpec
+from tox.interpreters.via_path import exe_spec
+
+
+def base_discover(envconfig):
+    base_python = envconfig.basepython
+    spec = PythonSpec.from_name(base_python)
+
+    # 1. check passed in discover elements
+    discovers = envconfig.config.option.discover
+    if not discovers:
+        discovers = os.environ.get(str("TOX_DISCOVER"), "").split(os.pathsep)
+    for discover in discovers:
+        if os.path.exists(discover):
+            cur_spec = exe_spec(discover, envconfig.basepython)
+            if cur_spec is not None and cur_spec.satisfies(spec):
+                return spec, cur_spec.path
+
+    # 2. check current
+    if spec.name is not None and CURRENT.satisfies(spec):
+        return spec, CURRENT.path
+
+    return spec, None
diff --git a/venv/lib/python3.10/site-packages/tox/interpreters/py_spec.py b/venv/lib/python3.10/site-packages/tox/interpreters/py_spec.py
new file mode 100644
index 0000000..7b079e8
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox/interpreters/py_spec.py
@@ -0,0 +1,76 @@
+from __future__ import unicode_literals
+
+import os
+import re
+import sys
+
+import six
+
+import tox
+
+
+class PythonSpec(object):
+    def __init__(self, name, major, minor, architecture, path, args=None):
+        self.name = name
+        self.major = major
+        self.minor = minor
+        self.architecture = architecture
+        self.path = path
+        self.args = args
+
+    def __repr__(self):
+        return (
+            "{0.__class__.__name__}(name={0.name!r}, major={0.major!r}, minor={0.minor!r}, "
+            "architecture={0.architecture!r}, path={0.path!r}, args={0.args!r})"
+        ).format(self)
+
+    def __str__(self):
+        msg = repr(self)
+        return msg.encode("utf-8") if six.PY2 else msg
+
+    def satisfies(self, req):
+        if req.is_abs and self.is_abs and self.path != req.path:
+            return False
+        if req.name is not None and req.name != self.name:
+            return False
+        if req.architecture is not None and req.architecture != self.architecture:
+            return False
+        if req.major is not None and req.major != self.major:
+            return False
+        if req.minor is not None and req.minor != self.minor:
+            return False
+        if req.major is None and req.minor is not None:
+            return False
+        return True
+
+    @property
+    def is_abs(self):
+        return self.path is not None and os.path.isabs(self.path)
+
+    @classmethod
+    def from_name(cls, base_python):
+        name, major, minor, architecture, path = None, None, None, None, None
+        if os.path.isabs(base_python):
+            path = base_python
+        else:
+            match = re.match(r"(python|pypy|jython)(\d)?(?:\.(\d+))?(?:-(32|64))?$", base_python)
+            if match:
+                groups = match.groups()
+                name = groups[0]
+                major = int(groups[1]) if len(groups) >= 2 and groups[1] is not None else None
+                minor = int(groups[2]) if len(groups) >= 3 and groups[2] is not None else None
+                architecture = (
+                    int(groups[3]) if len(groups) >= 4 and groups[3] is not None else None
+                )
+            else:
+                path = base_python
+        return cls(name, major, minor, architecture, path)
+
+
+CURRENT = PythonSpec(
+    "pypy" if tox.constants.INFO.IS_PYPY else "python",
+    sys.version_info[0],
+    sys.version_info[1],
+    64 if sys.maxsize > 2**32 else 32,
+    sys.executable,
+)
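+
+
+def _example_from_name():
+    # Editor's sketch (not part of tox): how basepython strings are parsed.
+    spec = PythonSpec.from_name("python3.7-64")
+    assert (spec.name, spec.major, spec.minor, spec.architecture) == ("python", 3, 7, 64)
+    assert PythonSpec.from_name("/usr/bin/python3").path == "/usr/bin/python3"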
diff --git a/venv/lib/python3.10/site-packages/tox/interpreters/unix.py b/venv/lib/python3.10/site-packages/tox/interpreters/unix.py
new file mode 100644
index 0000000..08194ad
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox/interpreters/unix.py
@@ -0,0 +1,19 @@
+from __future__ import unicode_literals
+
+import tox
+
+from .common import base_discover
+from .via_path import check_with_path
+
+
+@tox.hookimpl
+def tox_get_python_executable(envconfig):
+    spec, path = base_discover(envconfig)
+    if path is not None:
+        return path
+    # 3. check if the literal base python
+    candidates = [envconfig.basepython]
+    # 4. check if the un-versioned name is good
+    if spec.name is not None and spec.name != envconfig.basepython:
+        candidates.append(spec.name)
+    return check_with_path(candidates, spec)
diff --git a/venv/lib/python3.10/site-packages/tox/interpreters/via_path.py b/venv/lib/python3.10/site-packages/tox/interpreters/via_path.py
new file mode 100644
index 0000000..8634d69
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox/interpreters/via_path.py
@@ -0,0 +1,79 @@
+from __future__ import unicode_literals
+
+import json
+import os
+import subprocess
+from collections import defaultdict
+from threading import Lock
+
+import py
+
+from tox import reporter
+from tox.constants import VERSION_QUERY_SCRIPT
+
+from .py_spec import PythonSpec
+
+
+def check_with_path(candidates, spec):
+    for path in candidates:
+        base = path
+        if not os.path.isabs(path):
+            path = py.path.local.sysfind(path)
+        if path is not None:
+            if os.path.exists(str(path)):
+                cur_spec = exe_spec(path, base)
+                if cur_spec is not None and cur_spec.satisfies(spec):
+                    return cur_spec.path
+
+
+_SPECS = {}
+_SPEC_LOCK = defaultdict(Lock)
+
+
+def exe_spec(python_exe, base):
+    if not isinstance(python_exe, str):
+        python_exe = str(python_exe)
+    with _SPEC_LOCK[python_exe]:
+        if python_exe not in _SPECS:
+            info = get_python_info(python_exe)
+            if info is not None:
+                found = PythonSpec(
+                    "pypy" if info["implementation"] == "PyPy" else "python",
+                    info["version_info"][0],
+                    info["version_info"][1],
+                    64 if info["is_64"] else 32,
+                    info["executable"],
+                )
+                reporter.verbosity2("{} ({}) is {}".format(base, python_exe, info))
+            else:
+                found = None
+            _SPECS[python_exe] = found
+    return _SPECS[python_exe]
+
+
+_python_info_cache = {}
+
+
+def get_python_info(cmd):
+    try:
+        return _python_info_cache[cmd].copy()
+    except KeyError:
+        pass
+    proc = subprocess.Popen(
+        [cmd, VERSION_QUERY_SCRIPT],
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE,
+        universal_newlines=True,
+    )
+    out, err = proc.communicate()
+    if not proc.returncode:
+        try:
+            result = json.loads(out)
+        except ValueError as exception:
+            failure = exception
+        else:
+            _python_info_cache[cmd] = result
+            return result.copy()
+    else:
+        failure = "exit code {}".format(proc.returncode)
+    reporter.verbosity1("{!r} cmd {!r} out {!r} err {!r} ".format(failure, cmd, out, err))
diff --git a/venv/lib/python3.10/site-packages/tox/interpreters/windows/__init__.py b/venv/lib/python3.10/site-packages/tox/interpreters/windows/__init__.py
new file mode 100644
index 0000000..e03c342
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox/interpreters/windows/__init__.py
@@ -0,0 +1,49 @@
+from __future__ import unicode_literals
+
+from threading import Lock
+
+import tox
+
+from ..common import base_discover
+from ..py_spec import CURRENT
+from ..via_path import check_with_path
+
+
+@tox.hookimpl
+def tox_get_python_executable(envconfig):
+    spec, path = base_discover(envconfig)
+    if path is not None:
+        return path
+    # second check if the py.exe has it (only for non path specs)
+    if spec.path is None:
+        py_exe = locate_via_pep514(spec)
+        if py_exe is not None:
+            return py_exe
+
+    # third check if the literal base python is on PATH
+    candidates = [envconfig.basepython]
+    # fourth check if the name is on PATH
+    if spec.name is not None and spec.name != envconfig.basepython:
+        candidates.append(spec.name)
+    # or check known locations
+    if spec.major is not None and spec.minor is not None:
+        if spec.name == "python":
+            # The standard names are in predictable places.
+            candidates.append(r"c:\python{}{}\python.exe".format(spec.major, spec.minor))
+    return check_with_path(candidates, spec)
+
+
+_PY_AVAILABLE = []
+_PY_LOCK = Lock()
+
+
+def locate_via_pep514(spec):
+    with _PY_LOCK:
+        if not _PY_AVAILABLE:
+            from . import pep514
+
+            _PY_AVAILABLE.extend(pep514.discover_pythons())
+            _PY_AVAILABLE.append(CURRENT)
+    for cur_spec in _PY_AVAILABLE:
+        if cur_spec.satisfies(spec):
+            return cur_spec.path
diff --git a/venv/lib/python3.10/site-packages/tox/interpreters/windows/pep514.py b/venv/lib/python3.10/site-packages/tox/interpreters/windows/pep514.py
new file mode 100644
index 0000000..3dac564
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox/interpreters/windows/pep514.py
@@ -0,0 +1,165 @@
+"""Implement https://www.python.org/dev/peps/pep-0514/ to discover interpreters - Windows only"""
+from __future__ import unicode_literals
+
+import os
+import re
+
+import six
+from six.moves import winreg
+
+from tox import reporter
+from tox.interpreters.py_spec import PythonSpec
+
+
+def enum_keys(key):
+    at = 0
+    while True:
+        try:
+            yield winreg.EnumKey(key, at)
+        except OSError:
+            break
+        at += 1
+
+
+def get_value(key, value_name):
+    try:
+        return winreg.QueryValueEx(key, value_name)[0]
+    except OSError:
+        return None
+
+
+def discover_pythons():
+    for hive, hive_name, key, flags, default_arch in [
+        (winreg.HKEY_CURRENT_USER, "HKEY_CURRENT_USER", r"Software\Python", 0, 64),
+        (
+            winreg.HKEY_LOCAL_MACHINE,
+            "HKEY_LOCAL_MACHINE",
+            r"Software\Python",
+            winreg.KEY_WOW64_64KEY,
+            64,
+        ),
+        (
+            winreg.HKEY_LOCAL_MACHINE,
+            "HKEY_LOCAL_MACHINE",
+            r"Software\Python",
+            winreg.KEY_WOW64_32KEY,
+            32,
+        ),
+    ]:
+        for spec in process_set(hive, hive_name, key, flags, default_arch):
+            yield spec
+
+
+def process_set(hive, hive_name, key, flags, default_arch):
+    try:
+        with winreg.OpenKeyEx(hive, key, 0, winreg.KEY_READ | flags) as root_key:
+            for company in enum_keys(root_key):
+                if company == "PyLauncher":  # reserved
+                    continue
+                for spec in process_company(hive_name, company, root_key, default_arch):
+                    yield spec
+    except OSError:
+        pass
+
+
+def process_company(hive_name, company, root_key, default_arch):
+    with winreg.OpenKeyEx(root_key, company) as company_key:
+        for tag in enum_keys(company_key):
+            for spec in process_tag(hive_name, company, company_key, tag, default_arch):
+                yield spec
+
+
+def process_tag(hive_name, company, company_key, tag, default_arch):
+    with winreg.OpenKeyEx(company_key, tag) as tag_key:
+        major, minor = load_version_data(hive_name, company, tag, tag_key)
+        if major is None:
+            return
+        arch = load_arch_data(hive_name, company, tag, tag_key, default_arch)
+    exe, args = load_exe(hive_name, company, company_key, tag)
+    if exe is not None:
+        name = "python" if company == "PythonCore" else company
+        yield PythonSpec(name, major, minor, arch, exe, args)
+
+
+def load_exe(hive_name, company, company_key, tag):
+    key_path = "{}/{}/{}".format(hive_name, company, tag)
+    try:
+        with winreg.OpenKeyEx(company_key, r"{}\InstallPath".format(tag)) as ip_key:
+            exe = get_value(ip_key, "ExecutablePath")
+            if exe is None:
+                ip = get_value(ip_key, None)
+                if ip is None:
+                    msg(key_path, "no ExecutablePath or default for it")
+                else:
+                    exe = os.path.join(ip, "python.exe")
+            if exe is not None:
+                if os.path.exists(exe):
+                    args = get_value(ip_key, "ExecutableArguments")
+                    return exe, args
+                msg(key_path, "exe does not exist {}".format(exe))
+    except OSError:
+        msg("{}/{}".format(key_path, "InstallPath"), "missing")
+    return None, None
+
+
+def load_arch_data(hive_name, company, tag, tag_key, default_arch):
+    arch_str = get_value(tag_key, "SysArchitecture")
+    if arch_str is not None:
+        key_path = "{}/{}/{}/SysArchitecture".format(hive_name, company, tag)
+        try:
+            return parse_arch(arch_str)
+        except ValueError as sys_arch:
+            msg(key_path, sys_arch)
+    return default_arch
+
+
+def parse_arch(arch_str):
+    if not isinstance(arch_str, six.string_types):
+        raise ValueError("arch is not string")
+    match = re.match(r"(\d+)bit", arch_str)
+    if match:
+        return int(next(iter(match.groups())))
+    raise ValueError("invalid format {}".format(arch_str))
+
+
+def load_version_data(hive_name, company, tag, tag_key):
+    version_str = get_value(tag_key, "SysVersion")
+    major, minor = None, None
+    if version_str is not None:
+        key_path = "{}/{}/{}/SysVersion".format(hive_name, company, tag)
+        try:
+            major, minor = parse_version(get_value(tag_key, "SysVersion"))
+        except ValueError as sys_version:
+            msg(key_path, sys_version)
+    if major is None:
+        key_path = "{}/{}/{}".format(hive_name, company, tag)
+        try:
+            major, minor = parse_version(tag)
+        except ValueError as tag_version:
+            msg(key_path, tag_version)
+    return major, minor
+
+
+def parse_version(version_str):
+    if not isinstance(version_str, six.string_types):
+        raise ValueError("key is not string")
+    match = re.match(r"(\d+)\.(\d+).*", version_str)
+    if match:
+        return tuple(int(i) for i in match.groups())
+    raise ValueError("invalid format {}".format(version_str))
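+
+
+def _example_parse_version():
+    # Editor's sketch (not part of tox): registry tags such as "3.7-32" carry
+    # the major/minor version in their prefix.
+    assert parse_version("3.7") == (3, 7)
+    assert parse_version("3.7-32") == (3, 7)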
+
+
+def msg(path, what):
+    reporter.verbosity1("PEP-514 violation in Windows Registry at {} error: {}".format(path, what))
+
+
+def _run():
+    reporter.update_default_reporter(0, reporter.Verbosity.DEBUG)
+    for spec in discover_pythons():
+        print(repr(spec))
+
+
+if __name__ == "__main__":
+    _run()
diff --git a/venv/lib/python3.10/site-packages/tox/logs/__init__.py b/venv/lib/python3.10/site-packages/tox/logs/__init__.py
new file mode 100644
index 0000000..ed54906
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox/logs/__init__.py
@@ -0,0 +1,4 @@
+"""This module handles collecting a tox session and persisting it in JSON format"""
+from .result import ResultLog
+
+__all__ = ("ResultLog",)
diff --git a/venv/lib/python3.10/site-packages/tox/logs/command.py b/venv/lib/python3.10/site-packages/tox/logs/command.py
new file mode 100644
index 0000000..a22a2a6
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox/logs/command.py
@@ -0,0 +1,14 @@
+from __future__ import absolute_import, unicode_literals
+
+
+class CommandLog(object):
+    """Report commands interacting with third party tools"""
+
+    def __init__(self, env_log, list):
+        self.envlog = env_log
+        self.list = list
+
+    def add_command(self, argv, output, retcode):
+        data = {"command": argv, "output": output, "retcode": retcode}
+        self.list.append(data)
+        return data
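+
+
+def _example_command_log():
+    # Editor's sketch (not part of tox): the log writes into a plain list that
+    # is shared with the parent EnvLog/ResultLog data structure.
+    entries = []
+    log = CommandLog(None, entries)
+    log.add_command(["pip", "--version"], "pip 21.0", 0)
+    assert entries[0]["retcode"] == 0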
diff --git a/venv/lib/python3.10/site-packages/tox/logs/env.py b/venv/lib/python3.10/site-packages/tox/logs/env.py
new file mode 100644
index 0000000..ff8fc8e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox/logs/env.py
@@ -0,0 +1,36 @@
+from __future__ import absolute_import, unicode_literals
+
+from tox.interpreters.via_path import get_python_info
+
+from .command import CommandLog
+
+
+class EnvLog(object):
+    """Report the status of a tox environment"""
+
+    def __init__(self, result_log, name, dict):
+        self.reportlog = result_log
+        self.name = name
+        self.dict = dict
+
+    def set_python_info(self, python_executable):
+        answer = get_python_info(str(python_executable))
+        answer["executable"] = python_executable
+        self.dict["python"] = answer
+
+    def get_commandlog(self, name):
+        """get the command log for a given group name"""
+        data = self.dict.setdefault(name, [])
+        return CommandLog(self, data)
+
+    def set_installed(self, packages):
+        self.dict["installed_packages"] = packages
+
+    def set_header(self, installpkg):
+        """
+        :param py.path.local installpkg: Path to the package.
+        """
+        self.dict["installpkg"] = {
+            "sha256": installpkg.computehash("sha256"),
+            "basename": installpkg.basename,
+        }
diff --git a/venv/lib/python3.10/site-packages/tox/logs/result.py b/venv/lib/python3.10/site-packages/tox/logs/result.py
new file mode 100644
index 0000000..d81e22b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox/logs/result.py
@@ -0,0 +1,44 @@
+"""Generate json report of a run"""
+from __future__ import absolute_import, unicode_literals
+
+import json
+import os
+import socket
+import sys
+
+from tox.version import __version__
+
+from .command import CommandLog
+from .env import EnvLog
+
+
+class ResultLog(object):
+    """The result of a tox session"""
+
+    def __init__(self):
+        command_log = []
+        self.command_log = CommandLog(None, command_log)
+        self.dict = {
+            "reportversion": "1",
+            "toxversion": __version__,
+            "platform": sys.platform,
+            "host": os.getenv(str("HOSTNAME")) or socket.gethostname(),
+            "commands": command_log,
+        }
+
+    @classmethod
+    def from_json(cls, data):
+        result = cls()
+        result.dict = json.loads(data)
+        result.command_log = CommandLog(None, result.dict["commands"])
+        return result
+
+    def get_envlog(self, name):
+        """Return the env log of an environment (create on first call)"""
+        test_envs = self.dict.setdefault("testenvs", {})
+        env_data = test_envs.setdefault(name, {})
+        return EnvLog(self, name, env_data)
+
+    def dumps_json(self):
+        """Return the json dump of the current state, indented"""
+        return json.dumps(self.dict, indent=2)
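+
+
+def _example_result_log():
+    # Editor's sketch (not part of tox): a round trip through the JSON dump.
+    log = ResultLog()
+    log.get_envlog("py310").set_installed(["pip==21.0"])
+    clone = ResultLog.from_json(log.dumps_json())
+    assert clone.dict["testenvs"]["py310"]["installed_packages"] == ["pip==21.0"]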
diff --git a/venv/lib/python3.10/site-packages/tox/package/__init__.py b/venv/lib/python3.10/site-packages/tox/package/__init__.py
new file mode 100644
index 0000000..9a32f3f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox/package/__init__.py
@@ -0,0 +1,73 @@
+import py
+
+import tox
+from tox.reporter import error, info, verbosity0, verbosity2, warning
+from tox.util.lock import hold_lock
+
+from .builder import build_package
+from .local import resolve_package
+from .view import create_session_view
+
+
+@tox.hookimpl
+def tox_package(session, venv):
+    """Build an sdist on the first call and return that package for all subsequent calls"""
+    if not hasattr(session, "package"):
+        session.package, session.dist = get_package(session)
+    return session.package
+
+
+def get_package(session):
+    """Perform the package operation"""
+    config = session.config
+    if config.skipsdist:
+        info("skipping sdist step")
+        return None
+    lock_file = session.config.toxworkdir.join("{}.lock".format(session.config.isolated_build_env))
+
+    with hold_lock(lock_file, verbosity0):
+        package = acquire_package(config, session)
+        session_package = create_session_view(package, config.temp_dir)
+        return session_package, package
+
+
+def acquire_package(config, session):
+    """acquire a source distribution (either by loading a local file or triggering a build)"""
+    if not config.option.sdistonly and (config.sdistsrc or config.option.installpkg):
+        path = get_local_package(config)
+    else:
+        try:
+            path = build_package(config, session)
+        except tox.exception.InvocationError as exception:
+            error("FAIL could not package project - v = {!r}".format(exception))
+            return None
+        sdist_file = config.distshare.join(path.basename)
+        if sdist_file != path:
+            info("copying new sdistfile to {!r}".format(str(sdist_file)))
+            try:
+                sdist_file.dirpath().ensure(dir=1)
+            except py.error.Error:
+                warning("could not copy distfile to {}".format(sdist_file.dirpath()))
+            else:
+                path.copy(sdist_file)
+    return path
+
+
+def get_local_package(config):
+    path = config.option.installpkg
+    if not path:
+        path = config.sdistsrc
+    py_path = py.path.local(resolve_package(path))
+    info("using package {!r}, skipping 'sdist' activity ".format(str(py_path)))
+    return py_path
+
+
+@tox.hookimpl
+def tox_cleanup(session):
+    for tox_env in session.venv_dict.values():
+        if hasattr(tox_env, "package") and isinstance(tox_env.package, py.path.local):
+            package = tox_env.package
+            if package.exists():
+                verbosity2("cleanup {}".format(package))
+                package.remove()
+                py.path.local(package.dirname).remove(ignore_errors=True)
diff --git a/venv/lib/python3.10/site-packages/tox/package/builder/__init__.py b/venv/lib/python3.10/site-packages/tox/package/builder/__init__.py
new file mode 100644
index 0000000..11a0657
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox/package/builder/__init__.py
@@ -0,0 +1,9 @@
+from .isolated import build
+from .legacy import make_sdist
+
+
+def build_package(config, session):
+    if not config.isolated_build:
+        return make_sdist(config, session)
+    else:
+        return build(config, session)
diff --git a/venv/lib/python3.10/site-packages/tox/package/builder/isolated.py b/venv/lib/python3.10/site-packages/tox/package/builder/isolated.py
new file mode 100644
index 0000000..1b3f6f4
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox/package/builder/isolated.py
@@ -0,0 +1,150 @@
+from __future__ import unicode_literals
+
+import json
+import os
+from collections import namedtuple
+
+import six
+from packaging.requirements import Requirement
+from packaging.utils import canonicalize_name
+
+from tox import reporter
+from tox.config import DepConfig, get_py_project_toml
+from tox.constants import BUILD_ISOLATED, BUILD_REQUIRE_SCRIPT
+
+BuildInfo = namedtuple(
+    "BuildInfo",
+    ["requires", "backend_module", "backend_object", "backend_paths"],
+)
+
+
+def build(config, session):
+    build_info = get_build_info(config.setupdir)
+    package_venv = session.getvenv(config.isolated_build_env)
+    package_venv.envconfig.deps_matches_subset = True
+
+    # we allow user specified dependencies so the users can write extensions to
+    # install additional type of dependencies (e.g. binary)
+    user_specified_deps = package_venv.envconfig.deps
+    package_venv.envconfig.deps = [DepConfig(r, None) for r in build_info.requires]
+    package_venv.envconfig.deps.extend(user_specified_deps)
+
+    if package_venv.setupenv():
+        package_venv.finishvenv()
+    if isinstance(package_venv.status, Exception):
+        raise package_venv.status
+
+    build_requires = get_build_requires(build_info, package_venv, config.setupdir)
+    # we need to filter out requirements already specified in pyproject.toml or user deps
+    base_build_deps = {
+        canonicalize_name(Requirement(r.name).name) for r in package_venv.envconfig.deps
+    }
+    build_requires_dep = [
+        DepConfig(r, None)
+        for r in build_requires
+        if canonicalize_name(Requirement(r).name) not in base_build_deps
+    ]
+    if build_requires_dep:
+        with package_venv.new_action("build_requires", package_venv.envconfig.envdir) as action:
+            package_venv.run_install_command(packages=build_requires_dep, action=action)
+        package_venv.finishvenv()
+    return perform_isolated_build(build_info, package_venv, config.distdir, config.setupdir)
+
+
+def get_build_info(folder):
+    toml_file = folder.join("pyproject.toml")
+
+    # as per https://www.python.org/dev/peps/pep-0517/
+
+    def abort(message):
+        reporter.error("{} inside {}".format(message, toml_file))
+        raise SystemExit(1)
+
+    if not toml_file.exists():
+        reporter.error("missing {}".format(toml_file))
+        raise SystemExit(1)
+
+    config_data = get_py_project_toml(toml_file)
+
+    if "build-system" not in config_data:
+        abort("build-system section missing")
+
+    build_system = config_data["build-system"]
+
+    if "requires" not in build_system:
+        abort("missing requires key at build-system section")
+    if "build-backend" not in build_system:
+        abort("missing build-backend key at build-system section")
+
+    requires = build_system["requires"]
+    if not isinstance(requires, list) or not all(isinstance(i, six.text_type) for i in requires):
+        abort("requires key at build-system section must be a list of strings")
+
+    backend = build_system["build-backend"]
+    if not isinstance(backend, six.text_type):
+        abort("build-backend key at build-system section must be a string")
+
+    args = backend.split(":")
+    module = args[0]
+    obj = args[1] if len(args) > 1 else ""
+
+    backend_paths = build_system.get("backend-path", [])
+    if not isinstance(backend_paths, list):
+        abort("backend-path key at build-system section must be a list, if specified")
+    backend_paths = [folder.join(p) for p in backend_paths]
+
+    normalized_folder = os.path.normcase(str(folder.realpath()))
+    normalized_paths = (os.path.normcase(str(path.realpath())) for path in backend_paths)
+
+    if not all(
+        os.path.commonprefix((normalized_folder, path)) == normalized_folder
+        for path in normalized_paths
+    ):
+        abort("backend-path must exist in the project root")
+
+    return BuildInfo(requires, module, obj, backend_paths)
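+
+
+def _example_get_build_info():
+    # Editor's sketch (not part of tox): parsing a minimal PEP 517 build-system
+    # table from a throwaway folder.
+    import py
+
+    folder = py.path.local.mkdtemp()
+    folder.join("pyproject.toml").write(
+        '[build-system]\nrequires = ["setuptools"]\nbuild-backend = "setuptools.build_meta"\n',
+    )
+    info = get_build_info(folder)
+    assert info.requires == ["setuptools"]
+    assert info.backend_module == "setuptools.build_meta"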
+
+
+def perform_isolated_build(build_info, package_venv, dist_dir, setup_dir):
+    with package_venv.new_action(
+        "perform-isolated-build",
+        package_venv.envconfig.envdir,
+    ) as action:
+        # need to start with an empty (but existing) source distribution folder
+        if dist_dir.exists():
+            dist_dir.remove(rec=1, ignore_errors=True)
+        dist_dir.ensure_dir()
+
+        result = package_venv._pcall(
+            [
+                package_venv.envconfig.envpython,
+                BUILD_ISOLATED,
+                str(dist_dir),
+                build_info.backend_module,
+                build_info.backend_object,
+                os.path.pathsep.join(str(p) for p in build_info.backend_paths),
+            ],
+            returnout=True,
+            action=action,
+            cwd=setup_dir,
+        )
+        reporter.verbosity2(result)
+        return dist_dir.join(result.split("\n")[-2])
+
+
+def get_build_requires(build_info, package_venv, setup_dir):
+    with package_venv.new_action("get-build-requires", package_venv.envconfig.envdir) as action:
+        result = package_venv._pcall(
+            [
+                package_venv.envconfig.envpython,
+                BUILD_REQUIRE_SCRIPT,
+                build_info.backend_module,
+                build_info.backend_object,
+                os.path.pathsep.join(str(p) for p in build_info.backend_paths),
+            ],
+            returnout=True,
+            action=action,
+            cwd=setup_dir,
+            capture_err=False,
+        )
+        return json.loads(result.split("\n")[-2])
diff --git a/venv/lib/python3.10/site-packages/tox/package/builder/legacy.py b/venv/lib/python3.10/site-packages/tox/package/builder/legacy.py
new file mode 100644
index 0000000..5b9d1af
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox/package/builder/legacy.py
@@ -0,0 +1,59 @@
+import sys
+
+import py
+
+from tox import reporter
+from tox.util.path import ensure_empty_dir
+
+
+def make_sdist(config, session):
+    setup = config.setupdir.join("setup.py")
+    pyproject = config.setupdir.join("pyproject.toml")
+    setup_check = setup.check()
+    if not setup_check and not pyproject.check():
+        reporter.error(
+            "No pyproject.toml or setup.py file found. The expected locations are:\n"
+            "  {pyproject} or {setup}\n"
+            "You can\n"
+            "  1. Create one:\n"
+            "     https://tox.readthedocs.io/en/latest/example/package.html\n"
+            "  2. Configure tox to avoid running sdist:\n"
+            "     https://tox.readthedocs.io/en/latest/example/general.html\n"
+            "  3. Configure tox to use an isolated_build".format(pyproject=pyproject, setup=setup),
+        )
+        raise SystemExit(1)
+    if not setup_check:
+        reporter.error(
+            "pyproject.toml file found.\n"
+            "To use a PEP 517 build-backend you are required to "
+            "configure tox to use an isolated_build:\n"
+            "https://tox.readthedocs.io/en/latest/example/package.html\n",
+        )
+        raise SystemExit(1)
+    with session.newaction("GLOB", "packaging") as action:
+        action.setactivity("sdist-make", setup)
+        ensure_empty_dir(config.distdir)
+        build_log = action.popen(
+            [sys.executable, setup, "sdist", "--formats=zip", "--dist-dir", config.distdir],
+            cwd=config.setupdir,
+            returnout=True,
+        )
+        reporter.verbosity2(build_log)
+        try:
+            return config.distdir.listdir()[0]
+        except py.error.ENOENT:
+            # check if empty or comment only
+            data = []
+            with open(str(setup)) as fp:
+                for line in fp:
+                    if line and line[0] == "#":
+                        continue
+                    data.append(line)
+            if not "".join(data).strip():
+                reporter.error("setup.py is empty")
+                raise SystemExit(1)
+            reporter.error(
+                "No dist directory found. Please check setup.py, e.g with:\n"
+                "     python setup.py sdist",
+            )
+            raise SystemExit(1)
diff --git a/venv/lib/python3.10/site-packages/tox/package/local.py b/venv/lib/python3.10/site-packages/tox/package/local.py
new file mode 100644
index 0000000..0ae8615
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox/package/local.py
@@ -0,0 +1,63 @@
+import os
+import re
+
+import packaging.version
+import py
+
+import tox
+from tox import reporter
+from tox.exception import MissingDependency
+
+_SPEC_2_PACKAGE = {}
+
+
+def resolve_package(package_spec):
+    global _SPEC_2_PACKAGE
+    try:
+        return _SPEC_2_PACKAGE[package_spec]
+    except KeyError:
+        _SPEC_2_PACKAGE[package_spec] = x = get_latest_version_of_package(package_spec)
+        return x
+
+
+def get_latest_version_of_package(package_spec):
+    if not os.path.isabs(str(package_spec)):
+        return package_spec
+    p = py.path.local(package_spec)
+    if p.check():
+        return p
+    if not p.dirpath().check(dir=1):
+        raise tox.exception.MissingDirectory(p.dirpath())
+    reporter.info("determining {}".format(p))
+    candidates = p.dirpath().listdir(p.basename)
+    if len(candidates) == 0:
+        raise MissingDependency(package_spec)
+    if len(candidates) > 1:
+        version_package = []
+        for filename in candidates:
+            version = get_version_from_filename(filename.basename)
+            if version is not None:
+                version_package.append((version, filename))
+            else:
+                reporter.warning("could not determine version of: {}".format(str(filename)))
+        if not version_package:
+            raise tox.exception.MissingDependency(package_spec)
+        version_package.sort()
+        _, package_with_largest_version = version_package[-1]
+        return package_with_largest_version
+    else:
+        return candidates[0]
+
+
+_REGEX_FILE_NAME_WITH_VERSION = re.compile(r"[\w_+.-]+-(.*)\.(zip|tar\.gz)")
+
+
+def get_version_from_filename(basename):
+    m = _REGEX_FILE_NAME_WITH_VERSION.match(basename)
+    if m is None:
+        return None
+    version = m.group(1)
+    try:
+        return packaging.version.Version(version)
+    except packaging.version.InvalidVersion:
+        return None
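
The regex above pulls the version component out of an sdist archive name and validates it with packaging.version; anything that does not parse is discarded so resolve_package can pick the newest archive. A small illustration of that matching logic, with invented file names:

    # Illustration only; the archive names below are invented.
    import re
    import packaging.version

    regex = re.compile(r"[\w_+.-]+-(.*)\.(zip|tar\.gz)")
    for name in ("demo-1.2.3.tar.gz", "demo-1.0rc1.zip", "not-an-archive.txt"):
        match = regex.match(name)
        if match is None:
            print(name, "-> no match")
        else:
            print(name, "->", packaging.version.Version(match.group(1)))
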
diff --git a/venv/lib/python3.10/site-packages/tox/package/view.py b/venv/lib/python3.10/site-packages/tox/package/view.py
new file mode 100644
index 0000000..e484104
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox/package/view.py
@@ -0,0 +1,50 @@
+import os
+from itertools import chain
+
+import six
+
+from tox.reporter import verbosity1
+
+
+def create_session_view(package, temp_dir):
+    """once we build a package we cannot return that directly, as a subsequent call
+    might delete that package (in order to do its own build); therefore we need to
+    return a view of the file that is not prone to deletion and can be removed when the
+    session ends
+    """
+    if not package:
+        return package
+    package_dir = temp_dir.join("package")
+    package_dir.ensure(dir=True)
+
+    # we'll number the active instances, and use the max value as the session folder for a new build
+    # note we cannot change package names as PEP-491 (wheel binary format)
+    # is strict about file name structure
+    exists = [i.basename for i in package_dir.listdir()]
+    file_id = max(chain((0,), (int(i) for i in exists if six.text_type(i).isnumeric())))
+
+    session_dir = package_dir.join(str(file_id + 1))
+    session_dir.ensure(dir=True)
+    session_package = session_dir.join(package.basename)
+
+    # if we can do hard links do that, otherwise just copy
+    links = False
+    if hasattr(os, "link"):
+        try:
+            os.link(str(package), str(session_package))
+            links = True
+        except (OSError, NotImplementedError):
+            pass
+    if not links:
+        package.copy(session_package)
+    operation = "links" if links else "copied"
+    common = session_package.common(package)
+    verbosity1(
+        "package {} {} to {} ({})".format(
+            common.bestrelpath(session_package),
+            operation,
+            common.bestrelpath(package),
+            common,
+        ),
+    )
+    return session_package
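
The hard-link-with-copy-fallback above is a general pattern: a hard link is cheap and keeps the artifact readable even after the original is replaced by a later build, while the copy path covers platforms and filesystems where os.link fails. A standalone sketch of the same fallback, with hypothetical paths:

    # Sketch of the link-or-copy fallback; src and dst are hypothetical paths.
    import os
    import shutil

    def link_or_copy(src, dst):
        if hasattr(os, "link"):
            try:
                os.link(src, dst)  # hard link: no data copied, same inode
                return "links"
            except (OSError, NotImplementedError):
                pass  # e.g. a cross-device link or an unsupported filesystem
        shutil.copy(src, dst)  # fall back to a plain copy
        return "copied"
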
diff --git a/venv/lib/python3.10/site-packages/tox/reporter.py b/venv/lib/python3.10/site-packages/tox/reporter.py
new file mode 100644
index 0000000..17a3c92
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox/reporter.py
@@ -0,0 +1,157 @@
+"""A progress reporter inspired from the logging modules"""
+from __future__ import absolute_import, unicode_literals
+
+import os
+import time
+from contextlib import contextmanager
+from datetime import datetime
+
+import py
+
+
+class Verbosity(object):
+    DEBUG = 2
+    INFO = 1
+    DEFAULT = 0
+    QUIET = -1
+    EXTRA_QUIET = -2
+
+
+REPORTER_TIMESTAMP_ON_ENV = str("TOX_REPORTER_TIMESTAMP")
+REPORTER_TIMESTAMP_ON = os.environ.get(REPORTER_TIMESTAMP_ON_ENV, False) == "1"
+START = datetime.now()
+
+
+class Reporter(object):
+    def __init__(self, verbose_level=None, quiet_level=None):
+        kwargs = {}
+        if verbose_level is not None:
+            kwargs["verbose_level"] = verbose_level
+        if quiet_level is not None:
+            kwargs["quiet_level"] = quiet_level
+        self._reset(**kwargs)
+
+    def _reset(self, verbose_level=0, quiet_level=0):
+        self.verbose_level = verbose_level
+        self.quiet_level = quiet_level
+        self.reported_lines = []
+        self.tw = py.io.TerminalWriter()
+
+    @property
+    def verbosity(self):
+        return self.verbose_level - self.quiet_level
+
+    def log_popen(self, cwd, outpath, cmd_args_shell, pid):
+        """log information about the action.popen() created process."""
+        msg = "[{}] {}$ {}".format(pid, cwd, cmd_args_shell)
+        if outpath:
+            if outpath.common(cwd) is not None:
+                outpath = cwd.bestrelpath(outpath)
+            msg = "{} >{}".format(msg, outpath)
+        self.verbosity1(msg, of="logpopen")
+
+    @property
+    def messages(self):
+        return [i for _, i in self.reported_lines]
+
+    @contextmanager
+    def timed_operation(self, name, msg):
+        self.verbosity2("{} start: {}".format(name, msg), bold=True)
+        start = time.time()
+        yield
+        duration = time.time() - start
+        self.verbosity2(
+            "{} finish: {} after {:.2f} seconds".format(name, msg, duration),
+            bold=True,
+        )
+
+    def separator(self, of, msg, level):
+        if self.verbosity >= level:
+            self.reported_lines.append(("separator", "- summary -"))
+            self.tw.sep(of, msg)
+
+    def logline_if(self, level, of, msg, key=None, **kwargs):
+        if self.verbosity >= level:
+            message = str(msg) if key is None else "{}{}".format(key, msg)
+            self.logline(of, message, **kwargs)
+
+    def logline(self, of, msg, **opts):
+        self.reported_lines.append((of, msg))
+        timestamp = ""
+        if REPORTER_TIMESTAMP_ON:
+            timestamp = "{} ".format(datetime.now() - START)
+        line_msg = "{}{}\n".format(timestamp, msg)
+        self.tw.write(line_msg, **opts)
+
+    def keyvalue(self, name, value):
+        if name.endswith(":"):
+            name += " "
+        self.tw.write(name, bold=True)
+        self.tw.write(value)
+        self.tw.line()
+
+    def line(self, msg, **opts):
+        self.logline("line", msg, **opts)
+
+    def info(self, msg):
+        self.logline_if(Verbosity.DEBUG, "info", msg)
+
+    def using(self, msg):
+        self.logline_if(Verbosity.INFO, "using", msg, "using ", bold=True)
+
+    def good(self, msg):
+        self.logline_if(Verbosity.QUIET, "good", msg, green=True)
+
+    def warning(self, msg):
+        self.logline_if(Verbosity.QUIET, "warning", msg, "WARNING: ", red=True)
+
+    def error(self, msg):
+        self.logline_if(Verbosity.QUIET, "error", msg, "ERROR: ", red=True)
+
+    def skip(self, msg):
+        self.logline_if(Verbosity.QUIET, "skip", msg, "SKIPPED: ", yellow=True)
+
+    def verbosity0(self, msg, **opts):
+        self.logline_if(Verbosity.DEFAULT, "verbosity0", msg, **opts)
+
+    def verbosity1(self, msg, of="verbosity1", **opts):
+        self.logline_if(Verbosity.INFO, of, msg, **opts)
+
+    def verbosity2(self, msg, **opts):
+        self.logline_if(Verbosity.DEBUG, "verbosity2", msg, **opts)
+
+    def quiet(self, msg):
+        self.logline_if(Verbosity.QUIET, "quiet", msg)
+
+
+_INSTANCE = Reporter()
+
+
+def update_default_reporter(quiet_level, verbose_level):
+    _INSTANCE.quiet_level = quiet_level
+    _INSTANCE.verbose_level = verbose_level
+
+
+def has_level(of):
+    return _INSTANCE.verbosity > of
+
+
+def verbosity():
+    return _INSTANCE.verbosity
+
+
+verbosity0 = _INSTANCE.verbosity0
+verbosity1 = _INSTANCE.verbosity1
+verbosity2 = _INSTANCE.verbosity2
+error = _INSTANCE.error
+warning = _INSTANCE.warning
+good = _INSTANCE.good
+using = _INSTANCE.using
+skip = _INSTANCE.skip
+info = _INSTANCE.info
+line = _INSTANCE.line
+separator = _INSTANCE.separator
+keyvalue = _INSTANCE.keyvalue
+quiet = _INSTANCE.quiet
+timed_operation = _INSTANCE.timed_operation
+log_popen = _INSTANCE.log_popen
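
Effective verbosity is simply verbose_level minus quiet_level, and each logline_if call fires only when that difference reaches the level it was given. Assuming the usual counting of -v and -q flags, the arithmetic works out like this:

    # Sketch of the verbosity arithmetic; the flag counts are illustrative.
    DEBUG, INFO, DEFAULT, QUIET = 2, 1, 0, -1

    def effective(verbose_level, quiet_level):
        return verbose_level - quiet_level

    assert effective(2, 0) >= DEBUG    # tox -vv shows verbosity2 output
    assert effective(1, 1) == DEFAULT  # -v and -q cancel out
    assert effective(0, 1) < DEFAULT   # tox -q hides verbosity0 lines
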
diff --git a/venv/lib/python3.10/site-packages/tox/session/__init__.py b/venv/lib/python3.10/site-packages/tox/session/__init__.py
new file mode 100644
index 0000000..0f50bbe
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox/session/__init__.py
@@ -0,0 +1,299 @@
+"""
+Automatically package and test a Python project against configurable
+Python2 and Python3 based virtual environments. Environments are
+setup by using virtualenv. Configuration is generally done through an
+INI-style "tox.ini" file.
+"""
+from __future__ import absolute_import, unicode_literals
+
+import json
+import os
+import re
+import subprocess
+import sys
+from collections import OrderedDict
+from contextlib import contextmanager
+
+import py
+
+import tox
+from tox import reporter
+from tox.action import Action
+from tox.config import INTERRUPT_TIMEOUT, SUICIDE_TIMEOUT, TERMINATE_TIMEOUT, parseconfig
+from tox.config.parallel import ENV_VAR_KEY_PRIVATE as PARALLEL_ENV_VAR_KEY_PRIVATE
+from tox.config.parallel import OFF_VALUE as PARALLEL_OFF
+from tox.logs.result import ResultLog
+from tox.reporter import update_default_reporter
+from tox.util import set_os_env_var
+from tox.util.graph import stable_topological_sort
+from tox.util.stdlib import suppress_output
+from tox.venv import VirtualEnv
+
+from .commands.help import show_help
+from .commands.help_ini import show_help_ini
+from .commands.provision import provision_tox
+from .commands.run.parallel import run_parallel
+from .commands.run.sequential import run_sequential
+from .commands.show_config import show_config
+from .commands.show_env import show_envs
+
+
+def cmdline(args=None):
+    if args is None:
+        args = sys.argv[1:]
+    main(args)
+
+
+def setup_reporter(args):
+    from argparse import ArgumentParser
+
+    from tox.config.reporter import add_verbosity_commands
+
+    parser = ArgumentParser(add_help=False)
+    add_verbosity_commands(parser)
+    with suppress_output():
+        try:
+            options, _ = parser.parse_known_args(args)
+            update_default_reporter(options.quiet_level, options.verbose_level)
+        except SystemExit:
+            pass
+
+
+def main(args):
+    setup_reporter(args)
+    try:
+        config = load_config(args)
+        config.logdir.ensure(dir=1)
+        with set_os_env_var(str("TOX_WORK_DIR"), config.toxworkdir):
+            session = build_session(config)
+            exit_code = session.runcommand()
+        if exit_code is None:
+            exit_code = 0
+        raise SystemExit(exit_code)
+    except tox.exception.BadRequirement:
+        raise SystemExit(1)
+    except KeyboardInterrupt:
+        raise SystemExit(2)
+
+
+def load_config(args):
+    try:
+        config = parseconfig(args)
+        if config.option.help:
+            show_help(config)
+            raise SystemExit(0)
+        elif config.option.helpini:
+            show_help_ini(config)
+            raise SystemExit(0)
+    except tox.exception.MissingRequirement as exception:
+        config = exception.config
+    return config
+
+
+def build_session(config):
+    return Session(config)
+
+
+class Session(object):
+    """The session object that ties together configuration, reporting, venv creation, testing."""
+
+    def __init__(self, config, popen=subprocess.Popen):
+        self._reset(config, popen)
+
+    def _reset(self, config, popen=subprocess.Popen):
+        self.config = config
+        self.popen = popen
+        self.resultlog = ResultLog()
+        self.existing_venvs = OrderedDict()
+        self.venv_dict = {} if self.config.run_provision else self._build_venvs()
+
+    def _build_venvs(self):
+        try:
+            need_to_run = OrderedDict((v, self.getvenv(v)) for v in self._evaluated_env_list)
+            try:
+                venv_order = stable_topological_sort(
+                    OrderedDict((name, v.envconfig.depends) for name, v in need_to_run.items()),
+                )
+
+                venvs = OrderedDict((v, need_to_run[v]) for v in venv_order)
+                return venvs
+            except ValueError as exception:
+                reporter.error("circular dependency detected: {}".format(exception))
+        except LookupError:
+            pass
+        except tox.exception.ConfigError as exception:
+            reporter.error(str(exception))
+        raise SystemExit(1)
+
+    def getvenv(self, name):
+        if name in self.existing_venvs:
+            return self.existing_venvs[name]
+        env_config = self.config.envconfigs.get(name, None)
+        if env_config is None:
+            reporter.error("unknown environment {!r}".format(name))
+            raise LookupError(name)
+        elif env_config.envdir == self.config.toxinidir:
+            reporter.error("venv {!r} in {} would delete project".format(name, env_config.envdir))
+            raise tox.exception.ConfigError("envdir must not equal toxinidir")
+        env_log = self.resultlog.get_envlog(name)
+        venv = VirtualEnv(envconfig=env_config, popen=self.popen, env_log=env_log)
+        self.existing_venvs[name] = venv
+        return venv
+
+    @property
+    def _evaluated_env_list(self):
+        tox_env_filter = os.environ.get("TOX_SKIP_ENV")
+        tox_env_filter_re = re.compile(tox_env_filter) if tox_env_filter is not None else None
+        visited = set()
+        for name in self.config.envlist:
+            if name in visited:
+                continue
+            visited.add(name)
+            if tox_env_filter_re is not None and tox_env_filter_re.match(name):
+                msg = "skip environment {}, matches filter {!r}".format(
+                    name,
+                    tox_env_filter_re.pattern,
+                )
+                reporter.verbosity1(msg)
+                continue
+            yield name
+
+    @property
+    def hook(self):
+        return self.config.pluginmanager.hook
+
+    def newaction(self, name, msg, *args):
+        return Action(
+            name,
+            msg,
+            args,
+            self.config.logdir,
+            self.config.option.resultjson,
+            self.resultlog.command_log,
+            self.popen,
+            sys.executable,
+            SUICIDE_TIMEOUT,
+            INTERRUPT_TIMEOUT,
+            TERMINATE_TIMEOUT,
+        )
+
+    def runcommand(self):
+        reporter.using(
+            "tox-{} from {} (pid {})".format(tox.__version__, tox.__file__, os.getpid()),
+        )
+        show_description = reporter.has_level(reporter.Verbosity.DEFAULT)
+        if self.config.run_provision:
+            provision_tox_venv = self.getvenv(self.config.provision_tox_env)
+            return provision_tox(provision_tox_venv, self.config.args)
+        else:
+            if self.config.option.showconfig:
+                self.showconfig()
+            elif self.config.option.listenvs:
+                self.showenvs(all_envs=False, description=show_description)
+            elif self.config.option.listenvs_all:
+                self.showenvs(all_envs=True, description=show_description)
+            else:
+                with self.cleanup():
+                    return self.subcommand_test()
+
+    @contextmanager
+    def cleanup(self):
+        self.config.temp_dir.ensure(dir=True)
+        try:
+            yield
+        finally:
+            self.hook.tox_cleanup(session=self)
+
+    def subcommand_test(self):
+        if self.config.skipsdist:
+            reporter.info("skipping sdist step")
+        else:
+            for venv in self.venv_dict.values():
+                if not venv.envconfig.skip_install:
+                    venv.package = self.hook.tox_package(session=self, venv=venv)
+                    if not venv.package:
+                        return 2
+                    venv.envconfig.setenv[str("TOX_PACKAGE")] = str(venv.package)
+        if self.config.option.sdistonly:
+            return
+
+        within_parallel = PARALLEL_ENV_VAR_KEY_PRIVATE in os.environ
+        try:
+            if not within_parallel and self.config.option.parallel != PARALLEL_OFF:
+                run_parallel(self.config, self.venv_dict)
+            else:
+                run_sequential(self.config, self.venv_dict)
+        finally:
+            retcode = self._summary()
+        return retcode
+
+    def _add_parallel_summaries(self):
+        if self.config.option.parallel != PARALLEL_OFF and "testenvs" in self.resultlog.dict:
+            result_log = self.resultlog.dict["testenvs"]
+            for tox_env in self.venv_dict.values():
+                data = self._load_parallel_env_report(tox_env)
+                if data and "testenvs" in data and tox_env.name in data["testenvs"]:
+                    result_log[tox_env.name] = data["testenvs"][tox_env.name]
+
+    @staticmethod
+    def _load_parallel_env_report(tox_env):
+        """Load report data into memory, remove disk file"""
+        result_json_path = tox_env.get_result_json_path()
+        if result_json_path and result_json_path.exists():
+            with result_json_path.open("r") as file_handler:
+                data = json.load(file_handler)
+            result_json_path.remove()
+            return data
+
+    def _summary(self):
+        is_parallel_child = PARALLEL_ENV_VAR_KEY_PRIVATE in os.environ
+        if not is_parallel_child:
+            reporter.separator("_", "summary", reporter.Verbosity.QUIET)
+        exit_code = 0
+        for venv in self.venv_dict.values():
+            report = reporter.good
+            status = getattr(venv, "status", "undefined")
+            if isinstance(status, tox.exception.InterpreterNotFound):
+                msg = " {}: {}".format(venv.envconfig.envname, str(status))
+                if self.config.option.skip_missing_interpreters == "true":
+                    report = reporter.skip
+                else:
+                    exit_code = 1
+                    report = reporter.error
+            elif status == "platform mismatch":
+                msg = " {}: {} ({!r} does not match {!r})".format(
+                    venv.envconfig.envname,
+                    str(status),
+                    sys.platform,
+                    venv.envconfig.platform,
+                )
+                report = reporter.skip
+            elif status and status == "ignored failed command":
+                msg = "  {}: {}".format(venv.envconfig.envname, str(status))
+            elif status and status != "skipped tests":
+                msg = "  {}: {}".format(venv.envconfig.envname, str(status))
+                report = reporter.error
+                exit_code = 1
+            else:
+                if not status:
+                    status = "commands succeeded"
+                msg = "  {}: {}".format(venv.envconfig.envname, status)
+            if not is_parallel_child:
+                report(msg)
+        if not exit_code and not is_parallel_child:
+            reporter.good("  congratulations :)")
+        path = self.config.option.resultjson
+        if path:
+            if not is_parallel_child:
+                self._add_parallel_summaries()
+            path = py.path.local(path)
+            data = self.resultlog.dumps_json()
+            reporter.line("write json report at: {}".format(path))
+            path.write(data)
+        return exit_code
+
+    def showconfig(self):
+        show_config(self.config)
+
+    def showenvs(self, all_envs=False, description=False):
+        show_envs(self.config, all_envs=all_envs, description=description)
diff --git a/venv/lib/python3.10/site-packages/tox/session/commands/__init__.py b/venv/lib/python3.10/site-packages/tox/session/commands/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/venv/lib/python3.10/site-packages/tox/session/commands/help.py b/venv/lib/python3.10/site-packages/tox/session/commands/help.py
new file mode 100644
index 0000000..9c5cc70
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox/session/commands/help.py
@@ -0,0 +1,14 @@
+from tox import reporter
+
+
+def show_help(config):
+    reporter.line(config._parser._format_help())
+    reporter.line("Environment variables", bold=True)
+    reporter.line("TOXENV: comma separated list of environments (overridable by '-e')")
+    reporter.line("TOX_SKIP_ENV: regular expression to filter down from running tox environments")
+    reporter.line(
+        "TOX_TESTENV_PASSENV: space-separated list of extra environment variables to be "
+        "passed into test command environments",
+    )
+    reporter.line("PY_COLORS: 0 disable colorized output, 1 enable (default)")
+    reporter.line("TOX_PARALLEL_NO_SPINNER: 1 disable spinner for CI, 0 enable (default)")
diff --git a/venv/lib/python3.10/site-packages/tox/session/commands/help_ini.py b/venv/lib/python3.10/site-packages/tox/session/commands/help_ini.py
new file mode 100644
index 0000000..8791ded
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox/session/commands/help_ini.py
@@ -0,0 +1,16 @@
+from tox import reporter
+
+
+def show_help_ini(config):
+    reporter.separator("-", "per-testenv attributes", reporter.Verbosity.INFO)
+    for env_attr in config._testenv_attr:
+        reporter.line(
+            "{:<15} {:<8} default: {}".format(
+                env_attr.name,
+                "<{}>".format(env_attr.type),
+                env_attr.default,
+            ),
+            bold=True,
+        )
+        reporter.line(env_attr.help)
+        reporter.line("")
diff --git a/venv/lib/python3.10/site-packages/tox/session/commands/provision.py b/venv/lib/python3.10/site-packages/tox/session/commands/provision.py
new file mode 100644
index 0000000..8508f5c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox/session/commands/provision.py
@@ -0,0 +1,25 @@
+"""In case the tox environment is not correctly setup provision it and delegate execution"""
+from __future__ import absolute_import, unicode_literals
+
+import os
+
+from tox.exception import InvocationError
+
+
+def provision_tox(provision_venv, args):
+    ensure_meta_env_up_to_date(provision_venv)
+    with provision_venv.new_action("provision") as action:
+        provision_args = [str(provision_venv.envconfig.envpython), "-m", "tox"] + args
+        try:
+            env = os.environ.copy()
+            env[str("TOX_PROVISION")] = str("1")
+            env.pop("__PYVENV_LAUNCHER__", None)
+            action.popen(provision_args, redirect=False, report_fail=False, env=env)
+            return 0
+        except InvocationError as exception:
+            return exception.exit_code
+
+
+def ensure_meta_env_up_to_date(provision_venv):
+    if provision_venv.setupenv():
+        provision_venv.finishvenv()
diff --git a/venv/lib/python3.10/site-packages/tox/session/commands/run/__init__.py b/venv/lib/python3.10/site-packages/tox/session/commands/run/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/venv/lib/python3.10/site-packages/tox/session/commands/run/parallel.py b/venv/lib/python3.10/site-packages/tox/session/commands/run/parallel.py
new file mode 100644
index 0000000..8675ea5
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox/session/commands/run/parallel.py
@@ -0,0 +1,143 @@
+import os
+import sys
+from collections import OrderedDict, deque
+from threading import Event, Semaphore, Thread
+
+from tox import reporter
+from tox.config.parallel import ENV_VAR_KEY_PRIVATE as PARALLEL_ENV_VAR_KEY_PRIVATE
+from tox.config.parallel import ENV_VAR_KEY_PUBLIC as PARALLEL_ENV_VAR_KEY_PUBLIC
+from tox.exception import InvocationError
+from tox.util.main import MAIN_FILE
+from tox.util.spinner import Spinner
+
+
+def run_parallel(config, venv_dict):
+    """here we'll just start parallel sub-processes"""
+    live_out = config.option.parallel_live
+    disable_spinner = bool(os.environ.get("TOX_PARALLEL_NO_SPINNER") == "1")
+    args = [sys.executable, MAIN_FILE] + config.args
+    try:
+        position = args.index("--")
+    except ValueError:
+        position = len(args)
+
+    max_parallel = config.option.parallel
+    if max_parallel is None:
+        max_parallel = len(venv_dict)
+    semaphore = Semaphore(max_parallel)
+    finished = Event()
+
+    show_progress = (
+        not disable_spinner and not live_out and reporter.verbosity() > reporter.Verbosity.QUIET
+    )
+
+    with Spinner(enabled=show_progress) as spinner:
+
+        def run_in_thread(tox_env, os_env, processes):
+            output = None
+            print_out = None
+            env_name = tox_env.envconfig.envname
+            status = "skipped tests" if config.option.notest else None
+            try:
+                os_env[str(PARALLEL_ENV_VAR_KEY_PRIVATE)] = str(env_name)
+                os_env[str(PARALLEL_ENV_VAR_KEY_PUBLIC)] = str(env_name)
+                args_sub = list(args)
+                if hasattr(tox_env, "package"):
+                    args_sub.insert(position, str(tox_env.package))
+                    args_sub.insert(position, "--installpkg")
+                if tox_env.get_result_json_path():
+                    result_json_index = args_sub.index("--result-json")
+                    args_sub[result_json_index + 1] = "{}".format(tox_env.get_result_json_path())
+                with tox_env.new_action("parallel {}".format(tox_env.name)) as action:
+
+                    def collect_process(process):
+                        processes[tox_env] = (action, process)
+
+                    print_out = not live_out and tox_env.envconfig.parallel_show_output
+                    output = action.popen(
+                        args=args_sub,
+                        env=os_env,
+                        redirect=not live_out,
+                        capture_err=print_out,
+                        callback=collect_process,
+                        returnout=print_out,
+                    )
+
+            except InvocationError as err:
+                status = "parallel child exit code {}".format(err.exit_code)
+            finally:
+                semaphore.release()
+                finished.set()
+                tox_env.status = status
+                done.add(env_name)
+                outcome = spinner.succeed
+                if config.option.notest:
+                    outcome = spinner.skip
+                elif status is not None:
+                    outcome = spinner.fail
+                outcome(env_name)
+                if print_out and output is not None:
+                    reporter.verbosity0(output)
+
+        threads = deque()
+        processes = {}
+        todo_keys = set(venv_dict.keys())
+        todo = OrderedDict((n, todo_keys & set(v.envconfig.depends)) for n, v in venv_dict.items())
+        done = set()
+        try:
+            while todo:
+                for name, depends in list(todo.items()):
+                    if depends - done:
+                        # skip while it still has unfinished dependencies
+                        continue
+                    del todo[name]
+                    venv = venv_dict[name]
+                    semaphore.acquire(blocking=True)
+                    spinner.add(name)
+                    thread = Thread(
+                        target=run_in_thread,
+                        args=(venv, os.environ.copy(), processes),
+                    )
+                    thread.daemon = True
+                    thread.start()
+                    threads.append(thread)
+                if todo:
+                    # wait until someone finishes and retry queuing jobs
+                    finished.wait()
+                    finished.clear()
+            while threads:
+                threads = [
+                    thread for thread in threads if not thread.join(0.1) and thread.is_alive()
+                ]
+        except KeyboardInterrupt:
+            reporter.verbosity0(
+                "[{}] KeyboardInterrupt parallel - stopping children".format(os.getpid()),
+            )
+            while True:
+                # do not allow interruption until the children have been interrupted
+                try:
+                    # putting it inside a thread so it's not interrupted
+                    stopper = Thread(target=_stop_child_processes, args=(processes, threads))
+                    stopper.start()
+                    stopper.join()
+                except KeyboardInterrupt:
+                    continue
+                raise KeyboardInterrupt
+
+
+def _stop_child_processes(processes, main_threads):
+    """A three level stop mechanism for children - INT (250ms) -> TERM (100ms) -> KILL"""
+
+    # first stop children
+    def shutdown(tox_env, action, process):
+        action.handle_interrupt(process)
+
+    threads = [Thread(target=shutdown, args=(n, a, p)) for n, (a, p) in processes.items()]
+    for thread in threads:
+        thread.start()
+    for thread in threads:
+        thread.join()
+
+    # then its threads
+    for thread in main_threads:
+        thread.join()
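
The scheduling loop above only launches an environment once every entry in its depends set has landed in done, with the semaphore capping how many children run at once. A single-threaded rendition of that ready-check (the dependency table is invented):

    # Simplified, single-threaded version of the ready-check in run_parallel.
    from collections import OrderedDict

    todo = OrderedDict(
        [("py38", set()), ("py39", set()), ("coverage", {"py38", "py39"})],
    )
    done = set()
    while todo:
        for name, depends in list(todo.items()):
            if depends - done:
                continue  # unfinished dependencies: leave it queued
            del todo[name]
            print("run", name)  # the real loop starts a worker thread here
            done.add(name)      # the real loop records this when the thread ends
    # prints: run py38 / run py39 / run coverage
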
diff --git a/venv/lib/python3.10/site-packages/tox/session/commands/run/sequential.py b/venv/lib/python3.10/site-packages/tox/session/commands/run/sequential.py
new file mode 100644
index 0000000..9076909
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox/session/commands/run/sequential.py
@@ -0,0 +1,76 @@
+import py
+
+import tox
+from tox.exception import InvocationError
+
+
+def run_sequential(config, venv_dict):
+    for venv in venv_dict.values():
+        if venv.setupenv():
+            if venv.envconfig.skip_install:
+                venv.finishvenv()
+            else:
+                if venv.envconfig.usedevelop:
+                    develop_pkg(venv, config.setupdir)
+                elif config.skipsdist:
+                    venv.finishvenv()
+                else:
+                    installpkg(venv, venv.package)
+            if venv.status == 0:
+                runenvreport(venv, config)
+        if venv.status == 0:
+            runtestenv(venv, config)
+
+
+def develop_pkg(venv, setupdir):
+    with venv.new_action("developpkg", setupdir) as action:
+        try:
+            venv.developpkg(setupdir, action)
+            return True
+        except InvocationError as exception:
+            venv.status = exception
+            return False
+
+
+def installpkg(venv, path):
+    """Install package in the specified virtual environment.
+
+    :param VenvConfig venv: Destination environment
+    :param str path: Path to the distribution package.
+    :return: True if package installed otherwise False.
+    :rtype: bool
+    """
+    venv.env_log.set_header(installpkg=py.path.local(path))
+    with venv.new_action("installpkg", path) as action:
+        try:
+            venv.installpkg(path, action)
+            return True
+        except tox.exception.InvocationError as exception:
+            venv.status = exception
+            return False
+
+
+def runenvreport(venv, config):
+    """
+    Run an environment report to show which package
+    versions are installed in the venv
+    """
+    try:
+        with venv.new_action("envreport") as action:
+            packages = config.pluginmanager.hook.tox_runenvreport(venv=venv, action=action)
+        action.setactivity("installed", ",".join(packages))
+        venv.env_log.set_installed(packages)
+    except InvocationError as exception:
+        venv.status = exception
+
+
+def runtestenv(venv, config, redirect=False):
+    if venv.status == 0 and config.option.notest:
+        venv.status = "skipped tests"
+    else:
+        if venv.status:
+            return
+        config.pluginmanager.hook.tox_runtest_pre(venv=venv)
+        if venv.status == 0:
+            config.pluginmanager.hook.tox_runtest(venv=venv, redirect=redirect)
+        config.pluginmanager.hook.tox_runtest_post(venv=venv)
diff --git a/venv/lib/python3.10/site-packages/tox/session/commands/show_config.py b/venv/lib/python3.10/site-packages/tox/session/commands/show_config.py
new file mode 100644
index 0000000..f0ff955
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox/session/commands/show_config.py
@@ -0,0 +1,82 @@
+import sys
+from collections import OrderedDict
+
+from packaging.requirements import Requirement
+from six import StringIO
+from six.moves import configparser
+
+from tox import reporter
+from tox.util.stdlib import importlib_metadata
+
+DO_NOT_SHOW_CONFIG_ATTRIBUTES = (
+    "interpreters",
+    "envconfigs",
+    "envlist",
+    "pluginmanager",
+    "envlist_explicit",
+)
+
+
+def show_config(config):
+    parser = configparser.RawConfigParser()
+
+    if not config.envlist_explicit or reporter.verbosity() >= reporter.Verbosity.INFO:
+        tox_info(config, parser)
+        version_info(parser)
+    tox_envs_info(config, parser)
+
+    content = StringIO()
+    parser.write(content)
+    value = content.getvalue().rstrip()
+    reporter.verbosity0(value)
+
+
+def tox_envs_info(config, parser):
+    if config.envlist_explicit:
+        env_list = config.envlist
+    elif config.option.listenvs:
+        env_list = config.envlist_default
+    else:
+        env_list = list(config.envconfigs.keys())
+    for name in env_list:
+        env_config = config.envconfigs[name]
+        values = OrderedDict(
+            (attr.name, str(getattr(env_config, attr.name)))
+            for attr in config._parser._testenv_attr
+        )
+        section = "testenv:{}".format(name)
+        set_section(parser, section, values)
+
+
+def tox_info(config, parser):
+    info = OrderedDict(
+        (i, str(getattr(config, i)))
+        for i in sorted(dir(config))
+        if not i.startswith("_") and i not in DO_NOT_SHOW_CONFIG_ATTRIBUTES
+    )
+    info["host_python"] = sys.executable
+    set_section(parser, "tox", info)
+
+
+def version_info(parser):
+    versions = OrderedDict()
+    to_visit = {"tox"}
+    while to_visit:
+        current = to_visit.pop()
+        current_dist = importlib_metadata.distribution(current)
+        current_name = current_dist.metadata["name"]
+        versions[current_name] = current_dist.version
+        if current_dist.requires is not None:
+            for require in current_dist.requires:
+                pkg = Requirement(require)
+                if (
+                    pkg.marker is None or pkg.marker.evaluate({"extra": ""})
+                ) and pkg.name not in versions:
+                    to_visit.add(pkg.name)
+    set_section(parser, "tox:versions", versions)
+
+
+def set_section(parser, section, values):
+    parser.add_section(section)
+    for key, value in values.items():
+        parser.set(section, key, value)
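
show_config renders everything through a RawConfigParser written into a StringIO, so the output is ordinary INI text. A minimal sketch of that rendering step (the section and key are made up):

    # Minimal sketch of the parser-to-string rendering; values are invented.
    from six import StringIO
    from six.moves import configparser

    parser = configparser.RawConfigParser()
    parser.add_section("tox")
    parser.set("tox", "toxworkdir", ".tox")

    content = StringIO()
    parser.write(content)
    print(content.getvalue().rstrip())
    # [tox]
    # toxworkdir = .tox
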
diff --git a/venv/lib/python3.10/site-packages/tox/session/commands/show_env.py b/venv/lib/python3.10/site-packages/tox/session/commands/show_env.py
new file mode 100644
index 0000000..1ed9ba9
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox/session/commands/show_env.py
@@ -0,0 +1,32 @@
+from __future__ import absolute_import, unicode_literals
+
+from tox import reporter as report
+
+
+def show_envs(config, all_envs=False, description=False):
+    env_conf = config.envconfigs  # this contains all environments
+    default = config.envlist_default  # this holds only the defaults
+    ignore = {config.isolated_build_env, config.provision_tox_env}.union(default)
+    extra = [e for e in env_conf if e not in ignore] if all_envs else []
+
+    if description and default:
+        report.line("default environments:")
+    max_length = max(len(env) for env in (default + extra) or [""])
+
+    def report_env(e):
+        if description:
+            text = env_conf[e].description or "[no description]"
+            msg = "{} -> {}".format(e.ljust(max_length), text).strip()
+        else:
+            msg = e
+        report.line(msg)
+
+    for e in default:
+        report_env(e)
+    if all_envs and extra:
+        if description:
+            if default:
+                report.line("")
+            report.line("additional environments:")
+        for e in extra:
+            report_env(e)
diff --git a/venv/lib/python3.10/site-packages/tox/util/__init__.py b/venv/lib/python3.10/site-packages/tox/util/__init__.py
new file mode 100644
index 0000000..c72dea0
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox/util/__init__.py
@@ -0,0 +1,18 @@
+from __future__ import absolute_import, unicode_literals
+
+import os
+from contextlib import contextmanager
+
+
+@contextmanager
+def set_os_env_var(env_var_name, value):
+    """Set an environment variable with unrolling once the context exists"""
+    prev_value = os.environ.get(env_var_name)
+    try:
+        os.environ[env_var_name] = str(value)
+        yield
+    finally:
+        if prev_value is None:
+            del os.environ[env_var_name]
+        else:
+            os.environ[env_var_name] = prev_value
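
Usage is the standard save/set/restore dance; on exit the variable is deleted again if it did not exist before. A usage sketch, assuming tox is importable and the variable name (invented here) is not already set:

    # Usage sketch; assumes tox is installed and DEMO_VAR is initially unset.
    import os
    from tox.util import set_os_env_var

    with set_os_env_var("DEMO_VAR", 1):
        assert os.environ["DEMO_VAR"] == "1"  # str() is applied on entry
    assert "DEMO_VAR" not in os.environ       # removed again on exit
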
diff --git a/venv/lib/python3.10/site-packages/tox/util/graph.py b/venv/lib/python3.10/site-packages/tox/util/graph.py
new file mode 100644
index 0000000..318b5b3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox/util/graph.py
@@ -0,0 +1,68 @@
+from __future__ import absolute_import, unicode_literals
+
+from collections import OrderedDict, defaultdict
+
+
+def stable_topological_sort(graph):
+    to_order = set(graph.keys())  # keep track of what we need to order
+
+    # normalize graph - fill missing nodes (assume no dependency)
+    for values in list(graph.values()):
+        for value in values:
+            if value not in graph:
+                graph[value] = ()
+
+    inverse_graph = defaultdict(set)
+    for key, depends in graph.items():
+        for depend in depends:
+            inverse_graph[depend].add(key)
+
+    topology = []
+    degree = {k: len(v) for k, v in graph.items()}
+    ready_to_visit = {n for n, d in degree.items() if not d}
+    need_to_visit = OrderedDict((i, None) for i in graph.keys())
+    while need_to_visit:
+        # to keep the ordering stable, pick the first ready node in the original order
+        for node in need_to_visit:
+            if node in ready_to_visit:
+                break
+        else:
+            break
+        del need_to_visit[node]
+
+        topology.append(node)
+
+        # decrease the degree of nodes that depend on this one
+        for to_node in inverse_graph[node]:
+            degree[to_node] -= 1
+            if not degree[to_node]:  # once a node has no unvisited dependencies it's ready to visit
+                ready_to_visit.add(to_node)
+
+    result = [n for n in topology if n in to_order]  # drop the placeholder nodes added during normalization
+
+    if len(result) < len(to_order):
+        identify_cycle(graph)
+        msg = "could not order tox environments and failed to detect circle"  # pragma: no cover
+        raise ValueError(msg)  # pragma: no cover
+    return result
+
+
+def identify_cycle(graph):
+    path = OrderedDict()
+    visited = set()
+
+    def visit(vertex):
+        if vertex in visited:
+            return None
+        visited.add(vertex)
+        path[vertex] = None
+        for neighbour in graph.get(vertex, ()):
+            if neighbour in path or visit(neighbour):
+                return path
+        del path[vertex]
+        return None
+
+    for node in graph:
+        result = visit(node)
+        if result is not None:
+            raise ValueError("{}".format(" | ".join(result.keys())))
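
stable_topological_sort keeps the original envlist order wherever the dependency graph allows it, and identify_cycle turns an unorderable graph into a readable error. For example (invented environment names, assuming tox is importable):

    # Invented env names; assumes tox is installed.
    from collections import OrderedDict
    from tox.util.graph import stable_topological_sort

    graph = OrderedDict(
        [("py38", ()), ("py39", ()), ("coverage", ("py38", "py39"))],
    )
    print(stable_topological_sort(graph))  # ['py38', 'py39', 'coverage']
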
diff --git a/venv/lib/python3.10/site-packages/tox/util/lock.py b/venv/lib/python3.10/site-packages/tox/util/lock.py
new file mode 100644
index 0000000..fd64734
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox/util/lock.py
@@ -0,0 +1,41 @@
+"""holds locking functionality that works across processes"""
+from __future__ import absolute_import, unicode_literals
+
+from contextlib import contextmanager
+
+import py
+from filelock import FileLock, Timeout
+
+from tox.reporter import verbosity1
+
+
+@contextmanager
+def hold_lock(lock_file, reporter=verbosity1):
+    py.path.local(lock_file.dirname).ensure(dir=1)
+    lock = FileLock(str(lock_file))
+    try:
+        try:
+            lock.acquire(0.0001)
+        except Timeout:
+            reporter("lock file {} present, will block until released".format(lock_file))
+            lock.acquire()
+        yield
+    finally:
+        lock.release(force=True)
+
+
+def get_unique_file(path, prefix, suffix):
+    """get a unique file in a folder having a given prefix and suffix,
+    with unique number in between"""
+    lock_file = path.join(".lock")
+    prefix = "{}-".format(prefix)
+    with hold_lock(lock_file):
+        max_value = -1
+        for candidate in path.listdir("{}*{}".format(prefix, suffix)):
+            try:
+                max_value = max(max_value, int(candidate.basename[len(prefix) : -len(suffix)]))
+            except ValueError:
+                continue
+        winner = path.join("{}{}{}".format(prefix, max_value + 1, suffix))
+        winner.ensure(dir=0)
+        return winner
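
get_unique_file serializes the numbering through the .lock file, so concurrent tox processes cannot claim the same name; the result is prefix-<n>suffix with n one past the largest existing number. A usage sketch with a hypothetical scratch directory:

    # Usage sketch; the directory is hypothetical and tox must be installed.
    import py
    from tox.util.lock import get_unique_file

    path = py.path.local("/tmp/tox-lock-demo").ensure(dir=1)
    print(get_unique_file(path, prefix="result", suffix=".json"))  # .../result-0.json
    print(get_unique_file(path, prefix="result", suffix=".json"))  # .../result-1.json
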
diff --git a/venv/lib/python3.10/site-packages/tox/util/main.py b/venv/lib/python3.10/site-packages/tox/util/main.py
new file mode 100644
index 0000000..ebd0faa
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox/util/main.py
@@ -0,0 +1,6 @@
+import inspect
+import os
+
+import tox
+
+MAIN_FILE = os.path.join(os.path.dirname(inspect.getfile(tox)), "__main__.py")
diff --git a/venv/lib/python3.10/site-packages/tox/util/path.py b/venv/lib/python3.10/site-packages/tox/util/path.py
new file mode 100644
index 0000000..b7a2998
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox/util/path.py
@@ -0,0 +1,10 @@
+import shutil
+
+from tox import reporter
+
+
+def ensure_empty_dir(path):
+    if path.check():
+        reporter.info("  removing {}".format(path))
+        shutil.rmtree(str(path), ignore_errors=True)
+        path.ensure(dir=1)
diff --git a/venv/lib/python3.10/site-packages/tox/util/spinner.py b/venv/lib/python3.10/site-packages/tox/util/spinner.py
new file mode 100644
index 0000000..ee22589
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox/util/spinner.py
@@ -0,0 +1,174 @@
+# -*- coding: utf-8 -*-
+"""A minimal non-colored version of https://pypi.org/project/halo, to track list progress"""
+from __future__ import absolute_import, unicode_literals
+
+import os
+import sys
+import threading
+from collections import OrderedDict, namedtuple
+from datetime import datetime
+
+import py
+
+threads = []
+
+if os.name == "nt":
+    import ctypes
+
+    class _CursorInfo(ctypes.Structure):
+        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
+
+
+_BaseMessage = namedtuple("_BaseMessage", ["unicode_msg", "ascii_msg"])
+
+
+class SpinnerMessage(_BaseMessage):
+    def for_file(self, file):
+        try:
+            self.unicode_msg.encode(file.encoding)
+        except (AttributeError, TypeError, UnicodeEncodeError):
+            return self.ascii_msg
+        else:
+            return self.unicode_msg
+
+
+class Spinner(object):
+    CLEAR_LINE = "\033[K"
+    max_width = 120
+    FRAMES = SpinnerMessage("⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏", "|-+x*")
+    OK_FLAG = SpinnerMessage("✔ OK", "[ OK ]")
+    FAIL_FLAG = SpinnerMessage("✖ FAIL", "[FAIL]")
+    SKIP_FLAG = SpinnerMessage("⚠ SKIP", "[SKIP]")
+
+    def __init__(self, enabled=True, refresh_rate=0.1):
+        self.refresh_rate = refresh_rate
+        self.enabled = enabled
+        self._file = sys.stdout
+        self.frames = self.FRAMES.for_file(self._file)
+        self.stream = py.io.TerminalWriter(file=self._file)
+        self._envs = OrderedDict()
+        self._frame_index = 0
+
+    def clear(self):
+        if self.enabled:
+            self.stream.write("\r")
+            self.stream.write(self.CLEAR_LINE)
+
+    def render(self):
+        while True:
+            self._stop_spinner.wait(self.refresh_rate)
+            if self._stop_spinner.is_set():
+                break
+            self.render_frame()
+        return self
+
+    def render_frame(self):
+        if self.enabled:
+            self.clear()
+            self.stream.write("\r{}".format(self.frame()))
+
+    def frame(self):
+        frame = self.frames[self._frame_index]
+        self._frame_index += 1
+        self._frame_index = self._frame_index % len(self.frames)
+        text_frame = "[{}] {}".format(len(self._envs), " | ".join(self._envs))
+        if len(text_frame) > self.max_width - 1:
+            text_frame = "{}...".format(text_frame[: self.max_width - 1 - 3])
+        return "{} {}".format(*[(frame, text_frame)][0])
+
+    def __enter__(self):
+        if self.enabled:
+            self.disable_cursor()
+        self.render_frame()
+        self._stop_spinner = threading.Event()
+        self._spinner_thread = threading.Thread(target=self.render)
+        self._spinner_thread.daemon = True
+        self._spinner_thread.start()
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        if not self._stop_spinner.is_set():
+            if self._spinner_thread:
+                self._stop_spinner.set()
+                self._spinner_thread.join()
+
+            self._frame_index = 0
+            if self.enabled:
+                self.clear()
+                self.enable_cursor()
+
+        return False  # a truthy return from __exit__ would suppress exceptions from the with block
+
+    def add(self, name):
+        self._envs[name] = datetime.now()
+
+    def succeed(self, key):
+        self.finalize(key, self.OK_FLAG.for_file(self._file), green=True)
+
+    def fail(self, key):
+        self.finalize(key, self.FAIL_FLAG.for_file(self._file), red=True)
+
+    def skip(self, key):
+        self.finalize(key, self.SKIP_FLAG.for_file(self._file), white=True)
+
+    def finalize(self, key, status, **kwargs):
+        start_at = self._envs[key]
+        del self._envs[key]
+        if self.enabled:
+            self.clear()
+        self.stream.write(
+            "{} {} in {}{}".format(
+                status,
+                key,
+                td_human_readable(datetime.now() - start_at),
+                os.linesep,
+            ),
+            **kwargs
+        )
+        if not self._envs:
+            self.__exit__(None, None, None)
+
+    def disable_cursor(self):
+        if self._file.isatty():
+            if os.name == "nt":
+                ci = _CursorInfo()
+                handle = ctypes.windll.kernel32.GetStdHandle(-11)
+                ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
+                ci.visible = False
+                ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
+            elif os.name == "posix":
+                self.stream.write("\033[?25l")
+
+    def enable_cursor(self):
+        if self._file.isatty():
+            if os.name == "nt":
+                ci = _CursorInfo()
+                handle = ctypes.windll.kernel32.GetStdHandle(-11)
+                ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
+                ci.visible = True
+                ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
+            elif os.name == "posix":
+                self.stream.write("\033[?25h")
+
+
+def td_human_readable(delta):
+    seconds = int(delta.total_seconds())
+    periods = [
+        ("year", 60 * 60 * 24 * 365),
+        ("month", 60 * 60 * 24 * 30),
+        ("day", 60 * 60 * 24),
+        ("hour", 60 * 60),
+        ("minute", 60),
+        ("second", 1),
+    ]
+
+    texts = []
+    for period_name, period_seconds in periods:
+        if seconds > period_seconds or period_seconds == 1:
+            period_value, seconds = divmod(seconds, period_seconds)
+            if period_name == "second":
+                ms = delta.total_seconds() - int(delta.total_seconds())
+                period_value = round(period_value + ms, 3)
+            has_s = "s" if period_value != 1 else ""
+            texts.append("{} {}{}".format(period_value, period_name, has_s))
+    return ", ".join(texts)
diff --git a/venv/lib/python3.10/site-packages/tox/util/stdlib.py b/venv/lib/python3.10/site-packages/tox/util/stdlib.py
new file mode 100644
index 0000000..29a3f78
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox/util/stdlib.py
@@ -0,0 +1,55 @@
+import sys
+import threading
+from contextlib import contextmanager
+from tempfile import TemporaryFile
+
+if sys.version_info >= (3, 8):
+    from importlib import metadata as importlib_metadata  # noqa
+else:
+    import importlib_metadata  # noqa
+
+
+def is_main_thread():
+    """returns true if we are within the main thread"""
+    cur_thread = threading.current_thread()
+    if sys.version_info >= (3, 4):
+        return cur_thread is threading.main_thread()
+    else:
+        # noinspection PyUnresolvedReferences
+        return isinstance(cur_thread, threading._MainThread)
+
+
+# noinspection PyPep8Naming
+@contextmanager
+def suppress_output():
+    """suppress both stdout and stderr outputs"""
+    if sys.version_info >= (3, 5):
+        from contextlib import redirect_stderr, redirect_stdout
+    else:
+
+        class _RedirectStream(object):
+
+            _stream = None
+
+            def __init__(self, new_target):
+                self._new_target = new_target
+                self._old_targets = []
+
+            def __enter__(self):
+                self._old_targets.append(getattr(sys, self._stream))
+                setattr(sys, self._stream, self._new_target)
+                return self._new_target
+
+            def __exit__(self, exctype, excinst, exctb):
+                setattr(sys, self._stream, self._old_targets.pop())
+
+        class redirect_stdout(_RedirectStream):
+            _stream = "stdout"
+
+        class redirect_stderr(_RedirectStream):
+            _stream = "stderr"
+
+    with TemporaryFile("wt") as file:
+        with redirect_stdout(file):
+            with redirect_stderr(file):
+                yield
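
suppress_output redirects both streams into a throwaway temporary file, which setup_reporter uses so its probe ArgumentParser cannot print anything. A usage sketch, assuming tox is importable:

    # Usage sketch; assumes tox is installed.
    from tox.util.stdlib import suppress_output

    with suppress_output():
        print("swallowed: this goes to a temporary file")
    print("visible again once the context exits")
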
diff --git a/venv/lib/python3.10/site-packages/tox/venv.py b/venv/lib/python3.10/site-packages/tox/venv.py
new file mode 100644
index 0000000..13235c8
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox/venv.py
@@ -0,0 +1,843 @@
+import codecs
+import json
+import os
+import pipes
+import re
+import sys
+from itertools import chain
+
+import py
+
+import tox
+from tox import reporter
+from tox.action import Action
+from tox.config.parallel import ENV_VAR_KEY_PRIVATE as PARALLEL_ENV_VAR_KEY_PRIVATE
+from tox.constants import INFO, PARALLEL_RESULT_JSON_PREFIX, PARALLEL_RESULT_JSON_SUFFIX
+from tox.package.local import resolve_package
+from tox.util.lock import get_unique_file
+from tox.util.path import ensure_empty_dir
+
+from .config import DepConfig
+
+#: maximum parsed shebang interpreter length (see: prepend_shebang_interpreter)
+MAXINTERP = 2048
+
+
+class CreationConfig:
+    def __init__(
+        self,
+        base_resolved_python_sha256,
+        base_resolved_python_path,
+        tox_version,
+        sitepackages,
+        usedevelop,
+        deps,
+        alwayscopy,
+    ):
+        self.base_resolved_python_sha256 = base_resolved_python_sha256
+        self.base_resolved_python_path = base_resolved_python_path
+        self.tox_version = tox_version
+        self.sitepackages = sitepackages
+        self.usedevelop = usedevelop
+        self.alwayscopy = alwayscopy
+        self.deps = deps
+
+    def writeconfig(self, path):
+        lines = [
+            "{} {}".format(self.base_resolved_python_sha256, self.base_resolved_python_path),
+            "{} {:d} {:d} {:d}".format(
+                self.tox_version,
+                self.sitepackages,
+                self.usedevelop,
+                self.alwayscopy,
+            ),
+        ]
+        for dep in self.deps:
+            lines.append("{} {}".format(*dep))
+        content = "\n".join(lines)
+        path.ensure()
+        path.write(content)
+        return content
+
+    @classmethod
+    def readconfig(cls, path):
+        try:
+            lines = path.readlines(cr=0)
+            base_resolved_python_info = lines.pop(0).split(None, 1)
+            tox_version, sitepackages, usedevelop, alwayscopy = lines.pop(0).split(None, 4)
+            sitepackages = bool(int(sitepackages))
+            usedevelop = bool(int(usedevelop))
+            alwayscopy = bool(int(alwayscopy))
+            deps = []
+            for line in lines:
+                base_resolved_python_sha256, depstring = line.split(None, 1)
+                deps.append((base_resolved_python_sha256, depstring))
+            base_resolved_python_sha256, base_resolved_python_path = base_resolved_python_info
+            return CreationConfig(
+                base_resolved_python_sha256,
+                base_resolved_python_path,
+                tox_version,
+                sitepackages,
+                usedevelop,
+                deps,
+                alwayscopy,
+            )
+        except Exception:
+            return None
+
+    def matches_with_reason(self, other, deps_matches_subset=False):
+        for attr in (
+            "base_resolved_python_sha256",
+            "base_resolved_python_path",
+            "tox_version",
+            "sitepackages",
+            "usedevelop",
+            "alwayscopy",
+        ):
+            left = getattr(self, attr)
+            right = getattr(other, attr)
+            if left != right:
+                return False, "attr {} {!r}!={!r}".format(attr, left, right)
+        self_deps = set(self.deps)
+        other_deps = set(other.deps)
+        if self_deps != other_deps:
+            if deps_matches_subset:
+                diff = other_deps - self_deps
+                if diff:
+                    return False, "missing in previous {!r}".format(diff)
+            else:
+                return False, "{!r}!={!r}".format(self_deps, other_deps)
+        return True, None
+
+    def matches(self, other, deps_matches_subset=False):
+        outcome, _ = self.matches_with_reason(other, deps_matches_subset)
+        return outcome
+
+
+class VirtualEnv(object):
+    def __init__(self, envconfig=None, popen=None, env_log=None):
+        self.envconfig = envconfig
+        self.popen = popen
+        self._actions = []
+        self.env_log = env_log
+        self._result_json_path = None
+
+    def new_action(self, msg, *args):
+        config = self.envconfig.config
+        command_log = self.env_log.get_commandlog(
+            "test" if msg in ("run-test", "run-test-pre", "run-test-post") else "setup",
+        )
+        return Action(
+            self.name,
+            msg,
+            args,
+            self.envconfig.envlogdir,
+            config.option.resultjson,
+            command_log,
+            self.popen,
+            self.envconfig.envpython,
+            self.envconfig.suicide_timeout,
+            self.envconfig.interrupt_timeout,
+            self.envconfig.terminate_timeout,
+        )
+
+    def get_result_json_path(self):
+        if self._result_json_path is None:
+            if self.envconfig.config.option.resultjson:
+                self._result_json_path = get_unique_file(
+                    self.path,
+                    PARALLEL_RESULT_JSON_PREFIX,
+                    PARALLEL_RESULT_JSON_SUFFIX,
+                )
+        return self._result_json_path
+
+    @property
+    def hook(self):
+        return self.envconfig.config.pluginmanager.hook
+
+    @property
+    def path(self):
+        """Path to environment base dir."""
+        return self.envconfig.envdir
+
+    @property
+    def path_config(self):
+        return self.path.join(".tox-config1")
+
+    @property
+    def name(self):
+        """test environment name."""
+        return self.envconfig.envname
+
+    def __repr__(self):
+        return "".format(self.path)
+
+    def getcommandpath(self, name, venv=True, cwd=None):
+        """Return absolute path (str or localpath) for specified command name.
+
+        - If it's a local path we will rewrite it as a relative path.
+        - If venv is True we will check if the command comes from the venv
+          or is allowed to come from outside it.
+        """
+        name = str(name)
+        if os.path.isabs(name):
+            return name
+        if os.path.split(name)[0] == ".":
+            path = cwd.join(name)
+            if path.check():
+                return str(path)
+
+        if venv:
+            path = self._venv_lookup_and_check_external_allowlist(name)
+        else:
+            path = self._normal_lookup(name)
+
+        if path is None:
+            raise tox.exception.InvocationError(
+                "could not find executable {}".format(pipes.quote(name)),
+            )
+
+        return str(path)  # will not be rewritten for reporting
+
+    def _venv_lookup_and_check_external_allowlist(self, name):
+        path = self._venv_lookup(name)
+        if path is None:
+            path = self._normal_lookup(name)
+            if path is not None:
+                self._check_external_allowed_and_warn(path)
+        return path
+
+    def _venv_lookup(self, name):
+        return py.path.local.sysfind(name, paths=[self.envconfig.envbindir])
+
+    def _normal_lookup(self, name):
+        return py.path.local.sysfind(name)
+
+    def _check_external_allowed_and_warn(self, path):
+        if not self.is_allowed_external(path):
+            reporter.warning(
+                "test command found but not installed in testenv\n"
+                "  cmd: {}\n"
+                "  env: {}\n"
+                "Maybe you forgot to specify a dependency? "
+                "See also the allowlist_externals envconfig setting.\n\n"
+                "DEPRECATION WARNING: this will be an error in tox 4 and above!".format(
+                    path,
+                    self.envconfig.envdir,
+                ),
+            )
+
+    def is_allowed_external(self, p):
+        tryadd = [""]
+        if tox.INFO.IS_WIN:
+            tryadd += [os.path.normcase(x) for x in os.environ["PATHEXT"].split(os.pathsep)]
+            p = py.path.local(os.path.normcase(str(p)))
+
+        if self.envconfig.allowlist_externals and self.envconfig.whitelist_externals:
+            raise tox.exception.ConfigError(
+                "Either whitelist_externals or allowlist_externals might be specified, not both",
+            )
+
+        allowed_externals = (
+            self.envconfig.whitelist_externals or self.envconfig.allowlist_externals
+        )
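+        # (Editor's example) with ``allowlist_externals = make`` the loop
+        # below fnmatch-es ``/usr/bin/make`` against "make"; on Windows the
+        # PATHEXT suffixes collected above let "MAKE.EXE" match as well.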
+        for x in allowed_externals:
+            for add in tryadd:
+                if p.fnmatch(x + add):
+                    return True
+        return False
+
+    def update(self, action):
+        """return status string for updating actual venv to match configuration.
+        if status string is empty, all is ok.
+        """
+        rconfig = CreationConfig.readconfig(self.path_config)
+        if self.envconfig.recreate:
+            reason = "-r flag"
+        else:
+            if rconfig is None:
+                reason = "no previous config {}".format(self.path_config)
+            else:
+                live_config = self._getliveconfig()
+                deps_subset_match = getattr(self.envconfig, "deps_matches_subset", False)
+                outcome, reason = rconfig.matches_with_reason(live_config, deps_subset_match)
+        if reason is None:
+            action.info("reusing", self.envconfig.envdir)
+            return
+        action.info("cannot reuse", reason)
+        if rconfig is None:
+            action.setactivity("create", self.envconfig.envdir)
+        else:
+            action.setactivity("recreate", self.envconfig.envdir)
+        try:
+            self.hook.tox_testenv_create(action=action, venv=self)
+            self.just_created = True
+        except tox.exception.UnsupportedInterpreter as exception:
+            return exception
+        try:
+            self.hook.tox_testenv_install_deps(action=action, venv=self)
+        except tox.exception.InvocationError as exception:
+            return "could not install deps {}; v = {!r}".format(self.envconfig.deps, exception)
+
+    def _getliveconfig(self):
+        base_resolved_python_path = self.envconfig.python_info.executable
+        version = tox.__version__
+        sitepackages = self.envconfig.sitepackages
+        develop = self.envconfig.usedevelop
+        alwayscopy = self.envconfig.alwayscopy
+        deps = []
+        for dep in self.get_resolved_dependencies():
+            dep_name_sha256 = getdigest(dep.name)
+            deps.append((dep_name_sha256, dep.name))
+        base_resolved_python_sha256 = getdigest(base_resolved_python_path)
+        return CreationConfig(
+            base_resolved_python_sha256,
+            base_resolved_python_path,
+            version,
+            sitepackages,
+            develop,
+            deps,
+            alwayscopy,
+        )
+
+    def get_resolved_dependencies(self):
+        dependencies = []
+        for dependency in self.envconfig.deps:
+            if dependency.indexserver is None:
+                package = resolve_package(package_spec=dependency.name)
+                if package != dependency.name:
+                    dependency = dependency.__class__(package)
+            dependencies.append(dependency)
+        return dependencies
+
+    def getsupportedinterpreter(self):
+        return self.envconfig.getsupportedinterpreter()
+
+    def matching_platform(self):
+        return re.match(self.envconfig.platform, sys.platform)
+
+    def finish(self):
+        previous_config = CreationConfig.readconfig(self.path_config)
+        live_config = self._getliveconfig()
+        if previous_config is None or not previous_config.matches(live_config):
+            content = live_config.writeconfig(self.path_config)
+            reporter.verbosity1("write config to {} as {!r}".format(self.path_config, content))
+
+    def _needs_reinstall(self, setupdir, action):
+        setup_py = setupdir.join("setup.py")
+
+        if not setup_py.exists():
+            return False
+
+        setup_cfg = setupdir.join("setup.cfg")
+        args = [self.envconfig.envpython, str(setup_py), "--name"]
+        env = self._get_os_environ()
+        output = action.popen(
+            args,
+            cwd=setupdir,
+            redirect=False,
+            returnout=True,
+            env=env,
+            capture_err=False,
+        )
+        name = next(
+            (i for i in output.split("\n") if i and not i.startswith("pydev debugger:")),
+            "",
+        )
+        args = [
+            self.envconfig.envpython,
+            "-c",
+            "import sys;  import json; print(json.dumps(sys.path))",
+        ]
+        out = action.popen(args, redirect=False, returnout=True, env=env)
+        try:
+            sys_path = json.loads(out)
+        except ValueError:
+            sys_path = []
+        egg_info_fname = ".".join((name.replace("-", "_"), "egg-info"))
+        for d in reversed(sys_path):
+            egg_info = py.path.local(d).join(egg_info_fname)
+            if egg_info.check():
+                break
+        else:
+            return True
+        needs_reinstall = any(
+            conf_file.check() and conf_file.mtime() > egg_info.mtime()
+            for conf_file in (setup_py, setup_cfg)
+        )
+
+        # Ensure the modification time of the egg-info folder is updated so we
+        # won't need to do this again.
+        # TODO(stephenfin): Remove once the minimum version of setuptools is
+        # high enough to include https://github.com/pypa/setuptools/pull/1427/
+        if needs_reinstall:
+            egg_info.setmtime()
+
+        return needs_reinstall
+
+    def install_pkg(self, dir, action, name, is_develop=False):
+        assert action is not None
+
+        if getattr(self, "just_created", False):
+            action.setactivity(name, dir)
+            self.finish()
+            pip_flags = ["--exists-action", "w"]
+        else:
+            if is_develop and not self._needs_reinstall(dir, action):
+                action.setactivity("{}-noop".format(name), dir)
+                return
+            action.setactivity("{}-nodeps".format(name), dir)
+            pip_flags = ["--no-deps"] + ([] if is_develop else ["-U"])
+        pip_flags.extend(["-v"] * min(3, reporter.verbosity() - 2))
+        if self.envconfig.extras:
+            dir += "[{}]".format(",".join(self.envconfig.extras))
+        target = [dir]
+        if is_develop:
+            target.insert(0, "-e")
+        self._install(target, extraopts=pip_flags, action=action)
+
+    def developpkg(self, setupdir, action):
+        self.install_pkg(setupdir, action, "develop-inst", is_develop=True)
+
+    def installpkg(self, sdistpath, action):
+        self.install_pkg(sdistpath, action, "inst")
+
+    def _installopts(self, indexserver):
+        options = []
+        if indexserver:
+            options += ["-i", indexserver]
+        if self.envconfig.pip_pre:
+            options.append("--pre")
+        return options
+
+    def run_install_command(self, packages, action, options=()):
+        def expand(val):
+            # expand an install command
+            if val == "{packages}":
+                for package in packages:
+                    yield package
+            elif val == "{opts}":
+                for opt in options:
+                    yield opt
+            else:
+                yield val
+
+        cmd = list(chain.from_iterable(expand(val) for val in self.envconfig.install_command))
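+        # (Editor's example) the default install_command
+        #   python -m pip install {opts} {packages}
+        # expands here to e.g.
+        #   ["python", "-m", "pip", "install", "-i", index_url, "pytest"]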
+
+        env = self._get_os_environ()
+        self.ensure_pip_os_environ_ok(env)
+
+        old_stdout = sys.stdout
+        sys.stdout = codecs.getwriter("utf8")(sys.stdout)
+        try:
+            self._pcall(
+                cmd,
+                cwd=self.envconfig.config.toxinidir,
+                action=action,
+                redirect=reporter.verbosity() < reporter.Verbosity.DEBUG,
+                env=env,
+            )
+        except KeyboardInterrupt:
+            self.status = "keyboardinterrupt"
+            raise
+        finally:
+            sys.stdout = old_stdout
+
+    def ensure_pip_os_environ_ok(self, env):
+        for key in ("PIP_RESPECT_VIRTUALENV", "PIP_REQUIRE_VIRTUALENV", "__PYVENV_LAUNCHER__"):
+            env.pop(key, None)
+        if all("PYTHONPATH" not in i for i in (self.envconfig.passenv, self.envconfig.setenv)):
+            # If PYTHONPATH not explicitly asked for, remove it.
+            if "PYTHONPATH" in env:
+                if sys.version_info < (3, 4) or bool(env["PYTHONPATH"]):
+                    # https://docs.python.org/3/whatsnew/3.4.html#changes-in-python-command-behavior
+                    # In a posix shell, setting the PATH environment variable to an empty value is
+                    # equivalent to not setting it at all.
+                    reporter.warning(
+                        "Discarding $PYTHONPATH from environment, to override "
+                        "specify PYTHONPATH in 'passenv' in your configuration.",
+                    )
+                env.pop("PYTHONPATH")
+
+        # installing packages at user level may mean we're not installing inside the venv
+        env["PIP_USER"] = "0"
+
+        # installing without dependencies may lead to broken packages
+        env["PIP_NO_DEPS"] = "0"
+
+    def _install(self, deps, extraopts=None, action=None):
+        if not deps:
+            return
+        d = {}
+        ixservers = []
+        for dep in deps:
+            if isinstance(dep, (str, py.path.local)):
+                dep = DepConfig(str(dep), None)
+            assert isinstance(dep, DepConfig), dep
+            if dep.indexserver is None:
+                ixserver = self.envconfig.config.indexserver["default"]
+            else:
+                ixserver = dep.indexserver
+            d.setdefault(ixserver, []).append(dep.name)
+            if ixserver not in ixservers:
+                ixservers.append(ixserver)
+            assert ixserver.url is None or isinstance(ixserver.url, str)
+
+        for ixserver in ixservers:
+            packages = d[ixserver]
+            options = self._installopts(ixserver.url)
+            if extraopts:
+                options.extend(extraopts)
+            self.run_install_command(packages=packages, options=options, action=action)
+
+    def _get_os_environ(self, is_test_command=False):
+        if is_test_command:
+            # for executing tests we construct a clean environment
+            env = {}
+            for env_key in self.envconfig.passenv:
+                if env_key in os.environ:
+                    env[env_key] = os.environ[env_key]
+        else:
+            # for executing non-test commands we use the full
+            # invocation environment
+            env = os.environ.copy()
+
+        # in any case we honor per-testenv setenv configuration
+        env.update(self.envconfig.setenv.export())
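+        # (Editor's note) for test commands the environment is rebuilt from
+        # scratch, so only variables named in ``passenv`` survive, plus the
+        # ``setenv`` entries merged above and VIRTUAL_ENV set below.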
+
+        env["VIRTUAL_ENV"] = str(self.path)
+        return env
+
+    def test(
+        self,
+        redirect=False,
+        name="run-test",
+        commands=None,
+        ignore_outcome=None,
+        ignore_errors=None,
+        display_hash_seed=False,
+    ):
+        if commands is None:
+            commands = self.envconfig.commands
+        if ignore_outcome is None:
+            ignore_outcome = self.envconfig.ignore_outcome
+        if ignore_errors is None:
+            ignore_errors = self.envconfig.ignore_errors
+        with self.new_action(name) as action:
+            cwd = self.envconfig.changedir
+            if display_hash_seed:
+                env = self._get_os_environ(is_test_command=True)
+                # Display PYTHONHASHSEED to assist with reproducibility.
+                action.setactivity(name, "PYTHONHASHSEED={!r}".format(env.get("PYTHONHASHSEED")))
+            for i, argv in enumerate(filter(bool, commands)):
+                # commands must be stringified for the log message because
+                # _pcall rewrites argv[0] to a py.path local(), which happens
+                # when the same environment is invoked twice
+                message = "commands[{}] | {}".format(
+                    i,
+                    " ".join(pipes.quote(str(x)) for x in argv),
+                )
+                action.setactivity(name, message)
+                # check to see if we need to ignore the return code
+                # if so, we need to alter the command line arguments
+                if argv[0].startswith("-"):
+                    ignore_ret = True
+                    if argv[0] == "-":
+                        del argv[0]
+                    else:
+                        argv[0] = argv[0].lstrip("-")
+                else:
+                    ignore_ret = False
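+                # (Editor's note) a leading "-" works like in make: the
+                # command still runs, but a non-zero exit code is tolerated.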
+
+                try:
+                    self._pcall(
+                        argv,
+                        cwd=cwd,
+                        action=action,
+                        redirect=redirect,
+                        ignore_ret=ignore_ret,
+                        is_test_command=True,
+                    )
+                except tox.exception.InvocationError as err:
+                    if ignore_outcome:
+                        msg = "command failed but result from testenv is ignored\ncmd:"
+                        reporter.warning("{} {}".format(msg, err))
+                        self.status = "ignored failed command"
+                        continue  # keep processing commands
+
+                    reporter.error(str(err))
+                    self.status = "commands failed"
+                    if not ignore_errors:
+                        break  # Don't process remaining commands
+                except KeyboardInterrupt:
+                    self.status = "keyboardinterrupt"
+                    raise
+
+    def _pcall(
+        self,
+        args,
+        cwd,
+        venv=True,
+        is_test_command=False,
+        action=None,
+        redirect=True,
+        ignore_ret=False,
+        returnout=False,
+        env=None,
+        capture_err=True,
+    ):
+        if env is None:
+            env = self._get_os_environ(is_test_command=is_test_command)
+
+        # construct environment variables
+        env.pop("VIRTUALENV_PYTHON", None)
+        bin_dir = str(self.envconfig.envbindir)
+        path = self.envconfig.setenv.get("PATH") or os.environ["PATH"]
+        env["PATH"] = os.pathsep.join([bin_dir, path])
+        reporter.verbosity2("setting PATH={}".format(env["PATH"]))
+
+        # get command
+        try:
+            args[0] = self.getcommandpath(args[0], venv, cwd)
+        except tox.exception.InvocationError:
+            if ignore_ret:
+                self.status = getattr(self, "status", 0)
+                msg = "command not found but explicitly ignored"
+                reporter.warning("{}\ncmd: {}".format(msg, args[0]))
+                return ""  # in case it's returnout
+            else:
+                raise
+
+        if sys.platform != "win32" and "TOX_LIMITED_SHEBANG" in os.environ:
+            args = prepend_shebang_interpreter(args)
+
+        cwd.ensure(dir=1)  # ensure the cwd exists
+        return action.popen(
+            args,
+            cwd=cwd,
+            env=env,
+            redirect=redirect,
+            ignore_ret=ignore_ret,
+            returnout=returnout,
+            report_fail=not is_test_command,
+            capture_err=capture_err,
+        )
+
+    def setupenv(self):
+        if self.envconfig._missing_subs:
+            self.status = (
+                "unresolvable substitution(s):\n    {}\n"
+                "Environment variables are missing or defined recursively.".format(
+                    "\n    ".join(
+                        "{}: '{}'".format(section_key, exc.name)
+                        for section_key, exc in sorted(self.envconfig._missing_subs.items())
+                    ),
+                )
+            )
+            return
+        if not self.matching_platform():
+            self.status = "platform mismatch"
+            return  # we simply omit non-matching platforms
+        with self.new_action("getenv", self.envconfig.envdir) as action:
+            self.status = 0
+            default_ret_code = 1
+            envlog = self.env_log
+            try:
+                status = self.update(action=action)
+            except IOError as e:
+                if e.args[0] != 2:
+                    raise
+                status = (
+                    "Error creating virtualenv. Note that spaces in paths are "
+                    "not supported by virtualenv. Error details: {!r}".format(e)
+                )
+            except tox.exception.InvocationError as e:
+                status = e
+            except tox.exception.InterpreterNotFound as e:
+                status = e
+                if self.envconfig.config.option.skip_missing_interpreters == "true":
+                    default_ret_code = 0
+            except KeyboardInterrupt:
+                self.status = "keyboardinterrupt"
+                raise
+            if status:
+                str_status = str(status)
+                command_log = envlog.get_commandlog("setup")
+                command_log.add_command(["setup virtualenv"], str_status, default_ret_code)
+                self.status = status
+                if default_ret_code == 0:
+                    reporter.skip(str_status)
+                else:
+                    reporter.error(str_status)
+                return False
+            command_path = self.getcommandpath("python")
+            envlog.set_python_info(command_path)
+            return True
+
+    def finishvenv(self):
+        with self.new_action("finishvenv"):
+            self.finish()
+            return True
+
+
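+# (Editor's note) a missing file hashes to a fixed all-zero placeholder, so a
+# file that disappears between runs changes the stored digest and triggers
+# recreation of the environment.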
+def getdigest(path):
+    path = py.path.local(path)
+    if not path.check(file=1):
+        return "0" * 32
+    return path.computehash("sha256")
+
+
+def prepend_shebang_interpreter(args):
+    # prepend interpreter directive (if any) to argument list
+    #
+    # When virtual environments are prepared under a very long path, the
+    # system may be unable to invoke shebang scripts whose interpreter line
+    # exceeds system limits (e.g. Linux truncates at 128 bytes;
+    # BINPRM_BUF_SIZE). This function checks whether the executable is a
+    # script containing a shebang line; if so, it extracts the interpreter
+    # (and an optional argument) and prepends those values to the provided
+    # argument list. tox only reads an interpreter directive of at most 2048
+    # bytes, to limit excessive reading while supporting UNIX systems that
+    # allow longer interpreter lines.
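+    #
+    # (Editor's example) for a script beginning with "#!/usr/bin/env python",
+    # prepend_shebang_interpreter(["./script.py"]) returns
+    # ["/usr/bin/env", "python", "./script.py"].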
+    try:
+        with open(args[0], "rb") as f:
+            if f.read(1) == b"#" and f.read(1) == b"!":
+                interp = f.readline(MAXINTERP + 1).rstrip().decode("UTF-8")
+                if len(interp) > MAXINTERP:  # avoid a truncated interpreter
+                    return args
+                interp_args = interp.split(None, 1)[:2]
+                return interp_args + args
+    except (UnicodeDecodeError, IOError):
+        pass
+    return args
+
+
+_SKIP_VENV_CREATION = os.environ.get("_TOX_SKIP_ENV_CREATION_TEST", False) == "1"
+
+
+@tox.hookimpl
+def tox_testenv_create(venv, action):
+    config_interpreter = venv.getsupportedinterpreter()
+    args = [sys.executable, "-m", "virtualenv"]
+    if venv.envconfig.sitepackages:
+        args.append("--system-site-packages")
+    if venv.envconfig.alwayscopy:
+        args.append("--always-copy")
+    if not venv.envconfig.download:
+        args.append("--no-download")
+    else:
+        args.append("--download")
+    # add interpreter explicitly, to prevent using default (virtualenv.ini)
+    args.extend(["--python", str(config_interpreter)])
+
+    cleanup_for_venv(venv)
+
+    base_path = venv.path.dirpath()
+    base_path.ensure(dir=1)
+    args.append(venv.path.basename)
+    if not _SKIP_VENV_CREATION:
+        try:
+            venv._pcall(
+                args,
+                venv=False,
+                action=action,
+                cwd=base_path,
+                redirect=reporter.verbosity() < reporter.Verbosity.DEBUG,
+            )
+        except KeyboardInterrupt:
+            venv.status = "keyboardinterrupt"
+            raise
+    return True  # Return non-None to indicate plugin has completed
+
+
+def cleanup_for_venv(venv):
+    within_parallel = PARALLEL_ENV_VAR_KEY_PRIVATE in os.environ
+    # if the directory exists and it doesn't look like a virtualenv, produce
+    # an error
+    if venv.path.exists():
+        dir_items = set(os.listdir(str(venv.path))) - {".lock", "log"}
+        dir_items = {p for p in dir_items if not p.startswith(".tox-") or p == ".tox-config1"}
+    else:
+        dir_items = set()
+
+    if not (
+        # doesn't exist => OK
+        not venv.path.exists()
+        # does exist, but it's empty => OK
+        or not dir_items
+        # tox has marked this as an environment it has created in the past
+        or ".tox-config1" in dir_items
+        # it exists and we're on windows with Lib and Scripts => OK
+        or (INFO.IS_WIN and dir_items > {"Scripts", "Lib"})
+        # non-windows, with lib and bin => OK
+        or dir_items > {"bin", "lib"}
+        # pypy has a different lib folder => OK
+        or dir_items > {"bin", "lib_pypy"}
+    ):
+        venv.status = "error"
+        reporter.error(
+            "cowardly refusing to delete `envdir` (it does not look like a virtualenv): "
+            "{}".format(venv.path),
+        )
+        raise SystemExit(2)
+
+    if within_parallel:
+        if venv.path.exists():
+            # do not delete the log folder as that's used by parent
+            for content in venv.path.listdir():
+                if not content.basename == "log":
+                    content.remove(rec=1, ignore_errors=True)
+    else:
+        ensure_empty_dir(venv.path)
+
+
+@tox.hookimpl
+def tox_testenv_install_deps(venv, action):
+    deps = venv.get_resolved_dependencies()
+    if deps:
+        depinfo = ", ".join(map(str, deps))
+        action.setactivity("installdeps", depinfo)
+        venv._install(deps, action=action)
+    return True  # Return non-None to indicate plugin has completed
+
+
+@tox.hookimpl
+def tox_runtest(venv, redirect):
+    venv.test(redirect=redirect)
+    return True  # Return non-None to indicate plugin has completed
+
+
+@tox.hookimpl
+def tox_runtest_pre(venv):
+    venv.status = 0
+    ensure_empty_dir(venv.envconfig.envtmpdir)
+    venv.envconfig.envtmpdir.ensure(dir=1)
+    venv.test(
+        name="run-test-pre",
+        commands=venv.envconfig.commands_pre,
+        redirect=False,
+        ignore_outcome=False,
+        ignore_errors=False,
+        display_hash_seed=True,
+    )
+
+
+@tox.hookimpl
+def tox_runtest_post(venv):
+    venv.test(
+        name="run-test-post",
+        commands=venv.envconfig.commands_post,
+        redirect=False,
+        ignore_outcome=False,
+        ignore_errors=False,
+    )
+
+
+@tox.hookimpl
+def tox_runenvreport(venv, action):
+    # write out version dependency information
+    args = venv.envconfig.list_dependencies_command
+    output = venv._pcall(args, cwd=venv.envconfig.config.toxinidir, action=action, returnout=True)
+    # the output contains a mime-header, skip it
+    output = output.split("\n\n")[-1]
+    packages = output.strip().split("\n")
+    return packages  # Return non-None to indicate plugin has completed
diff --git a/venv/lib/python3.10/site-packages/tox/version.py b/venv/lib/python3.10/site-packages/tox/version.py
new file mode 100644
index 0000000..d5f8304
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tox/version.py
@@ -0,0 +1,4 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+__version__ = '3.25.0'
diff --git a/venv/lib/python3.10/site-packages/virtualenv-20.24.3.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/virtualenv-20.24.3.dist-info/INSTALLER
new file mode 100644
index 0000000..a1b589e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv-20.24.3.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/venv/lib/python3.10/site-packages/virtualenv-20.24.3.dist-info/METADATA b/venv/lib/python3.10/site-packages/virtualenv-20.24.3.dist-info/METADATA
new file mode 100644
index 0000000..90125a8
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv-20.24.3.dist-info/METADATA
@@ -0,0 +1,84 @@
+Metadata-Version: 2.1
+Name: virtualenv
+Version: 20.24.3
+Summary: Virtual Python Environment builder
+Project-URL: Documentation, https://virtualenv.pypa.io
+Project-URL: Homepage, https://github.com/pypa/virtualenv
+Project-URL: Source, https://github.com/pypa/virtualenv
+Project-URL: Tracker, https://github.com/pypa/virtualenv/issues
+Maintainer-email: Bernat Gabor <gaborjbernat@gmail.com>
+License-Expression: MIT
+License-File: LICENSE
+Keywords: environments,isolated,virtual
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: MacOS :: MacOS X
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Operating System :: POSIX
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Topic :: Software Development :: Libraries
+Classifier: Topic :: Software Development :: Testing
+Classifier: Topic :: Utilities
+Requires-Python: >=3.7
+Requires-Dist: distlib<1,>=0.3.7
+Requires-Dist: filelock<4,>=3.12.2
+Requires-Dist: importlib-metadata>=6.6; python_version < '3.8'
+Requires-Dist: platformdirs<4,>=3.9.1
+Provides-Extra: docs
+Requires-Dist: furo>=2023.5.20; extra == 'docs'
+Requires-Dist: proselint>=0.13; extra == 'docs'
+Requires-Dist: sphinx-argparse>=0.4; extra == 'docs'
+Requires-Dist: sphinx>=7.0.1; extra == 'docs'
+Requires-Dist: sphinxcontrib-towncrier>=0.2.1a0; extra == 'docs'
+Requires-Dist: towncrier>=23.6; extra == 'docs'
+Provides-Extra: test
+Requires-Dist: covdefaults>=2.3; extra == 'test'
+Requires-Dist: coverage-enable-subprocess>=1; extra == 'test'
+Requires-Dist: coverage>=7.2.7; extra == 'test'
+Requires-Dist: flaky>=3.7; extra == 'test'
+Requires-Dist: packaging>=23.1; extra == 'test'
+Requires-Dist: pytest-env>=0.8.2; extra == 'test'
+Requires-Dist: pytest-freezer>=0.4.8; platform_python_implementation == 'PyPy' and extra == 'test'
+Requires-Dist: pytest-mock>=3.11.1; extra == 'test'
+Requires-Dist: pytest-randomly>=3.12; extra == 'test'
+Requires-Dist: pytest-timeout>=2.1; extra == 'test'
+Requires-Dist: pytest>=7.4; extra == 'test'
+Requires-Dist: setuptools>=68; extra == 'test'
+Requires-Dist: time-machine>=2.10; platform_python_implementation == 'CPython' and extra == 'test'
+Description-Content-Type: text/markdown
+
+# virtualenv
+
+[![PyPI](https://img.shields.io/pypi/v/virtualenv?style=flat-square)](https://pypi.org/project/virtualenv)
+[![PyPI - Implementation](https://img.shields.io/pypi/implementation/virtualenv?style=flat-square)](https://pypi.org/project/virtualenv)
+[![PyPI - Python Version](https://img.shields.io/pypi/pyversions/virtualenv?style=flat-square)](https://pypi.org/project/virtualenv)
+[![Documentation](https://readthedocs.org/projects/virtualenv/badge/?version=latest&style=flat-square)](http://virtualenv.pypa.io)
+[![Discord](https://img.shields.io/discord/803025117553754132)](https://discord.gg/pypa)
+[![PyPI - Downloads](https://img.shields.io/pypi/dm/virtualenv?style=flat-square)](https://pypistats.org/packages/virtualenv)
+[![PyPI - License](https://img.shields.io/pypi/l/virtualenv?style=flat-square)](https://opensource.org/licenses/MIT)
+[![Build Status](https://github.com/pypa/virtualenv/workflows/check/badge.svg?branch=main&event=push)](https://github.com/pypa/virtualenv/actions?query=workflow%3Acheck)
+[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg?style=flat-square)](https://github.com/psf/black)
+
+A tool for creating isolated `virtual` python environments.
+
+- [Installation](https://virtualenv.pypa.io/en/latest/installation.html)
+- [Documentation](https://virtualenv.pypa.io)
+- [Changelog](https://virtualenv.pypa.io/en/latest/changelog.html)
+- [Issues](https://github.com/pypa/virtualenv/issues)
+- [PyPI](https://pypi.org/project/virtualenv)
+- [Github](https://github.com/pypa/virtualenv)
+
+## Code of Conduct
+
+Everyone interacting in the virtualenv project's codebases, issue trackers, chat rooms, and mailing lists is expected to
+follow the [PSF Code of Conduct](https://github.com/pypa/.github/blob/main/CODE_OF_CONDUCT.md).
diff --git a/venv/lib/python3.10/site-packages/virtualenv-20.24.3.dist-info/RECORD b/venv/lib/python3.10/site-packages/virtualenv-20.24.3.dist-info/RECORD
new file mode 100644
index 0000000..42692c2
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv-20.24.3.dist-info/RECORD
@@ -0,0 +1,200 @@
+../../../bin/virtualenv,sha256=vxig470xFCyVn15oF-WYJyDaw8cDoG8tFL-WMp8tn58,279
+virtualenv-20.24.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+virtualenv-20.24.3.dist-info/METADATA,sha256=fXFsaFVEJu_CufXgnqOysxqp9Ss92NxGVGjrk_OBkVM,4523
+virtualenv-20.24.3.dist-info/RECORD,,
+virtualenv-20.24.3.dist-info/WHEEL,sha256=9QBuHhg6FNW7lppboF2vKVbCGTVzsFykgRQjjlajrhA,87
+virtualenv-20.24.3.dist-info/entry_points.txt,sha256=eqicfmK2n6Oe1zJautR8FPszChWO3NV2EpPK2Ln7O-o,1184
+virtualenv-20.24.3.dist-info/licenses/LICENSE,sha256=XBWRk3jFsqqrexnOpw2M3HX3aHnjJFTkwDmfi3HRcek,1074
+virtualenv/__init__.py,sha256=Xgxz-UdfWM8x8Waxcn9Lb50VamQJ6VtQi6CYwnSO-nM,183
+virtualenv/__main__.py,sha256=NGVHCJH9cTpjjESbicKHqLsFzuZLmpxteY9_mkoqoyI,2619
+virtualenv/__pycache__/__init__.cpython-310.pyc,,
+virtualenv/__pycache__/__main__.cpython-310.pyc,,
+virtualenv/__pycache__/info.cpython-310.pyc,,
+virtualenv/__pycache__/report.cpython-310.pyc,,
+virtualenv/__pycache__/version.cpython-310.pyc,,
+virtualenv/activation/__init__.py,sha256=Z9G8LOclFHqtaarfVODKzYb5dsf7QU5q8vhF_4bwGxc,464
+virtualenv/activation/__pycache__/__init__.cpython-310.pyc,,
+virtualenv/activation/__pycache__/activator.cpython-310.pyc,,
+virtualenv/activation/__pycache__/via_template.cpython-310.pyc,,
+virtualenv/activation/activator.py,sha256=63QTDV0IIGDisMZ3AU0v6yeYkiCg_2n0Ocdj00xBWr8,1437
+virtualenv/activation/bash/__init__.py,sha256=-gJPcAs3VEY2EP5URBsUaXTHrlQGamSyNO6E4ohe8pw,334
+virtualenv/activation/bash/__pycache__/__init__.cpython-310.pyc,,
+virtualenv/activation/bash/activate.sh,sha256=dPb39PJLgqPrQg_gwFtn0hSlcKM2O-8bhCUf5HduSio,2264
+virtualenv/activation/batch/__init__.py,sha256=0BRCm4OwwKdlRGr2U8PDaReS3XY2N17bqQ1qGcELvXA,692
+virtualenv/activation/batch/__pycache__/__init__.cpython-310.pyc,,
+virtualenv/activation/batch/activate.bat,sha256=zRuHLBOugNbuO8NNvgPERw9pCPYWaU_9xtGpEg9HnjE,1056
+virtualenv/activation/batch/deactivate.bat,sha256=07F0HsJ5cs1VpOxPyR8LFqBgNRD2TPhK4NNxF6NIRas,537
+virtualenv/activation/batch/pydoc.bat,sha256=pVuxn8mn9P_Rd0349fiBEiwIuMvfJQSfgJ2dljUT2fA,24
+virtualenv/activation/cshell/__init__.py,sha256=ZUCemt69joL0KoBSqHQILHTbVeCXjSMpOL9IIofrw6Y,336
+virtualenv/activation/cshell/__pycache__/__init__.cpython-310.pyc,,
+virtualenv/activation/cshell/activate.csh,sha256=PZd9jd6pCh_9Zx9QzbCfeYnC4oc_E-tQIit_VtupwQk,1531
+virtualenv/activation/fish/__init__.py,sha256=gi0Q2mhmsHLtN_vm2Ck6m_tg6ixmJ5BGQ9SZJDjOHVE,241
+virtualenv/activation/fish/__pycache__/__init__.cpython-310.pyc,,
+virtualenv/activation/fish/activate.fish,sha256=c82NUSoKrtAOs4zkb1lt-PNidgKXkUsLAAfItAUsUnQ,3094
+virtualenv/activation/nushell/__init__.py,sha256=ZCnkOTAoRDDXwPL_DpfStE4vEbkIwlZZSwTyEEecO6o,603
+virtualenv/activation/nushell/__pycache__/__init__.cpython-310.pyc,,
+virtualenv/activation/nushell/activate.nu,sha256=c1D6GJAiDT4xaO9Z8pNx8MOZ1v5lVYJjUe-795hE9O0,2779
+virtualenv/activation/powershell/__init__.py,sha256=oerDB1yDxo8fRoW8BuplydTXEgOmaiYRCNGkwFcIsjw,252
+virtualenv/activation/powershell/__pycache__/__init__.cpython-310.pyc,,
+virtualenv/activation/powershell/activate.ps1,sha256=gburXoeiTrZPbc4kxUbp1klZZg9h5frolydZwR7t5dA,1706
+virtualenv/activation/python/__init__.py,sha256=HIHmO_uwsLMIlbopDDgGBz9B8iHU3a2V1eMD3W-TmvQ,816
+virtualenv/activation/python/__pycache__/__init__.cpython-310.pyc,,
+virtualenv/activation/python/__pycache__/activate_this.cpython-310.pyc,,
+virtualenv/activation/python/activate_this.py,sha256=OMqtLmh6JrGoWA1VGaohi-XQt3X2-A2fuIGoGePUdLg,1363
+virtualenv/activation/via_template.py,sha256=lNRhZBD-ESsofYPGVCJZ_M5aSdAWOEfoSnSPlIUzCpU,2607
+virtualenv/app_data/__init__.py,sha256=iFqz7nzy3rVkvk9zDqkTxhdJlt7yyFnpeABfEUcoKFs,1432
+virtualenv/app_data/__pycache__/__init__.cpython-310.pyc,,
+virtualenv/app_data/__pycache__/base.cpython-310.pyc,,
+virtualenv/app_data/__pycache__/na.cpython-310.pyc,,
+virtualenv/app_data/__pycache__/read_only.cpython-310.pyc,,
+virtualenv/app_data/__pycache__/via_disk_folder.cpython-310.pyc,,
+virtualenv/app_data/__pycache__/via_tempdir.cpython-310.pyc,,
+virtualenv/app_data/base.py,sha256=O0C2hA_xuIJyBJJOo1kroibBMqzK5AWFYfgjS9RrKTw,2115
+virtualenv/app_data/na.py,sha256=9ye51rBg5Ywz2i58s6b6XxdBeHvuIIgGmXO65P5eLFo,1500
+virtualenv/app_data/read_only.py,sha256=C4jfyONJLVPAKUKE7QMHBAk_adfhmWK31xX_7TLxCc4,1113
+virtualenv/app_data/via_disk_folder.py,sha256=K8BZbCqhCeTo9EmvV9fr1_5pISA9Vxjkr5-xrOc6O_I,5519
+virtualenv/app_data/via_tempdir.py,sha256=BueKPXPl6HiDpQOyEyldJj9mdDqGYLzAa-iNVbvuC2o,775
+virtualenv/config/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+virtualenv/config/__pycache__/__init__.cpython-310.pyc,,
+virtualenv/config/__pycache__/convert.cpython-310.pyc,,
+virtualenv/config/__pycache__/env_var.cpython-310.pyc,,
+virtualenv/config/__pycache__/ini.cpython-310.pyc,,
+virtualenv/config/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+virtualenv/config/cli/__pycache__/__init__.cpython-310.pyc,,
+virtualenv/config/cli/__pycache__/parser.cpython-310.pyc,,
+virtualenv/config/cli/parser.py,sha256=AnRKDce3Yc7IJqgSS6K6EVMKR4ytR3KYf3Km3i-F1kg,4624
+virtualenv/config/convert.py,sha256=6N1xarym0Vr-SyEu_9G7Z8J7dTjAIZ3DTZUoFomdU3s,2751
+virtualenv/config/env_var.py,sha256=sEC6c3u63bEZ6AC218QgzgLlxMZggBoTAq9s5xnYm9U,748
+virtualenv/config/ini.py,sha256=xewh4RqbZA8mZib4ROwQgLgeUZCGwKJH_ayx4KuKPcs,2669
+virtualenv/create/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+virtualenv/create/__pycache__/__init__.cpython-310.pyc,,
+virtualenv/create/__pycache__/creator.cpython-310.pyc,,
+virtualenv/create/__pycache__/debug.cpython-310.pyc,,
+virtualenv/create/__pycache__/describe.cpython-310.pyc,,
+virtualenv/create/__pycache__/pyenv_cfg.cpython-310.pyc,,
+virtualenv/create/creator.py,sha256=9_-ZpuvUpRvYE2pNmWp_QiYm_iZkOMSZ3PMELwPDrbw,8511
+virtualenv/create/debug.py,sha256=einYMOkMScWa8zJrb5K4qJosCIOdcpPWggfrkANlZk4,3054
+virtualenv/create/describe.py,sha256=5uk2dHCFzyV-tHcaoh55sHIzzrY3e3iafj5VUVWHEuc,3219
+virtualenv/create/pyenv_cfg.py,sha256=ovdWU9riedJ0eJl_t3lCdU6yovPILOi3HjQ6gdpYl5U,1667
+virtualenv/create/via_global_ref/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+virtualenv/create/via_global_ref/__pycache__/__init__.cpython-310.pyc,,
+virtualenv/create/via_global_ref/__pycache__/_virtualenv.cpython-310.pyc,,
+virtualenv/create/via_global_ref/__pycache__/api.cpython-310.pyc,,
+virtualenv/create/via_global_ref/__pycache__/store.cpython-310.pyc,,
+virtualenv/create/via_global_ref/__pycache__/venv.cpython-310.pyc,,
+virtualenv/create/via_global_ref/_virtualenv.py,sha256=CuuGVGNFwdD62gl1NuNVJXFNXNvMLY6z-ZEqlVBE_P8,4329
+virtualenv/create/via_global_ref/api.py,sha256=Ew5Urd_wLRPgvuItqH5jiZ0aDeSuERy6M-1cQLw70Ao,4270
+virtualenv/create/via_global_ref/builtin/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+virtualenv/create/via_global_ref/builtin/__pycache__/__init__.cpython-310.pyc,,
+virtualenv/create/via_global_ref/builtin/__pycache__/builtin_way.cpython-310.pyc,,
+virtualenv/create/via_global_ref/builtin/__pycache__/ref.cpython-310.pyc,,
+virtualenv/create/via_global_ref/builtin/__pycache__/via_global_self_do.cpython-310.pyc,,
+virtualenv/create/via_global_ref/builtin/builtin_way.py,sha256=hIdHtBC47jKW8z2FWcT-8bGmYILyTqnEqbDP30UzWyw,538
+virtualenv/create/via_global_ref/builtin/cpython/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+virtualenv/create/via_global_ref/builtin/cpython/__pycache__/__init__.cpython-310.pyc,,
+virtualenv/create/via_global_ref/builtin/cpython/__pycache__/common.cpython-310.pyc,,
+virtualenv/create/via_global_ref/builtin/cpython/__pycache__/cpython3.cpython-310.pyc,,
+virtualenv/create/via_global_ref/builtin/cpython/__pycache__/mac_os.cpython-310.pyc,,
+virtualenv/create/via_global_ref/builtin/cpython/common.py,sha256=ESoEiGUgW-yQPiyiyW9SqNVdp4gP0y5OQ66HiaRR9cM,2225
+virtualenv/create/via_global_ref/builtin/cpython/cpython3.py,sha256=Mxp2K8196haxc4KMJ5Sgd1whpkWF-7TsxHzyZOy8LYk,4700
+virtualenv/create/via_global_ref/builtin/cpython/mac_os.py,sha256=yjoj0Fe-0sXkA80Vma6oQrS3J2dlWsp9zSEkAY30dfk,11086
+virtualenv/create/via_global_ref/builtin/pypy/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+virtualenv/create/via_global_ref/builtin/pypy/__pycache__/__init__.cpython-310.pyc,,
+virtualenv/create/via_global_ref/builtin/pypy/__pycache__/common.cpython-310.pyc,,
+virtualenv/create/via_global_ref/builtin/pypy/__pycache__/pypy3.cpython-310.pyc,,
+virtualenv/create/via_global_ref/builtin/pypy/common.py,sha256=owmG6VjR9NNzjHeMNP9mLfPycfFRzKnM3MebqundUkc,1730
+virtualenv/create/via_global_ref/builtin/pypy/pypy3.py,sha256=xFjq9rAYED4_k2xgje8HTgOIcePTbC26HCSUtJ2zK7g,2508
+virtualenv/create/via_global_ref/builtin/ref.py,sha256=H2GZZ8FE0inQ9XIwDWk03AWfmkVqbacVrJ2uvs-8ls8,5482
+virtualenv/create/via_global_ref/builtin/via_global_self_do.py,sha256=GLvC_tQD6ZL08R7E9CI7eQN34GMtQms0Ma3Evu38pQM,4448
+virtualenv/create/via_global_ref/store.py,sha256=OVRyaMUBT7Kh_sEy02m5VsHvh0tpxynpKxLjXDDXb1c,667
+virtualenv/create/via_global_ref/venv.py,sha256=o8LORDTjUilKS_ATdcwOSS4mxfpns5Nhh1BulBeTyJo,3475
+virtualenv/discovery/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+virtualenv/discovery/__pycache__/__init__.cpython-310.pyc,,
+virtualenv/discovery/__pycache__/builtin.cpython-310.pyc,,
+virtualenv/discovery/__pycache__/cached_py_info.cpython-310.pyc,,
+virtualenv/discovery/__pycache__/discover.cpython-310.pyc,,
+virtualenv/discovery/__pycache__/py_info.cpython-310.pyc,,
+virtualenv/discovery/__pycache__/py_spec.cpython-310.pyc,,
+virtualenv/discovery/builtin.py,sha256=QgyYGUx82-aL_gter2qnYkj7I_e0EAGzEvm0VtZMGRk,6196
+virtualenv/discovery/cached_py_info.py,sha256=9KJGgN1LfwhGHUOddaL0kYKrbBIu0qyg0n_gtbPCNHM,6504
+virtualenv/discovery/discover.py,sha256=yM_yS8dyFh-CjdErT5JLLanvzPqu6ae7VxHG3n8Fe3U,1191
+virtualenv/discovery/py_info.py,sha256=99Wu0z7AB3vMWbWvSO4RawxdMsbsjHT2J8fNGgS7YMA,25407
+virtualenv/discovery/py_spec.py,sha256=r6gp2IgDCi1FVIypC6KA1y2KKkXWZw3AVowvd1JEQPE,4687
+virtualenv/discovery/windows/__init__.py,sha256=_9NwLhvYvPm2lj-EPUx4-Xtbdj9-buw1AXwC8VynYzA,1855
+virtualenv/discovery/windows/__pycache__/__init__.cpython-310.pyc,,
+virtualenv/discovery/windows/__pycache__/pep514.cpython-310.pyc,,
+virtualenv/discovery/windows/pep514.py,sha256=-1rM_ltsUVk5IkAtBfO78Wjw1rCxlLE07tBc4YQt1lo,5056
+virtualenv/info.py,sha256=JXIBvHzhnv27wK16kbCrE4PcIy3ffqKvJSu-E6cagtg,1866
+virtualenv/report.py,sha256=q5bbS1IZKL8rHlwg9DdUEZazQox2zFWPqHUWv8k4vJs,1373
+virtualenv/run/__init__.py,sha256=LgFwsElY9_LkIVu_41C9HReaw_FIQbpF4FCFQBN4pwk,6227
+virtualenv/run/__pycache__/__init__.cpython-310.pyc,,
+virtualenv/run/__pycache__/session.cpython-310.pyc,,
+virtualenv/run/plugin/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+virtualenv/run/plugin/__pycache__/__init__.cpython-310.pyc,,
+virtualenv/run/plugin/__pycache__/activators.cpython-310.pyc,,
+virtualenv/run/plugin/__pycache__/base.cpython-310.pyc,,
+virtualenv/run/plugin/__pycache__/creators.cpython-310.pyc,,
+virtualenv/run/plugin/__pycache__/discovery.cpython-310.pyc,,
+virtualenv/run/plugin/__pycache__/seeders.cpython-310.pyc,,
+virtualenv/run/plugin/activators.py,sha256=rNo8gEM7Tqk0NFV8RWX8RlrZcmHc13-8R7WF8pO2bLU,2235
+virtualenv/run/plugin/base.py,sha256=fPgs0fkWJOvfsEXY7xztf7IGfM7fdB8B-q8J-FuwmVg,2349
+virtualenv/run/plugin/creators.py,sha256=OZuV-hLEFPC-e_rFkI6xf_OW9pgrzFUq2gCKhyEw5aQ,3439
+virtualenv/run/plugin/discovery.py,sha256=LFzHoAaii17msNp8FkOiz27wYq29hYL5n9IviS1D6BQ,1180
+virtualenv/run/plugin/seeders.py,sha256=up_Ai5SsBzqSGfWonlQMgiIKs8NJon6WtxI8CINx1vk,1050
+virtualenv/run/session.py,sha256=ZAWRTo-GXs9JS6hZbzpR4bZJuEH8YkWa7Sg0XCXGjS0,2454
+virtualenv/seed/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+virtualenv/seed/__pycache__/__init__.cpython-310.pyc,,
+virtualenv/seed/__pycache__/seeder.cpython-310.pyc,,
+virtualenv/seed/embed/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+virtualenv/seed/embed/__pycache__/__init__.cpython-310.pyc,,
+virtualenv/seed/embed/__pycache__/base_embed.cpython-310.pyc,,
+virtualenv/seed/embed/__pycache__/pip_invoke.cpython-310.pyc,,
+virtualenv/seed/embed/base_embed.py,sha256=x-E0rrihqS1GfddP-A0SwvUt83mCXaiqySTGjXQ1fN4,4237
+virtualenv/seed/embed/pip_invoke.py,sha256=NXzRlGgnEhTWXzcMN7xDEo2YfSsO3Xy61ts0oyjioso,2214
+virtualenv/seed/embed/via_app_data/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+virtualenv/seed/embed/via_app_data/__pycache__/__init__.cpython-310.pyc,,
+virtualenv/seed/embed/via_app_data/__pycache__/via_app_data.cpython-310.pyc,,
+virtualenv/seed/embed/via_app_data/pip_install/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+virtualenv/seed/embed/via_app_data/pip_install/__pycache__/__init__.cpython-310.pyc,,
+virtualenv/seed/embed/via_app_data/pip_install/__pycache__/base.cpython-310.pyc,,
+virtualenv/seed/embed/via_app_data/pip_install/__pycache__/copy.cpython-310.pyc,,
+virtualenv/seed/embed/via_app_data/pip_install/__pycache__/symlink.cpython-310.pyc,,
+virtualenv/seed/embed/via_app_data/pip_install/base.py,sha256=1kUSWLxqzJrDefyHk4r7fnSxsi2qcuptLgn4ZoqERGc,8319
+virtualenv/seed/embed/via_app_data/pip_install/copy.py,sha256=5uifZ3wkCjtuu7hJ2QfNtjr5ZDHGmWvWv_c9uQntuiE,1226
+virtualenv/seed/embed/via_app_data/pip_install/symlink.py,sha256=-cZU2KDMKrZ58XTJRi1XuXD3JS6SSAFU9k7q0booC8s,2084
+virtualenv/seed/embed/via_app_data/via_app_data.py,sha256=zC-FUMFHH0IUzrHYbt-O6Q3CwByvoqaMGN9FKc8BKMU,5875
+virtualenv/seed/seeder.py,sha256=pT83wA9x13LfVcMl-ZdY3rfyqGGnp1MJbrn8JTpZB84,1173
+virtualenv/seed/wheels/__init__.py,sha256=6KAF-mAU_DZLv338qt9CIewI1_dX-YXRuUBqcKFY_Xk,204
+virtualenv/seed/wheels/__pycache__/__init__.cpython-310.pyc,,
+virtualenv/seed/wheels/__pycache__/acquire.cpython-310.pyc,,
+virtualenv/seed/wheels/__pycache__/bundle.cpython-310.pyc,,
+virtualenv/seed/wheels/__pycache__/periodic_update.cpython-310.pyc,,
+virtualenv/seed/wheels/__pycache__/util.cpython-310.pyc,,
+virtualenv/seed/wheels/acquire.py,sha256=ajwzKw6FDL0pvvyAJagI8pwn7ZrZAXinC64kAvB5X88,4566
+virtualenv/seed/wheels/bundle.py,sha256=czWXDunZY5uzQcpSNO7cT9q2NxZPNRKt9ha_z7eosys,1863
+virtualenv/seed/wheels/embed/__init__.py,sha256=8Y_iLt86hm7ntPLm2vi-OTGaDavfxhNfM56e5yv2F3A,1538
+virtualenv/seed/wheels/embed/__pycache__/__init__.cpython-310.pyc,,
+virtualenv/seed/wheels/embed/pip-23.2.1-py3-none-any.whl,sha256=fM9HI0XyDTW9ydGEH_XzEyYMLDP-QX9IwwrEbMyr9b4,2086091
+virtualenv/seed/wheels/embed/setuptools-68.0.0-py3-none-any.whl,sha256=EeUsZ0FaOB0Q1rRiztnPuXBmF58OhxOZ4AbEqxAfyF8,804037
+virtualenv/seed/wheels/embed/wheel-0.41.1-py3-none-any.whl,sha256=RzIZvUy-3GLOoMswkIm1k-R8FcSiUxAV-U5OO5oPaYE,64755
+virtualenv/seed/wheels/periodic_update.py,sha256=AC9R5nre6XABcrW3lh2UMUipjRyHeLO_XyfY3ELMnyA,15527
+virtualenv/seed/wheels/util.py,sha256=vz_9r3Ik9_QwoVAk_HRjYAIBhgJcyVyvjOXFUCZYW7g,3962
+virtualenv/util/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+virtualenv/util/__pycache__/__init__.cpython-310.pyc,,
+virtualenv/util/__pycache__/error.cpython-310.pyc,,
+virtualenv/util/__pycache__/lock.cpython-310.pyc,,
+virtualenv/util/__pycache__/zipapp.cpython-310.pyc,,
+virtualenv/util/error.py,sha256=eYsT9f-fA5cr2R5oScuBvUe9xK8joS0ghM7OMfZ2Ot8,324
+virtualenv/util/lock.py,sha256=MCEmzAX2FlZtMH51V9R01fKe85soPB_ke0zYL9Fm0w8,4833
+virtualenv/util/path/__init__.py,sha256=xhBbGXTUbBkgkR-epiviB1myE6nOx3p94ZndET9wGeI,340
+virtualenv/util/path/__pycache__/__init__.cpython-310.pyc,,
+virtualenv/util/path/__pycache__/_permission.cpython-310.pyc,,
+virtualenv/util/path/__pycache__/_sync.cpython-310.pyc,,
+virtualenv/util/path/__pycache__/_win.cpython-310.pyc,,
+virtualenv/util/path/_permission.py,sha256=jn093j12xRqwbiZjUHUjtu-JEHcSdqafLyBNB-4Haes,682
+virtualenv/util/path/_sync.py,sha256=_aSv6ZRFpOeZU-EdM91wnx3ergBN9Slhw7HQExzD2zU,2071
+virtualenv/util/path/_win.py,sha256=LCr15DR9_qAlptytpdZx6DzWocVOMm-73TDBPjPwNqQ,773
+virtualenv/util/subprocess/__init__.py,sha256=dAH--gH6qwpG67nkp0s5Lm1GWC0aLVNDyaKiafNnjws,749
+virtualenv/util/subprocess/__pycache__/__init__.cpython-310.pyc,,
+virtualenv/util/zipapp.py,sha256=Mc0qbizaNBH0Gomi25xWx8zEelvZRcIE3aTUOUsOR3I,1001
+virtualenv/version.py,sha256=7-YdrH4WBwhJsRd_iCmzCJRH_XW4sATRtxOXAGc_-8w,164
diff --git a/venv/lib/python3.10/site-packages/virtualenv-20.24.3.dist-info/WHEEL b/venv/lib/python3.10/site-packages/virtualenv-20.24.3.dist-info/WHEEL
new file mode 100644
index 0000000..ba1a8af
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv-20.24.3.dist-info/WHEEL
@@ -0,0 +1,4 @@
+Wheel-Version: 1.0
+Generator: hatchling 1.18.0
+Root-Is-Purelib: true
+Tag: py3-none-any
diff --git a/venv/lib/python3.10/site-packages/virtualenv-20.24.3.dist-info/entry_points.txt b/venv/lib/python3.10/site-packages/virtualenv-20.24.3.dist-info/entry_points.txt
new file mode 100644
index 0000000..807d9ba
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv-20.24.3.dist-info/entry_points.txt
@@ -0,0 +1,26 @@
+[console_scripts]
+virtualenv = virtualenv.__main__:run_with_catch
+
+[virtualenv.activate]
+bash = virtualenv.activation.bash:BashActivator
+batch = virtualenv.activation.batch:BatchActivator
+cshell = virtualenv.activation.cshell:CShellActivator
+fish = virtualenv.activation.fish:FishActivator
+nushell = virtualenv.activation.nushell:NushellActivator
+powershell = virtualenv.activation.powershell:PowerShellActivator
+python = virtualenv.activation.python:PythonActivator
+
+[virtualenv.create]
+cpython3-mac-framework = virtualenv.create.via_global_ref.builtin.cpython.mac_os:CPython3macOsFramework
+cpython3-posix = virtualenv.create.via_global_ref.builtin.cpython.cpython3:CPython3Posix
+cpython3-win = virtualenv.create.via_global_ref.builtin.cpython.cpython3:CPython3Windows
+pypy3-posix = virtualenv.create.via_global_ref.builtin.pypy.pypy3:PyPy3Posix
+pypy3-win = virtualenv.create.via_global_ref.builtin.pypy.pypy3:Pypy3Windows
+venv = virtualenv.create.via_global_ref.venv:Venv
+
+[virtualenv.discovery]
+builtin = virtualenv.discovery.builtin:Builtin
+
+[virtualenv.seed]
+app-data = virtualenv.seed.embed.via_app_data.via_app_data:FromAppData
+pip = virtualenv.seed.embed.pip_invoke:PipInvoke
diff --git a/venv/lib/python3.10/site-packages/pip-22.3.1.dist-info/LICENSE.txt b/venv/lib/python3.10/site-packages/virtualenv-20.24.3.dist-info/licenses/LICENSE
similarity index 93%
rename from venv/lib/python3.10/site-packages/pip-22.3.1.dist-info/LICENSE.txt
rename to venv/lib/python3.10/site-packages/virtualenv-20.24.3.dist-info/licenses/LICENSE
index 8e7b65e..be9700d 100644
--- a/venv/lib/python3.10/site-packages/pip-22.3.1.dist-info/LICENSE.txt
+++ b/venv/lib/python3.10/site-packages/virtualenv-20.24.3.dist-info/licenses/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2008-present The pip developers (see AUTHORS.txt file)
+Copyright (c) 2020-202x The virtualenv developers
 
 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
diff --git a/venv/lib/python3.10/site-packages/virtualenv/__init__.py b/venv/lib/python3.10/site-packages/virtualenv/__init__.py
new file mode 100644
index 0000000..cc11e7f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/__init__.py
@@ -0,0 +1,10 @@
+from __future__ import annotations
+
+from .run import cli_run, session_via_cli
+from .version import __version__
+
+__all__ = [
+    "__version__",
+    "cli_run",
+    "session_via_cli",
+]
diff --git a/venv/lib/python3.10/site-packages/virtualenv/__main__.py b/venv/lib/python3.10/site-packages/virtualenv/__main__.py
new file mode 100644
index 0000000..ee7341b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/__main__.py
@@ -0,0 +1,70 @@
+from __future__ import annotations
+
+import logging
+import os
+import sys
+from timeit import default_timer
+
+
+def run(args=None, options=None, env=None):
+    env = os.environ if env is None else env
+    start = default_timer()
+    from virtualenv.run import cli_run
+    from virtualenv.util.error import ProcessCallFailedError
+
+    if args is None:
+        args = sys.argv[1:]
+    try:
+        session = cli_run(args, options, env)
+        logging.warning(LogSession(session, start))
+    except ProcessCallFailedError as exception:
+        print(f"subprocess call failed for {exception.cmd} with code {exception.code}")  # noqa: T201
+        print(exception.out, file=sys.stdout, end="")  # noqa: T201
+        print(exception.err, file=sys.stderr, end="")  # noqa: T201
+        raise SystemExit(exception.code)  # noqa: TRY200, B904
+
+
+class LogSession:
+    def __init__(self, session, start) -> None:
+        self.session = session
+        self.start = start
+
+    def __str__(self) -> str:
+        spec = self.session.creator.interpreter.spec
+        elapsed = (default_timer() - self.start) * 1000
+        lines = [
+            f"created virtual environment {spec} in {elapsed:.0f}ms",
+            f"  creator {self.session.creator!s}",
+        ]
+        if self.session.seeder.enabled:
+            lines.append(f"  seeder {self.session.seeder!s}")
+            path = self.session.creator.purelib.iterdir()
+            packages = sorted("==".join(i.stem.split("-")) for i in path if i.suffix == ".dist-info")
+            lines.append(f"    added seed packages: {', '.join(packages)}")
+
+        if self.session.activators:
+            lines.append(f"  activators {','.join(i.__class__.__name__ for i in self.session.activators)}")
+        return "\n".join(lines)
+
+
+def run_with_catch(args=None, env=None):
+    from virtualenv.config.cli.parser import VirtualEnvOptions
+
+    env = os.environ if env is None else env
+    options = VirtualEnvOptions()
+    try:
+        run(args, options, env)
+    except (KeyboardInterrupt, SystemExit, Exception) as exception:
+        try:
+            if getattr(options, "with_traceback", False):
+                raise
+            if not (isinstance(exception, SystemExit) and exception.code == 0):
+                logging.error("%s: %s", type(exception).__name__, exception)  # noqa: TRY400
+            code = exception.code if isinstance(exception, SystemExit) else 1
+            sys.exit(code)
+        finally:
+            logging.shutdown()  # force flush of log messages before the trace is printed
+
+
+if __name__ == "__main__":  # pragma: no cov
+    run_with_catch()  # pragma: no cov
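+
+# (Editor's example) ``python -m virtualenv venv`` logs output shaped by
+# LogSession above, e.g.:
+#   created virtual environment CPython3.10.12.final.0-64 in 150ms
+#     creator CPython3Posix(dest=/tmp/venv, clear=False, ...)
+#     seeder FromAppData(download=False, pip=bundle, ...)
+#       added seed packages: pip==23.2.1, setuptools==68.0.0, wheel==0.41.1
+#     activators BashActivator,CShellActivator,FishActivator,...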
diff --git a/venv/lib/python3.10/site-packages/virtualenv/activation/__init__.py b/venv/lib/python3.10/site-packages/virtualenv/activation/__init__.py
new file mode 100644
index 0000000..f6cf756
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/activation/__init__.py
@@ -0,0 +1,19 @@
+from __future__ import annotations
+
+from .bash import BashActivator
+from .batch import BatchActivator
+from .cshell import CShellActivator
+from .fish import FishActivator
+from .nushell import NushellActivator
+from .powershell import PowerShellActivator
+from .python import PythonActivator
+
+__all__ = [
+    "BashActivator",
+    "PowerShellActivator",
+    "CShellActivator",
+    "PythonActivator",
+    "BatchActivator",
+    "FishActivator",
+    "NushellActivator",
+]
diff --git a/venv/lib/python3.10/site-packages/virtualenv/activation/activator.py b/venv/lib/python3.10/site-packages/virtualenv/activation/activator.py
new file mode 100644
index 0000000..7d41e8a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/activation/activator.py
@@ -0,0 +1,50 @@
+from __future__ import annotations
+
+import os
+from abc import ABCMeta, abstractmethod
+
+
+class Activator(metaclass=ABCMeta):
+    """Generates activate script for the virtual environment."""
+
+    def __init__(self, options) -> None:
+        """
+        Create a new activator generator.
+
+        :param options: the parsed options as defined within :meth:`add_parser_arguments`
+        """
+        self.flag_prompt = os.path.basename(os.getcwd()) if options.prompt == "." else options.prompt
+
+    @classmethod
+    def supports(cls, interpreter):  # noqa: ARG003
+        """
+        Check if the activation script is supported in the given interpreter.
+
+        :param interpreter: the interpreter we need to support
+        :return: ``True`` if supported, ``False`` otherwise
+        """
+        return True
+
+    @classmethod  # noqa: B027
+    def add_parser_arguments(cls, parser, interpreter):
+        """
+        Add CLI arguments for this activation script.
+
+        :param parser: the CLI parser
+        :param interpreter: the interpreter this virtual environment is based on
+        """
+
+    @abstractmethod
+    def generate(self, creator):
+        """
+        Generate activate script for the given creator.
+
+        :param creator: the creator (based on :class:`virtualenv.create.creator.Creator`) we used to create this \
+        virtual environment
+        """
+        raise NotImplementedError
+
+
+__all__ = [
+    "Activator",
+]
diff --git a/venv/lib/python3.10/site-packages/virtualenv/activation/bash/__init__.py b/venv/lib/python3.10/site-packages/virtualenv/activation/bash/__init__.py
new file mode 100644
index 0000000..5e095dd
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/activation/bash/__init__.py
@@ -0,0 +1,18 @@
+from __future__ import annotations
+
+from pathlib import Path
+
+from virtualenv.activation.via_template import ViaTemplateActivator
+
+
+class BashActivator(ViaTemplateActivator):
+    def templates(self):
+        yield "activate.sh"
+
+    def as_name(self, template):
+        return Path(template).stem
+
+
+__all__ = [
+    "BashActivator",
+]
diff --git a/venv/lib/python3.10/site-packages/virtualenv/activation/bash/activate.sh b/venv/lib/python3.10/site-packages/virtualenv/activation/bash/activate.sh
new file mode 100644
index 0000000..b06e3fd
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/activation/bash/activate.sh
@@ -0,0 +1,87 @@
+# This file must be used with "source bin/activate" *from bash*
+# you cannot run it directly
+
+
+if [ "${BASH_SOURCE-}" = "$0" ]; then
+    echo "You must source this script: \$ source $0" >&2
+    exit 33
+fi
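+
+# (Editor's example) typical usage:
+#   $ source venv/bin/activate
+#   (venv) $ ...
+#   (venv) $ deactivate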
+
+deactivate () {
+    unset -f pydoc >/dev/null 2>&1 || true
+
+    # reset old environment variables
+    # ! [ -z ${VAR+_} ] returns true if VAR is declared at all
+    if ! [ -z "${_OLD_VIRTUAL_PATH:+_}" ] ; then
+        PATH="$_OLD_VIRTUAL_PATH"
+        export PATH
+        unset _OLD_VIRTUAL_PATH
+    fi
+    if ! [ -z "${_OLD_VIRTUAL_PYTHONHOME+_}" ] ; then
+        PYTHONHOME="$_OLD_VIRTUAL_PYTHONHOME"
+        export PYTHONHOME
+        unset _OLD_VIRTUAL_PYTHONHOME
+    fi
+
+    # The hash command must be called to get it to forget past
+    # commands. Without forgetting past commands the $PATH changes
+    # we made may not be respected
+    hash -r 2>/dev/null
+
+    if ! [ -z "${_OLD_VIRTUAL_PS1+_}" ] ; then
+        PS1="$_OLD_VIRTUAL_PS1"
+        export PS1
+        unset _OLD_VIRTUAL_PS1
+    fi
+
+    unset VIRTUAL_ENV
+    unset VIRTUAL_ENV_PROMPT
+    if [ ! "${1-}" = "nondestructive" ] ; then
+    # Self destruct!
+        unset -f deactivate
+    fi
+}
+
+# unset irrelevant variables
+deactivate nondestructive
+
+VIRTUAL_ENV='__VIRTUAL_ENV__'
+if ([ "$OSTYPE" = "cygwin" ] || [ "$OSTYPE" = "msys" ]) && $(command -v cygpath &> /dev/null) ; then
+    VIRTUAL_ENV=$(cygpath -u "$VIRTUAL_ENV")
+fi
+export VIRTUAL_ENV
+
+_OLD_VIRTUAL_PATH="$PATH"
+PATH="$VIRTUAL_ENV/__BIN_NAME__:$PATH"
+export PATH
+
+if [ "x__VIRTUAL_PROMPT__" != x ] ; then
+    VIRTUAL_ENV_PROMPT="__VIRTUAL_PROMPT__"
+else
+    VIRTUAL_ENV_PROMPT=$(basename "$VIRTUAL_ENV")
+fi
+export VIRTUAL_ENV_PROMPT
+
+# unset PYTHONHOME if set
+if ! [ -z "${PYTHONHOME+_}" ] ; then
+    _OLD_VIRTUAL_PYTHONHOME="$PYTHONHOME"
+    unset PYTHONHOME
+fi
+
+if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT-}" ] ; then
+    _OLD_VIRTUAL_PS1="${PS1-}"
+    PS1="(${VIRTUAL_ENV_PROMPT}) ${PS1-}"
+    export PS1
+fi
+
+# Make sure to unalias pydoc if it's already there
+alias pydoc 2>/dev/null >/dev/null && unalias pydoc || true
+
+pydoc () {
+    python -m pydoc "$@"
+}
+
+# The hash command must be called to get it to forget past
+# commands. Without forgetting past commands the $PATH changes
+# we made may not be respected
+hash -r 2>/dev/null
diff --git a/venv/lib/python3.10/site-packages/virtualenv/activation/batch/__init__.py b/venv/lib/python3.10/site-packages/virtualenv/activation/batch/__init__.py
new file mode 100644
index 0000000..a6d58eb
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/activation/batch/__init__.py
@@ -0,0 +1,26 @@
+from __future__ import annotations
+
+import os
+
+from virtualenv.activation.via_template import ViaTemplateActivator
+
+
+class BatchActivator(ViaTemplateActivator):
+    @classmethod
+    def supports(cls, interpreter):
+        return interpreter.os == "nt"
+
+    def templates(self):
+        yield "activate.bat"
+        yield "deactivate.bat"
+        yield "pydoc.bat"
+
+    def instantiate_template(self, replacements, template, creator):
+        # ensure the text has all newlines as \r\n - required by batch
+        base = super().instantiate_template(replacements, template, creator)
+        return base.replace(os.linesep, "\n").replace("\n", os.linesep)
+
+
+__all__ = [
+    "BatchActivator",
+]
diff --git a/venv/lib/python3.10/site-packages/virtualenv/activation/batch/activate.bat b/venv/lib/python3.10/site-packages/virtualenv/activation/batch/activate.bat
new file mode 100644
index 0000000..0bad4c1
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/activation/batch/activate.bat
@@ -0,0 +1,38 @@
+@set "VIRTUAL_ENV=__VIRTUAL_ENV__"
+
+@set "VIRTUAL_ENV_PROMPT=__VIRTUAL_PROMPT__"
+@if NOT DEFINED VIRTUAL_ENV_PROMPT (
+    @for %%d in ("%VIRTUAL_ENV%") do @set "VIRTUAL_ENV_PROMPT=%%~nxd"
+)
+
+@if defined _OLD_VIRTUAL_PROMPT (
+    @set "PROMPT=%_OLD_VIRTUAL_PROMPT%"
+) else (
+    @if not defined PROMPT (
+        @set "PROMPT=$P$G"
+    )
+    @if not defined VIRTUAL_ENV_DISABLE_PROMPT (
+        @set "_OLD_VIRTUAL_PROMPT=%PROMPT%"
+    )
+)
+@if not defined VIRTUAL_ENV_DISABLE_PROMPT (
+    @set "PROMPT=(%VIRTUAL_ENV_PROMPT%) %PROMPT%"
+)
+
+@REM Don't use () to avoid problems with them in %PATH%
+@if defined _OLD_VIRTUAL_PYTHONHOME @goto ENDIFVHOME
+    @set "_OLD_VIRTUAL_PYTHONHOME=%PYTHONHOME%"
+:ENDIFVHOME
+
+@set PYTHONHOME=
+
+@REM if defined _OLD_VIRTUAL_PATH (
+@if not defined _OLD_VIRTUAL_PATH @goto ENDIFVPATH1
+    @set "PATH=%_OLD_VIRTUAL_PATH%"
+:ENDIFVPATH1
+@REM ) else (
+@if defined _OLD_VIRTUAL_PATH @goto ENDIFVPATH2
+    @set "_OLD_VIRTUAL_PATH=%PATH%"
+:ENDIFVPATH2
+
+@set "PATH=%VIRTUAL_ENV%\__BIN_NAME__;%PATH%"
diff --git a/venv/lib/python3.10/site-packages/virtualenv/activation/batch/deactivate.bat b/venv/lib/python3.10/site-packages/virtualenv/activation/batch/deactivate.bat
new file mode 100644
index 0000000..8939c6c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/activation/batch/deactivate.bat
@@ -0,0 +1,18 @@
+@set VIRTUAL_ENV=
+@set VIRTUAL_ENV_PROMPT=
+
+@REM Don't use () to avoid problems with them in %PATH%
+@if not defined _OLD_VIRTUAL_PROMPT @goto ENDIFVPROMPT
+    @set "PROMPT=%_OLD_VIRTUAL_PROMPT%"
+    @set _OLD_VIRTUAL_PROMPT=
+:ENDIFVPROMPT
+
+@if not defined _OLD_VIRTUAL_PYTHONHOME @goto ENDIFVHOME
+    @set "PYTHONHOME=%_OLD_VIRTUAL_PYTHONHOME%"
+    @set _OLD_VIRTUAL_PYTHONHOME=
+:ENDIFVHOME
+
+@if not defined _OLD_VIRTUAL_PATH @goto ENDIFVPATH
+    @set "PATH=%_OLD_VIRTUAL_PATH%"
+    @set _OLD_VIRTUAL_PATH=
+:ENDIFVPATH
diff --git a/venv/lib/python3.10/site-packages/virtualenv/activation/batch/pydoc.bat b/venv/lib/python3.10/site-packages/virtualenv/activation/batch/pydoc.bat
new file mode 100644
index 0000000..3d46a23
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/activation/batch/pydoc.bat
@@ -0,0 +1 @@
+python.exe -m pydoc %*
diff --git a/venv/lib/python3.10/site-packages/virtualenv/activation/cshell/__init__.py b/venv/lib/python3.10/site-packages/virtualenv/activation/cshell/__init__.py
new file mode 100644
index 0000000..7001f99
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/activation/cshell/__init__.py
@@ -0,0 +1,17 @@
+from __future__ import annotations
+
+from virtualenv.activation.via_template import ViaTemplateActivator
+
+
+class CShellActivator(ViaTemplateActivator):
+    @classmethod
+    def supports(cls, interpreter):
+        return interpreter.os != "nt"
+
+    def templates(self):
+        yield "activate.csh"
+
+
+__all__ = [
+    "CShellActivator",
+]
diff --git a/venv/lib/python3.10/site-packages/virtualenv/activation/cshell/activate.csh b/venv/lib/python3.10/site-packages/virtualenv/activation/cshell/activate.csh
new file mode 100644
index 0000000..f0c9cca
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/activation/cshell/activate.csh
@@ -0,0 +1,55 @@
+# This file must be used with "source bin/activate.csh" *from csh*.
+# You cannot run it directly.
+# Created by Davide Di Blasi <davidedb@gmail.com>.
+
+set newline='\
+'
+
+alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH:q" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT:q" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; unsetenv VIRTUAL_ENV_PROMPT; test "\!:*" != "nondestructive" && unalias deactivate && unalias pydoc'
+
+# Unset irrelevant variables.
+deactivate nondestructive
+
+setenv VIRTUAL_ENV '__VIRTUAL_ENV__'
+
+set _OLD_VIRTUAL_PATH="$PATH:q"
+setenv PATH "$VIRTUAL_ENV:q/__BIN_NAME__:$PATH:q"
+
+
+
+if ('__VIRTUAL_PROMPT__' != "") then
+    setenv VIRTUAL_ENV_PROMPT '__VIRTUAL_PROMPT__'
+else
+    setenv VIRTUAL_ENV_PROMPT "$VIRTUAL_ENV:t:q"
+endif
+
+if ( $?VIRTUAL_ENV_DISABLE_PROMPT ) then
+    if ( $VIRTUAL_ENV_DISABLE_PROMPT == "" ) then
+        set do_prompt = "1"
+    else
+        set do_prompt = "0"
+    endif
+else
+    set do_prompt = "1"
+endif
+
+if ( $do_prompt == "1" ) then
+    # Could be in a non-interactive environment,
+    # in which case, $prompt is undefined and we wouldn't
+    # care about the prompt anyway.
+    if ( $?prompt ) then
+        set _OLD_VIRTUAL_PROMPT="$prompt:q"
+        if ( "$prompt:q" =~ *"$newline:q"* ) then
+            :
+        else
+            set prompt = '('"$VIRTUAL_ENV_PROMPT:q"') '"$prompt:q"
+        endif
+    endif
+endif
+
+unset env_name
+unset do_prompt
+
+alias pydoc python -m pydoc
+
+rehash
diff --git a/venv/lib/python3.10/site-packages/virtualenv/activation/fish/__init__.py b/venv/lib/python3.10/site-packages/virtualenv/activation/fish/__init__.py
new file mode 100644
index 0000000..57f790f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/activation/fish/__init__.py
@@ -0,0 +1,13 @@
+from __future__ import annotations
+
+from virtualenv.activation.via_template import ViaTemplateActivator
+
+
+class FishActivator(ViaTemplateActivator):
+    def templates(self):
+        yield "activate.fish"
+
+
+__all__ = [
+    "FishActivator",
+]
diff --git a/venv/lib/python3.10/site-packages/virtualenv/activation/fish/activate.fish b/venv/lib/python3.10/site-packages/virtualenv/activation/fish/activate.fish
new file mode 100644
index 0000000..fcedde4
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/activation/fish/activate.fish
@@ -0,0 +1,103 @@
+# This file must be sourced with `source bin/activate.fish` *within a running fish ( http://fishshell.com ) session*.
+# Do not run it directly.
+
+function _bashify_path -d "Converts a fish path to something bash can recognize"
+    set fishy_path $argv
+    set bashy_path $fishy_path[1]
+    for path_part in $fishy_path[2..-1]
+        set bashy_path "$bashy_path:$path_part"
+    end
+    echo $bashy_path
+end
+
+function _fishify_path -d "Converts a bash path to something fish can recognize"
+    echo $argv | tr ':' '\n'
+end
+
+function deactivate -d 'Exit virtualenv mode and return to the normal environment.'
+    # reset old environment variables
+    if test -n "$_OLD_VIRTUAL_PATH"
+        # https://github.com/fish-shell/fish-shell/issues/436 altered PATH handling
+        if test (echo $FISH_VERSION | head -c 1) -lt 3
+            set -gx PATH (_fishify_path "$_OLD_VIRTUAL_PATH")
+        else
+            set -gx PATH $_OLD_VIRTUAL_PATH
+        end
+        set -e _OLD_VIRTUAL_PATH
+    end
+
+    if test -n "$_OLD_VIRTUAL_PYTHONHOME"
+        set -gx PYTHONHOME "$_OLD_VIRTUAL_PYTHONHOME"
+        set -e _OLD_VIRTUAL_PYTHONHOME
+    end
+
+    if test -n "$_OLD_FISH_PROMPT_OVERRIDE"
+       and functions -q _old_fish_prompt
+        # Set an empty local `$fish_function_path` to allow the removal of `fish_prompt` using `functions -e`.
+        set -l fish_function_path
+
+        # Erase virtualenv's `fish_prompt` and restore the original.
+        functions -e fish_prompt
+        functions -c _old_fish_prompt fish_prompt
+        functions -e _old_fish_prompt
+        set -e _OLD_FISH_PROMPT_OVERRIDE
+    end
+
+    set -e VIRTUAL_ENV
+    set -e VIRTUAL_ENV_PROMPT
+
+    if test "$argv[1]" != 'nondestructive'
+        # Self-destruct!
+        functions -e pydoc
+        functions -e deactivate
+        functions -e _bashify_path
+        functions -e _fishify_path
+    end
+end
+
+# Unset irrelevant variables.
+deactivate nondestructive
+
+set -gx VIRTUAL_ENV '__VIRTUAL_ENV__'
+
+# https://github.com/fish-shell/fish-shell/issues/436 altered PATH handling
+if test (echo $FISH_VERSION | head -c 1) -lt 3
+   set -gx _OLD_VIRTUAL_PATH (_bashify_path $PATH)
+else
+    set -gx _OLD_VIRTUAL_PATH $PATH
+end
+set -gx PATH "$VIRTUAL_ENV"'/__BIN_NAME__' $PATH
+
+# Prompt override provided?
+# If not, just use the environment name.
+if test -n '__VIRTUAL_PROMPT__'
+    set -gx VIRTUAL_ENV_PROMPT '__VIRTUAL_PROMPT__'
+else
+    set -gx VIRTUAL_ENV_PROMPT (basename "$VIRTUAL_ENV")
+end
+
+# Unset `$PYTHONHOME` if set.
+if set -q PYTHONHOME
+    set -gx _OLD_VIRTUAL_PYTHONHOME $PYTHONHOME
+    set -e PYTHONHOME
+end
+
+function pydoc
+    python -m pydoc $argv
+end
+
+if test -z "$VIRTUAL_ENV_DISABLE_PROMPT"
+    # Copy the current `fish_prompt` function as `_old_fish_prompt`.
+    functions -c fish_prompt _old_fish_prompt
+
+    function fish_prompt
+        # Run the user's prompt first; it might depend on (pipe)status.
+        set -l prompt (_old_fish_prompt)
+
+        printf '(%s) ' $VIRTUAL_ENV_PROMPT
+
+        string join -- \n $prompt # handle multi-line prompts
+    end
+
+    set -gx _OLD_FISH_PROMPT_OVERRIDE "$VIRTUAL_ENV"
+end
diff --git a/venv/lib/python3.10/site-packages/virtualenv/activation/nushell/__init__.py b/venv/lib/python3.10/site-packages/virtualenv/activation/nushell/__init__.py
new file mode 100644
index 0000000..68cd4a3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/activation/nushell/__init__.py
@@ -0,0 +1,21 @@
+from __future__ import annotations
+
+from virtualenv.activation.via_template import ViaTemplateActivator
+
+
+class NushellActivator(ViaTemplateActivator):
+    def templates(self):
+        yield "activate.nu"
+
+    def replacements(self, creator, dest_folder):  # noqa: ARG002
+        return {
+            "__VIRTUAL_PROMPT__": "" if self.flag_prompt is None else self.flag_prompt,
+            "__VIRTUAL_ENV__": str(creator.dest),
+            "__VIRTUAL_NAME__": creator.env_name,
+            "__BIN_NAME__": str(creator.bin_dir.relative_to(creator.dest)),
+        }
+
+
+__all__ = [
+    "NushellActivator",
+]
diff --git a/venv/lib/python3.10/site-packages/virtualenv/activation/nushell/activate.nu b/venv/lib/python3.10/site-packages/virtualenv/activation/nushell/activate.nu
new file mode 100644
index 0000000..19d4fa1
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/activation/nushell/activate.nu
@@ -0,0 +1,96 @@
+# virtualenv activation module
+# Activate with `overlay use activate.nu`
+# Deactivate with `deactivate`, as usual
+#
+# To customize the overlay name, you can call `overlay use activate.nu as foo`,
+# but then simply `deactivate` won't work because it is just an alias to hide
+# the "activate" overlay. You'd need to call `overlay hide foo` manually.
+
+export-env {
+    def is-string [x] {
+        ($x | describe) == 'string'
+    }
+
+    def has-env [...names] {
+        $names | each {|n|
+            $n in $env
+        } | all {|i| $i == true}
+    }
+
+    # Emulates a `test -z`, but better, as it also handles e.g. 'false'
+    def is-env-true [name: string] {
+      if (has-env $name) {
+        # Try to parse 'true', '0', '1', and fail if not convertible
+        let parsed = (do -i { $env | get $name | into bool })
+        if ($parsed | describe) == 'bool' {
+          $parsed
+        } else {
+          not ($env | get -i $name | is-empty)
+        }
+      } else {
+        false
+      }
+    }
+
+    let virtual_env = '__VIRTUAL_ENV__'
+    let bin = '__BIN_NAME__'
+
+    let is_windows = ($nu.os-info.family) == 'windows'
+    let path_name = (if (has-env 'Path') {
+            'Path'
+        } else {
+            'PATH'
+        }
+    )
+
+    let venv_path = ([$virtual_env $bin] | path join)
+    let new_path = ($env | get $path_name | prepend $venv_path)
+
+    # If there is no default prompt, then use the env name instead
+    let virtual_env_prompt = (if ('__VIRTUAL_PROMPT__' | is-empty) {
+        ($virtual_env | path basename)
+    } else {
+        '__VIRTUAL_PROMPT__'
+    })
+
+    let new_env = {
+        $path_name         : $new_path
+        VIRTUAL_ENV        : $virtual_env
+        VIRTUAL_ENV_PROMPT : $virtual_env_prompt
+    }
+
+    let new_env = (if (is-env-true 'VIRTUAL_ENV_DISABLE_PROMPT') {
+      $new_env
+    } else {
+      # Creating the new prompt for the session
+      let virtual_prefix = $'(char lparen)($virtual_env_prompt)(char rparen) '
+
+      # Back up the old prompt builder
+      let old_prompt_command = (if (has-env 'PROMPT_COMMAND') {
+              $env.PROMPT_COMMAND
+          } else {
+              ''
+        })
+
+      let new_prompt = (if (has-env 'PROMPT_COMMAND') {
+          if 'closure' in ($old_prompt_command | describe) {
+              {|| $'($virtual_prefix)(do $old_prompt_command)' }
+          } else {
+              {|| $'($virtual_prefix)($old_prompt_command)' }
+          }
+      } else {
+          {|| $'($virtual_prefix)' }
+      })
+
+      $new_env | merge {
+        PROMPT_COMMAND      : $new_prompt
+        VIRTUAL_PREFIX      : $virtual_prefix
+      }
+    })
+
+    # Environment variables that will be loaded as the virtual env
+    load-env $new_env
+}
+
+export alias pydoc = python -m pydoc
+export alias deactivate = overlay hide activate
diff --git a/venv/lib/python3.10/site-packages/virtualenv/activation/powershell/__init__.py b/venv/lib/python3.10/site-packages/virtualenv/activation/powershell/__init__.py
new file mode 100644
index 0000000..1f6d0f4
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/activation/powershell/__init__.py
@@ -0,0 +1,13 @@
+from __future__ import annotations
+
+from virtualenv.activation.via_template import ViaTemplateActivator
+
+
+class PowerShellActivator(ViaTemplateActivator):
+    def templates(self):
+        yield "activate.ps1"
+
+
+__all__ = [
+    "PowerShellActivator",
+]
diff --git a/venv/bin/activate.ps1 b/venv/lib/python3.10/site-packages/virtualenv/activation/powershell/activate.ps1
similarity index 65%
rename from venv/bin/activate.ps1
rename to venv/lib/python3.10/site-packages/virtualenv/activation/powershell/activate.ps1
index 04c2472..5ccfe12 100644
--- a/venv/bin/activate.ps1
+++ b/venv/lib/python3.10/site-packages/virtualenv/activation/powershell/activate.ps1
@@ -16,6 +16,10 @@ function global:deactivate([switch] $NonDestructive) {
         Remove-Item env:VIRTUAL_ENV -ErrorAction SilentlyContinue
     }
 
+    if ($env:VIRTUAL_ENV_PROMPT) {
+        Remove-Item env:VIRTUAL_ENV_PROMPT -ErrorAction SilentlyContinue
+    }
+
     if (!$NonDestructive) {
         # Self destruct!
         Remove-Item function:deactivate
@@ -33,28 +37,25 @@ deactivate -nondestructive
 $VIRTUAL_ENV = $BASE_DIR
 $env:VIRTUAL_ENV = $VIRTUAL_ENV
 
+if ("__VIRTUAL_PROMPT__" -ne "") {
+    $env:VIRTUAL_ENV_PROMPT = "__VIRTUAL_PROMPT__"
+}
+else {
+    $env:VIRTUAL_ENV_PROMPT = $( Split-Path $env:VIRTUAL_ENV -Leaf )
+}
+
 New-Variable -Scope global -Name _OLD_VIRTUAL_PATH -Value $env:PATH
 
-$env:PATH = "$env:VIRTUAL_ENV/bin:" + $env:PATH
+$env:PATH = "$env:VIRTUAL_ENV/__BIN_NAME____PATH_SEP__" + $env:PATH
 if (!$env:VIRTUAL_ENV_DISABLE_PROMPT) {
     function global:_old_virtual_prompt {
         ""
     }
     $function:_old_virtual_prompt = $function:prompt
 
-    if ("" -ne "") {
-        function global:prompt {
-            # Add the custom prefix to the existing prompt
-            $previous_prompt_value = & $function:_old_virtual_prompt
-            ("() " + $previous_prompt_value)
-        }
-    }
-    else {
-        function global:prompt {
-            # Add a prefix to the current prompt, but don't discard it.
-            $previous_prompt_value = & $function:_old_virtual_prompt
-            $new_prompt_value = "($( Split-Path $env:VIRTUAL_ENV -Leaf )) "
-            ($new_prompt_value + $previous_prompt_value)
-        }
+    function global:prompt {
+        # Add the custom prefix to the existing prompt
+        $previous_prompt_value = & $function:_old_virtual_prompt
+        ("(" + $env:VIRTUAL_ENV_PROMPT + ") " + $previous_prompt_value)
     }
 }
diff --git a/venv/lib/python3.10/site-packages/virtualenv/activation/python/__init__.py b/venv/lib/python3.10/site-packages/virtualenv/activation/python/__init__.py
new file mode 100644
index 0000000..3126a39
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/activation/python/__init__.py
@@ -0,0 +1,28 @@
+from __future__ import annotations
+
+import os
+from collections import OrderedDict
+
+from virtualenv.activation.via_template import ViaTemplateActivator
+
+
+class PythonActivator(ViaTemplateActivator):
+    def templates(self):
+        yield "activate_this.py"
+
+    def replacements(self, creator, dest_folder):
+        replacements = super().replacements(creator, dest_folder)
+        lib_folders = OrderedDict((os.path.relpath(str(i), str(dest_folder)), None) for i in creator.libs)
+        lib_folders = os.pathsep.join(lib_folders.keys()).replace("\\", "\\\\")  # escape Windows path characters
+        replacements.update(
+            {
+                "__LIB_FOLDERS__": lib_folders,
+                "__DECODE_PATH__": "",
+            },
+        )
+        return replacements
+
+
+__all__ = [
+    "PythonActivator",
+]
diff --git a/venv/bin/activate_this.py b/venv/lib/python3.10/site-packages/virtualenv/activation/python/activate_this.py
similarity index 50%
rename from venv/bin/activate_this.py
rename to venv/lib/python3.10/site-packages/virtualenv/activation/python/activate_this.py
index ceaf6e8..33a3eae 100644
--- a/venv/bin/activate_this.py
+++ b/venv/lib/python3.10/site-packages/virtualenv/activation/python/activate_this.py
@@ -1,30 +1,35 @@
-"""Activate virtualenv for current interpreter:
+"""
+Activate virtualenv for current interpreter:
 
 Use exec(open(this_file).read(), {'__file__': this_file}).
 
 This can be used when you must use an existing Python interpreter, not the virtualenv bin/python.
-"""
+"""  # noqa: D415
+from __future__ import annotations
+
 import os
 import site
 import sys
 
 try:
     abs_file = os.path.abspath(__file__)
-except NameError:
-    raise AssertionError("You must use exec(open(this_file).read(), {'__file__': this_file}))")
+except NameError as exc:
+    msg = "You must use exec(open(this_file).read(), {'__file__': this_file}))"
+    raise AssertionError(msg) from exc
 
 bin_dir = os.path.dirname(abs_file)
-base = bin_dir[: -len("bin") - 1]  # strip away the bin part from the __file__, plus the path separator
+base = bin_dir[: -len("__BIN_NAME__") - 1]  # strip away the bin part from the __file__, plus the path separator
 
 # prepend bin to PATH (this file is inside the bin directory)
-os.environ["PATH"] = os.pathsep.join([bin_dir] + os.environ.get("PATH", "").split(os.pathsep))
+os.environ["PATH"] = os.pathsep.join([bin_dir, *os.environ.get("PATH", "").split(os.pathsep)])
 os.environ["VIRTUAL_ENV"] = base  # virtual env is right above bin directory
+os.environ["VIRTUAL_ENV_PROMPT"] = "__VIRTUAL_PROMPT__" or os.path.basename(base)  # noqa: SIM222
 
 # add the virtual environments libraries to the host python import mechanism
 prev_length = len(sys.path)
-for lib in "../lib/python3.10/site-packages".split(os.pathsep):
+for lib in "__LIB_FOLDERS__".split(os.pathsep):
     path = os.path.realpath(os.path.join(bin_dir, lib))
-    site.addsitedir(path.decode("utf-8") if "" else path)
+    site.addsitedir(path.decode("utf-8") if "__DECODE_PATH__" else path)
 sys.path[:] = sys.path[prev_length:] + sys.path[0:prev_length]
 
 sys.real_prefix = sys.prefix
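As the docstring at the top of this file says, the script is meant to be exec'd by an existing interpreter rather than run directly; a usage sketch (the path is illustrative):

```python
# Activate a virtualenv from an already-running interpreter, exactly as
# the module docstring above prescribes. The path is illustrative.
activate_this = "/path/to/venv/bin/activate_this.py"
with open(activate_this) as f:
    exec(f.read(), {"__file__": activate_this})
```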
diff --git a/venv/lib/python3.10/site-packages/virtualenv/activation/via_template.py b/venv/lib/python3.10/site-packages/virtualenv/activation/via_template.py
new file mode 100644
index 0000000..239318c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/activation/via_template.py
@@ -0,0 +1,74 @@
+from __future__ import annotations
+
+import os
+import sys
+from abc import ABCMeta, abstractmethod
+
+from .activator import Activator
+
+if sys.version_info >= (3, 10):
+    from importlib.resources import files
+
+    def read_binary(module_name: str, filename: str) -> bytes:
+        return (files(module_name) / filename).read_bytes()
+
+else:
+    from importlib.resources import read_binary
+
+
+class ViaTemplateActivator(Activator, metaclass=ABCMeta):
+    @abstractmethod
+    def templates(self):
+        raise NotImplementedError
+
+    def generate(self, creator):
+        dest_folder = creator.bin_dir
+        replacements = self.replacements(creator, dest_folder)
+        generated = self._generate(replacements, self.templates(), dest_folder, creator)
+        if self.flag_prompt is not None:
+            creator.pyenv_cfg["prompt"] = self.flag_prompt
+        return generated
+
+    def replacements(self, creator, dest_folder):  # noqa: ARG002
+        return {
+            "__VIRTUAL_PROMPT__": "" if self.flag_prompt is None else self.flag_prompt,
+            "__VIRTUAL_ENV__": str(creator.dest),
+            "__VIRTUAL_NAME__": creator.env_name,
+            "__BIN_NAME__": str(creator.bin_dir.relative_to(creator.dest)),
+            "__PATH_SEP__": os.pathsep,
+        }
+
+    def _generate(self, replacements, templates, to_folder, creator):
+        generated = []
+        for template in templates:
+            text = self.instantiate_template(replacements, template, creator)
+            dest = to_folder / self.as_name(template)
+            # remove the file if it already exists - this prevents permission
+            # errors when the dest is not writable
+            if dest.exists():
+                dest.unlink()
+            # use write_bytes to avoid platform specific line normalization (\n -> \r\n)
+            dest.write_bytes(text.encode("utf-8"))
+            generated.append(dest)
+        return generated
+
+    def as_name(self, template):
+        return template
+
+    def instantiate_template(self, replacements, template, creator):
+        # read content as binary to avoid platform specific line normalization (\n -> \r\n)
+        binary = read_binary(self.__module__, template)
+        text = binary.decode("utf-8", errors="strict")
+        for key, value in replacements.items():
+            value_uni = self._repr_unicode(creator, value)
+            text = text.replace(key, value_uni)
+        return text
+
+    @staticmethod
+    def _repr_unicode(creator, value):  # noqa: ARG004
+        return value  # by default, we just let it be unicode
+
+
+__all__ = [
+    "ViaTemplateActivator",
+]
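The substitution `instantiate_template` performs is plain token replacement; a self-contained sketch of the same loop with illustrative values:

```python
# Plain-Python sketch of the __KEY__ -> value substitution done by
# ViaTemplateActivator.instantiate_template(); values are illustrative.
replacements = {
    "__VIRTUAL_ENV__": "/home/user/project/venv",
    "__BIN_NAME__": "bin",
}
template = 'VIRTUAL_ENV="__VIRTUAL_ENV__"\nPATH="$VIRTUAL_ENV/__BIN_NAME__:$PATH"\n'
for key, value in replacements.items():
    template = template.replace(key, value)
print(template)  # placeholders resolved to concrete paths
```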
diff --git a/venv/lib/python3.10/site-packages/virtualenv/app_data/__init__.py b/venv/lib/python3.10/site-packages/virtualenv/app_data/__init__.py
new file mode 100644
index 0000000..148c941
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/app_data/__init__.py
@@ -0,0 +1,56 @@
+"""Application data stored by virtualenv."""
+
+from __future__ import annotations
+
+import logging
+import os
+
+from platformdirs import user_data_dir
+
+from .na import AppDataDisabled
+from .read_only import ReadOnlyAppData
+from .via_disk_folder import AppDataDiskFolder
+from .via_tempdir import TempAppData
+
+
+def _default_app_data_dir(env):
+    key = "VIRTUALENV_OVERRIDE_APP_DATA"
+    if key in env:
+        return env[key]
+    return user_data_dir(appname="virtualenv", appauthor="pypa")
+
+
+def make_app_data(folder, **kwargs):
+    is_read_only = kwargs.pop("read_only")
+    env = kwargs.pop("env")
+    if kwargs:  # py3+ kwonly
+        msg = "unexpected keywords: {}"
+        raise TypeError(msg)
+
+    if folder is None:
+        folder = _default_app_data_dir(env)
+    folder = os.path.abspath(folder)
+
+    if is_read_only:
+        return ReadOnlyAppData(folder)
+
+    if not os.path.isdir(folder):
+        try:
+            os.makedirs(folder)
+            logging.debug("created app data folder %s", folder)
+        except OSError as exception:
+            logging.info("could not create app data folder %s due to %r", folder, exception)
+
+    if os.access(folder, os.W_OK):
+        return AppDataDiskFolder(folder)
+    logging.debug("app data folder %s has no write access", folder)
+    return TempAppData()
+
+
+__all__ = (
+    "AppDataDisabled",
+    "AppDataDiskFolder",
+    "ReadOnlyAppData",
+    "TempAppData",
+    "make_app_data",
+)
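A hedged usage sketch for `make_app_data` as defined above; the keyword names come from the source, the call site itself is illustrative:

```python
import os

from virtualenv.app_data import make_app_data

# folder=None falls back to the platformdirs default (or the
# VIRTUALENV_OVERRIDE_APP_DATA env var); read_only and env are
# required keyword arguments.
app_data = make_app_data(None, read_only=False, env=os.environ)
print(type(app_data).__name__)  # AppDataDiskFolder, or TempAppData without write access
```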
diff --git a/venv/lib/python3.10/site-packages/virtualenv/app_data/base.py b/venv/lib/python3.10/site-packages/virtualenv/app_data/base.py
new file mode 100644
index 0000000..4d82e21
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/app_data/base.py
@@ -0,0 +1,96 @@
+"""Application data stored by virtualenv."""
+
+from __future__ import annotations
+
+from abc import ABCMeta, abstractmethod
+from contextlib import contextmanager
+
+from virtualenv.info import IS_ZIPAPP
+
+
+class AppData(metaclass=ABCMeta):
+    """Abstract storage interface for the virtualenv application."""
+
+    @abstractmethod
+    def close(self):
+        """Called before virtualenv exits."""
+
+    @abstractmethod
+    def reset(self):
+        """Called when the user passes in the reset app data."""
+
+    @abstractmethod
+    def py_info(self, path):
+        raise NotImplementedError
+
+    @abstractmethod
+    def py_info_clear(self):
+        raise NotImplementedError
+
+    @property
+    def can_update(self):
+        raise NotImplementedError
+
+    @abstractmethod
+    def embed_update_log(self, distribution, for_py_version):
+        raise NotImplementedError
+
+    @property
+    def house(self):
+        raise NotImplementedError
+
+    @property
+    def transient(self):
+        raise NotImplementedError
+
+    @abstractmethod
+    def wheel_image(self, for_py_version, name):
+        raise NotImplementedError
+
+    @contextmanager
+    def ensure_extracted(self, path, to_folder=None):
+        """Some paths might be within the zipapp, unzip these to a path on the disk."""
+        if IS_ZIPAPP:
+            with self.extract(path, to_folder) as result:
+                yield result
+        else:
+            yield path
+
+    @abstractmethod
+    @contextmanager
+    def extract(self, path, to_folder):
+        raise NotImplementedError
+
+    @abstractmethod
+    @contextmanager
+    def locked(self, path):
+        raise NotImplementedError
+
+
+class ContentStore(metaclass=ABCMeta):
+    @abstractmethod
+    def exists(self):
+        raise NotImplementedError
+
+    @abstractmethod
+    def read(self):
+        raise NotImplementedError
+
+    @abstractmethod
+    def write(self, content):
+        raise NotImplementedError
+
+    @abstractmethod
+    def remove(self):
+        raise NotImplementedError
+
+    @abstractmethod
+    @contextmanager
+    def locked(self):
+        pass
+
+
+__all__ = [
+    "ContentStore",
+    "AppData",
+]
diff --git a/venv/lib/python3.10/site-packages/virtualenv/app_data/na.py b/venv/lib/python3.10/site-packages/virtualenv/app_data/na.py
new file mode 100644
index 0000000..921e83a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/app_data/na.py
@@ -0,0 +1,72 @@
+from __future__ import annotations
+
+from contextlib import contextmanager
+
+from .base import AppData, ContentStore
+
+
+class AppDataDisabled(AppData):
+    """No application cache available (most likely as we don't have write permissions)."""
+
+    transient = True
+    can_update = False
+
+    def __init__(self) -> None:
+        pass
+
+    error = RuntimeError("no app data folder available, probably no write access to the folder")
+
+    def close(self):
+        """Do nothing."""
+
+    def reset(self):
+        """Do nothing."""
+
+    def py_info(self, path):  # noqa: ARG002
+        return ContentStoreNA()
+
+    def embed_update_log(self, distribution, for_py_version):  # noqa: ARG002
+        return ContentStoreNA()
+
+    def extract(self, path, to_folder):  # noqa: ARG002
+        raise self.error
+
+    @contextmanager
+    def locked(self, path):  # noqa: ARG002
+        """Do nothing."""
+        yield
+
+    @property
+    def house(self):
+        raise self.error
+
+    def wheel_image(self, for_py_version, name):  # noqa: ARG002
+        raise self.error
+
+    def py_info_clear(self):
+        """Nothing to clear."""
+
+
+class ContentStoreNA(ContentStore):
+    def exists(self):
+        return False
+
+    def read(self):
+        """Nothing to read."""
+        return
+
+    def write(self, content):
+        """Nothing to write."""
+
+    def remove(self):
+        """Nothing to remove."""
+
+    @contextmanager
+    def locked(self):
+        yield
+
+
+__all__ = [
+    "AppDataDisabled",
+    "ContentStoreNA",
+]
diff --git a/venv/lib/python3.10/site-packages/virtualenv/app_data/read_only.py b/venv/lib/python3.10/site-packages/virtualenv/app_data/read_only.py
new file mode 100644
index 0000000..952dbad
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/app_data/read_only.py
@@ -0,0 +1,42 @@
+from __future__ import annotations
+
+import os.path
+
+from virtualenv.util.lock import NoOpFileLock
+
+from .via_disk_folder import AppDataDiskFolder, PyInfoStoreDisk
+
+
+class ReadOnlyAppData(AppDataDiskFolder):
+    can_update = False
+
+    def __init__(self, folder: str) -> None:
+        if not os.path.isdir(folder):
+            msg = f"read-only app data directory {folder} does not exist"
+            raise RuntimeError(msg)
+        super().__init__(folder)
+        self.lock = NoOpFileLock(folder)
+
+    def reset(self) -> None:
+        msg = "read-only app data does not support reset"
+        raise RuntimeError(msg)
+
+    def py_info_clear(self) -> None:
+        raise NotImplementedError
+
+    def py_info(self, path):
+        return _PyInfoStoreDiskReadOnly(self.py_info_at, path)
+
+    def embed_update_log(self, distribution, for_py_version):
+        raise NotImplementedError
+
+
+class _PyInfoStoreDiskReadOnly(PyInfoStoreDisk):
+    def write(self, content):  # noqa: ARG002
+        msg = "read-only app data python info cannot be updated"
+        raise RuntimeError(msg)
+
+
+__all__ = [
+    "ReadOnlyAppData",
+]
diff --git a/venv/lib/python3.10/site-packages/virtualenv/app_data/via_disk_folder.py b/venv/lib/python3.10/site-packages/virtualenv/app_data/via_disk_folder.py
new file mode 100644
index 0000000..fa87149
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/app_data/via_disk_folder.py
@@ -0,0 +1,174 @@
+"""
+A rough layout of the current storage goes as:
+
+virtualenv-app-data
+├── py - <version> <cache information about python interpreters>
+│  └── *.json/lock
+├── wheel <python major.minor>
+│   ├── house
+│   │   └── *.whl <wheels as exist in the wheel house>
+│   └── <wheel major.minor> -> 3.9
+│       ├── img-<version>
+│       │   └── image
+│       │           └── <install class> -> CopyPipInstall / SymlinkPipInstall
+│       │               └── <wheel name> -> pip-20.1.1-py2.py3-none-any
+│       └── embed
+│           └── 3 -> json format versioning
+│               └── *.json -> for every distribution contains data about newer embed versions and releases
+└─── unzip <virtualenv version>
+     └── <hash of the path that needs extraction>
+         ├── py_info.py
+         ├── debug.py
+         └── _virtualenv.py
+"""  # noqa: D415
+
+from __future__ import annotations
+
+import json
+import logging
+from abc import ABCMeta
+from contextlib import contextmanager, suppress
+from hashlib import sha256
+
+from virtualenv.util.lock import ReentrantFileLock
+from virtualenv.util.path import safe_delete
+from virtualenv.util.zipapp import extract
+from virtualenv.version import __version__
+
+from .base import AppData, ContentStore
+
+
+class AppDataDiskFolder(AppData):
+    """Store the application data on the disk within a folder layout."""
+
+    transient = False
+    can_update = True
+
+    def __init__(self, folder) -> None:
+        self.lock = ReentrantFileLock(folder)
+
+    def __repr__(self) -> str:
+        return f"{type(self).__name__}({self.lock.path})"
+
+    def __str__(self) -> str:
+        return str(self.lock.path)
+
+    def reset(self):
+        logging.debug("reset app data folder %s", self.lock.path)
+        safe_delete(self.lock.path)
+
+    def close(self):
+        """Do nothing."""
+
+    @contextmanager
+    def locked(self, path):
+        path_lock = self.lock / path
+        with path_lock:
+            yield path_lock.path
+
+    @contextmanager
+    def extract(self, path, to_folder):
+        root = ReentrantFileLock(to_folder()) if to_folder is not None else self.lock / "unzip" / __version__
+        with root.lock_for_key(path.name):
+            dest = root.path / path.name
+            if not dest.exists():
+                extract(path, dest)
+            yield dest
+
+    @property
+    def py_info_at(self):
+        return self.lock / "py_info" / "1"
+
+    def py_info(self, path):
+        return PyInfoStoreDisk(self.py_info_at, path)
+
+    def py_info_clear(self):
+        """clear py info."""
+        py_info_folder = self.py_info_at
+        with py_info_folder:
+            for filename in py_info_folder.path.iterdir():
+                if filename.suffix == ".json":
+                    with py_info_folder.lock_for_key(filename.stem):
+                        if filename.exists():
+                            filename.unlink()
+
+    def embed_update_log(self, distribution, for_py_version):
+        return EmbedDistributionUpdateStoreDisk(self.lock / "wheel" / for_py_version / "embed" / "3", distribution)
+
+    @property
+    def house(self):
+        path = self.lock.path / "wheel" / "house"
+        path.mkdir(parents=True, exist_ok=True)
+        return path
+
+    def wheel_image(self, for_py_version, name):
+        return self.lock.path / "wheel" / for_py_version / "image" / "1" / name
+
+
+class JSONStoreDisk(ContentStore, metaclass=ABCMeta):
+    def __init__(self, in_folder, key, msg, msg_args) -> None:
+        self.in_folder = in_folder
+        self.key = key
+        self.msg = msg
+        self.msg_args = (*msg_args, self.file)
+
+    @property
+    def file(self):
+        return self.in_folder.path / f"{self.key}.json"
+
+    def exists(self):
+        return self.file.exists()
+
+    def read(self):
+        data, bad_format = None, False
+        try:
+            data = json.loads(self.file.read_text(encoding="utf-8"))
+        except ValueError:
+            bad_format = True
+        except Exception:  # noqa: BLE001, S110
+            pass
+        else:
+            logging.debug("got %s from %s", self.msg, self.msg_args)
+            return data
+        if bad_format:
+            with suppress(OSError):  # reading and writing on the same file may cause race on multiple processes
+                self.remove()
+        return None
+
+    def remove(self):
+        self.file.unlink()
+        logging.debug("removed %s at %s", self.msg, self.msg_args)
+
+    @contextmanager
+    def locked(self):
+        with self.in_folder.lock_for_key(self.key):
+            yield
+
+    def write(self, content):
+        folder = self.file.parent
+        folder.mkdir(parents=True, exist_ok=True)
+        self.file.write_text(json.dumps(content, sort_keys=True, indent=2), encoding="utf-8")
+        logging.debug("wrote %s at %s", self.msg, self.msg_args)
+
+
+class PyInfoStoreDisk(JSONStoreDisk):
+    def __init__(self, in_folder, path) -> None:
+        key = sha256(str(path).encode("utf-8")).hexdigest()
+        super().__init__(in_folder, key, "python info of %s", (path,))
+
+
+class EmbedDistributionUpdateStoreDisk(JSONStoreDisk):
+    def __init__(self, in_folder, distribution) -> None:
+        super().__init__(
+            in_folder,
+            distribution,
+            "embed update of distribution %s",
+            (distribution,),
+        )
+
+
+__all__ = [
+    "AppDataDiskFolder",
+    "JSONStoreDisk",
+    "PyInfoStoreDisk",
+]
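The cache-key scheme used by `PyInfoStoreDisk` above is simply a SHA-256 of the interpreter path; a standalone sketch with an illustrative path:

```python
from hashlib import sha256

# Same key derivation as PyInfoStoreDisk.__init__ above; the path is
# illustrative. Any interpreter path maps to a flat <hash>.json file.
path = "/usr/bin/python3.10"
key = sha256(str(path).encode("utf-8")).hexdigest()
print(f"{key}.json")  # file name under the py_info folder
```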
diff --git a/venv/lib/python3.10/site-packages/virtualenv/app_data/via_tempdir.py b/venv/lib/python3.10/site-packages/virtualenv/app_data/via_tempdir.py
new file mode 100644
index 0000000..0a30dfe
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/app_data/via_tempdir.py
@@ -0,0 +1,32 @@
+from __future__ import annotations
+
+import logging
+from tempfile import mkdtemp
+
+from virtualenv.util.path import safe_delete
+
+from .via_disk_folder import AppDataDiskFolder
+
+
+class TempAppData(AppDataDiskFolder):
+    transient = True
+    can_update = False
+
+    def __init__(self) -> None:
+        super().__init__(folder=mkdtemp())
+        logging.debug("created temporary app data folder %s", self.lock.path)
+
+    def reset(self):
+        """This is a temporary folder, is already empty to start with."""
+
+    def close(self):
+        logging.debug("remove temporary app data folder %s", self.lock.path)
+        safe_delete(self.lock.path)
+
+    def embed_update_log(self, distribution, for_py_version):
+        raise NotImplementedError
+
+
+__all__ = [
+    "TempAppData",
+]
diff --git a/venv/lib/python3.10/site-packages/virtualenv/config/__init__.py b/venv/lib/python3.10/site-packages/virtualenv/config/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/venv/lib/python3.10/site-packages/virtualenv/config/cli/__init__.py b/venv/lib/python3.10/site-packages/virtualenv/config/cli/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/venv/lib/python3.10/site-packages/virtualenv/config/cli/parser.py b/venv/lib/python3.10/site-packages/virtualenv/config/cli/parser.py
new file mode 100644
index 0000000..9323d4e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/config/cli/parser.py
@@ -0,0 +1,126 @@
+from __future__ import annotations
+
+import os
+from argparse import SUPPRESS, ArgumentDefaultsHelpFormatter, ArgumentParser, Namespace
+from collections import OrderedDict
+
+from virtualenv.config.convert import get_type
+from virtualenv.config.env_var import get_env_var
+from virtualenv.config.ini import IniConfig
+
+
+class VirtualEnvOptions(Namespace):
+    def __init__(self, **kwargs) -> None:
+        super().__init__(**kwargs)
+        self._src = None
+        self._sources = {}
+
+    def set_src(self, key, value, src):
+        setattr(self, key, value)
+        if src.startswith("env var"):
+            src = "env var"
+        self._sources[key] = src
+
+    def __setattr__(self, key, value) -> None:
+        if getattr(self, "_src", None) is not None:
+            self._sources[key] = self._src
+        super().__setattr__(key, value)
+
+    def get_source(self, key):
+        return self._sources.get(key)
+
+    @property
+    def verbosity(self):
+        if not hasattr(self, "verbose") and not hasattr(self, "quiet"):
+            return None
+        return max(self.verbose - self.quiet, 0)
+
+    def __repr__(self) -> str:
+        return f"{type(self).__name__}({', '.join(f'{k}={v}' for k, v in vars(self).items() if not k.startswith('_'))})"
+
+
+class VirtualEnvConfigParser(ArgumentParser):
+    """Custom option parser which updates its defaults by checking the configuration files and environmental vars."""
+
+    def __init__(self, options=None, env=None, *args, **kwargs) -> None:
+        env = os.environ if env is None else env
+        self.file_config = IniConfig(env)
+        self.epilog_list = []
+        self.env = env
+        kwargs["epilog"] = self.file_config.epilog
+        kwargs["add_help"] = False
+        kwargs["formatter_class"] = HelpFormatter
+        kwargs["prog"] = "virtualenv"
+        super().__init__(*args, **kwargs)
+        self._fixed = set()
+        if options is not None and not isinstance(options, VirtualEnvOptions):
+            msg = "options must be of type VirtualEnvOptions"
+            raise TypeError(msg)
+        self.options = VirtualEnvOptions() if options is None else options
+        self._interpreter = None
+        self._app_data = None
+
+    def _fix_defaults(self):
+        for action in self._actions:
+            action_id = id(action)
+            if action_id not in self._fixed:
+                self._fix_default(action)
+                self._fixed.add(action_id)
+
+    def _fix_default(self, action):
+        if hasattr(action, "default") and hasattr(action, "dest") and action.default != SUPPRESS:
+            as_type = get_type(action)
+            names = OrderedDict((i.lstrip("-").replace("-", "_"), None) for i in action.option_strings)
+            outcome = None
+            for name in names:
+                outcome = get_env_var(name, as_type, self.env)
+                if outcome is not None:
+                    break
+            if outcome is None and self.file_config:
+                for name in names:
+                    outcome = self.file_config.get(name, as_type)
+                    if outcome is not None:
+                        break
+            if outcome is not None:
+                action.default, action.default_source = outcome
+            else:
+                outcome = action.default, "default"
+            self.options.set_src(action.dest, *outcome)
+
+    def enable_help(self):
+        self._fix_defaults()
+        self.add_argument("-h", "--help", action="help", default=SUPPRESS, help="show this help message and exit")
+
+    def parse_known_args(self, args=None, namespace=None):
+        if namespace is None:
+            namespace = self.options
+        elif namespace is not self.options:
+            msg = "can only pass in parser.options"
+            raise ValueError(msg)
+        self._fix_defaults()
+        self.options._src = "cli"  # noqa: SLF001
+        try:
+            namespace.env = self.env
+            return super().parse_known_args(args, namespace=namespace)
+        finally:
+            self.options._src = None  # noqa: SLF001
+
+
+class HelpFormatter(ArgumentDefaultsHelpFormatter):
+    def __init__(self, prog) -> None:
+        super().__init__(prog, max_help_position=32, width=240)
+
+    def _get_help_string(self, action):
+        text = super()._get_help_string(action)
+        if hasattr(action, "default_source"):
+            default = " (default: %(default)s)"
+            if text.endswith(default):
+                text = f"{text[: -len(default)]} (default: %(default)s -> from %(default_source)s)"
+        return text
+
+
+__all__ = [
+    "HelpFormatter",
+    "VirtualEnvConfigParser",
+    "VirtualEnvOptions",
+]
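The resolution order `_fix_default` implements is: explicit CLI value, then a `VIRTUALENV_<NAME>` environment variable, then the ini file, then the hard-coded default. A plain sketch of that precedence (the helper and values are illustrative, not the parser's API):

```python
# Illustrative sketch of the default-resolution order in _fix_default():
# an env var beats the ini file, which beats the built-in default;
# anything passed explicitly on the CLI is parsed on top of the result.
def resolve_default(name, env, file_config, hard_default):
    env_value = env.get(f"VIRTUALENV_{name.upper()}")
    if env_value is not None:
        return env_value, "env var"
    file_value = file_config.get(name)  # stand-in for IniConfig.get
    if file_value is not None:
        return file_value, "file"
    return hard_default, "default"


print(resolve_default("prompt", {"VIRTUALENV_PROMPT": "demo"}, {}, None))
# -> ('demo', 'env var')
```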
diff --git a/venv/lib/python3.10/site-packages/virtualenv/config/convert.py b/venv/lib/python3.10/site-packages/virtualenv/config/convert.py
new file mode 100644
index 0000000..7f1e997
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/config/convert.py
@@ -0,0 +1,100 @@
+from __future__ import annotations
+
+import logging
+import os
+from typing import ClassVar
+
+
+class TypeData:
+    def __init__(self, default_type, as_type) -> None:
+        self.default_type = default_type
+        self.as_type = as_type
+
+    def __repr__(self) -> str:
+        return f"{self.__class__.__name__}(base={self.default_type}, as={self.as_type})"
+
+    def convert(self, value):
+        return self.default_type(value)
+
+
+class BoolType(TypeData):
+    BOOLEAN_STATES: ClassVar[dict[str, bool]] = {
+        "1": True,
+        "yes": True,
+        "true": True,
+        "on": True,
+        "0": False,
+        "no": False,
+        "false": False,
+        "off": False,
+    }
+
+    def convert(self, value):
+        if value.lower() not in self.BOOLEAN_STATES:
+            msg = f"Not a boolean: {value}"
+            raise ValueError(msg)
+        return self.BOOLEAN_STATES[value.lower()]
+
+
+class NoneType(TypeData):
+    def convert(self, value):
+        if not value:
+            return None
+        return str(value)
+
+
+class ListType(TypeData):
+    def _validate(self):
+        """no op."""
+
+    def convert(self, value, flatten=True):  # noqa: ARG002, FBT002
+        values = self.split_values(value)
+        result = []
+        for value in values:
+            sub_values = value.split(os.pathsep)
+            result.extend(sub_values)
+        return [self.as_type(i) for i in result]
+
+    def split_values(self, value):
+        """
+        Split the provided value into a list.
+
+        First this is done by newlines. If there were no newlines in the text,
+        then we next try to split by comma.
+        """
+        if isinstance(value, (str, bytes)):
+            # Use `splitlines` rather than a custom check for whether there is
+            # more than one line. This ensures that the full `splitlines()`
+            # logic is supported here.
+            values = value.splitlines()
+            if len(values) <= 1:
+                values = value.split(",")
+            values = filter(None, [x.strip() for x in values])
+        else:
+            values = list(value)
+
+        return values
+
+
+def convert(value, as_type, source):
+    """Convert the value as a given type where the value comes from the given source."""
+    try:
+        return as_type.convert(value)
+    except Exception as exception:
+        logging.warning("%s failed to convert %r as %r because %r", source, value, as_type, exception)
+        raise
+
+
+_CONVERT = {bool: BoolType, type(None): NoneType, list: ListType}
+
+
+def get_type(action):
+    default_type = type(action.default)
+    as_type = default_type if action.type is None else action.type
+    return _CONVERT.get(default_type, TypeData)(default_type, as_type)
+
+
+__all__ = [
+    "convert",
+    "get_type",
+]
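Usage sketch for the converters above; the inputs are illustrative:

```python
from virtualenv.config.convert import BoolType, ListType

# BoolType maps the usual ini/env spellings onto booleans.
flag = BoolType(bool, bool)
print(flag.convert("yes"), flag.convert("off"))  # True False

# A single-line ListType value splits on commas (multi-line input splits
# on newlines instead), then each item is passed through as_type.
items = ListType(list, str)
print(items.convert("a,b,c"))  # ['a', 'b', 'c']
```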
diff --git a/venv/lib/python3.10/site-packages/virtualenv/config/env_var.py b/venv/lib/python3.10/site-packages/virtualenv/config/env_var.py
new file mode 100644
index 0000000..e127234
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/config/env_var.py
@@ -0,0 +1,30 @@
+from __future__ import annotations
+
+from contextlib import suppress
+
+from .convert import convert
+
+
+def get_env_var(key, as_type, env):
+    """
+    Get the environment variable option.
+
+    :param key: the config key requested
+    :param as_type: the type we would like to convert it to
+    :param env: environment variables to use
+    :return:
+    """
+    environ_key = f"VIRTUALENV_{key.upper()}"
+    if env.get(environ_key):
+        value = env[environ_key]
+
+        with suppress(Exception):  # note the converter already logs a warning when failures happen
+            source = f"env var {environ_key}"
+            as_type = convert(value, as_type, source)
+            return as_type, source
+    return None
+
+
+__all__ = [
+    "get_env_var",
+]
diff --git a/venv/lib/python3.10/site-packages/virtualenv/config/ini.py b/venv/lib/python3.10/site-packages/virtualenv/config/ini.py
new file mode 100644
index 0000000..cd6ecf5
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/config/ini.py
@@ -0,0 +1,75 @@
+from __future__ import annotations
+
+import logging
+import os
+from configparser import ConfigParser
+from pathlib import Path
+from typing import ClassVar
+
+from platformdirs import user_config_dir
+
+from .convert import convert
+
+
+class IniConfig:
+    VIRTUALENV_CONFIG_FILE_ENV_VAR: ClassVar[str] = "VIRTUALENV_CONFIG_FILE"
+    STATE: ClassVar[dict[bool | None, str]] = {None: "failed to parse", True: "active", False: "missing"}
+
+    section = "virtualenv"
+
+    def __init__(self, env=None) -> None:
+        env = os.environ if env is None else env
+        config_file = env.get(self.VIRTUALENV_CONFIG_FILE_ENV_VAR, None)
+        self.is_env_var = config_file is not None
+        if config_file is None:
+            config_file = Path(user_config_dir(appname="virtualenv", appauthor="pypa")) / "virtualenv.ini"
+        else:
+            config_file = Path(config_file)
+        self.config_file = config_file
+        self._cache = {}
+
+        exception = None
+        self.has_config_file = None
+        try:
+            self.has_config_file = self.config_file.exists()
+        except OSError as exc:
+            exception = exc
+        else:
+            if self.has_config_file:
+                self.config_file = self.config_file.resolve()
+                self.config_parser = ConfigParser()
+                try:
+                    self._load()
+                    self.has_virtualenv_section = self.config_parser.has_section(self.section)
+                except Exception as exc:  # noqa: BLE001
+                    exception = exc
+        if exception is not None:
+            logging.error("failed to read config file %s because %r", config_file, exception)
+
+    def _load(self):
+        with self.config_file.open("rt", encoding="utf-8") as file_handler:
+            return self.config_parser.read_file(file_handler)
+
+    def get(self, key, as_type):
+        cache_key = key, as_type
+        if cache_key in self._cache:
+            return self._cache[cache_key]
+        try:
+            source = "file"
+            raw_value = self.config_parser.get(self.section, key.lower())
+            value = convert(raw_value, as_type, source)
+            result = value, source
+        except Exception:  # noqa: BLE001
+            result = None
+        self._cache[cache_key] = result
+        return result
+
+    def __bool__(self) -> bool:
+        return bool(self.has_config_file) and bool(self.has_virtualenv_section)
+
+    @property
+    def epilog(self):
+        return (
+            f"\nconfig file {self.config_file} {self.STATE[self.has_config_file]} "
+            f"(change{'d' if self.is_env_var else ''} via env var {self.VIRTUALENV_CONFIG_FILE_ENV_VAR})"
+        )
diff --git a/venv/lib/python3.10/site-packages/virtualenv/create/__init__.py b/venv/lib/python3.10/site-packages/virtualenv/create/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/venv/lib/python3.10/site-packages/virtualenv/create/creator.py b/venv/lib/python3.10/site-packages/virtualenv/create/creator.py
new file mode 100644
index 0000000..06f9d39
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/create/creator.py
@@ -0,0 +1,226 @@
+from __future__ import annotations
+
+import json
+import logging
+import os
+import sys
+from abc import ABCMeta, abstractmethod
+from argparse import ArgumentTypeError
+from ast import literal_eval
+from collections import OrderedDict
+from pathlib import Path
+
+from virtualenv.discovery.cached_py_info import LogCmd
+from virtualenv.util.path import safe_delete
+from virtualenv.util.subprocess import run_cmd
+from virtualenv.version import __version__
+
+from .pyenv_cfg import PyEnvCfg
+
+HERE = Path(os.path.abspath(__file__)).parent
+DEBUG_SCRIPT = HERE / "debug.py"
+
+
+class CreatorMeta:
+    def __init__(self) -> None:
+        self.error = None
+
+
+class Creator(metaclass=ABCMeta):
+    """A class that given a python Interpreter creates a virtual environment."""
+
+    def __init__(self, options, interpreter) -> None:
+        """
+        Construct a new virtual environment creator.
+
+        :param options: the CLI option as parsed from :meth:`add_parser_arguments`
+        :param interpreter: the interpreter to create virtual environment from
+        """
+        self.interpreter = interpreter
+        self._debug = None
+        self.dest = Path(options.dest)
+        self.clear = options.clear
+        self.no_vcs_ignore = options.no_vcs_ignore
+        self.pyenv_cfg = PyEnvCfg.from_folder(self.dest)
+        self.app_data = options.app_data
+        self.env = options.env
+
+    def __repr__(self) -> str:
+        return f"{self.__class__.__name__}({', '.join(f'{k}={v}' for k, v in self._args())})"
+
+    def _args(self):
+        return [
+            ("dest", str(self.dest)),
+            ("clear", self.clear),
+            ("no_vcs_ignore", self.no_vcs_ignore),
+        ]
+
+    @classmethod
+    def can_create(cls, interpreter):  # noqa: ARG003
+        """
+        Determine if we can create a virtual environment.
+
+        :param interpreter: the interpreter in question
+        :return: ``None`` if we can't create, any other object otherwise that will be forwarded to \
+                  :meth:`add_parser_arguments`
+        """
+        return True
+
+    @classmethod
+    def add_parser_arguments(cls, parser, interpreter, meta, app_data):  # noqa: ARG003
+        """
+        Add CLI arguments for the creator.
+
+        :param parser: the CLI parser
+        :param app_data: the application data folder
+        :param interpreter: the interpreter we're asked to create virtual environment for
+        :param meta: value as returned by :meth:`can_create`
+        """
+        parser.add_argument(
+            "dest",
+            help="directory to create virtualenv at",
+            type=cls.validate_dest,
+        )
+        parser.add_argument(
+            "--clear",
+            dest="clear",
+            action="store_true",
+            help="remove the destination directory if exist before starting (will overwrite files otherwise)",
+            default=False,
+        )
+        parser.add_argument(
+            "--no-vcs-ignore",
+            dest="no_vcs_ignore",
+            action="store_true",
+            help="don't create VCS ignore directive in the destination directory",
+            default=False,
+        )
+
+    @abstractmethod
+    def create(self):
+        """Perform the virtual environment creation."""
+        raise NotImplementedError
+
+    @classmethod
+    def validate_dest(cls, raw_value):  # noqa: C901
+        """No path separator in the path, valid chars and must be write-able."""
+
+        def non_write_able(dest, value):
+            common = Path(*os.path.commonprefix([value.parts, dest.parts]))
+            msg = f"the destination {dest.relative_to(common)} is not write-able at {common}"
+            raise ArgumentTypeError(msg)
+
+        # the file system must be able to encode
+        # note in newer CPython this is always utf-8 https://www.python.org/dev/peps/pep-0529/
+        encoding = sys.getfilesystemencoding()
+        refused = OrderedDict()
+        kwargs = {"errors": "ignore"} if encoding != "mbcs" else {}
+        for char in str(raw_value):
+            try:
+                trip = char.encode(encoding, **kwargs).decode(encoding)
+                if trip == char:
+                    continue
+                raise ValueError(trip)  # noqa: TRY301
+            except ValueError:  # noqa: PERF203
+                refused[char] = None
+        if refused:
+            bad = "".join(refused.keys())
+            msg = f"the file system codec ({encoding}) cannot handle characters {bad!r} within {raw_value!r}"
+            raise ArgumentTypeError(msg)
+        if os.pathsep in raw_value:
+            msg = (
+                f"destination {raw_value!r} must not contain the path separator ({os.pathsep})"
+                f" as this would break the activation scripts"
+            )
+            raise ArgumentTypeError(msg)
+
+        value = Path(raw_value)
+        if value.exists() and value.is_file():
+            msg = f"the destination {value} already exists and is a file"
+            raise ArgumentTypeError(msg)
+        dest = Path(os.path.abspath(str(value))).resolve()  # on Windows absolute does not imply resolve so use both
+        value = dest
+        while dest:
+            if dest.exists():
+                if os.access(str(dest), os.W_OK):
+                    break
+                non_write_able(dest, value)
+            base, _ = dest.parent, dest.name
+            if base == dest:
+                non_write_able(dest, value)  # pragma: no cover
+            dest = base
+        return str(value)
+
+    def run(self):
+        if self.dest.exists() and self.clear:
+            logging.debug("delete %s", self.dest)
+            safe_delete(self.dest)
+        self.create()
+        self.set_pyenv_cfg()
+        if not self.no_vcs_ignore:
+            self.setup_ignore_vcs()
+
+    def set_pyenv_cfg(self):
+        self.pyenv_cfg.content = OrderedDict()
+        self.pyenv_cfg["home"] = os.path.dirname(os.path.abspath(self.interpreter.system_executable))
+        self.pyenv_cfg["implementation"] = self.interpreter.implementation
+        self.pyenv_cfg["version_info"] = ".".join(str(i) for i in self.interpreter.version_info)
+        self.pyenv_cfg["virtualenv"] = __version__
+
+    def setup_ignore_vcs(self):
+        """Generate ignore instructions for version control systems."""
+        # mark this folder to be ignored by VCS, handle https://www.python.org/dev/peps/pep-0610/#registered-vcs
+        git_ignore = self.dest / ".gitignore"
+        if not git_ignore.exists():
+            git_ignore.write_text("# created by virtualenv automatically\n*\n", encoding="utf-8")
+        # Mercurial - does not support the .hgignore file inside a subdirectory directly, but only if included via the
+        # subinclude directive from root, at which point one might as well ignore the directory itself, see
+        # https://www.selenic.com/mercurial/hgignore.5.html for more details
+        # Bazaar - does not support ignore files in sub-directories, only at root level via .bzrignore
+        # Subversion - does not support ignore files, requires direct manipulation with the svn tool
+
+    @property
+    def debug(self):
+        """:return: debug information about the virtual environment (only valid after :meth:`create` has run)"""
+        if self._debug is None and self.exe is not None:
+            self._debug = get_env_debug_info(self.exe, self.debug_script(), self.app_data, self.env)
+        return self._debug
+
+    @staticmethod
+    def debug_script():
+        return DEBUG_SCRIPT
+
+
+def get_env_debug_info(env_exe, debug_script, app_data, env):
+    env = env.copy()
+    env.pop("PYTHONPATH", None)
+
+    with app_data.ensure_extracted(debug_script) as debug_script:
+        cmd = [str(env_exe), str(debug_script)]
+        logging.debug("debug via %r", LogCmd(cmd))
+        code, out, err = run_cmd(cmd)
+
+    try:
+        if code != 0:
+            if out:
+                result = literal_eval(out)
+            else:
+                if code == 2 and "file" in err:  # noqa: PLR2004
+                    # Re-raise FileNotFoundError from `run_cmd()`
+                    raise OSError(err)  # noqa: TRY301
+                raise Exception(err)  # noqa: TRY002, TRY301
+        else:
+            result = json.loads(out)
+        if err:
+            result["err"] = err
+    except Exception as exception:  # noqa: BLE001
+        return {"out": out, "err": err, "returncode": code, "exception": repr(exception)}
+    if "sys" in result and "path" in result["sys"]:
+        del result["sys"]["path"][0]
+    return result
+
+
+__all__ = [
+    "Creator",
+    "CreatorMeta",
+]
diff --git a/venv/lib/python3.10/site-packages/virtualenv/create/debug.py b/venv/lib/python3.10/site-packages/virtualenv/create/debug.py
new file mode 100644
index 0000000..ee7fc90
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/create/debug.py
@@ -0,0 +1,101 @@
+"""Inspect a target Python interpreter virtual environment wise."""
+from __future__ import annotations
+
+import sys  # built-in
+
+
+def encode_path(value):
+    if value is None:
+        return None
+    if not isinstance(value, (str, bytes)):
+        value = repr(value) if isinstance(value, type) else repr(type(value))
+    if isinstance(value, bytes):
+        value = value.decode(sys.getfilesystemencoding())
+    return value
+
+
+def encode_list_path(value):
+    return [encode_path(i) for i in value]
+
+
+def run():  # noqa: PLR0912
+    """Print debug data about the virtual environment."""
+    try:
+        from collections import OrderedDict
+    except ImportError:  # pragma: no cover
+        # this is possible if the standard library cannot be accessed
+
+        OrderedDict = dict  # pragma: no cover  # noqa: N806
+    result = OrderedDict([("sys", OrderedDict())])
+    path_keys = (
+        "executable",
+        "_base_executable",
+        "prefix",
+        "base_prefix",
+        "real_prefix",
+        "exec_prefix",
+        "base_exec_prefix",
+        "path",
+        "meta_path",
+    )
+    for key in path_keys:
+        value = getattr(sys, key, None)
+        value = encode_list_path(value) if isinstance(value, list) else encode_path(value)
+        result["sys"][key] = value
+    result["sys"]["fs_encoding"] = sys.getfilesystemencoding()
+    result["sys"]["io_encoding"] = getattr(sys.stdout, "encoding", None)
+    result["version"] = sys.version
+
+    try:
+        import sysconfig
+
+        # https://bugs.python.org/issue22199
+        makefile = getattr(sysconfig, "get_makefile_filename", getattr(sysconfig, "_get_makefile_filename", None))
+        result["makefile_filename"] = encode_path(makefile())
+    except ImportError:
+        pass
+
+    import os  # landmark
+
+    result["os"] = repr(os)
+
+    try:
+        import site  # site
+
+        result["site"] = repr(site)
+    except ImportError as exception:  # pragma: no cover
+        result["site"] = repr(exception)  # pragma: no cover
+
+    try:
+        import datetime  # stdlib
+
+        result["datetime"] = repr(datetime)
+    except ImportError as exception:  # pragma: no cover
+        result["datetime"] = repr(exception)  # pragma: no cover
+
+    try:
+        import math  # stdlib
+
+        result["math"] = repr(math)
+    except ImportError as exception:  # pragma: no cover
+        result["math"] = repr(exception)  # pragma: no cover
+
+    # try to print out, this will validate if other core modules are available (json in this case)
+    try:
+        import json
+
+        result["json"] = repr(json)
+    except ImportError as exception:
+        result["json"] = repr(exception)
+    else:
+        try:
+            content = json.dumps(result, indent=2)
+            sys.stdout.write(content)
+        except (ValueError, TypeError) as exception:  # pragma: no cover
+            sys.stderr.write(repr(exception))
+            sys.stdout.write(repr(result))  # pragma: no cover
+            raise SystemExit(1)  # noqa: TRY200, B904  # pragma: no cover
+
+
+if __name__ == "__main__":
+    run()
diff --git a/venv/lib/python3.10/site-packages/virtualenv/create/describe.py b/venv/lib/python3.10/site-packages/virtualenv/create/describe.py
new file mode 100644
index 0000000..7167e56
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/create/describe.py
@@ -0,0 +1,110 @@
+from __future__ import annotations
+
+from abc import ABCMeta
+from collections import OrderedDict
+from pathlib import Path
+
+from virtualenv.info import IS_WIN
+
+
+class Describe(metaclass=ABCMeta):
+    """Given a host interpreter tell us information about what the created interpreter might look like."""
+
+    suffix = ".exe" if IS_WIN else ""
+
+    def __init__(self, dest, interpreter) -> None:
+        self.interpreter = interpreter
+        self.dest = dest
+        self._stdlib = None
+        self._stdlib_platform = None
+        self._system_stdlib = None
+        self._conf_vars = None
+
+    @property
+    def bin_dir(self):
+        return self.script_dir
+
+    @property
+    def script_dir(self):
+        return self.dest / self.interpreter.install_path("scripts")
+
+    @property
+    def purelib(self):
+        return self.dest / self.interpreter.install_path("purelib")
+
+    @property
+    def platlib(self):
+        return self.dest / self.interpreter.install_path("platlib")
+
+    @property
+    def libs(self):
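+        # platlib and purelib often resolve to the same path; the OrderedDict keys
+        # dedupe them while preserving order.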
+        return list(OrderedDict(((self.platlib, None), (self.purelib, None))).keys())
+
+    @property
+    def stdlib(self):
+        if self._stdlib is None:
+            self._stdlib = Path(self.interpreter.sysconfig_path("stdlib", config_var=self._config_vars))
+        return self._stdlib
+
+    @property
+    def stdlib_platform(self):
+        if self._stdlib_platform is None:
+            self._stdlib_platform = Path(self.interpreter.sysconfig_path("platstdlib", config_var=self._config_vars))
+        return self._stdlib_platform
+
+    @property
+    def _config_vars(self):
+        if self._conf_vars is None:
+            self._conf_vars = self._calc_config_vars(self.dest)
+        return self._conf_vars
+
+    def _calc_config_vars(self, to):
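+        # Any sysconfig var whose value lives under the host prefix is replaced
+        # wholesale by the venv destination, so sysconfig paths resolve inside the venv.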
+        sys_vars = self.interpreter.sysconfig_vars
+        return {k: (to if v is not None and v.startswith(self.interpreter.prefix) else v) for k, v in sys_vars.items()}
+
+    @classmethod
+    def can_describe(cls, interpreter):  # noqa: ARG003
+        """Knows means it knows how the output will look."""
+        return True
+
+    @property
+    def env_name(self):
+        return self.dest.parts[-1]
+
+    @property
+    def exe(self):
+        return self.bin_dir / f"{self.exe_stem()}{self.suffix}"
+
+    @classmethod
+    def exe_stem(cls):
+        """Executable name without suffix - there seems to be no standard way to get this without creating it."""
+        raise NotImplementedError
+
+    def script(self, name):
+        return self.script_dir / f"{name}{self.suffix}"
+
+
+class Python3Supports(Describe, metaclass=ABCMeta):
+    @classmethod
+    def can_describe(cls, interpreter):
+        return interpreter.version_info.major == 3 and super().can_describe(interpreter)  # noqa: PLR2004
+
+
+class PosixSupports(Describe, metaclass=ABCMeta):
+    @classmethod
+    def can_describe(cls, interpreter):
+        return interpreter.os == "posix" and super().can_describe(interpreter)
+
+
+class WindowsSupports(Describe, metaclass=ABCMeta):
+    @classmethod
+    def can_describe(cls, interpreter):
+        return interpreter.os == "nt" and super().can_describe(interpreter)
+
+
+__all__ = [
+    "Describe",
+    "Python3Supports",
+    "PosixSupports",
+    "WindowsSupports",
+]
diff --git a/venv/lib/python3.10/site-packages/virtualenv/create/pyenv_cfg.py b/venv/lib/python3.10/site-packages/virtualenv/create/pyenv_cfg.py
new file mode 100644
index 0000000..03ebfbb
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/create/pyenv_cfg.py
@@ -0,0 +1,64 @@
+from __future__ import annotations
+
+import logging
+from collections import OrderedDict
+
+
+class PyEnvCfg:
+    def __init__(self, content, path) -> None:
+        self.content = content
+        self.path = path
+
+    @classmethod
+    def from_folder(cls, folder):
+        return cls.from_file(folder / "pyvenv.cfg")
+
+    @classmethod
+    def from_file(cls, path):
+        content = cls._read_values(path) if path.exists() else OrderedDict()
+        return PyEnvCfg(content, path)
+
+    @staticmethod
+    def _read_values(path):
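+        # Each line is split on its first "=", e.g. (illustrative) the line
+        # "home = /usr/bin" is stored as content["home"] == "/usr/bin".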
+        content = OrderedDict()
+        for line in path.read_text(encoding="utf-8").splitlines():
+            equals_at = line.index("=")
+            key = line[:equals_at].strip()
+            value = line[equals_at + 1 :].strip()
+            content[key] = value
+        return content
+
+    def write(self):
+        logging.debug("write %s", self.path)
+        text = ""
+        for key, value in self.content.items():
+            line = f"{key} = {value}"
+            logging.debug("\t%s", line)
+            text += line
+            text += "\n"
+        self.path.write_text(text, encoding="utf-8")
+
+    def refresh(self):
+        self.content = self._read_values(self.path)
+        return self.content
+
+    def __setitem__(self, key, value) -> None:
+        self.content[key] = value
+
+    def __getitem__(self, key):
+        return self.content[key]
+
+    def __contains__(self, item) -> bool:
+        return item in self.content
+
+    def update(self, other):
+        self.content.update(other)
+        return self
+
+    def __repr__(self) -> str:
+        return f"{self.__class__.__name__}(path={self.path})"
+
+
+__all__ = [
+    "PyEnvCfg",
+]
diff --git a/venv/lib/python3.10/site-packages/virtualenv/create/via_global_ref/__init__.py b/venv/lib/python3.10/site-packages/virtualenv/create/via_global_ref/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/venv/lib/python3.10/site-packages/virtualenv/create/via_global_ref/_virtualenv.py b/venv/lib/python3.10/site-packages/virtualenv/create/via_global_ref/_virtualenv.py
new file mode 100644
index 0000000..17f73b1
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/create/via_global_ref/_virtualenv.py
@@ -0,0 +1,102 @@
+"""Patches that are applied at runtime to the virtual environment."""
+
+from __future__ import annotations
+
+import os
+import sys
+from contextlib import suppress
+
+VIRTUALENV_PATCH_FILE = os.path.join(__file__)
+
+
+def patch_dist(dist):
+    """
+    Distutils allows the user to configure some arguments via a configuration file:
+    https://docs.python.org/3/install/index.html#distutils-configuration-files.
+
+    Some of these arguments, though, don't make sense in the context of a virtual environment, so let's fix them up.
+    """  # noqa: D205
+    # we cannot allow some install config as that would get packages installed outside of the virtual environment
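+    # A user-level distutils config such as ~/.pydistutils.cfg may contain (illustrative):
+    #   [install]
+    #   prefix = /usr/local
+    # which, if honored, would install packages outside the virtual environment.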
+    old_parse_config_files = dist.Distribution.parse_config_files
+
+    def parse_config_files(self, *args, **kwargs):
+        result = old_parse_config_files(self, *args, **kwargs)
+        install = self.get_option_dict("install")
+
+        if "prefix" in install:  # the prefix governs where to install the libraries
+            install["prefix"] = VIRTUALENV_PATCH_FILE, os.path.abspath(sys.prefix)
+        for base in ("purelib", "platlib", "headers", "scripts", "data"):
+            key = f"install_{base}"
+            if key in install:  # do not allow global configs to hijack venv paths
+                install.pop(key, None)
+        return result
+
+    dist.Distribution.parse_config_files = parse_config_files
+
+
+# Import hook that patches some modules to ignore configuration values that break package installation in case
+# of virtual environments.
+_DISTUTILS_PATCH = "distutils.dist", "setuptools.dist"
+# https://docs.python.org/3/library/importlib.html#setting-up-an-importer
+
+
+class _Finder:
+    """A meta path finder that allows patching the imported distutils modules."""
+
+    fullname = None
+
+    # lock[0] is threading.Lock(), but initialized lazily to avoid importing threading very early at startup,
+    # because there are gevent-based applications that need to be first to import threading by themselves.
+    # See https://github.com/pypa/virtualenv/issues/1895 for details.
+    lock = []  # noqa: RUF012
+
+    def find_spec(self, fullname, path, target=None):  # noqa: ARG002
+        if fullname in _DISTUTILS_PATCH and self.fullname is None:
+            # initialize lock[0] lazily
+            if len(self.lock) == 0:
+                import threading
+
+                lock = threading.Lock()
+                # there is a possibility that two threads T1 and T2 simultaneously run into find_spec,
+                # observe .lock as empty, and both proceed into this initialization. However, due to the GIL, the
+                # list.append() operation is atomic and this way only one of the threads will "win" to put the lock
+                # - that every thread will use - into .lock[0].
+                # https://docs.python.org/3/faq/library.html#what-kinds-of-global-value-mutation-are-thread-safe
+                self.lock.append(lock)
+
+            from functools import partial
+            from importlib.util import find_spec
+
+            with self.lock[0]:
+                self.fullname = fullname
+                try:
+                    spec = find_spec(fullname, path)
+                    if spec is not None:
+                        # https://www.python.org/dev/peps/pep-0451/#how-loading-will-work
+                        is_new_api = hasattr(spec.loader, "exec_module")
+                        func_name = "exec_module" if is_new_api else "load_module"
+                        old = getattr(spec.loader, func_name)
+                        func = self.exec_module if is_new_api else self.load_module
+                        if old is not func:
+                            with suppress(AttributeError):  # C-extension loaders (e.g. zipimporter on <3.7) are read-only
+                                setattr(spec.loader, func_name, partial(func, old))
+                        return spec
+                finally:
+                    self.fullname = None
+        return None
+
+    @staticmethod
+    def exec_module(old, module):
+        old(module)
+        if module.__name__ in _DISTUTILS_PATCH:
+            patch_dist(module)
+
+    @staticmethod
+    def load_module(old, name):
+        module = old(name)
+        if module.__name__ in _DISTUTILS_PATCH:
+            patch_dist(module)
+        return module
+
+
+sys.meta_path.insert(0, _Finder())
diff --git a/venv/lib/python3.10/site-packages/virtualenv/create/via_global_ref/api.py b/venv/lib/python3.10/site-packages/virtualenv/create/via_global_ref/api.py
new file mode 100644
index 0000000..0d36265
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/create/via_global_ref/api.py
@@ -0,0 +1,114 @@
+from __future__ import annotations
+
+import logging
+import os
+from abc import ABCMeta
+from pathlib import Path
+
+from virtualenv.create.creator import Creator, CreatorMeta
+from virtualenv.info import fs_supports_symlink
+
+
+class ViaGlobalRefMeta(CreatorMeta):
+    def __init__(self) -> None:
+        super().__init__()
+        self.copy_error = None
+        self.symlink_error = None
+        if not fs_supports_symlink():
+            self.symlink_error = "the filesystem does not supports symlink"
+
+    @property
+    def can_copy(self):
+        return not self.copy_error
+
+    @property
+    def can_symlink(self):
+        return not self.symlink_error
+
+
+class ViaGlobalRefApi(Creator, metaclass=ABCMeta):
+    def __init__(self, options, interpreter) -> None:
+        super().__init__(options, interpreter)
+        self.symlinks = self._should_symlink(options)
+        self.enable_system_site_package = options.system_site
+
+    @staticmethod
+    def _should_symlink(options):
+        # The priority of where the option is set follows the order: CLI, env var, file, hardcoded default.
+        # If both are set at the same level, copy is preferred over symlink.
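+        # e.g. (illustrative): --copies passed on the CLI wins over a symlinks
+        # preference coming from the env-var or config-file level.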
+        copies, symlinks = getattr(options, "copies", False), getattr(options, "symlinks", False)
+        copy_src, sym_src = options.get_source("copies"), options.get_source("symlinks")
+        for level in ["cli", "env var", "file", "default"]:
+            s_opt = symlinks if sym_src == level else None
+            c_opt = copies if copy_src == level else None
+            if s_opt is True and c_opt is True:
+                return False
+            if s_opt is True:
+                return True
+            if c_opt is True:
+                return False
+        return False  # fallback to copy
+
+    @classmethod
+    def add_parser_arguments(cls, parser, interpreter, meta, app_data):
+        super().add_parser_arguments(parser, interpreter, meta, app_data)
+        parser.add_argument(
+            "--system-site-packages",
+            default=False,
+            action="store_true",
+            dest="system_site",
+            help="give the virtual environment access to the system site-packages dir",
+        )
+        group = parser.add_mutually_exclusive_group()
+        if not meta.can_symlink and not meta.can_copy:
+            msg = "neither symlink or copy method supported"
+            raise RuntimeError(msg)
+        if meta.can_symlink:
+            group.add_argument(
+                "--symlinks",
+                default=True,
+                action="store_true",
+                dest="symlinks",
+                help="try to use symlinks rather than copies, when symlinks are not the default for the platform",
+            )
+        if meta.can_copy:
+            group.add_argument(
+                "--copies",
+                "--always-copy",
+                default=not meta.can_symlink,
+                action="store_true",
+                dest="copies",
+                help="try to use copies rather than symlinks, even when symlinks are the default for the platform",
+            )
+
+    def create(self):
+        self.install_patch()
+
+    def install_patch(self):
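+        # Drop a _virtualenv.pth file into purelib so that site.py imports
+        # _virtualenv (and thereby installs the distutils patch) on every startup.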
+        text = self.env_patch_text()
+        if text:
+            pth = self.purelib / "_virtualenv.pth"
+            logging.debug("create virtualenv import hook file %s", pth)
+            pth.write_text("import _virtualenv", encoding="utf-8")
+            dest_path = self.purelib / "_virtualenv.py"
+            logging.debug("create %s", dest_path)
+            dest_path.write_text(text, encoding="utf-8")
+
+    def env_patch_text(self):
+        """Patch the distutils package to not be derailed by its configuration files."""
+        with self.app_data.ensure_extracted(Path(__file__).parent / "_virtualenv.py") as resolved_path:
+            text = resolved_path.read_text(encoding="utf-8")
+            return text.replace('"__SCRIPT_DIR__"', repr(os.path.relpath(str(self.script_dir), str(self.purelib))))
+
+    def _args(self):
+        return [*super()._args(), ("global", self.enable_system_site_package)]
+
+    def set_pyenv_cfg(self):
+        super().set_pyenv_cfg()
+        self.pyenv_cfg["include-system-site-packages"] = "true" if self.enable_system_site_package else "false"
+
+
+__all__ = [
+    "ViaGlobalRefMeta",
+    "ViaGlobalRefApi",
+]
diff --git a/venv/lib/python3.10/site-packages/virtualenv/create/via_global_ref/builtin/__init__.py b/venv/lib/python3.10/site-packages/virtualenv/create/via_global_ref/builtin/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/venv/lib/python3.10/site-packages/virtualenv/create/via_global_ref/builtin/builtin_way.py b/venv/lib/python3.10/site-packages/virtualenv/create/via_global_ref/builtin/builtin_way.py
new file mode 100644
index 0000000..bb520a3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/create/via_global_ref/builtin/builtin_way.py
@@ -0,0 +1,19 @@
+from __future__ import annotations
+
+from abc import ABCMeta
+
+from virtualenv.create.creator import Creator
+from virtualenv.create.describe import Describe
+
+
+class VirtualenvBuiltin(Creator, Describe, metaclass=ABCMeta):
+    """A creator that does operations itself without delegation, if we can create it we can also describe it."""
+
+    def __init__(self, options, interpreter) -> None:
+        Creator.__init__(self, options, interpreter)
+        Describe.__init__(self, self.dest, interpreter)
+
+
+__all__ = [
+    "VirtualenvBuiltin",
+]
diff --git a/venv/lib/python3.10/site-packages/virtualenv/create/via_global_ref/builtin/cpython/__init__.py b/venv/lib/python3.10/site-packages/virtualenv/create/via_global_ref/builtin/cpython/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/venv/lib/python3.10/site-packages/virtualenv/create/via_global_ref/builtin/cpython/common.py b/venv/lib/python3.10/site-packages/virtualenv/create/via_global_ref/builtin/cpython/common.py
new file mode 100644
index 0000000..7c7abd5
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/create/via_global_ref/builtin/cpython/common.py
@@ -0,0 +1,62 @@
+from __future__ import annotations
+
+from abc import ABCMeta
+from collections import OrderedDict
+from pathlib import Path
+
+from virtualenv.create.describe import PosixSupports, WindowsSupports
+from virtualenv.create.via_global_ref.builtin.ref import RefMust, RefWhen
+from virtualenv.create.via_global_ref.builtin.via_global_self_do import ViaGlobalRefVirtualenvBuiltin
+
+
+class CPython(ViaGlobalRefVirtualenvBuiltin, metaclass=ABCMeta):
+    @classmethod
+    def can_describe(cls, interpreter):
+        return interpreter.implementation == "CPython" and super().can_describe(interpreter)
+
+    @classmethod
+    def exe_stem(cls):
+        return "python"
+
+
+class CPythonPosix(CPython, PosixSupports, metaclass=ABCMeta):
+    """Create a CPython virtual environment on POSIX platforms."""
+
+    @classmethod
+    def _executables(cls, interpreter):
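+        # The venv interpreter is exposed under several names, e.g. (illustrative)
+        # "python", "python3", "python3.10" and the host executable's own name.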
+        host_exe = Path(interpreter.system_executable)
+        major, minor = interpreter.version_info.major, interpreter.version_info.minor
+        targets = OrderedDict((i, None) for i in ["python", f"python{major}", f"python{major}.{minor}", host_exe.name])
+        yield host_exe, list(targets.keys()), RefMust.NA, RefWhen.ANY
+
+
+class CPythonWindows(CPython, WindowsSupports, metaclass=ABCMeta):
+    @classmethod
+    def _executables(cls, interpreter):
+        # symlinking the python executables does not work reliably, always copy instead
+        # - https://bugs.python.org/issue42013
+        # - venv
+        host = cls.host_python(interpreter)
+        for path in (host.parent / n for n in {"python.exe", host.name}):  # noqa: PLC0208
+            yield host, [path.name], RefMust.COPY, RefWhen.ANY
+        # for more info on pythonw.exe see https://stackoverflow.com/a/30313091
+        python_w = host.parent / "pythonw.exe"
+        yield python_w, [python_w.name], RefMust.COPY, RefWhen.ANY
+
+    @classmethod
+    def host_python(cls, interpreter):
+        return Path(interpreter.system_executable)
+
+
+def is_mac_os_framework(interpreter):
+    if interpreter.platform == "darwin":
+        return interpreter.sysconfig_vars.get("PYTHONFRAMEWORK") == "Python3"
+    return False
+
+
+__all__ = [
+    "CPython",
+    "CPythonPosix",
+    "CPythonWindows",
+    "is_mac_os_framework",
+]
diff --git a/venv/lib/python3.10/site-packages/virtualenv/create/via_global_ref/builtin/cpython/cpython3.py b/venv/lib/python3.10/site-packages/virtualenv/create/via_global_ref/builtin/cpython/cpython3.py
new file mode 100644
index 0000000..0b7b023
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/create/via_global_ref/builtin/cpython/cpython3.py
@@ -0,0 +1,131 @@
+from __future__ import annotations
+
+import abc
+import fnmatch
+from itertools import chain
+from operator import methodcaller as method
+from pathlib import Path
+from textwrap import dedent
+
+from virtualenv.create.describe import Python3Supports
+from virtualenv.create.via_global_ref.builtin.ref import PathRefToDest
+from virtualenv.create.via_global_ref.store import is_store_python
+
+from .common import CPython, CPythonPosix, CPythonWindows, is_mac_os_framework
+
+
+class CPython3(CPython, Python3Supports, metaclass=abc.ABCMeta):
+    """CPython 3 or later."""
+
+
+class CPython3Posix(CPythonPosix, CPython3):
+    @classmethod
+    def can_describe(cls, interpreter):
+        return is_mac_os_framework(interpreter) is False and super().can_describe(interpreter)
+
+    def env_patch_text(self):
+        text = super().env_patch_text()
+        if self.pyvenv_launch_patch_active(self.interpreter):
+            text += dedent(
+                """
+                # for https://github.com/python/cpython/pull/9516, see https://github.com/pypa/virtualenv/issues/1704
+                import os
+                if "__PYVENV_LAUNCHER__" in os.environ:
+                    del os.environ["__PYVENV_LAUNCHER__"]
+                """,
+            )
+        return text
+
+    @classmethod
+    def pyvenv_launch_patch_active(cls, interpreter):
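+        # i.e. macOS builds 3.7.0-3.7.7 and 3.8.0-3.8.2, which leaked
+        # __PYVENV_LAUNCHER__ into child processes (see the links above).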
+        ver = interpreter.version_info
+        return interpreter.platform == "darwin" and ((3, 7, 8) > ver >= (3, 7) or (3, 8, 3) > ver >= (3, 8))
+
+
+class CPython3Windows(CPythonWindows, CPython3):
+    """CPython 3 on Windows."""
+
+    @classmethod
+    def setup_meta(cls, interpreter):
+        if is_store_python(interpreter):  # store python is not supported here
+            return None
+        return super().setup_meta(interpreter)
+
+    @classmethod
+    def sources(cls, interpreter):
+        if cls.has_shim(interpreter):
+            refs = cls.executables(interpreter)
+        else:
+            refs = chain(
+                cls.executables(interpreter),
+                cls.dll_and_pyd(interpreter),
+                cls.python_zip(interpreter),
+            )
+        yield from refs
+
+    @classmethod
+    def executables(cls, interpreter):
+        return super().sources(interpreter)
+
+    @classmethod
+    def has_shim(cls, interpreter):
+        return interpreter.version_info.minor >= 7 and cls.shim(interpreter) is not None  # noqa: PLR2004
+
+    @classmethod
+    def shim(cls, interpreter):
+        shim = Path(interpreter.system_stdlib) / "venv" / "scripts" / "nt" / "python.exe"
+        if shim.exists():
+            return shim
+        return None
+
+    @classmethod
+    def host_python(cls, interpreter):
+        if cls.has_shim(interpreter):
+            # starting with CPython 3.7 Windows ships with a venvlauncher.exe that avoids the need for dll/pyd copies
+            # it also means the wrapper must be copied to avoid bugs such as https://bugs.python.org/issue42013
+            return cls.shim(interpreter)
+        return super().host_python(interpreter)
+
+    @classmethod
+    def dll_and_pyd(cls, interpreter):
+        folders = [Path(interpreter.system_executable).parent]
+
+        # May be missing on some Python hosts.
+        # See https://github.com/pypa/virtualenv/issues/2368
+        dll_folder = Path(interpreter.system_prefix) / "DLLs"
+        if dll_folder.is_dir():
+            folders.append(dll_folder)
+
+        for folder in folders:
+            for file in folder.iterdir():
+                if file.suffix in (".pyd", ".dll"):
+                    yield PathRefToDest(file, cls.to_bin)
+
+    @classmethod
+    def python_zip(cls, interpreter):
+        """
+        "python{VERSION}.zip" contains compiled *.pyc std lib packages, where
+        "VERSION" is `py_version_nodot` var from the `sysconfig` module.
+        :see: https://docs.python.org/3/using/windows.html#the-embeddable-package
+        :see: `discovery.py_info.PythonInfo` class (interpreter).
+        :see: `python -m sysconfig` output.
+
+        :note: The embeddable Python distribution for Windows includes
+        "python{VERSION}.zip" and "python{VERSION}._pth" files. The user can
+        move/rename the *zip* file and edit `sys.path` by editing the *_pth* file.
+        Here the `pattern` is used only for the default *zip* file name!
+        """  # noqa: D205
+        pattern = f"*python{interpreter.version_nodot}.zip"
+        matches = fnmatch.filter(interpreter.path, pattern)
+        matched_paths = map(Path, matches)
+        existing_paths = filter(method("exists"), matched_paths)
+        path = next(existing_paths, None)
+        if path is not None:
+            yield PathRefToDest(path, cls.to_bin)
+
+
+__all__ = [
+    "CPython3",
+    "CPython3Posix",
+    "CPython3Windows",
+]
diff --git a/venv/lib/python3.10/site-packages/virtualenv/create/via_global_ref/builtin/cpython/mac_os.py b/venv/lib/python3.10/site-packages/virtualenv/create/via_global_ref/builtin/cpython/mac_os.py
new file mode 100644
index 0000000..42c191f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/create/via_global_ref/builtin/cpython/mac_os.py
@@ -0,0 +1,264 @@
+"""The Apple Framework builds require their own customization."""
+from __future__ import annotations
+
+import logging
+import os
+import struct
+import subprocess
+from abc import ABCMeta, abstractmethod
+from pathlib import Path
+from textwrap import dedent
+
+from virtualenv.create.via_global_ref.builtin.ref import (
+    ExePathRefToDest,
+    PathRefToDest,
+    RefMust,
+)
+
+from .common import CPython, CPythonPosix, is_mac_os_framework
+from .cpython3 import CPython3
+
+
+class CPythonmacOsFramework(CPython, metaclass=ABCMeta):
+    @classmethod
+    def can_describe(cls, interpreter):
+        return is_mac_os_framework(interpreter) and super().can_describe(interpreter)
+
+    def create(self):
+        super().create()
+
+        # change the install_name of the copied python executables
+        target = self.desired_mach_o_image_path()
+        current = self.current_mach_o_image_path()
+        for src in self._sources:
+            if isinstance(src, ExePathRefToDest) and (src.must == RefMust.COPY or not self.symlinks):
+                exes = [self.bin_dir / src.base]
+                if not self.symlinks:
+                    exes.extend(self.bin_dir / a for a in src.aliases)
+                for exe in exes:
+                    fix_mach_o(str(exe), current, target, self.interpreter.max_size)
+
+    @classmethod
+    def _executables(cls, interpreter):
+        for _, targets, must, when in super()._executables(interpreter):
+            # Make sure we use the embedded interpreter inside the framework, even if sys.executable points to the
+            # stub executable in ${sys.prefix}/bin.
+            # See http://groups.google.com/group/python-virtualenv/browse_thread/thread/17cab2f85da75951
+            fixed_host_exe = Path(interpreter.prefix) / "Resources" / "Python.app" / "Contents" / "MacOS" / "Python"
+            yield fixed_host_exe, targets, must, when
+
+    @abstractmethod
+    def current_mach_o_image_path(self):
+        raise NotImplementedError
+
+    @abstractmethod
+    def desired_mach_o_image_path(self):
+        raise NotImplementedError
+
+
+class CPython3macOsFramework(CPythonmacOsFramework, CPython3, CPythonPosix):
+    def current_mach_o_image_path(self):
+        return "@executable_path/../../../../Python3"
+
+    def desired_mach_o_image_path(self):
+        return "@executable_path/../.Python"
+
+    @classmethod
+    def sources(cls, interpreter):
+        yield from super().sources(interpreter)
+
+        # add a symlink to the host python image
+        exe = Path(interpreter.prefix) / "Python3"
+        yield PathRefToDest(exe, dest=lambda self, _: self.dest / ".Python", must=RefMust.SYMLINK)
+
+    @property
+    def reload_code(self):
+        result = super().reload_code
+        return dedent(
+            f"""
+        # the bundled site.py always adds the global site package if we're on python framework build, escape this
+        import sys
+        before = sys._framework
+        try:
+            sys._framework = None
+            {result}
+        finally:
+            sys._framework = before
+        """,
+        )
+
+
+def fix_mach_o(exe, current, new, max_size):
+    """
+    https://en.wikipedia.org/wiki/Mach-O.
+
+    Mach-O, short for Mach object file format, is a file format for executables, object code, shared libraries,
+    dynamically-loaded code, and core dumps. A replacement for the a.out format, Mach-O offers more extensibility and
+    faster access to information in the symbol table.
+
+    Each Mach-O file is made up of one Mach-O header, followed by a series of load commands, followed by one or more
+    segments, each of which contains between 0 and 255 sections. Mach-O uses the REL relocation format to handle
+    references to symbols. When looking up symbols Mach-O uses a two-level namespace that encodes each symbol into an
+    'object/symbol name' pair that is then linearly searched for by first the object and then the symbol name.
+
+    The basic structure—a list of variable-length "load commands" that reference pages of data elsewhere in the file—was
+    also used in the executable file format for Accent. The Accent file format was in turn, based on an idea from Spice
+    Lisp.
+
+    With the introduction of Mac OS X 10.6 platform the Mach-O file underwent a significant modification that causes
+    binaries compiled on a computer running 10.6 or later to be (by default) executable only on computers running Mac
+    OS X 10.6 or later. The difference stems from load commands that the dynamic linker, in previous Mac OS X versions,
+    does not understand. Another significant change to the Mach-O format is the change in how the Link Edit tables
+    (found in the __LINKEDIT section) function. In 10.6 these new Link Edit tables are compressed by removing unused and
+    unneeded bits of information, however Mac OS X 10.5 and earlier cannot read this new Link Edit table format.
+    """
+    try:
+        logging.debug("change Mach-O for %s from %s to %s", exe, current, new)
+        _builtin_change_mach_o(max_size)(exe, current, new)
+    except Exception as e:  # noqa: BLE001
+        logging.warning("Could not call _builtin_change_mac_o: %s. Trying to call install_name_tool instead.", e)
+        try:
+            cmd = ["install_name_tool", "-change", current, new, exe]
+            subprocess.check_call(cmd)  # noqa: S603
+        except Exception:
+            logging.fatal("Could not call install_name_tool -- you must have Apple's development tools installed")
+            raise
+
+
+def _builtin_change_mach_o(maxint):  # noqa: C901
+    MH_MAGIC = 0xFEEDFACE  # noqa: N806
+    MH_CIGAM = 0xCEFAEDFE  # noqa: N806
+    MH_MAGIC_64 = 0xFEEDFACF  # noqa: N806
+    MH_CIGAM_64 = 0xCFFAEDFE  # noqa: N806
+    FAT_MAGIC = 0xCAFEBABE  # noqa: N806
+    BIG_ENDIAN = ">"  # noqa: N806
+    LITTLE_ENDIAN = "<"  # noqa: N806
+    LC_LOAD_DYLIB = 0xC  # noqa: N806
+
+    class FileView:
+        """A proxy for file-like objects that exposes a given view of a file. Modified from macholib."""
+
+        def __init__(self, file_obj, start=0, size=maxint) -> None:
+            if isinstance(file_obj, FileView):
+                self._file_obj = file_obj._file_obj  # noqa: SLF001
+            else:
+                self._file_obj = file_obj
+            self._start = start
+            self._end = start + size
+            self._pos = 0
+
+        def __repr__(self) -> str:
+            return f""
+
+        def tell(self):
+            return self._pos
+
+        def _checkwindow(self, seek_to, op):
+            if not (self._start <= seek_to <= self._end):
+                msg = f"{op} to offset {seek_to:d} is outside window [{self._start:d}, {self._end:d}]"
+                raise OSError(msg)
+
+        def seek(self, offset, whence=0):
+            seek_to = offset
+            if whence == os.SEEK_SET:
+                seek_to += self._start
+            elif whence == os.SEEK_CUR:
+                seek_to += self._start + self._pos
+            elif whence == os.SEEK_END:
+                seek_to += self._end
+            else:
+                msg = f"Invalid whence argument to seek: {whence!r}"
+                raise OSError(msg)
+            self._checkwindow(seek_to, "seek")
+            self._file_obj.seek(seek_to)
+            self._pos = seek_to - self._start
+
+        def write(self, content):
+            here = self._start + self._pos
+            self._checkwindow(here, "write")
+            self._checkwindow(here + len(content), "write")
+            self._file_obj.seek(here, os.SEEK_SET)
+            self._file_obj.write(content)
+            self._pos += len(content)
+
+        def read(self, size=maxint):
+            assert size >= 0  # noqa: S101
+            here = self._start + self._pos
+            self._checkwindow(here, "read")
+            size = min(size, self._end - here)
+            self._file_obj.seek(here, os.SEEK_SET)
+            read_bytes = self._file_obj.read(size)
+            self._pos += len(read_bytes)
+            return read_bytes
+
+    def read_data(file, endian, num=1):
+        """Read a given number of 32-bits unsigned integers from the given file with the given endianness."""
+        res = struct.unpack(endian + "L" * num, file.read(num * 4))
+        if len(res) == 1:
+            return res[0]
+        return res
+
+    def mach_o_change(at_path, what, value):  # noqa: C901
+        """
+        Replace a given name (what) in any LC_LOAD_DYLIB command found in the given binary with a new name (value),
+        provided it's shorter.
+        """  # noqa: D205
+
+        def do_macho(file, bits, endian):
+            # Read Mach-O header (the magic number is assumed read by the caller)
+            cpu_type, cpu_sub_type, file_type, n_commands, size_of_commands, flags = read_data(file, endian, 6)
+            # 64-bits header has one more field.
+            if bits == 64:  # noqa: PLR2004
+                read_data(file, endian)
+            # The header is followed by n commands
+            for _ in range(n_commands):
+                where = file.tell()
+                # Read command header
+                cmd, cmd_size = read_data(file, endian, 2)
+                if cmd == LC_LOAD_DYLIB:
+                    # The first data field in LC_LOAD_DYLIB commands is the offset of the name, starting from the
+                    # beginning of the command.
+                    name_offset = read_data(file, endian)
+                    file.seek(where + name_offset, os.SEEK_SET)
+                    # Read the NUL terminated string
+                    load = file.read(cmd_size - name_offset).decode()
+                    load = load[: load.index("\0")]
+                    # If the string is what is being replaced, overwrite it.
+                    if load == what:
+                        file.seek(where + name_offset, os.SEEK_SET)
+                        file.write(value.encode() + b"\0")
+                # Seek to the next command
+                file.seek(where + cmd_size, os.SEEK_SET)
+
+        def do_file(file, offset=0, size=maxint):
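+            # Fat (universal) binaries embed several Mach-O images; recurse into each
+            # one, otherwise dispatch on the magic number's width and endianness.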
+            file = FileView(file, offset, size)
+            # Read magic number
+            magic = read_data(file, BIG_ENDIAN)
+            if magic == FAT_MAGIC:
+                # Fat binaries contain nfat_arch Mach-O binaries
+                n_fat_arch = read_data(file, BIG_ENDIAN)
+                for _ in range(n_fat_arch):
+                    # Read arch header
+                    cpu_type, cpu_sub_type, offset, size, align = read_data(file, BIG_ENDIAN, 5)
+                    do_file(file, offset, size)
+            elif magic == MH_MAGIC:
+                do_macho(file, 32, BIG_ENDIAN)
+            elif magic == MH_CIGAM:
+                do_macho(file, 32, LITTLE_ENDIAN)
+            elif magic == MH_MAGIC_64:
+                do_macho(file, 64, BIG_ENDIAN)
+            elif magic == MH_CIGAM_64:
+                do_macho(file, 64, LITTLE_ENDIAN)
+
+        assert len(what) >= len(value)  # noqa: S101
+
+        with open(at_path, "r+b") as f:
+            do_file(f)
+
+    return mach_o_change
+
+
+__all__ = [
+    "CPythonmacOsFramework",
+    "CPython3macOsFramework",
+]
diff --git a/venv/lib/python3.10/site-packages/virtualenv/create/via_global_ref/builtin/pypy/__init__.py b/venv/lib/python3.10/site-packages/virtualenv/create/via_global_ref/builtin/pypy/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/venv/lib/python3.10/site-packages/virtualenv/create/via_global_ref/builtin/pypy/common.py b/venv/lib/python3.10/site-packages/virtualenv/create/via_global_ref/builtin/pypy/common.py
new file mode 100644
index 0000000..c7f91e3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/create/via_global_ref/builtin/pypy/common.py
@@ -0,0 +1,53 @@
+from __future__ import annotations
+
+import abc
+from pathlib import Path
+
+from virtualenv.create.via_global_ref.builtin.ref import PathRefToDest, RefMust, RefWhen
+from virtualenv.create.via_global_ref.builtin.via_global_self_do import ViaGlobalRefVirtualenvBuiltin
+
+
+class PyPy(ViaGlobalRefVirtualenvBuiltin, metaclass=abc.ABCMeta):
+    @classmethod
+    def can_describe(cls, interpreter):
+        return interpreter.implementation == "PyPy" and super().can_describe(interpreter)
+
+    @classmethod
+    def _executables(cls, interpreter):
+        host = Path(interpreter.system_executable)
+        targets = sorted(f"{name}{PyPy.suffix}" for name in cls.exe_names(interpreter))
+        yield host, targets, RefMust.NA, RefWhen.ANY
+
+    @classmethod
+    def executables(cls, interpreter):
+        yield from super().sources(interpreter)
+
+    @classmethod
+    def exe_names(cls, interpreter):
+        return {
+            cls.exe_stem(),
+            "python",
+            f"python{interpreter.version_info.major}",
+            f"python{interpreter.version_info.major}.{interpreter.version_info.minor}",
+        }
+
+    @classmethod
+    def sources(cls, interpreter):
+        yield from cls.executables(interpreter)
+        for host in cls._add_shared_libs(interpreter):
+            yield PathRefToDest(host, dest=lambda self, s: self.bin_dir / s.name)
+
+    @classmethod
+    def _add_shared_libs(cls, interpreter):
+        # https://bitbucket.org/pypy/pypy/issue/1922/future-proofing-virtualenv
+        python_dir = Path(interpreter.system_executable).resolve().parent
+        yield from cls._shared_libs(python_dir)
+
+    @classmethod
+    def _shared_libs(cls, python_dir):
+        raise NotImplementedError
+
+
+__all__ = [
+    "PyPy",
+]
diff --git a/venv/lib/python3.10/site-packages/virtualenv/create/via_global_ref/builtin/pypy/pypy3.py b/venv/lib/python3.10/site-packages/virtualenv/create/via_global_ref/builtin/pypy/pypy3.py
new file mode 100644
index 0000000..39f0ed5
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/create/via_global_ref/builtin/pypy/pypy3.py
@@ -0,0 +1,76 @@
+from __future__ import annotations
+
+import abc
+from pathlib import Path
+
+from virtualenv.create.describe import PosixSupports, Python3Supports, WindowsSupports
+from virtualenv.create.via_global_ref.builtin.ref import PathRefToDest
+
+from .common import PyPy
+
+
+class PyPy3(PyPy, Python3Supports, metaclass=abc.ABCMeta):
+    @classmethod
+    def exe_stem(cls):
+        return "pypy3"
+
+    @classmethod
+    def exe_names(cls, interpreter):
+        return super().exe_names(interpreter) | {"pypy"}
+
+
+class PyPy3Posix(PyPy3, PosixSupports):
+    """PyPy 3 on POSIX."""
+
+    @classmethod
+    def _shared_libs(cls, python_dir):
+        # glob for libpypy3-c.so, libpypy3-c.dylib, libpypy3.9-c.so ...
+        return python_dir.glob("libpypy3*.*")
+
+    def to_lib(self, src):
+        return self.dest / "lib" / src.name
+
+    @classmethod
+    def sources(cls, interpreter):
+        yield from super().sources(interpreter)
+        # PyPy >= 3.8 supports a standard prefix installation, whereas older
+        # versions always used a portable/development-style installation.
+        # If this is a standard prefix installation, skip the below:
+        if interpreter.system_prefix == "/usr":
+            return
+        # Also copy/symlink anything under prefix/lib, which, for "portable"
+        # PyPy builds, includes the tk,tcl runtime and a number of shared
+        # objects. In distro-specific builds or on conda this should be empty
+        # (on PyPy3.8+ it will, like on CPython, hold the stdlib).
+        host_lib = Path(interpreter.system_prefix) / "lib"
+        stdlib = Path(interpreter.system_stdlib)
+        if host_lib.exists() and host_lib.is_dir():
+            for path in host_lib.iterdir():
+                if stdlib == path:
+                    # For PyPy3.8+ the stdlib lives in lib/pypy3.8
+                    # We need to avoid creating a symlink to it since that
+                    # will defeat the purpose of a virtualenv
+                    continue
+                yield PathRefToDest(path, dest=cls.to_lib)
+
+
+class Pypy3Windows(PyPy3, WindowsSupports):
+    """PyPy 3 on Windows."""
+
+    @property
+    def less_v37(self):
+        return self.interpreter.version_info.minor < 7  # noqa: PLR2004
+
+    @classmethod
+    def _shared_libs(cls, python_dir):
+        # glob for libpypy*.dll and libffi*.dll
+        for pattern in ["libpypy*.dll", "libffi*.dll"]:
+            srcs = python_dir.glob(pattern)
+            yield from srcs
+
+
+__all__ = [
+    "PyPy3",
+    "PyPy3Posix",
+    "Pypy3Windows",
+]
diff --git a/venv/lib/python3.10/site-packages/virtualenv/create/via_global_ref/builtin/ref.py b/venv/lib/python3.10/site-packages/virtualenv/create/via_global_ref/builtin/ref.py
new file mode 100644
index 0000000..d3dca5d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/create/via_global_ref/builtin/ref.py
@@ -0,0 +1,178 @@
+"""
+Virtual environments in the traditional sense are built as references to the host python. This file allows declarative
+references to elements on the file system, allowing our system to automatically detect what modes it can support given
+the constraints: e.g. can the file system symlink, can the files be read, executed, etc.
+"""  # noqa: D205
+
+from __future__ import annotations
+
+import os
+from abc import ABCMeta, abstractmethod
+from collections import OrderedDict
+from stat import S_IXGRP, S_IXOTH, S_IXUSR
+
+from virtualenv.info import fs_is_case_sensitive, fs_supports_symlink
+from virtualenv.util.path import copy, make_exe, symlink
+
+
+class RefMust:
+    NA = "NA"
+    COPY = "copy"
+    SYMLINK = "symlink"
+
+
+class RefWhen:
+    ANY = "ANY"
+    COPY = "copy"
+    SYMLINK = "symlink"
+
+
+class PathRef(metaclass=ABCMeta):
+    """Base class that checks if a file reference can be symlink/copied."""
+
+    FS_SUPPORTS_SYMLINK = fs_supports_symlink()
+    FS_CASE_SENSITIVE = fs_is_case_sensitive()
+
+    def __init__(self, src, must=RefMust.NA, when=RefWhen.ANY) -> None:
+        self.must = must
+        self.when = when
+        self.src = src
+        try:
+            self.exists = src.exists()
+        except OSError:
+            self.exists = False
+        self._can_read = None if self.exists else False
+        self._can_copy = None if self.exists else False
+        self._can_symlink = None if self.exists else False
+
+    def __repr__(self) -> str:
+        return f"{self.__class__.__name__}(src={self.src})"
+
+    @property
+    def can_read(self):
+        if self._can_read is None:
+            if self.src.is_file():
+                try:
+                    with self.src.open("rb"):
+                        self._can_read = True
+                except OSError:
+                    self._can_read = False
+            else:
+                self._can_read = os.access(str(self.src), os.R_OK)
+        return self._can_read
+
+    @property
+    def can_copy(self):
+        if self._can_copy is None:
+            if self.must == RefMust.SYMLINK:
+                self._can_copy = self.can_symlink
+            else:
+                self._can_copy = self.can_read
+        return self._can_copy
+
+    @property
+    def can_symlink(self):
+        if self._can_symlink is None:
+            if self.must == RefMust.COPY:
+                self._can_symlink = self.can_copy
+            else:
+                self._can_symlink = self.FS_SUPPORTS_SYMLINK and self.can_read
+        return self._can_symlink
+
+    @abstractmethod
+    def run(self, creator, symlinks):
+        raise NotImplementedError
+
+    def method(self, symlinks):
+        if self.must == RefMust.SYMLINK:
+            return symlink
+        if self.must == RefMust.COPY:
+            return copy
+        return symlink if symlinks else copy
+
+
+class ExePathRef(PathRef, metaclass=ABCMeta):
+    """Base class that checks if a executable can be references via symlink/copy."""
+
+    def __init__(self, src, must=RefMust.NA, when=RefWhen.ANY) -> None:
+        super().__init__(src, must, when)
+        self._can_run = None
+
+    @property
+    def can_symlink(self):
+        if self.FS_SUPPORTS_SYMLINK:
+            return self.can_run
+        return False
+
+    @property
+    def can_run(self):
+        if self._can_run is None:
+            mode = self.src.stat().st_mode
+            for key in [S_IXUSR, S_IXGRP, S_IXOTH]:
+                if mode & key:
+                    self._can_run = True
+                    break
+            else:
+                self._can_run = False
+        return self._can_run
+
+
+class PathRefToDest(PathRef):
+    """Link a path on the file system."""
+
+    def __init__(self, src, dest, must=RefMust.NA, when=RefWhen.ANY) -> None:
+        super().__init__(src, must, when)
+        self.dest = dest
+
+    def run(self, creator, symlinks):
+        dest = self.dest(creator, self.src)
+        method = self.method(symlinks)
+        dest_iterable = dest if isinstance(dest, list) else (dest,)
+        if not dest.parent.exists():
+            dest.parent.mkdir(parents=True, exist_ok=True)
+        for dst in dest_iterable:
+            method(self.src, dst)
+
+
+class ExePathRefToDest(PathRefToDest, ExePathRef):
+    """Link a exe path on the file system."""
+
+    def __init__(self, src, targets, dest, must=RefMust.NA, when=RefWhen.ANY) -> None:  # noqa: PLR0913
+        ExePathRef.__init__(self, src, must, when)
+        PathRefToDest.__init__(self, src, dest, must, when)
+        if not self.FS_CASE_SENSITIVE:
+            targets = list(OrderedDict((i.lower(), None) for i in targets).keys())
+        self.base = targets[0]
+        self.aliases = targets[1:]
+        self.dest = dest
+
+    def run(self, creator, symlinks):
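+        # The base name is materialized first; aliases are then created beside it,
+        # e.g. (illustrative) base "python3.10" with aliases "python" and "python3",
+        # each a symlink to the base when symlinking, otherwise a fresh copy.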
+        bin_dir = self.dest(creator, self.src).parent
+        dest = bin_dir / self.base
+        method = self.method(symlinks)
+        method(self.src, dest)
+        if not symlinks:
+            make_exe(dest)
+        for extra in self.aliases:
+            link_file = bin_dir / extra
+            if link_file.exists():
+                link_file.unlink()
+            if symlinks:
+                link_file.symlink_to(self.base)
+            else:
+                copy(self.src, link_file)
+            if not symlinks:
+                make_exe(link_file)
+
+    def __repr__(self) -> str:
+        return f"{self.__class__.__name__}(src={self.src}, alias={self.aliases})"
+
+
+__all__ = [
+    "ExePathRef",
+    "ExePathRefToDest",
+    "PathRefToDest",
+    "PathRef",
+    "RefWhen",
+    "RefMust",
+]
diff --git a/venv/lib/python3.10/site-packages/virtualenv/create/via_global_ref/builtin/via_global_self_do.py b/venv/lib/python3.10/site-packages/virtualenv/create/via_global_ref/builtin/via_global_self_do.py
new file mode 100644
index 0000000..3c0f9cf
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/create/via_global_ref/builtin/via_global_self_do.py
@@ -0,0 +1,118 @@
+from __future__ import annotations
+
+from abc import ABCMeta
+
+from virtualenv.create.via_global_ref.api import ViaGlobalRefApi, ViaGlobalRefMeta
+from virtualenv.create.via_global_ref.builtin.ref import (
+    ExePathRefToDest,
+    RefMust,
+    RefWhen,
+)
+from virtualenv.util.path import ensure_dir
+
+from .builtin_way import VirtualenvBuiltin
+
+
+class BuiltinViaGlobalRefMeta(ViaGlobalRefMeta):
+    def __init__(self) -> None:
+        super().__init__()
+        self.sources = []
+
+
+class ViaGlobalRefVirtualenvBuiltin(ViaGlobalRefApi, VirtualenvBuiltin, metaclass=ABCMeta):
+    def __init__(self, options, interpreter) -> None:
+        super().__init__(options, interpreter)
+        self._sources = getattr(options.meta, "sources", None)  # if we're created as a describer this might be missing
+
+    @classmethod
+    def can_create(cls, interpreter):
+        """By default, all built-in methods assume that if we can describe it we can create it."""
+        # first we must be able to describe it
+        if not cls.can_describe(interpreter):
+            return None
+        meta = cls.setup_meta(interpreter)
+        if meta:
+            cls._sources_can_be_applied(interpreter, meta)
+        return meta
+
+    @classmethod
+    def _sources_can_be_applied(cls, interpreter, meta):
+        for src in cls.sources(interpreter):
+            if src.exists:
+                if meta.can_copy and not src.can_copy:
+                    meta.copy_error = f"cannot copy {src}"
+                if meta.can_symlink and not src.can_symlink:
+                    meta.symlink_error = f"cannot symlink {src}"
+            else:
+                msg = f"missing required file {src}"
+                if src.must == RefMust.NA:
+                    meta.error = msg
+                elif src.must == RefMust.COPY:
+                    meta.copy_error = msg
+                elif src.must == RefMust.SYMLINK:
+                    meta.symlink_error = msg
+            if not meta.can_copy and not meta.can_symlink:
+                meta.error = f"neither copy or symlink supported, copy: {meta.copy_error} symlink: {meta.symlink_error}"
+            if meta.error:
+                break
+            meta.sources.append(src)
+
+    @classmethod
+    def setup_meta(cls, interpreter):  # noqa: ARG003
+        return BuiltinViaGlobalRefMeta()
+
+    @classmethod
+    def sources(cls, interpreter):
+        for host_exe, targets, must, when in cls._executables(interpreter):
+            yield ExePathRefToDest(host_exe, dest=cls.to_bin, targets=targets, must=must, when=when)
+
+    def to_bin(self, src):
+        return self.bin_dir / src.name
+
+    @classmethod
+    def _executables(cls, interpreter):
+        raise NotImplementedError
+
+    def create(self):
+        dirs = self.ensure_directories()
+        for directory in list(dirs):
+            if any(i for i in dirs if i is not directory and directory.parts == i.parts[: len(directory.parts)]):
+                dirs.remove(directory)
+        for directory in sorted(dirs):
+            ensure_dir(directory)
+
+        self.set_pyenv_cfg()
+        self.pyenv_cfg.write()
+        true_system_site = self.enable_system_site_package
+        try:
+            self.enable_system_site_package = False
+            for src in self._sources:
+                if (
+                    src.when == RefWhen.ANY
+                    or (src.when == RefWhen.SYMLINK and self.symlinks is True)
+                    or (src.when == RefWhen.COPY and self.symlinks is False)
+                ):
+                    src.run(self, self.symlinks)
+        finally:
+            if true_system_site != self.enable_system_site_package:
+                self.enable_system_site_package = true_system_site
+        super().create()
+
+    def ensure_directories(self):
+        return {self.dest, self.bin_dir, self.script_dir, self.stdlib} | set(self.libs)
+
+    def set_pyenv_cfg(self):
+        """
+        We directly inject the base prefix and base exec prefix to avoid site.py needing to discover these
+        from home (which usually is done within the interpreter itself).
+        """  # noqa: D205
+        super().set_pyenv_cfg()
+        self.pyenv_cfg["base-prefix"] = self.interpreter.system_prefix
+        self.pyenv_cfg["base-exec-prefix"] = self.interpreter.system_exec_prefix
+        self.pyenv_cfg["base-executable"] = self.interpreter.system_executable
+
+
+__all__ = [
+    "BuiltinViaGlobalRefMeta",
+    "ViaGlobalRefVirtualenvBuiltin",
+]
diff --git a/venv/lib/python3.10/site-packages/virtualenv/create/via_global_ref/store.py b/venv/lib/python3.10/site-packages/virtualenv/create/via_global_ref/store.py
new file mode 100644
index 0000000..4be6689
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/create/via_global_ref/store.py
@@ -0,0 +1,26 @@
+from __future__ import annotations
+
+from pathlib import Path
+
+
+def handle_store_python(meta, interpreter):
+    if is_store_python(interpreter):
+        meta.symlink_error = "Windows Store Python does not support virtual environments via symlink"
+    return meta
+
+
+def is_store_python(interpreter):
+    parts = Path(interpreter.system_executable).parts
+    return (
+        len(parts) > 4  # noqa: PLR2004
+        and parts[-4] == "Microsoft"
+        and parts[-3] == "WindowsApps"
+        and parts[-2].startswith("PythonSoftwareFoundation.Python.3.")
+        and parts[-1].startswith("python")
+    )
+
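+# Illustrative note (an assumption, not exercised by this repo): a Store interpreter's
+# sys.executable resolves to a path such as
+#   .../Microsoft/WindowsApps/PythonSoftwareFoundation.Python.3.10_xxx/python.exe
+# which is exactly the four trailing path components inspected above.
+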
+
+__all__ = [
+    "handle_store_python",
+    "is_store_python",
+]
diff --git a/venv/lib/python3.10/site-packages/virtualenv/create/via_global_ref/venv.py b/venv/lib/python3.10/site-packages/virtualenv/create/via_global_ref/venv.py
new file mode 100644
index 0000000..cc73a6f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/create/via_global_ref/venv.py
@@ -0,0 +1,100 @@
+from __future__ import annotations
+
+import logging
+from copy import copy
+
+from virtualenv.create.via_global_ref.store import handle_store_python
+from virtualenv.discovery.py_info import PythonInfo
+from virtualenv.util.error import ProcessCallFailedError
+from virtualenv.util.path import ensure_dir
+from virtualenv.util.subprocess import run_cmd
+
+from .api import ViaGlobalRefApi, ViaGlobalRefMeta
+from .builtin.pypy.pypy3 import Pypy3Windows
+
+
+class Venv(ViaGlobalRefApi):
+    def __init__(self, options, interpreter) -> None:
+        self.describe = options.describe
+        super().__init__(options, interpreter)
+        current = PythonInfo.current()
+        self.can_be_inline = interpreter is current and interpreter.executable == interpreter.system_executable
+        self._context = None
+
+    def _args(self):
+        return super()._args() + ([("describe", self.describe.__class__.__name__)] if self.describe else [])
+
+    @classmethod
+    def can_create(cls, interpreter):
+        if interpreter.has_venv:
+            meta = ViaGlobalRefMeta()
+            if interpreter.platform == "win32":
+                meta = handle_store_python(meta, interpreter)
+            return meta
+        return None
+
+    def create(self):
+        if self.can_be_inline:
+            self.create_inline()
+        else:
+            self.create_via_sub_process()
+        for lib in self.libs:
+            ensure_dir(lib)
+        super().create()
+        self.executables_for_win_pypy_less_v37()
+
+    def executables_for_win_pypy_less_v37(self):
+        """
+        PyPy <= 3.6 (v7.3.3) for Windows ships only pypy3.exe and pypy3w.exe.
+        Venv does not handle non-existent exe sources, e.g. python.exe, so this
+        patch does it.
+        """  # noqa: D205
+        creator = self.describe
+        if isinstance(creator, Pypy3Windows) and creator.less_v37:
+            for exe in creator.executables(self.interpreter):
+                exe.run(creator, self.symlinks)
+
+    def create_inline(self):
+        from venv import EnvBuilder
+
+        builder = EnvBuilder(
+            system_site_packages=self.enable_system_site_package,
+            clear=False,
+            symlinks=self.symlinks,
+            with_pip=False,
+        )
+        builder.create(str(self.dest))
+
+    def create_via_sub_process(self):
+        cmd = self.get_host_create_cmd()
+        logging.info("using host built-in venv to create via %s", " ".join(cmd))
+        code, out, err = run_cmd(cmd)
+        if code != 0:
+            raise ProcessCallFailedError(code, out, err, cmd)
+
+    def get_host_create_cmd(self):
+        cmd = [self.interpreter.system_executable, "-m", "venv", "--without-pip"]
+        if self.enable_system_site_package:
+            cmd.append("--system-site-packages")
+        cmd.append("--symlinks" if self.symlinks else "--copies")
+        cmd.append(str(self.dest))
+        return cmd
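+        # e.g. (illustrative, assuming a POSIX host at /usr/bin/python3 and copy mode):
+        #   ['/usr/bin/python3', '-m', 'venv', '--without-pip', '--copies', '/tmp/demo-venv']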
+
+    def set_pyenv_cfg(self):
+        # prefer venv options over ours, but keep our extra keys
+        venv_content = copy(self.pyenv_cfg.refresh())
+        super().set_pyenv_cfg()
+        self.pyenv_cfg.update(venv_content)
+
+    def __getattribute__(self, item):
+        describe = object.__getattribute__(self, "describe")
+        if describe is not None and hasattr(describe, item):
+            element = getattr(describe, item)
+            if not callable(element) or item in ("script",):
+                return element
+        return object.__getattribute__(self, item)
+
+
+__all__ = [
+    "Venv",
+]
diff --git a/venv/lib/python3.10/site-packages/virtualenv/discovery/__init__.py b/venv/lib/python3.10/site-packages/virtualenv/discovery/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/venv/lib/python3.10/site-packages/virtualenv/discovery/builtin.py b/venv/lib/python3.10/site-packages/virtualenv/discovery/builtin.py
new file mode 100644
index 0000000..92d96a9
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/discovery/builtin.py
@@ -0,0 +1,182 @@
+from __future__ import annotations
+
+import logging
+import os
+import sys
+
+from virtualenv.info import IS_WIN
+
+from .discover import Discover
+from .py_info import PythonInfo
+from .py_spec import PythonSpec
+
+
+class Builtin(Discover):
+    def __init__(self, options) -> None:
+        super().__init__(options)
+        self.python_spec = options.python if options.python else [sys.executable]
+        self.app_data = options.app_data
+        self.try_first_with = options.try_first_with
+
+    @classmethod
+    def add_parser_arguments(cls, parser):
+        parser.add_argument(
+            "-p",
+            "--python",
+            dest="python",
+            metavar="py",
+            type=str,
+            action="append",
+            default=[],
+            help="interpreter based on what to create environment (path/identifier) "
+            "- by default use the interpreter where the tool is installed - first found wins",
+        )
+        parser.add_argument(
+            "--try-first-with",
+            dest="try_first_with",
+            metavar="py_exe",
+            type=str,
+            action="append",
+            default=[],
+            help="try first these interpreters before starting the discovery",
+        )
+
+    def run(self):
+        for python_spec in self.python_spec:
+            result = get_interpreter(python_spec, self.try_first_with, self.app_data, self._env)
+            if result is not None:
+                return result
+        return None
+
+    def __repr__(self) -> str:
+        spec = self.python_spec[0] if len(self.python_spec) == 1 else self.python_spec
+        return f"{self.__class__.__name__} discover of python_spec={spec!r}"
+
+
+def get_interpreter(key, try_first_with, app_data=None, env=None):
+    spec = PythonSpec.from_string_spec(key)
+    logging.info("find interpreter for spec %r", spec)
+    proposed_paths = set()
+    env = os.environ if env is None else env
+    for interpreter, impl_must_match in propose_interpreters(spec, try_first_with, app_data, env):
+        key = interpreter.system_executable, impl_must_match
+        if key in proposed_paths:
+            continue
+        logging.info("proposed %s", interpreter)
+        if interpreter.satisfies(spec, impl_must_match):
+            logging.debug("accepted %s", interpreter)
+            return interpreter
+        proposed_paths.add(key)
+    return None
+
+
+def propose_interpreters(spec, try_first_with, app_data, env=None):  # noqa: C901, PLR0912
+    # 0. try first with the interpreters passed via --try-first-with
+    env = os.environ if env is None else env
+    for py_exe in try_first_with:
+        path = os.path.abspath(py_exe)
+        try:
+            os.lstat(path)  # Windows Store Python does not work with os.path.exists, but does with os.lstat
+        except OSError:
+            pass
+        else:
+            yield PythonInfo.from_exe(path, app_data, env=env), True  # path is already absolute
+
+    # 1. if it's a path and exists
+    if spec.path is not None:
+        try:
+            os.lstat(spec.path)  # Windows Store Python does not work with os.path.exists, but does with os.lstat
+        except OSError:
+            if spec.is_abs:
+                raise
+        else:
+            yield PythonInfo.from_exe(os.path.abspath(spec.path), app_data, env=env), True
+        if spec.is_abs:
+            return
+    else:
+        # 2. otherwise try with the current interpreter
+        yield PythonInfo.current_system(app_data), True
+
+        # 3. otherwise fall back to platform default logic
+        if IS_WIN:
+            from .windows import propose_interpreters
+
+            for interpreter in propose_interpreters(spec, app_data, env):
+                yield interpreter, True
+    # finally just find on PATH; the order of entries matters (as the candidates are less easy for the end user to control)
+    paths = get_paths(env)
+    tested_exes = set()
+    for pos, path in enumerate(paths):
+        path_str = str(path)
+        logging.debug(LazyPathDump(pos, path_str, env))
+        for candidate, match in possible_specs(spec):
+            found = check_path(candidate, path_str)
+            if found is not None:
+                exe = os.path.abspath(found)
+                if exe not in tested_exes:
+                    tested_exes.add(exe)
+                    interpreter = PathPythonInfo.from_exe(exe, app_data, raise_on_error=False, env=env)
+                    if interpreter is not None:
+                        yield interpreter, match
+
+
+def get_paths(env):
+    path = env.get("PATH", None)
+    if path is None:
+        try:
+            path = os.confstr("CS_PATH")
+        except (AttributeError, ValueError):
+            path = os.defpath
+    return [] if not path else [p for p in path.split(os.pathsep) if os.path.exists(p)]
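+    # e.g. (illustrative) env={"PATH": "/usr/local/bin:/usr/bin"} -> ["/usr/local/bin", "/usr/bin"],
+    # keeping only the entries that exist on disk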
+
+
+class LazyPathDump:
+    def __init__(self, pos, path, env) -> None:
+        self.pos = pos
+        self.path = path
+        self.env = env
+
+    def __repr__(self) -> str:
+        content = f"discover PATH[{self.pos}]={self.path}"
+        if self.env.get("_VIRTUALENV_DEBUG"):  # this is the over the board debug
+            content += " with =>"
+            for file_name in os.listdir(self.path):
+                try:
+                    file_path = os.path.join(self.path, file_name)
+                    if os.path.isdir(file_path) or not os.access(file_path, os.X_OK):
+                        continue
+                except OSError:
+                    pass
+                content += " "
+                content += file_name
+        return content
+
+
+def check_path(candidate, path):
+    _, ext = os.path.splitext(candidate)
+    if sys.platform == "win32" and ext != ".exe":
+        candidate = candidate + ".exe"
+    if os.path.isfile(candidate):
+        return candidate
+    candidate = os.path.join(path, candidate)
+    if os.path.isfile(candidate):
+        return candidate
+    return None
+
+
+def possible_specs(spec):
+    # 4. then maybe it's something exact on PATH - if it came from a direct lookup the implementation no longer counts
+    yield spec.str_spec, False
+    # 5. or from the spec we can deduce a name on PATH that matches
+    yield from spec.generate_names()
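+    # Illustratively (assuming the spec was built from "python3.10") this yields
+    # ("python3.10", False) first, then from generate_names(): ("python3.10", True),
+    # ("python3", True) and ("python", True).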
+
+
+class PathPythonInfo(PythonInfo):
+    """python info from path."""
+
+
+__all__ = [
+    "get_interpreter",
+    "Builtin",
+    "PathPythonInfo",
+]
diff --git a/venv/lib/python3.10/site-packages/virtualenv/discovery/cached_py_info.py b/venv/lib/python3.10/site-packages/virtualenv/discovery/cached_py_info.py
new file mode 100644
index 0000000..19e938f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/discovery/cached_py_info.py
@@ -0,0 +1,177 @@
+"""
+
+We acquire the python information by running an interrogation script via subprocess trigger. This operation is not
+cheap, especially not on Windows. To not have to pay this hefty cost every time we apply multiple levels of
+caching.
+"""  # noqa: D205
+
+from __future__ import annotations
+
+import logging
+import os
+import random
+import sys
+from collections import OrderedDict
+from pathlib import Path
+from shlex import quote
+from string import ascii_lowercase, ascii_uppercase, digits
+from subprocess import Popen
+
+from virtualenv.app_data import AppDataDisabled
+from virtualenv.discovery.py_info import PythonInfo
+from virtualenv.util.subprocess import subprocess
+
+_CACHE = OrderedDict()
+_CACHE[Path(sys.executable)] = PythonInfo()
+
+
+def from_exe(cls, app_data, exe, env=None, raise_on_error=True, ignore_cache=False):  # noqa: FBT002, PLR0913
+    env = os.environ if env is None else env
+    result = _get_from_cache(cls, app_data, exe, env, ignore_cache=ignore_cache)
+    if isinstance(result, Exception):
+        if raise_on_error:
+            raise result
+        logging.info("%s", result)
+        result = None
+    return result
+
+
+def _get_from_cache(cls, app_data, exe, env, ignore_cache=True):  # noqa: FBT002
+    # note here we cannot resolve symlinks, as a symlink may trigger different prefix information if there's a
+    # pyvenv.cfg somewhere alongside it on python3.5+
+    exe_path = Path(exe)
+    if not ignore_cache and exe_path in _CACHE:  # check in the in-memory cache
+        result = _CACHE[exe_path]
+    else:  # otherwise go through the app data cache
+        py_info = _get_via_file_cache(cls, app_data, exe_path, exe, env)
+        result = _CACHE[exe_path] = py_info
+    # regardless of whether it came from the file or the in-memory cache, fix up the original executable location
+    if isinstance(result, PythonInfo):
+        result.executable = exe
+    return result
+
+
+def _get_via_file_cache(cls, app_data, path, exe, env):
+    path_text = str(path)
+    try:
+        path_modified = path.stat().st_mtime
+    except OSError:
+        path_modified = -1
+    if app_data is None:
+        app_data = AppDataDisabled()
+    py_info, py_info_store = None, app_data.py_info(path)
+    with py_info_store.locked():
+        if py_info_store.exists():  # if it exists and matches, load it
+            data = py_info_store.read()
+            of_path, of_st_mtime, of_content = data["path"], data["st_mtime"], data["content"]
+            if of_path == path_text and of_st_mtime == path_modified:
+                py_info = cls._from_dict(of_content.copy())
+                sys_exe = py_info.system_executable
+                if sys_exe is not None and not os.path.exists(sys_exe):
+                    py_info_store.remove()
+                    py_info = None
+            else:
+                py_info_store.remove()
+        if py_info is None:  # if not loaded, run and save
+            failure, py_info = _run_subprocess(cls, exe, app_data, env)
+            if failure is None:
+                data = {"st_mtime": path_modified, "path": path_text, "content": py_info._to_dict()}  # noqa: SLF001
+                py_info_store.write(data)
+            else:
+                py_info = failure
+    return py_info
+
+
+COOKIE_LENGTH: int = 32
+
+
+def gen_cookie():
+    return "".join(
+        random.choice(f"{ascii_lowercase}{ascii_uppercase}{digits}") for _ in range(COOKIE_LENGTH)  # noqa: S311
+    )
+
+
+def _run_subprocess(cls, exe, app_data, env):
+    py_info_script = Path(os.path.abspath(__file__)).parent / "py_info.py"
+    # Cookies let us split the serialized output of the info-collecting script from anything else that ends up on
+    # stdout. The right way to deal with this would be to create an anonymous pipe, pass its descriptor to the
+    # child and write to that, but AFAIK the available approaches are either not cross-platform or too big to
+    # implement and are not in the stdlib. So the easiest and shortest solution is the cookies.
+    # We generate pseudorandom cookies because they are easy to implement and avoid breakage when module source
+    # code gets echoed to stdout, e.g. by debug-output libraries. We reverse the cookies so that the cookie
+    # variables' own values appearing in debug output cannot break the parsing.
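+    # Illustratively, stdout ends up as "<noise><reversed start cookie><json><reversed end cookie><noise>",
+    # and the slicing below recovers the json part while forwarding the noise to our own stdout.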
+
+    start_cookie = gen_cookie()
+    end_cookie = gen_cookie()
+    with app_data.ensure_extracted(py_info_script) as py_info_script:
+        cmd = [exe, str(py_info_script), start_cookie, end_cookie]
+        # prevent sys.prefix from leaking into the child process - see https://bugs.python.org/issue22490
+        env = env.copy()
+        env.pop("__PYVENV_LAUNCHER__", None)
+        logging.debug("get interpreter info via cmd: %s", LogCmd(cmd))
+        try:
+            process = Popen(
+                cmd,  # noqa: S603
+                universal_newlines=True,
+                stdin=subprocess.PIPE,
+                stderr=subprocess.PIPE,
+                stdout=subprocess.PIPE,
+                env=env,
+                encoding="utf-8",
+            )
+            out, err = process.communicate()
+            code = process.returncode
+        except OSError as os_error:
+            out, err, code = "", os_error.strerror, os_error.errno
+    result, failure = None, None
+    if code == 0:
+        out_starts = out.find(start_cookie[::-1])
+
+        if out_starts > -1:
+            pre_cookie = out[:out_starts]
+
+            if pre_cookie:
+                sys.stdout.write(pre_cookie)
+
+            out = out[out_starts + COOKIE_LENGTH :]
+
+        out_ends = out.find(end_cookie[::-1])
+
+        if out_ends > -1:
+            post_cookie = out[out_ends + COOKIE_LENGTH :]
+
+            if post_cookie:
+                sys.stdout.write(post_cookie)
+
+            out = out[:out_ends]
+
+        result = cls._from_json(out)
+        result.executable = exe  # keep original executable as this may contain initialization code
+    else:
+        msg = f"{exe} with code {code}{f' out: {out!r}' if out else ''}{f' err: {err!r}' if err else ''}"
+        failure = RuntimeError(f"failed to query {msg}")
+    return failure, result
+
+
+class LogCmd:
+    def __init__(self, cmd, env=None) -> None:
+        self.cmd = cmd
+        self.env = env
+
+    def __repr__(self) -> str:
+        cmd_repr = " ".join(quote(str(c)) for c in self.cmd)
+        if self.env is not None:
+            cmd_repr = f"{cmd_repr} env of {self.env!r}"
+        return cmd_repr
+
+
+def clear(app_data):
+    app_data.py_info_clear()
+    _CACHE.clear()
+
+
+__all__ = [
+    "from_exe",
+    "clear",
+    "LogCmd",
+]
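+
+# Minimal usage sketch (an assumption, mirroring how the discovery module drives this one):
+#   from virtualenv.discovery.py_info import PythonInfo
+#   info = from_exe(PythonInfo, app_data=None, exe=sys.executable)  # app_data=None -> AppDataDisabled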
diff --git a/venv/lib/python3.10/site-packages/virtualenv/discovery/discover.py b/venv/lib/python3.10/site-packages/virtualenv/discovery/discover.py
new file mode 100644
index 0000000..b74ee6c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/discovery/discover.py
@@ -0,0 +1,48 @@
+from __future__ import annotations
+
+from abc import ABCMeta, abstractmethod
+
+
+class Discover(metaclass=ABCMeta):
+    """Discover and provide the requested Python interpreter."""
+
+    @classmethod
+    def add_parser_arguments(cls, parser):
+        """
+        Add CLI arguments for this discovery mechanisms.
+
+        :param parser: the CLI parser
+        """
+        raise NotImplementedError
+
+    def __init__(self, options) -> None:
+        """
+        Create a new discovery mechanism.
+
+        :param options: the parsed options as defined within :meth:`add_parser_arguments`
+        """
+        self._has_run = False
+        self._interpreter = None
+        self._env = options.env
+
+    @abstractmethod
+    def run(self):
+        """
+        Discovers an interpreter.
+
+        :return: the interpreter ready to use for virtual environment creation
+        """
+        raise NotImplementedError
+
+    @property
+    def interpreter(self):
+        """:return: the interpreter as returned by :meth:`run`, cached"""
+        if self._has_run is False:
+            self._interpreter = self.run()
+            self._has_run = True
+        return self._interpreter
+
+
+__all__ = [
+    "Discover",
+]
diff --git a/venv/lib/python3.10/site-packages/virtualenv/discovery/py_info.py b/venv/lib/python3.10/site-packages/virtualenv/discovery/py_info.py
new file mode 100644
index 0000000..a32c3f2
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/discovery/py_info.py
@@ -0,0 +1,576 @@
+"""
+The PythonInfo contains information about a concrete instance of a Python interpreter.
+
+Note: this file is also used to query target interpreters, so can only use standard library methods
+"""
+
+from __future__ import annotations
+
+import json
+import logging
+import os
+import platform
+import re
+import sys
+import sysconfig
+import warnings
+from collections import OrderedDict, namedtuple
+from string import digits
+
+VersionInfo = namedtuple("VersionInfo", ["major", "minor", "micro", "releaselevel", "serial"])
+
+
+def _get_path_extensions():
+    return list(OrderedDict.fromkeys(["", *os.environ.get("PATHEXT", "").lower().split(os.pathsep)]))
+
+
+EXTENSIONS = _get_path_extensions()
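+# e.g. (illustrative) [""] on POSIX; ["", ".com", ".exe", ".bat", ...] on Windows via PATHEXT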
+_CONF_VAR_RE = re.compile(r"\{\w+\}")
+
+
+class PythonInfo:
+    """Contains information for a Python interpreter."""
+
+    def __init__(self) -> None:  # noqa: PLR0915
+        def abs_path(v):
+            return None if v is None else os.path.abspath(v)  # normalize relative elements in the path (e.g. ..)
+
+        # qualifies the python
+        self.platform = sys.platform
+        self.implementation = platform.python_implementation()
+        if self.implementation == "PyPy":
+            self.pypy_version_info = tuple(sys.pypy_version_info)
+
+        # this was a plain tuple in earlier versions and a struct sequence later; unify to our own named tuple
+        self.version_info = VersionInfo(*sys.version_info)
+        self.architecture = 64 if sys.maxsize > 2**32 else 32
+
+        # Used to determine some file names.
+        # See `CPython3Windows.python_zip()`.
+        self.version_nodot = sysconfig.get_config_var("py_version_nodot")
+
+        self.version = sys.version
+        self.os = os.name
+
+        # information about the prefix - determines python home
+        self.prefix = abs_path(getattr(sys, "prefix", None))  # prefix we think
+        self.base_prefix = abs_path(getattr(sys, "base_prefix", None))  # venv
+        self.real_prefix = abs_path(getattr(sys, "real_prefix", None))  # old virtualenv
+
+        # information about the exec prefix - dynamic stdlib modules
+        self.base_exec_prefix = abs_path(getattr(sys, "base_exec_prefix", None))
+        self.exec_prefix = abs_path(getattr(sys, "exec_prefix", None))
+
+        self.executable = abs_path(sys.executable)  # the executable we were invoked via
+        self.original_executable = abs_path(self.executable)  # the executable as known by the interpreter
+        self.system_executable = self._fast_get_system_executable()  # the executable we are based on (if available)
+
+        try:
+            __import__("venv")
+            has = True
+        except ImportError:
+            has = False
+        self.has_venv = has
+        self.path = sys.path
+        self.file_system_encoding = sys.getfilesystemencoding()
+        self.stdout_encoding = getattr(sys.stdout, "encoding", None)
+
+        scheme_names = sysconfig.get_scheme_names()
+
+        if "venv" in scheme_names:
+            self.sysconfig_scheme = "venv"
+            self.sysconfig_paths = {
+                i: sysconfig.get_path(i, expand=False, scheme=self.sysconfig_scheme) for i in sysconfig.get_path_names()
+            }
+            # we cannot use distutils at all if the "venv" scheme exists, distutils doesn't know it
+            self.distutils_install = {}
+        # debian / ubuntu python 3.10 without `python3-distutils` will report
+        # mangled `local/bin` / etc. names for the default prefix
+        # intentionally select `posix_prefix` which is the unaltered posix-like paths
+        elif sys.version_info[:2] == (3, 10) and "deb_system" in scheme_names:
+            self.sysconfig_scheme = "posix_prefix"
+            self.sysconfig_paths = {
+                i: sysconfig.get_path(i, expand=False, scheme=self.sysconfig_scheme) for i in sysconfig.get_path_names()
+            }
+            # we cannot use distutils at all if the "venv" scheme exists, distutils doesn't know it
+            self.distutils_install = {}
+        else:
+            self.sysconfig_scheme = None
+            self.sysconfig_paths = {i: sysconfig.get_path(i, expand=False) for i in sysconfig.get_path_names()}
+            self.distutils_install = self._distutils_install().copy()
+
+        # https://bugs.python.org/issue22199
+        makefile = getattr(sysconfig, "get_makefile_filename", getattr(sysconfig, "_get_makefile_filename", None))
+        self.sysconfig = {
+            k: v
+            for k, v in [
+                # a list of content to store from sysconfig
+                ("makefile_filename", makefile()),
+            ]
+            if k is not None
+        }
+
+        config_var_keys = set()
+        for element in self.sysconfig_paths.values():
+            for k in _CONF_VAR_RE.findall(element):
+                config_var_keys.add(k[1:-1])
+        config_var_keys.add("PYTHONFRAMEWORK")
+
+        self.sysconfig_vars = {i: sysconfig.get_config_var(i or "") for i in config_var_keys}
+
+        confs = {
+            k: (self.system_prefix if v is not None and v.startswith(self.prefix) else v)
+            for k, v in self.sysconfig_vars.items()
+        }
+        self.system_stdlib = self.sysconfig_path("stdlib", confs)
+        self.system_stdlib_platform = self.sysconfig_path("platstdlib", confs)
+        self.max_size = getattr(sys, "maxsize", getattr(sys, "maxint", None))
+        self._creators = None
+
+    def _fast_get_system_executable(self):
+        """Try to get the system executable by just looking at properties."""
+        if self.real_prefix or (
+            self.base_prefix is not None and self.base_prefix != self.prefix
+        ):  # if this is a virtual environment
+            if self.real_prefix is None:
+                base_executable = getattr(sys, "_base_executable", None)  # some platforms may set this to help us
+                if base_executable is not None:  # noqa: SIM102 # use the saved system executable if present
+                    if sys.executable != base_executable:  # we know we're in a virtual environment, cannot be us
+                        if os.path.exists(base_executable):
+                            return base_executable
+                        # Python may return "python" because it was invoked from the POSIX virtual environment
+                        # however some installs/distributions do not provide a version-less "python" binary in
+                        # the system install location (see PEP 394), so try to fall back to a versioned binary.
+                        #
+                        # Gate this to Python 3.11 as `sys._base_executable` path resolution is now relative to
+                        # the 'home' key from pyvenv.cfg which often points to the system install location.
+                        major, minor = self.version_info.major, self.version_info.minor
+                        if self.os == "posix" and (major, minor) >= (3, 11):
+                            # search relative to the directory of sys._base_executable
+                            base_dir = os.path.dirname(base_executable)
+                            for base_executable in [
+                                os.path.join(base_dir, exe) for exe in (f"python{major}", f"python{major}.{minor}")
+                            ]:
+                                if os.path.exists(base_executable):
+                                    return base_executable
+            return None  # in this case we just can't tell easily without poking around FS and calling them, bail
+        # if we're not in a virtual environment, this is already a system python, so return the original executable
+        # note we must choose the original and not the pure executable as shim scripts might throw us off
+        return self.original_executable
+
+    def install_path(self, key):
+        result = self.distutils_install.get(key)
+        if result is None:  # use sysconfig if sysconfig_scheme is set or distutils is unavailable
+            # set prefixes to empty => result is relative from cwd
+            prefixes = self.prefix, self.exec_prefix, self.base_prefix, self.base_exec_prefix
+            config_var = {k: "" if v in prefixes else v for k, v in self.sysconfig_vars.items()}
+            result = self.sysconfig_path(key, config_var=config_var).lstrip(os.sep)
+        return result
+
+    @staticmethod
+    def _distutils_install():
+        # use distutils primarily because that's what pip does
+        # https://github.com/pypa/pip/blob/main/src/pip/_internal/locations.py#L95
+        # note here we don't import Distribution directly to allow setuptools to patch it
+        with warnings.catch_warnings():  # disable warning for PEP-632
+            warnings.simplefilter("ignore")
+            try:
+                from distutils import dist
+                from distutils.command.install import SCHEME_KEYS
+            except ImportError:  # if removed or not installed ignore
+                return {}
+
+        d = dist.Distribution({"script_args": "--no-user-cfg"})  # conf files not parsed so they do not hijack paths
+        if hasattr(sys, "_framework"):
+            sys._framework = None  # disable macOS static paths for framework  # noqa: SLF001
+
+        with warnings.catch_warnings():  # disable warning for PEP-632
+            warnings.simplefilter("ignore")
+            i = d.get_command_obj("install", create=True)
+
+        i.prefix = os.sep  # paths generated are relative to prefix that contains the path sep, this makes it relative
+        i.finalize_options()
+        return {key: (getattr(i, f"install_{key}")[1:]).lstrip(os.sep) for key in SCHEME_KEYS}
+
+    @property
+    def version_str(self):
+        return ".".join(str(i) for i in self.version_info[0:3])
+
+    @property
+    def version_release_str(self):
+        return ".".join(str(i) for i in self.version_info[0:2])
+
+    @property
+    def python_name(self):
+        version_info = self.version_info
+        return f"python{version_info.major}.{version_info.minor}"
+
+    @property
+    def is_old_virtualenv(self):
+        return self.real_prefix is not None
+
+    @property
+    def is_venv(self):
+        return self.base_prefix is not None
+
+    def sysconfig_path(self, key, config_var=None, sep=os.sep):
+        pattern = self.sysconfig_paths[key]
+        if config_var is None:
+            config_var = self.sysconfig_vars
+        else:
+            base = self.sysconfig_vars.copy()
+            base.update(config_var)
+            config_var = base
+        return pattern.format(**config_var).replace("/", sep)
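+        # e.g. (illustrative) on POSIX the "stdlib" pattern looks like
+        # "{installed_base}/{platlibdir}/python{py_version_short}" and formats to "/usr/lib/python3.10"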
+
+    def creators(self, refresh=False):  # noqa: FBT002
+        if self._creators is None or refresh is True:
+            from virtualenv.run.plugin.creators import CreatorSelector
+
+            self._creators = CreatorSelector.for_interpreter(self)
+        return self._creators
+
+    @property
+    def system_include(self):
+        path = self.sysconfig_path(
+            "include",
+            {
+                k: (self.system_prefix if v is not None and v.startswith(self.prefix) else v)
+                for k, v in self.sysconfig_vars.items()
+            },
+        )
+        if not os.path.exists(path):  # some broken packagings don't respect sysconfig, fall back to the distutils path
+            # the pattern includes the distribution name at the end too; remove it via the dirname call
+            fallback = os.path.join(self.prefix, os.path.dirname(self.install_path("headers")))
+            if os.path.exists(fallback):
+                path = fallback
+        return path
+
+    @property
+    def system_prefix(self):
+        return self.real_prefix or self.base_prefix or self.prefix
+
+    @property
+    def system_exec_prefix(self):
+        return self.real_prefix or self.base_exec_prefix or self.exec_prefix
+
+    def __unicode__(self):
+        return repr(self)
+
+    def __repr__(self) -> str:
+        return "{}({!r})".format(
+            self.__class__.__name__,
+            {k: v for k, v in self.__dict__.items() if not k.startswith("_")},
+        )
+
+    def __str__(self) -> str:
+        return "{}({})".format(
+            self.__class__.__name__,
+            ", ".join(
+                f"{k}={v}"
+                for k, v in (
+                    ("spec", self.spec),
+                    (
+                        "system"
+                        if self.system_executable is not None and self.system_executable != self.executable
+                        else None,
+                        self.system_executable,
+                    ),
+                    (
+                        "original"
+                        if self.original_executable not in {self.system_executable, self.executable}
+                        else None,
+                        self.original_executable,
+                    ),
+                    ("exe", self.executable),
+                    ("platform", self.platform),
+                    ("version", repr(self.version)),
+                    ("encoding_fs_io", f"{self.file_system_encoding}-{self.stdout_encoding}"),
+                )
+                if k is not None
+            ),
+        )
+
+    @property
+    def spec(self):
+        return "{}{}-{}".format(self.implementation, ".".join(str(i) for i in self.version_info), self.architecture)
+
+    @classmethod
+    def clear_cache(cls, app_data):
+        # this method is not used at import time, so it and the functions it calls can import things locally
+        from virtualenv.discovery.cached_py_info import clear
+
+        clear(app_data)
+        cls._cache_exe_discovery.clear()
+
+    def satisfies(self, spec, impl_must_match):  # noqa: C901
+        """Check if a given specification can be satisfied by the this python interpreter instance."""
+        if spec.path:
+            if self.executable == os.path.abspath(spec.path):
+                return True  # if the path is our own executable path, we're done
+            if not spec.is_abs:
+                # if path set, and is not our original executable name, this does not match
+                basename = os.path.basename(self.original_executable)
+                spec_path = spec.path
+                if sys.platform == "win32":
+                    basename, suffix = os.path.splitext(basename)
+                    if spec_path.endswith(suffix):
+                        spec_path = spec_path[: -len(suffix)]
+                if basename != spec_path:
+                    return False
+
+        if (
+            impl_must_match
+            and spec.implementation is not None
+            and spec.implementation.lower() != self.implementation.lower()
+        ):
+            return False
+
+        if spec.architecture is not None and spec.architecture != self.architecture:
+            return False
+
+        for our, req in zip(self.version_info[0:3], (spec.major, spec.minor, spec.micro)):
+            if req is not None and our is not None and our != req:
+                return False
+        return True
+
+    _current_system = None
+    _current = None
+
+    @classmethod
+    def current(cls, app_data=None):
+        """
+        This locates the current host interpreter information. This might be different from what we are running
+        with, in case the host python has been upgraded from underneath us.
+        """  # noqa: D205
+        if cls._current is None:
+            cls._current = cls.from_exe(sys.executable, app_data, raise_on_error=True, resolve_to_host=False)
+        return cls._current
+
+    @classmethod
+    def current_system(cls, app_data=None):
+        """
+        This locates the current host interpreter information. This might be different from what we are running
+        with, in case the host python has been upgraded from underneath us.
+        """  # noqa: D205
+        if cls._current_system is None:
+            cls._current_system = cls.from_exe(sys.executable, app_data, raise_on_error=True, resolve_to_host=True)
+        return cls._current_system
+
+    def _to_json(self):
+        # don't save calculated paths, as these are non-primitive types
+        return json.dumps(self._to_dict(), indent=2)
+
+    def _to_dict(self):
+        data = {var: (getattr(self, var) if var not in ("_creators",) else None) for var in vars(self)}
+
+        data["version_info"] = data["version_info"]._asdict()  # namedtuple to dictionary
+        return data
+
+    @classmethod
+    def from_exe(  # noqa: PLR0913
+        cls,
+        exe,
+        app_data=None,
+        raise_on_error=True,  # noqa: FBT002
+        ignore_cache=False,  # noqa: FBT002
+        resolve_to_host=True,  # noqa: FBT002
+        env=None,
+    ):
+        """Given a path to an executable get the python information."""
+        # this method is not used at import time, so it and the functions it calls can import things locally
+        from virtualenv.discovery.cached_py_info import from_exe
+
+        env = os.environ if env is None else env
+        proposed = from_exe(cls, app_data, exe, env=env, raise_on_error=raise_on_error, ignore_cache=ignore_cache)
+
+        if isinstance(proposed, PythonInfo) and resolve_to_host:
+            try:
+                proposed = proposed._resolve_to_system(app_data, proposed)  # noqa: SLF001
+            except Exception as exception:  # noqa: BLE001
+                if raise_on_error:
+                    raise
+                logging.info("ignore %s due cannot resolve system due to %r", proposed.original_executable, exception)
+                proposed = None
+        return proposed
+
+    @classmethod
+    def _from_json(cls, payload):
+        # the dictionary copy here protects against a PyPy bug where the interpreter crashes
+        raw = json.loads(payload)
+        return cls._from_dict(raw.copy())
+
+    @classmethod
+    def _from_dict(cls, data):
+        data["version_info"] = VersionInfo(**data["version_info"])  # restore this to a named tuple structure
+        result = cls()
+        result.__dict__ = data.copy()
+        return result
+
+    @classmethod
+    def _resolve_to_system(cls, app_data, target):
+        start_executable = target.executable
+        prefixes = OrderedDict()
+        while target.system_executable is None:
+            prefix = target.real_prefix or target.base_prefix or target.prefix
+            if prefix in prefixes:
+                if len(prefixes) == 1:
+                    # if we're linking back to ourselves, accept ourselves with a WARNING
+                    logging.info("%r links back to itself via prefixes", target)
+                    target.system_executable = target.executable
+                    break
+                for at, (p, t) in enumerate(prefixes.items(), start=1):
+                    logging.error("%d: prefix=%s, info=%r", at, p, t)
+                logging.error("%d: prefix=%s, info=%r", len(prefixes) + 1, prefix, target)
+                msg = "prefixes are causing a circle {}".format("|".join(prefixes.keys()))
+                raise RuntimeError(msg)
+            prefixes[prefix] = target
+            target = target.discover_exe(app_data, prefix=prefix, exact=False)
+        if target.executable != target.system_executable:
+            target = cls.from_exe(target.system_executable, app_data)
+        target.executable = start_executable
+        return target
+
+    _cache_exe_discovery = {}  # noqa: RUF012
+
+    def discover_exe(self, app_data, prefix, exact=True, env=None):  # noqa: FBT002
+        key = prefix, exact
+        if key in self._cache_exe_discovery and prefix:
+            logging.debug("discover exe from cache %s - exact %s: %r", prefix, exact, self._cache_exe_discovery[key])
+            return self._cache_exe_discovery[key]
+        logging.debug("discover exe for %s in %s", self, prefix)
+        # we don't know explicitly here, so do some guesswork - our executable name should tell us
+        possible_names = self._find_possible_exe_names()
+        possible_folders = self._find_possible_folders(prefix)
+        discovered = []
+        env = os.environ if env is None else env
+        for folder in possible_folders:
+            for name in possible_names:
+                info = self._check_exe(app_data, folder, name, exact, discovered, env)
+                if info is not None:
+                    self._cache_exe_discovery[key] = info
+                    return info
+        if exact is False and discovered:
+            info = self._select_most_likely(discovered, self)
+            folders = os.pathsep.join(possible_folders)
+            self._cache_exe_discovery[key] = info
+            logging.debug("no exact match found, chosen most similar of %s within base folders %s", info, folders)
+            return info
+        msg = "failed to detect {} in {}".format("|".join(possible_names), os.pathsep.join(possible_folders))
+        raise RuntimeError(msg)
+
+    def _check_exe(self, app_data, folder, name, exact, discovered, env):  # noqa: PLR0913
+        exe_path = os.path.join(folder, name)
+        if not os.path.exists(exe_path):
+            return None
+        info = self.from_exe(exe_path, app_data, resolve_to_host=False, raise_on_error=False, env=env)
+        if info is None:  # ignore if for some reason we can't query
+            return None
+        for item in ["implementation", "architecture", "version_info"]:
+            found = getattr(info, item)
+            searched = getattr(self, item)
+            if found != searched:
+                if item == "version_info":
+                    found, searched = ".".join(str(i) for i in found), ".".join(str(i) for i in searched)
+                executable = info.executable
+                logging.debug("refused interpreter %s because %s differs %s != %s", executable, item, found, searched)
+                if exact is False:
+                    discovered.append(info)
+                break
+        else:
+            return info
+        return None
+
+    @staticmethod
+    def _select_most_likely(discovered, target):
+        # no exact match found, so start relaxing our requirements to accommodate system package upgrades that
+        # could cause this (when using the copy strategy of the host python)
+        def sort_by(info):
+            # we need to set up a priority of traits, which (matching the list below) is:
+            # implementation, major, minor, architecture, micro, releaselevel, serial
+            matches = [
+                info.implementation == target.implementation,
+                info.version_info.major == target.version_info.major,
+                info.version_info.minor == target.version_info.minor,
+                info.architecture == target.architecture,
+                info.version_info.micro == target.version_info.micro,
+                info.version_info.releaselevel == target.version_info.releaselevel,
+                info.version_info.serial == target.version_info.serial,
+            ]
+            return sum((1 << pos if match else 0) for pos, match in enumerate(reversed(matches)))
+
+        sorted_discovered = sorted(discovered, key=sort_by, reverse=True)  # sort by priority in decreasing order
+        return sorted_discovered[0]
+
+    def _find_possible_folders(self, inside_folder):
+        candidate_folder = OrderedDict()
+        executables = OrderedDict()
+        executables[os.path.realpath(self.executable)] = None
+        executables[self.executable] = None
+        executables[os.path.realpath(self.original_executable)] = None
+        executables[self.original_executable] = None
+        for exe in executables:
+            base = os.path.dirname(exe)
+            # follow the path pattern of the current executable
+            if base.startswith(self.prefix):
+                relative = base[len(self.prefix) :]
+                candidate_folder[f"{inside_folder}{relative}"] = None
+
+        # or at root level
+        candidate_folder[inside_folder] = None
+        return [i for i in candidate_folder if os.path.exists(i)]
+
+    def _find_possible_exe_names(self):
+        name_candidate = OrderedDict()
+        for name in self._possible_base():
+            for at in (3, 2, 1, 0):
+                version = ".".join(str(i) for i in self.version_info[:at])
+                for arch in [f"-{self.architecture}", ""]:
+                    for ext in EXTENSIONS:
+                        candidate = f"{name}{version}{arch}{ext}"
+                        name_candidate[candidate] = None
+        return list(name_candidate.keys())
+
+    def _possible_base(self):
+        possible_base = OrderedDict()
+        basename = os.path.splitext(os.path.basename(self.executable))[0].rstrip(digits)
+        possible_base[basename] = None
+        possible_base[self.implementation] = None
+        # python is always the final option, as in practice it is used by multiple implementations as the exe name
+        if "python" in possible_base:
+            del possible_base["python"]
+        possible_base["python"] = None
+        for base in possible_base:
+            lower = base.lower()
+            yield lower
+            from virtualenv.info import fs_is_case_sensitive
+
+            if fs_is_case_sensitive():
+                if base != lower:
+                    yield base
+                upper = base.upper()
+                if upper != base:
+                    yield upper
+
+
+if __name__ == "__main__":
+    # dump a JSON representation of the current python
+
+    argv = sys.argv[1:]
+
+    if len(argv) >= 1:
+        start_cookie = argv[0]
+        argv = argv[1:]
+    else:
+        start_cookie = ""
+
+    if len(argv) >= 1:
+        end_cookie = argv[0]
+        argv = argv[1:]
+    else:
+        end_cookie = ""
+
+    sys.argv = sys.argv[:1] + argv
+
+    info = PythonInfo()._to_json()  # noqa: SLF001
+    sys.stdout.write("".join((start_cookie[::-1], info, end_cookie[::-1])))
diff --git a/venv/lib/python3.10/site-packages/virtualenv/discovery/py_spec.py b/venv/lib/python3.10/site-packages/virtualenv/discovery/py_spec.py
new file mode 100644
index 0000000..04a5b09
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/discovery/py_spec.py
@@ -0,0 +1,117 @@
+"""A Python specification is an abstract requirement definition of an interpreter."""
+
+from __future__ import annotations
+
+import contextlib
+import os
+import re
+from collections import OrderedDict
+
+from virtualenv.info import fs_is_case_sensitive
+
+PATTERN = re.compile(r"^(?P<impl>[a-zA-Z]+)?(?P<version>[0-9.]+)?(?:-(?P<arch>32|64))?$")
+
+
+class PythonSpec:
+    """Contains specification about a Python Interpreter."""
+
+    def __init__(self, str_spec, implementation, major, minor, micro, architecture, path) -> None:  # noqa: PLR0913
+        self.str_spec = str_spec
+        self.implementation = implementation
+        self.major = major
+        self.minor = minor
+        self.micro = micro
+        self.architecture = architecture
+        self.path = path
+
+    @classmethod
+    def from_string_spec(cls, string_spec):  # noqa: C901, PLR0912
+        impl, major, minor, micro, arch, path = None, None, None, None, None, None
+        if os.path.isabs(string_spec):
+            path = string_spec
+        else:
+            ok = False
+            match = re.match(PATTERN, string_spec)
+            if match:
+
+                def _int_or_none(val):
+                    return None if val is None else int(val)
+
+                try:
+                    groups = match.groupdict()
+                    version = groups["version"]
+                    if version is not None:
+                        versions = tuple(int(i) for i in version.split(".") if i)
+                        if len(versions) > 3:  # noqa: PLR2004
+                            raise ValueError  # noqa: TRY301
+                        if len(versions) == 3:  # noqa: PLR2004
+                            major, minor, micro = versions
+                        elif len(versions) == 2:  # noqa: PLR2004
+                            major, minor = versions
+                        elif len(versions) == 1:
+                            version_data = versions[0]
+                            major = int(str(version_data)[0])  # first digit major
+                            if version_data > 9:  # noqa: PLR2004
+                                minor = int(str(version_data)[1:])
+                    ok = True
+                except ValueError:
+                    pass
+                else:
+                    impl = groups["impl"]
+                    if impl in {"py", "python"}:
+                        impl = None
+                    arch = _int_or_none(groups["arch"])
+
+            if not ok:
+                path = string_spec
+
+        return cls(string_spec, impl, major, minor, micro, arch, path)
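+    # Illustrative parses (assumptions, per PATTERN above):
+    #   "python3.10"       -> implementation=None, major=3, minor=10
+    #   "pypy3-64"         -> implementation="pypy", major=3, architecture=64
+    #   "/usr/bin/python3" -> path only (absolute paths bypass the regex)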
+
+    def generate_names(self):
+        impls = OrderedDict()
+        if self.implementation:
+            # first consider implementation as it is
+            impls[self.implementation] = False
+            if fs_is_case_sensitive():
+                # for case sensitive file systems consider lower and upper case versions too
+                # trivia: MacBooks and all pre-2018 Windows releases were case insensitive by default
+                impls[self.implementation.lower()] = False
+                impls[self.implementation.upper()] = False
+        impls["python"] = True  # finally consider python as alias, implementation must match now
+        version = self.major, self.minor, self.micro
+        with contextlib.suppress(ValueError):
+            version = version[: version.index(None)]
+
+        for impl, match in impls.items():
+            for at in range(len(version), -1, -1):
+                cur_ver = version[0:at]
+                spec = f"{impl}{'.'.join(str(i) for i in cur_ver)}"
+                yield spec, match
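+        # e.g. (illustrative) for from_string_spec("cpython3.9") this yields ("cpython3.9", False),
+        # ("cpython3", False), ("cpython", False), case variants on case-sensitive file systems,
+        # and finally ("python3.9", True), ("python3", True), ("python", True)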
+
+    @property
+    def is_abs(self):
+        return self.path is not None and os.path.isabs(self.path)
+
+    def satisfies(self, spec):
+        """Called when there's a candidate metadata spec to see if compatible - e.g. PEP-514 on Windows."""
+        if spec.is_abs and self.is_abs and self.path != spec.path:
+            return False
+        if spec.implementation is not None and spec.implementation.lower() != self.implementation.lower():
+            return False
+        if spec.architecture is not None and spec.architecture != self.architecture:
+            return False
+
+        for our, req in zip((self.major, self.minor, self.micro), (spec.major, spec.minor, spec.micro)):
+            if req is not None and our is not None and our != req:
+                return False
+        return True
+
+    def __repr__(self) -> str:
+        name = type(self).__name__
+        params = "implementation", "major", "minor", "micro", "architecture", "path"
+        return f"{name}({', '.join(f'{k}={getattr(self, k)}' for k in params if getattr(self, k) is not None)})"
+
+
+__all__ = [
+    "PythonSpec",
+]
diff --git a/venv/lib/python3.10/site-packages/virtualenv/discovery/windows/__init__.py b/venv/lib/python3.10/site-packages/virtualenv/discovery/windows/__init__.py
new file mode 100644
index 0000000..9efd5b6
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/discovery/windows/__init__.py
@@ -0,0 +1,47 @@
+from __future__ import annotations
+
+from virtualenv.discovery.py_info import PythonInfo
+from virtualenv.discovery.py_spec import PythonSpec
+
+from .pep514 import discover_pythons
+
+# Map well-known organizations (the Company part of the PEP 514 Windows registry key) to a Python implementation
+_IMPLEMENTATION_BY_ORG = {
+    "ContinuumAnalytics": "CPython",
+    "PythonCore": "CPython",
+}
+
+
+class Pep514PythonInfo(PythonInfo):
+    """A Python information acquired from PEP-514."""
+
+
+def propose_interpreters(spec, cache_dir, env):
+    # see if PEP-514 entries are good
+
+    # start with higher python versions in an effort to use the latest version available
+    # and prefer PythonCore over conda pythons (as virtualenv is mostly used by non conda tools)
+    existing = list(discover_pythons())
+    existing.sort(
+        key=lambda i: (*tuple(-1 if j is None else j for j in i[1:4]), 1 if i[0] == "PythonCore" else 0),
+        reverse=True,
+    )
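+    # e.g. (illustrative) 3.12 sorts before 3.11, and within the same version/arch a
+    # PythonCore entry sorts before entries from other companies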
+
+    for name, major, minor, arch, exe, _ in existing:
+        # Map well-known/most common organizations to a Python implementation, use the org name as a fallback for
+        # backwards compatibility.
+        implementation = _IMPLEMENTATION_BY_ORG.get(name, name)
+
+        # Pre-filtering based on Windows Registry metadata, for CPython only
+        skip_pre_filter = implementation.lower() != "cpython"
+        registry_spec = PythonSpec(None, implementation, major, minor, None, arch, exe)
+        if skip_pre_filter or registry_spec.satisfies(spec):
+            interpreter = Pep514PythonInfo.from_exe(exe, cache_dir, env=env, raise_on_error=False)
+            if interpreter is not None and interpreter.satisfies(spec, impl_must_match=True):
+                yield interpreter  # Final filtering/matching using interpreter metadata
+
+
+__all__ = [
+    "Pep514PythonInfo",
+    "propose_interpreters",
+]
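
The sort key above orders the registry rows so that newer versions come first and, at equal versions, PythonCore entries win; a quick check with made-up rows:

    rows = [
        ("ContinuumAnalytics", 3, 9, 64, r"C:\conda\python.exe", None),
        ("PythonCore", 3, 9, 64, r"C:\py39\python.exe", None),
        ("PythonCore", 3, 11, 64, r"C:\py311\python.exe", None),
    ]
    rows.sort(
        key=lambda i: (*tuple(-1 if j is None else j for j in i[1:4]), 1 if i[0] == "PythonCore" else 0),
        reverse=True,
    )
    print([r[:3] for r in rows])
    # [('PythonCore', 3, 11), ('PythonCore', 3, 9), ('ContinuumAnalytics', 3, 9)]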
diff --git a/venv/lib/python3.10/site-packages/virtualenv/discovery/windows/pep514.py b/venv/lib/python3.10/site-packages/virtualenv/discovery/windows/pep514.py
new file mode 100644
index 0000000..9d5691a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/discovery/windows/pep514.py
@@ -0,0 +1,152 @@
+"""Implement https://www.python.org/dev/peps/pep-0514/ to discover interpreters - Windows only."""
+
+from __future__ import annotations
+
+import os
+import re
+import winreg
+from logging import basicConfig, getLogger
+
+LOGGER = getLogger(__name__)
+
+
+def enum_keys(key):
+    at = 0
+    while True:
+        try:
+            yield winreg.EnumKey(key, at)
+        except OSError:
+            break
+        at += 1
+
+
+def get_value(key, value_name):
+    try:
+        return winreg.QueryValueEx(key, value_name)[0]
+    except OSError:
+        return None
+
+
+def discover_pythons():
+    for hive, hive_name, key, flags, default_arch in [
+        (winreg.HKEY_CURRENT_USER, "HKEY_CURRENT_USER", r"Software\Python", 0, 64),
+        (winreg.HKEY_LOCAL_MACHINE, "HKEY_LOCAL_MACHINE", r"Software\Python", winreg.KEY_WOW64_64KEY, 64),
+        (winreg.HKEY_LOCAL_MACHINE, "HKEY_LOCAL_MACHINE", r"Software\Python", winreg.KEY_WOW64_32KEY, 32),
+    ]:
+        yield from process_set(hive, hive_name, key, flags, default_arch)
+
+
+def process_set(hive, hive_name, key, flags, default_arch):
+    try:
+        with winreg.OpenKeyEx(hive, key, 0, winreg.KEY_READ | flags) as root_key:
+            for company in enum_keys(root_key):
+                if company == "PyLauncher":  # reserved
+                    continue
+                yield from process_company(hive_name, company, root_key, default_arch)
+    except OSError:
+        pass
+
+
+def process_company(hive_name, company, root_key, default_arch):
+    with winreg.OpenKeyEx(root_key, company) as company_key:
+        for tag in enum_keys(company_key):
+            spec = process_tag(hive_name, company, company_key, tag, default_arch)
+            if spec is not None:
+                yield spec
+
+
+def process_tag(hive_name, company, company_key, tag, default_arch):
+    with winreg.OpenKeyEx(company_key, tag) as tag_key:
+        version = load_version_data(hive_name, company, tag, tag_key)
+        if version is not None:  # bail out (return None) if we failed to get the version
+            major, minor, _ = version
+            arch = load_arch_data(hive_name, company, tag, tag_key, default_arch)
+            if arch is not None:
+                exe_data = load_exe(hive_name, company, company_key, tag)
+                if exe_data is not None:
+                    exe, args = exe_data
+                    return company, major, minor, arch, exe, args
+                return None
+            return None
+        return None
+
+
+def load_exe(hive_name, company, company_key, tag):
+    key_path = f"{hive_name}/{company}/{tag}"
+    try:
+        with winreg.OpenKeyEx(company_key, rf"{tag}\InstallPath") as ip_key, ip_key:
+            exe = get_value(ip_key, "ExecutablePath")
+            if exe is None:
+                ip = get_value(ip_key, None)
+                if ip is None:
+                    msg(key_path, "no ExecutablePath or default for it")
+
+                else:
+                    exe = os.path.join(ip, "python.exe")
+            if exe is not None and os.path.exists(exe):
+                args = get_value(ip_key, "ExecutableArguments")
+                return exe, args
+            msg(key_path, f"could not load exe with value {exe}")
+    except OSError:
+        msg(f"{key_path}/InstallPath", "missing")
+    return None
+
+
+def load_arch_data(hive_name, company, tag, tag_key, default_arch):
+    arch_str = get_value(tag_key, "SysArchitecture")
+    if arch_str is not None:
+        key_path = f"{hive_name}/{company}/{tag}/SysArchitecture"
+        try:
+            return parse_arch(arch_str)
+        except ValueError as sys_arch:
+            msg(key_path, sys_arch)
+    return default_arch
+
+
+def parse_arch(arch_str):
+    if isinstance(arch_str, str):
+        match = re.match(r"^(\d+)bit$", arch_str)
+        if match:
+            return int(next(iter(match.groups())))
+        error = f"invalid format {arch_str}"
+    else:
+        error = f"arch is not string: {arch_str!r}"
+    raise ValueError(error)
+
+
+def load_version_data(hive_name, company, tag, tag_key):
+    for candidate, key_path in [
+        (get_value(tag_key, "SysVersion"), f"{hive_name}/{company}/{tag}/SysVersion"),
+        (tag, f"{hive_name}/{company}/{tag}"),
+    ]:
+        if candidate is not None:
+            try:
+                return parse_version(candidate)
+            except ValueError as sys_version:
+                msg(key_path, sys_version)
+    return None
+
+
+def parse_version(version_str):
+    if isinstance(version_str, str):
+        match = re.match(r"^(\d+)(?:\.(\d+))?(?:\.(\d+))?$", version_str)
+        if match:
+            return tuple(int(i) if i is not None else None for i in match.groups())
+        error = f"invalid format {version_str}"
+    else:
+        error = f"version is not string: {version_str!r}"
+    raise ValueError(error)
+
+
+def msg(path, what):
+    LOGGER.warning(f"PEP-514 violation in Windows Registry at {path} error: {what}")
+
+
+def _run():
+    basicConfig()
+    interpreters = [repr(spec) for spec in discover_pythons()]
+    print("\n".join(sorted(interpreters)))  # noqa: T201
+
+
+if __name__ == "__main__":
+    _run()
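
parse_version and parse_arch above normalise the free-form registry strings; a self-contained mirror of the version parser with example inputs:

    import re

    def parse_version(version_str):  # mirrors the function above
        match = re.match(r"^(\d+)(?:\.(\d+))?(?:\.(\d+))?$", version_str)
        if match:
            return tuple(int(i) if i is not None else None for i in match.groups())
        raise ValueError(f"invalid format {version_str}")

    print(parse_version("3.10"))   # (3, 10, None)
    print(parse_version("3.9.1"))  # (3, 9, 1)
    # malformed registry values raise ValueError, which the module logs as a
    # PEP-514 violation via msg() instead of aborting discovery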
diff --git a/venv/lib/python3.10/site-packages/virtualenv/info.py b/venv/lib/python3.10/site-packages/virtualenv/info.py
new file mode 100644
index 0000000..e097776
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/info.py
@@ -0,0 +1,61 @@
+from __future__ import annotations
+
+import logging
+import os
+import platform
+import sys
+import tempfile
+
+IMPLEMENTATION = platform.python_implementation()
+IS_PYPY = IMPLEMENTATION == "PyPy"
+IS_CPYTHON = IMPLEMENTATION == "CPython"
+IS_WIN = sys.platform == "win32"
+IS_MAC_ARM64 = sys.platform == "darwin" and platform.machine() == "arm64"
+ROOT = os.path.realpath(os.path.join(os.path.abspath(__file__), os.path.pardir, os.path.pardir))
+IS_ZIPAPP = os.path.isfile(ROOT)
+
+_CAN_SYMLINK = _FS_CASE_SENSITIVE = _CFG_DIR = _DATA_DIR = None
+
+
+def fs_is_case_sensitive():
+    global _FS_CASE_SENSITIVE  # noqa: PLW0603
+
+    if _FS_CASE_SENSITIVE is None:
+        with tempfile.NamedTemporaryFile(prefix="TmP") as tmp_file:
+            _FS_CASE_SENSITIVE = not os.path.exists(tmp_file.name.lower())
+            logging.debug("filesystem is %scase-sensitive", "" if _FS_CASE_SENSITIVE else "not ")
+    return _FS_CASE_SENSITIVE
+
+
+def fs_supports_symlink():
+    global _CAN_SYMLINK  # noqa: PLW0603
+
+    if _CAN_SYMLINK is None:
+        can = False
+        if hasattr(os, "symlink"):
+            if IS_WIN:
+                with tempfile.NamedTemporaryFile(prefix="TmP") as tmp_file:
+                    temp_dir = os.path.dirname(tmp_file.name)
+                    dest = os.path.join(temp_dir, f"{tmp_file.name}-{'b'}")
+                    try:
+                        os.symlink(tmp_file.name, dest)
+                        can = True
+                    except (OSError, NotImplementedError):
+                        pass
+                logging.debug("symlink on filesystem does%s work", "" if can else " not")
+            else:
+                can = True
+        _CAN_SYMLINK = can
+    return _CAN_SYMLINK
+
+
+__all__ = (
+    "IS_PYPY",
+    "IS_CPYTHON",
+    "IS_WIN",
+    "fs_is_case_sensitive",
+    "fs_supports_symlink",
+    "ROOT",
+    "IS_ZIPAPP",
+    "IS_MAC_ARM64",
+)
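
Both probes above run at most once per process and cache the answer in the module-level globals; a usage sketch, assuming the vendored virtualenv is importable:

    from virtualenv.info import IS_WIN, fs_is_case_sensitive, fs_supports_symlink

    # the first call touches the filesystem (temp file / trial symlink); later
    # calls are served from _FS_CASE_SENSITIVE / _CAN_SYMLINK
    print(IS_WIN, fs_is_case_sensitive(), fs_supports_symlink())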
diff --git a/venv/lib/python3.10/site-packages/virtualenv/report.py b/venv/lib/python3.10/site-packages/virtualenv/report.py
new file mode 100644
index 0000000..db1be99
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/report.py
@@ -0,0 +1,51 @@
+from __future__ import annotations
+
+import logging
+import sys
+
+LEVELS = {
+    0: logging.CRITICAL,
+    1: logging.ERROR,
+    2: logging.WARNING,
+    3: logging.INFO,
+    4: logging.DEBUG,
+    5: logging.NOTSET,
+}
+
+MAX_LEVEL = max(LEVELS.keys())
+LOGGER = logging.getLogger()
+
+
+def setup_report(verbosity, show_pid=False):  # noqa: FBT002
+    _clean_handlers(LOGGER)
+    if verbosity > MAX_LEVEL:
+        verbosity = MAX_LEVEL  # pragma: no cover
+    level = LEVELS[verbosity]
+    msg_format = "%(message)s"
+    if level <= logging.DEBUG:
+        locate = "module"
+        msg_format = f"%(relativeCreated)d {msg_format} [%(levelname)s %({locate})s:%(lineno)d]"
+    if show_pid:
+        msg_format = f"[%(process)d] {msg_format}"
+    formatter = logging.Formatter(msg_format)
+    stream_handler = logging.StreamHandler(stream=sys.stdout)
+    stream_handler.setLevel(level)
+    LOGGER.setLevel(logging.NOTSET)
+    stream_handler.setFormatter(formatter)
+    LOGGER.addHandler(stream_handler)
+    level_name = logging.getLevelName(level)
+    logging.debug("setup logging to %s", level_name)
+    logging.getLogger("distlib").setLevel(logging.ERROR)
+    return verbosity
+
+
+def _clean_handlers(log):
+    for log_handler in list(log.handlers):  # remove handlers of libraries
+        log.removeHandler(log_handler)
+
+
+__all__ = [
+    "LEVELS",
+    "MAX_LEVEL",
+    "setup_report",
+]
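
A sketch of driving the reporter directly, outside the CLI (verbosity 4 maps to DEBUG per the LEVELS table above):

    import logging

    from virtualenv.report import setup_report

    setup_report(4)                # installs a stdout handler at logging.DEBUG
    logging.debug("now visible")   # emitted using the debug-level format above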
diff --git a/venv/lib/python3.10/site-packages/virtualenv/run/__init__.py b/venv/lib/python3.10/site-packages/virtualenv/run/__init__.py
new file mode 100644
index 0000000..ed0d6d0
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/run/__init__.py
@@ -0,0 +1,164 @@
+from __future__ import annotations
+
+import logging
+import os
+from functools import partial
+
+from virtualenv.app_data import make_app_data
+from virtualenv.config.cli.parser import VirtualEnvConfigParser
+from virtualenv.report import LEVELS, setup_report
+from virtualenv.run.session import Session
+from virtualenv.seed.wheels.periodic_update import manual_upgrade
+from virtualenv.version import __version__
+
+from .plugin.activators import ActivationSelector
+from .plugin.creators import CreatorSelector
+from .plugin.discovery import get_discover
+from .plugin.seeders import SeederSelector
+
+
+def cli_run(args, options=None, setup_logging=True, env=None):  # noqa: FBT002
+    """
+    Create a virtual environment given some command line interface arguments.
+
+    :param args: the command line arguments
+    :param options: passing in a ``VirtualEnvOptions`` object allows return of the parsed options
+    :param setup_logging: ``True`` to set up logging handlers, ``False`` to use the handlers already registered
+    :param env: environment variables to use
+    :return: the session object of the creation (its structure for now is experimental and might change on short notice)
+    """
+    env = os.environ if env is None else env
+    of_session = session_via_cli(args, options, setup_logging, env)
+    with of_session:
+        of_session.run()
+    return of_session
+
+
+def session_via_cli(args, options=None, setup_logging=True, env=None):  # noqa: FBT002
+    """
+    Create a virtualenv session (same as cli_run, but this does not perform the creation). Use this if you just want to
+    query what the virtual environment would look like, but not actually create it.
+
+    :param args: the command line arguments
+    :param options: passing in a ``VirtualEnvOptions`` object allows return of the parsed options
+    :param setup_logging: ``True`` to set up logging handlers, ``False`` to use the handlers already registered
+    :param env: environment variables to use
+    :return: the session object of the creation (its structure for now is experimental and might change on short notice)
+    """  # noqa: D205
+    env = os.environ if env is None else env
+    parser, elements = build_parser(args, options, setup_logging, env)
+    options = parser.parse_args(args)
+    creator, seeder, activators = tuple(e.create(options) for e in elements)  # create types
+    return Session(
+        options.verbosity,
+        options.app_data,
+        parser._interpreter,  # noqa: SLF001
+        creator,
+        seeder,
+        activators,
+    )
+
+
+def build_parser(args=None, options=None, setup_logging=True, env=None):  # noqa: FBT002
+    parser = VirtualEnvConfigParser(options, os.environ if env is None else env)
+    add_version_flag(parser)
+    parser.add_argument(
+        "--with-traceback",
+        dest="with_traceback",
+        action="store_true",
+        default=False,
+        help="on failure also display the stacktrace internals of virtualenv",
+    )
+    _do_report_setup(parser, args, setup_logging)
+    options = load_app_data(args, parser, options)
+    handle_extra_commands(options)
+
+    discover = get_discover(parser, args)
+    parser._interpreter = interpreter = discover.interpreter  # noqa: SLF001
+    if interpreter is None:
+        msg = f"failed to find interpreter for {discover}"
+        raise RuntimeError(msg)
+    elements = [
+        CreatorSelector(interpreter, parser),
+        SeederSelector(interpreter, parser),
+        ActivationSelector(interpreter, parser),
+    ]
+    options, _ = parser.parse_known_args(args)
+    for element in elements:
+        element.handle_selected_arg_parse(options)
+    parser.enable_help()
+    return parser, elements
+
+
+def build_parser_only(args=None):
+    """Used to provide a parser for the doc generation."""
+    return build_parser(args)[0]
+
+
+def handle_extra_commands(options):
+    if options.upgrade_embed_wheels:
+        result = manual_upgrade(options.app_data, options.env)
+        raise SystemExit(result)
+
+
+def load_app_data(args, parser, options):
+    parser.add_argument(
+        "--read-only-app-data",
+        action="store_true",
+        help="use app data folder in read-only mode (write operations will fail with error)",
+    )
+    options, _ = parser.parse_known_args(args, namespace=options)
+
+    # here we need a writable application data folder (e.g. the zipapp might need it for the discovery cache)
+    parser.add_argument(
+        "--app-data",
+        help="a data folder used as cache by the virtualenv",
+        type=partial(make_app_data, read_only=options.read_only_app_data, env=options.env),
+        default=make_app_data(None, read_only=options.read_only_app_data, env=options.env),
+    )
+    parser.add_argument(
+        "--reset-app-data",
+        action="store_true",
+        help="start with empty app data folder",
+    )
+    parser.add_argument(
+        "--upgrade-embed-wheels",
+        action="store_true",
+        help="trigger a manual update of the embedded wheels",
+    )
+    options, _ = parser.parse_known_args(args, namespace=options)
+    if options.reset_app_data:
+        options.app_data.reset()
+    return options
+
+
+def add_version_flag(parser):
+    import virtualenv
+
+    parser.add_argument(
+        "--version",
+        action="version",
+        version=f"%(prog)s {__version__} from {virtualenv.__file__}",
+        help="display the version of the virtualenv package and its location, then exit",
+    )
+
+
+def _do_report_setup(parser, args, setup_logging):
+    level_map = ", ".join(f"{logging.getLevelName(line)}={c}" for c, line in sorted(LEVELS.items()))
+    msg = "verbosity = verbose - quiet, default {}, mapping => {}"
+    verbosity_group = parser.add_argument_group(
+        title="verbosity",
+        description=msg.format(logging.getLevelName(LEVELS[3]), level_map),
+    )
+    verbosity = verbosity_group.add_mutually_exclusive_group()
+    verbosity.add_argument("-v", "--verbose", action="count", dest="verbose", help="increase verbosity", default=2)
+    verbosity.add_argument("-q", "--quiet", action="count", dest="quiet", help="decrease verbosity", default=0)
+    option, _ = parser.parse_known_args(args)
+    if setup_logging:
+        setup_report(option.verbosity)
+
+
+__all__ = [
+    "cli_run",
+    "session_via_cli",
+]
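
A minimal programmatic use of cli_run, assuming the package is importable; "my-venv" is an arbitrary destination path, and creator.dest is where upstream creators record the destination:

    from virtualenv import cli_run

    session = cli_run(["my-venv", "--no-download"])
    print(session.creator.dest)  # where the environment was created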
diff --git a/venv/lib/python3.10/site-packages/virtualenv/run/plugin/__init__.py b/venv/lib/python3.10/site-packages/virtualenv/run/plugin/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/venv/lib/python3.10/site-packages/virtualenv/run/plugin/activators.py b/venv/lib/python3.10/site-packages/virtualenv/run/plugin/activators.py
new file mode 100644
index 0000000..a0e8669
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/run/plugin/activators.py
@@ -0,0 +1,62 @@
+from __future__ import annotations
+
+from argparse import ArgumentTypeError
+from collections import OrderedDict
+
+from .base import ComponentBuilder
+
+
+class ActivationSelector(ComponentBuilder):
+    def __init__(self, interpreter, parser) -> None:
+        self.default = None
+        possible = OrderedDict(
+            (k, v) for k, v in self.options("virtualenv.activate").items() if v.supports(interpreter)
+        )
+        super().__init__(interpreter, parser, "activators", possible)
+        self.parser.description = "options for activation scripts"
+        self.active = None
+
+    def add_selector_arg_parse(self, name, choices):
+        self.default = ",".join(choices)
+        self.parser.add_argument(
+            f"--{name}",
+            default=self.default,
+            metavar="comma_sep_list",
+            required=False,
+            help="activators to generate - default is all supported",
+            type=self._extract_activators,
+        )
+
+    def _extract_activators(self, entered_str):
+        elements = [e.strip() for e in entered_str.split(",") if e.strip()]
+        missing = [e for e in elements if e not in self.possible]
+        if missing:
+            msg = f"the following activators are not available {','.join(missing)}"
+            raise ArgumentTypeError(msg)
+        return elements
+
+    def handle_selected_arg_parse(self, options):
+        selected_activators = (
+            self._extract_activators(self.default) if options.activators is self.default else options.activators
+        )
+        self.active = {k: v for k, v in self.possible.items() if k in selected_activators}
+        self.parser.add_argument(
+            "--prompt",
+            dest="prompt",
+            metavar="prompt",
+            help=(
+                "provides an alternative prompt prefix for this environment "
+                "(value of . means name of the current working directory)"
+            ),
+            default=None,
+        )
+        for activator in self.active.values():
+            activator.add_parser_arguments(self.parser, self.interpreter)
+
+    def create(self, options):
+        return [activator_class(options) for activator_class in self.active.values()]
+
+
+__all__ = [
+    "ActivationSelector",
+]
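
_extract_activators above turns the comma-separated --activators value into a validated list; a standalone mirror:

    def extract(entered_str, possible):  # mirrors _extract_activators above
        elements = [e.strip() for e in entered_str.split(",") if e.strip()]
        missing = [e for e in elements if e not in possible]
        if missing:
            raise ValueError(f"the following activators are not available {','.join(missing)}")
        return elements

    print(extract("bash, powershell", {"bash", "batch", "powershell"}))
    # ['bash', 'powershell']; unknown names raise (ArgumentTypeError in the real class)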
diff --git a/venv/lib/python3.10/site-packages/virtualenv/run/plugin/base.py b/venv/lib/python3.10/site-packages/virtualenv/run/plugin/base.py
new file mode 100644
index 0000000..71ce5c4
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/run/plugin/base.py
@@ -0,0 +1,71 @@
+from __future__ import annotations
+
+import sys
+from collections import OrderedDict
+
+if sys.version_info >= (3, 8):
+    from importlib.metadata import entry_points
+
+    importlib_metadata_version = ()
+else:
+    from importlib_metadata import entry_points, version
+
+    importlib_metadata_version = tuple(int(i) for i in version("importlib_metadata").split(".")[:2])
+
+
+class PluginLoader:
+    _OPTIONS = None
+    _ENTRY_POINTS = None
+
+    @classmethod
+    def entry_points_for(cls, key):
+        if sys.version_info >= (3, 10) or importlib_metadata_version >= (3, 6):
+            return OrderedDict((e.name, e.load()) for e in cls.entry_points().select(group=key))
+        return OrderedDict((e.name, e.load()) for e in cls.entry_points().get(key, {}))
+
+    @staticmethod
+    def entry_points():
+        if PluginLoader._ENTRY_POINTS is None:  # noqa: SLF001
+            PluginLoader._ENTRY_POINTS = entry_points()  # noqa: SLF001
+        return PluginLoader._ENTRY_POINTS  # noqa: SLF001
+
+
+class ComponentBuilder(PluginLoader):
+    def __init__(self, interpreter, parser, name, possible) -> None:
+        self.interpreter = interpreter
+        self.name = name
+        self._impl_class = None
+        self.possible = possible
+        self.parser = parser.add_argument_group(title=name)
+        self.add_selector_arg_parse(name, list(self.possible))
+
+    @classmethod
+    def options(cls, key):
+        if cls._OPTIONS is None:
+            cls._OPTIONS = cls.entry_points_for(key)
+        return cls._OPTIONS
+
+    def add_selector_arg_parse(self, name, choices):
+        raise NotImplementedError
+
+    def handle_selected_arg_parse(self, options):
+        selected = getattr(options, self.name)
+        if selected not in self.possible:
+            msg = f"No implementation for {self.interpreter}"
+            raise RuntimeError(msg)
+        self._impl_class = self.possible[selected]
+        self.populate_selected_argparse(selected, options.app_data)
+        return selected
+
+    def populate_selected_argparse(self, selected, app_data):
+        self.parser.description = f"options for {self.name} {selected}"
+        self._impl_class.add_parser_arguments(self.parser, self.interpreter, app_data)
+
+    def create(self, options):
+        return self._impl_class(options, self.interpreter)
+
+
+__all__ = [
+    "PluginLoader",
+    "ComponentBuilder",
+]
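
entry_points_for is how third-party plugins surface: anything published under one of the virtualenv.* entry-point groups is picked up by name. A sketch - the plugin package and class in the metadata comment are made up:

    from virtualenv.run.plugin.base import PluginLoader

    # a hypothetical plugin package would declare in its packaging metadata:
    #   [project.entry-points."virtualenv.seed"]
    #   my-seeder = "my_pkg.seed:MySeeder"
    seeders = PluginLoader.entry_points_for("virtualenv.seed")
    print(list(seeders))  # e.g. ['app-data', 'pip', ...] plus any plugin names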
diff --git a/venv/lib/python3.10/site-packages/virtualenv/run/plugin/creators.py b/venv/lib/python3.10/site-packages/virtualenv/run/plugin/creators.py
new file mode 100644
index 0000000..e5f8d68
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/run/plugin/creators.py
@@ -0,0 +1,82 @@
+from __future__ import annotations
+
+from collections import OrderedDict, defaultdict, namedtuple
+
+from virtualenv.create.describe import Describe
+from virtualenv.create.via_global_ref.builtin.builtin_way import VirtualenvBuiltin
+
+from .base import ComponentBuilder
+
+CreatorInfo = namedtuple("CreatorInfo", ["key_to_class", "key_to_meta", "describe", "builtin_key"])
+
+
+class CreatorSelector(ComponentBuilder):
+    def __init__(self, interpreter, parser) -> None:
+        creators, self.key_to_meta, self.describe, self.builtin_key = self.for_interpreter(interpreter)
+        super().__init__(interpreter, parser, "creator", creators)
+
+    @classmethod
+    def for_interpreter(cls, interpreter):
+        key_to_class, key_to_meta, builtin_key, describe = OrderedDict(), {}, None, None
+        errors = defaultdict(list)
+        for key, creator_class in cls.options("virtualenv.create").items():
+            if key == "builtin":
+                msg = "builtin creator is a reserved name"
+                raise RuntimeError(msg)
+            meta = creator_class.can_create(interpreter)
+            if meta:
+                if meta.error:
+                    errors[meta.error].append(creator_class)
+                else:
+                    if "builtin" not in key_to_class and issubclass(creator_class, VirtualenvBuiltin):
+                        builtin_key = key
+                        key_to_class["builtin"] = creator_class
+                        key_to_meta["builtin"] = meta
+                    key_to_class[key] = creator_class
+                    key_to_meta[key] = meta
+            if describe is None and issubclass(creator_class, Describe) and creator_class.can_describe(interpreter):
+                describe = creator_class
+        if not key_to_meta:
+            if errors:
+                rows = [f"{k} for creators {', '.join(i.__name__ for i in v)}" for k, v in errors.items()]
+                raise RuntimeError("\n".join(rows))
+            msg = f"No virtualenv implementation for {interpreter}"
+            raise RuntimeError(msg)
+        return CreatorInfo(
+            key_to_class=key_to_class,
+            key_to_meta=key_to_meta,
+            describe=describe,
+            builtin_key=builtin_key,
+        )
+
+    def add_selector_arg_parse(self, name, choices):
+        # prefer the built-in venv if present, otherwise fall back to the first defined type
+        choices = sorted(choices, key=lambda a: 0 if a == "builtin" else 1)
+        default_value = self._get_default(choices)
+        self.parser.add_argument(
+            f"--{name}",
+            choices=choices,
+            default=default_value,
+            required=False,
+            help=f"create environment via{'' if self.builtin_key is None else f' (builtin = {self.builtin_key})'}",
+        )
+
+    @staticmethod
+    def _get_default(choices):
+        return next(iter(choices))
+
+    def populate_selected_argparse(self, selected, app_data):
+        self.parser.description = f"options for {self.name} {selected}"
+        self._impl_class.add_parser_arguments(self.parser, self.interpreter, self.key_to_meta[selected], app_data)
+
+    def create(self, options):
+        options.meta = self.key_to_meta[getattr(options, self.name)]
+        if not issubclass(self._impl_class, Describe):
+            options.describe = self.describe(options, self.interpreter)
+        return super().create(options)
+
+
+__all__ = [
+    "CreatorSelector",
+    "CreatorInfo",
+]
diff --git a/venv/lib/python3.10/site-packages/virtualenv/run/plugin/discovery.py b/venv/lib/python3.10/site-packages/virtualenv/run/plugin/discovery.py
new file mode 100644
index 0000000..c9e4564
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/run/plugin/discovery.py
@@ -0,0 +1,40 @@
+from __future__ import annotations
+
+from .base import PluginLoader
+
+
+class Discovery(PluginLoader):
+    """Discovery plugins."""
+
+
+def get_discover(parser, args):
+    discover_types = Discovery.entry_points_for("virtualenv.discovery")
+    discovery_parser = parser.add_argument_group(
+        title="discovery",
+        description="discover and provide a target interpreter",
+    )
+    choices = _get_default_discovery(discover_types)
+    # prefer the builtin if present, otherwise fall back to the first defined type
+    choices = sorted(choices, key=lambda a: 0 if a == "builtin" else 1)
+    discovery_parser.add_argument(
+        "--discovery",
+        choices=choices,
+        default=next(iter(choices)),
+        required=False,
+        help="interpreter discovery method",
+    )
+    options, _ = parser.parse_known_args(args)
+    discover_class = discover_types[options.discovery]
+    discover_class.add_parser_arguments(discovery_parser)
+    options, _ = parser.parse_known_args(args, namespace=options)
+    return discover_class(options)
+
+
+def _get_default_discovery(discover_types):
+    return list(discover_types.keys())
+
+
+__all__ = [
+    "get_discover",
+    "Discovery",
+]
diff --git a/venv/lib/python3.10/site-packages/virtualenv/run/plugin/seeders.py b/venv/lib/python3.10/site-packages/virtualenv/run/plugin/seeders.py
new file mode 100644
index 0000000..b1da34c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/run/plugin/seeders.py
@@ -0,0 +1,40 @@
+from __future__ import annotations
+
+from .base import ComponentBuilder
+
+
+class SeederSelector(ComponentBuilder):
+    def __init__(self, interpreter, parser) -> None:
+        possible = self.options("virtualenv.seed")
+        super().__init__(interpreter, parser, "seeder", possible)
+
+    def add_selector_arg_parse(self, name, choices):
+        self.parser.add_argument(
+            f"--{name}",
+            choices=choices,
+            default=self._get_default(),
+            required=False,
+            help="seed packages install method",
+        )
+        self.parser.add_argument(
+            "--no-seed",
+            "--without-pip",
+            help="do not install seed packages",
+            action="store_true",
+            dest="no_seed",
+        )
+
+    @staticmethod
+    def _get_default():
+        return "app-data"
+
+    def handle_selected_arg_parse(self, options):
+        return super().handle_selected_arg_parse(options)
+
+    def create(self, options):
+        return self._impl_class(options)
+
+
+__all__ = [
+    "SeederSelector",
+]
diff --git a/venv/lib/python3.10/site-packages/virtualenv/run/session.py b/venv/lib/python3.10/site-packages/virtualenv/run/session.py
new file mode 100644
index 0000000..9ffd890
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/run/session.py
@@ -0,0 +1,89 @@
+from __future__ import annotations
+
+import json
+import logging
+
+
+class Session:
+    """Represents a virtual environment creation session."""
+
+    def __init__(self, verbosity, app_data, interpreter, creator, seeder, activators) -> None:  # noqa: PLR0913
+        self._verbosity = verbosity
+        self._app_data = app_data
+        self._interpreter = interpreter
+        self._creator = creator
+        self._seeder = seeder
+        self._activators = activators
+
+    @property
+    def verbosity(self):
+        """The verbosity of the run."""
+        return self._verbosity
+
+    @property
+    def interpreter(self):
+        """Create a virtual environment based on this reference interpreter."""
+        return self._interpreter
+
+    @property
+    def creator(self):
+        """The creator used to build the virtual environment (must be compatible with the interpreter)."""
+        return self._creator
+
+    @property
+    def seeder(self):
+        """The mechanism used to provide the seed packages (pip, setuptools, wheel)."""
+        return self._seeder
+
+    @property
+    def activators(self):
+        """Activators used to generate activations scripts."""
+        return self._activators
+
+    def run(self):
+        self._create()
+        self._seed()
+        self._activate()
+        self.creator.pyenv_cfg.write()
+
+    def _create(self):
+        logging.info("create virtual environment via %s", self.creator)
+        self.creator.run()
+        logging.debug(_DEBUG_MARKER)
+        logging.debug("%s", _Debug(self.creator))
+
+    def _seed(self):
+        if self.seeder is not None and self.seeder.enabled:
+            logging.info("add seed packages via %s", self.seeder)
+            self.seeder.run(self.creator)
+
+    def _activate(self):
+        if self.activators:
+            active = ", ".join(type(i).__name__.replace("Activator", "") for i in self.activators)
+            logging.info("add activators for %s", active)
+            for activator in self.activators:
+                activator.generate(self.creator)
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self._app_data.close()
+
+
+_DEBUG_MARKER = "=" * 30 + " target debug " + "=" * 30
+
+
+class _Debug:
+    """lazily populate debug."""
+
+    def __init__(self, creator) -> None:
+        self.creator = creator
+
+    def __repr__(self) -> str:
+        return json.dumps(self.creator.debug, indent=2)
+
+
+__all__ = [
+    "Session",
+]
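
session_via_cli (defined in run/__init__.py above) returns this Session without running it, which allows dry inspection; a sketch with an arbitrary path:

    from virtualenv import session_via_cli

    session = session_via_cli(["my-venv"])      # parse only, nothing created yet
    print(session.interpreter)                  # resolved target interpreter
    print(type(session.creator).__name__)       # selected creator plugin
    # session.run() would execute the create/seed/activate steps shown above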
diff --git a/venv/lib/python3.10/site-packages/virtualenv/seed/__init__.py b/venv/lib/python3.10/site-packages/virtualenv/seed/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/venv/lib/python3.10/site-packages/virtualenv/seed/embed/__init__.py b/venv/lib/python3.10/site-packages/virtualenv/seed/embed/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/venv/lib/python3.10/site-packages/virtualenv/seed/embed/base_embed.py b/venv/lib/python3.10/site-packages/virtualenv/seed/embed/base_embed.py
new file mode 100644
index 0000000..5ff2c84
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/seed/embed/base_embed.py
@@ -0,0 +1,118 @@
+from __future__ import annotations
+
+from abc import ABCMeta
+from pathlib import Path
+
+from virtualenv.seed.seeder import Seeder
+from virtualenv.seed.wheels import Version
+
+PERIODIC_UPDATE_ON_BY_DEFAULT = True
+
+
+class BaseEmbed(Seeder, metaclass=ABCMeta):
+    def __init__(self, options) -> None:
+        super().__init__(options, enabled=options.no_seed is False)
+
+        self.download = options.download
+        self.extra_search_dir = [i.resolve() for i in options.extra_search_dir if i.exists()]
+
+        self.pip_version = options.pip
+        self.setuptools_version = options.setuptools
+        self.wheel_version = options.wheel
+
+        self.no_pip = options.no_pip
+        self.no_setuptools = options.no_setuptools
+        self.no_wheel = options.no_wheel
+        self.app_data = options.app_data
+        self.periodic_update = not options.no_periodic_update
+
+        if not self.distribution_to_versions():
+            self.enabled = False
+
+    @classmethod
+    def distributions(cls) -> dict[str, Version]:
+        return {
+            "pip": Version.bundle,
+            "setuptools": Version.bundle,
+            "wheel": Version.bundle,
+        }
+
+    def distribution_to_versions(self) -> dict[str, str]:
+        return {
+            distribution: getattr(self, f"{distribution}_version")
+            for distribution in self.distributions()
+            if getattr(self, f"no_{distribution}") is False and getattr(self, f"{distribution}_version") != "none"
+        }
+
+    @classmethod
+    def add_parser_arguments(cls, parser, interpreter, app_data):  # noqa: ARG003
+        group = parser.add_mutually_exclusive_group()
+        group.add_argument(
+            "--no-download",
+            "--never-download",
+            dest="download",
+            action="store_false",
+            help=f"pass to disable download of the latest {'/'.join(cls.distributions())} from PyPI",
+            default=True,
+        )
+        group.add_argument(
+            "--download",
+            dest="download",
+            action="store_true",
+            help=f"pass to enable download of the latest {'/'.join(cls.distributions())} from PyPI",
+            default=False,
+        )
+        parser.add_argument(
+            "--extra-search-dir",
+            metavar="d",
+            type=Path,
+            nargs="+",
+            help="a path containing wheels to extend the internal wheel list (can be set 1+ times)",
+            default=[],
+        )
+        for distribution, default in cls.distributions().items():
+            if interpreter.version_info[:2] >= (3, 12) and distribution in {"wheel", "setuptools"}:
+                default = "none"  # noqa: PLW2901
+            parser.add_argument(
+                f"--{distribution}",
+                dest=distribution,
+                metavar="version",
+                help=f"version of {distribution} to install as seed: embed, bundle, none or exact version",
+                default=default,
+            )
+        for distribution in cls.distributions():
+            parser.add_argument(
+                f"--no-{distribution}",
+                dest=f"no_{distribution}",
+                action="store_true",
+                help=f"do not install {distribution}",
+                default=False,
+            )
+        parser.add_argument(
+            "--no-periodic-update",
+            dest="no_periodic_update",
+            action="store_true",
+            help="disable the periodic (once every 14 days) update of the embedded wheels",
+            default=not PERIODIC_UPDATE_ON_BY_DEFAULT,
+        )
+
+    def __repr__(self) -> str:
+        result = self.__class__.__name__
+        result += "("
+        if self.extra_search_dir:
+            result += f"extra_search_dir={', '.join(str(i) for i in self.extra_search_dir)},"
+        result += f"download={self.download},"
+        for distribution in self.distributions():
+            if getattr(self, f"no_{distribution}"):
+                continue
+            version = getattr(self, f"{distribution}_version", None)
+            if version == "none":
+                continue
+            ver = f"={version or 'latest'}"
+            result += f" {distribution}{ver},"
+        return result[:-1] + ")"
+
+
+__all__ = [
+    "BaseEmbed",
+]
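
distribution_to_versions above filters the pip/setuptools/wheel settings down to what actually gets seeded; a standalone mirror with toy values:

    def distribution_to_versions(versions, disabled):  # mirrors the method above
        return {d: v for d, v in versions.items() if d not in disabled and v != "none"}

    print(distribution_to_versions(
        {"pip": "23.0", "setuptools": "bundle", "wheel": "none"},
        disabled={"setuptools"},  # as if --no-setuptools was passed
    ))
    # {'pip': '23.0'} - wheel dropped via "none", setuptools via the no_* flag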
diff --git a/venv/lib/python3.10/site-packages/virtualenv/seed/embed/pip_invoke.py b/venv/lib/python3.10/site-packages/virtualenv/seed/embed/pip_invoke.py
new file mode 100644
index 0000000..2625a01
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/seed/embed/pip_invoke.py
@@ -0,0 +1,63 @@
+from __future__ import annotations
+
+import logging
+from contextlib import contextmanager
+from subprocess import Popen
+
+from virtualenv.discovery.cached_py_info import LogCmd
+from virtualenv.seed.embed.base_embed import BaseEmbed
+from virtualenv.seed.wheels import Version, get_wheel, pip_wheel_env_run
+
+
+class PipInvoke(BaseEmbed):
+    def __init__(self, options) -> None:
+        super().__init__(options)
+
+    def run(self, creator):
+        if not self.enabled:
+            return
+        for_py_version = creator.interpreter.version_release_str
+        with self.get_pip_install_cmd(creator.exe, for_py_version) as cmd:
+            env = pip_wheel_env_run(self.extra_search_dir, self.app_data, self.env)
+            self._execute(cmd, env)
+
+    @staticmethod
+    def _execute(cmd, env):
+        logging.debug("pip seed by running: %s", LogCmd(cmd, env))
+        process = Popen(cmd, env=env)  # noqa: S603
+        process.communicate()
+        if process.returncode != 0:
+            msg = f"failed seed with code {process.returncode}"
+            raise RuntimeError(msg)
+        return process
+
+    @contextmanager
+    def get_pip_install_cmd(self, exe, for_py_version):
+        cmd = [str(exe), "-m", "pip", "-q", "install", "--only-binary", ":all:", "--disable-pip-version-check"]
+        if not self.download:
+            cmd.append("--no-index")
+        folders = set()
+        for dist, version in self.distribution_to_versions().items():
+            wheel = get_wheel(
+                distribution=dist,
+                version=version,
+                for_py_version=for_py_version,
+                search_dirs=self.extra_search_dir,
+                download=False,
+                app_data=self.app_data,
+                do_periodic_update=self.periodic_update,
+                env=self.env,
+            )
+            if wheel is None:
+                msg = f"could not get wheel for distribution {dist}"
+                raise RuntimeError(msg)
+            folders.add(str(wheel.path.parent))
+            cmd.append(Version.as_pip_req(dist, wheel.version))
+        for folder in sorted(folders):
+            cmd.extend(["--find-links", str(folder)])
+        yield cmd
+
+
+__all__ = [
+    "PipInvoke",
+]
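
The context manager above assembles a plain pip invocation pinned to locally resolved wheels; the shape of the resulting command, with illustrative paths and versions:

    exe, reqs, links = "/venv/bin/python", ["pip==23.0", "wheel==0.40.0"], ["/cache/wheels"]
    cmd = [exe, "-m", "pip", "-q", "install", "--only-binary", ":all:", "--disable-pip-version-check"]
    cmd.append("--no-index")     # only added when download is disabled
    cmd += reqs                  # Version.as_pip_req output per distribution
    for folder in links:         # parent folders of the resolved wheels
        cmd += ["--find-links", folder]
    print(" ".join(cmd))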
diff --git a/venv/lib/python3.10/site-packages/virtualenv/seed/embed/via_app_data/__init__.py b/venv/lib/python3.10/site-packages/virtualenv/seed/embed/via_app_data/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/venv/lib/python3.10/site-packages/virtualenv/seed/embed/via_app_data/pip_install/__init__.py b/venv/lib/python3.10/site-packages/virtualenv/seed/embed/via_app_data/pip_install/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/venv/lib/python3.10/site-packages/virtualenv/seed/embed/via_app_data/pip_install/base.py b/venv/lib/python3.10/site-packages/virtualenv/seed/embed/via_app_data/pip_install/base.py
new file mode 100644
index 0000000..cc3b736
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/seed/embed/via_app_data/pip_install/base.py
@@ -0,0 +1,204 @@
+from __future__ import annotations
+
+import logging
+import os
+import re
+import zipfile
+from abc import ABCMeta, abstractmethod
+from configparser import ConfigParser
+from itertools import chain
+from pathlib import Path
+from tempfile import mkdtemp
+
+from distlib.scripts import ScriptMaker, enquote_executable
+
+from virtualenv.util.path import safe_delete
+
+
+class PipInstall(metaclass=ABCMeta):
+    def __init__(self, wheel, creator, image_folder) -> None:
+        self._wheel = wheel
+        self._creator = creator
+        self._image_dir = image_folder
+        self._extracted = False
+        self.__dist_info = None
+        self._console_entry_points = None
+
+    @abstractmethod
+    def _sync(self, src, dst):
+        raise NotImplementedError
+
+    def install(self, version_info):
+        self._extracted = True
+        self._uninstall_previous_version()
+        # sync image
+        for filename in self._image_dir.iterdir():
+            into = self._creator.purelib / filename.name
+            self._sync(filename, into)
+        # generate console executables
+        consoles = set()
+        script_dir = self._creator.script_dir
+        for name, module in self._console_scripts.items():
+            consoles.update(self._create_console_entry_point(name, module, script_dir, version_info))
+        logging.debug("generated console scripts %s", " ".join(i.name for i in consoles))
+
+    def build_image(self):
+        # 1. first extract the wheel
+        logging.debug("build install image for %s to %s", self._wheel.name, self._image_dir)
+        with zipfile.ZipFile(str(self._wheel)) as zip_ref:
+            self._shorten_path_if_needed(zip_ref)
+            zip_ref.extractall(str(self._image_dir))
+            self._extracted = True
+        # 2. now add additional files not present in the distribution
+        new_files = self._generate_new_files()
+        # 3. finally fix the records file
+        self._fix_records(new_files)
+
+    def _shorten_path_if_needed(self, zip_ref):
+        if os.name == "nt":
+            to_folder = str(self._image_dir)
+            # https://docs.microsoft.com/en-us/windows/win32/fileio/maximum-file-path-limitation
+            zip_max_len = max(len(i) for i in zip_ref.namelist())
+            path_len = zip_max_len + len(to_folder)
+            if path_len > 260:  # noqa: PLR2004
+                self._image_dir.mkdir(exist_ok=True)  # to get a short path must exist
+
+                from virtualenv.util.path import get_short_path_name
+
+                to_folder = get_short_path_name(to_folder)
+                self._image_dir = Path(to_folder)
+
+    def _records_text(self, files):
+        return "\n".join(f"{os.path.relpath(str(rec), str(self._image_dir))},," for rec in files)
+
+    def _generate_new_files(self):
+        new_files = set()
+        installer = self._dist_info / "INSTALLER"
+        installer.write_text("pip\n", encoding="utf-8")
+        new_files.add(installer)
+        # inject a no-op root element, as a workaround for the bug in https://github.com/pypa/pip/issues/7226
+        marker = self._image_dir / f"{self._dist_info.stem}.virtualenv"
+        marker.write_text("", encoding="utf-8")
+        new_files.add(marker)
+        folder = mkdtemp()
+        try:
+            to_folder = Path(folder)
+            rel = os.path.relpath(str(self._creator.script_dir), str(self._creator.purelib))
+            version_info = self._creator.interpreter.version_info
+            for name, module in self._console_scripts.items():
+                new_files.update(
+                    Path(os.path.normpath(str(self._image_dir / rel / i.name)))
+                    for i in self._create_console_entry_point(name, module, to_folder, version_info)
+                )
+        finally:
+            safe_delete(folder)
+        return new_files
+
+    @property
+    def _dist_info(self):
+        if self._extracted is False:
+            return None  # pragma: no cover
+        if self.__dist_info is None:
+            files = []
+            for filename in self._image_dir.iterdir():
+                files.append(filename.name)
+                if filename.suffix == ".dist-info":
+                    self.__dist_info = filename
+                    break
+            else:
+                msg = f"no .dist-info at {self._image_dir}, has {', '.join(files)}"
+                raise RuntimeError(msg)  # pragma: no cover
+        return self.__dist_info
+
+    @abstractmethod
+    def _fix_records(self, extra_record_data):
+        raise NotImplementedError
+
+    @property
+    def _console_scripts(self):
+        if self._extracted is False:
+            return None  # pragma: no cover
+        if self._console_entry_points is None:
+            self._console_entry_points = {}
+            entry_points = self._dist_info / "entry_points.txt"
+            if entry_points.exists():
+                parser = ConfigParser()
+                with entry_points.open(encoding="utf-8") as file_handler:
+                    parser.read_file(file_handler)
+                if "console_scripts" in parser.sections():
+                    for name, value in parser.items("console_scripts"):
+                        match = re.match(r"(.*?)-?\d\.?\d*", name)
+                        our_name = match.groups(1)[0] if match else name
+                        self._console_entry_points[our_name] = value
+        return self._console_entry_points
+
+    def _create_console_entry_point(self, name, value, to_folder, version_info):
+        result = []
+        maker = ScriptMakerCustom(to_folder, version_info, self._creator.exe, name)
+        specification = f"{name} = {value}"
+        new_files = maker.make(specification)
+        result.extend(Path(i) for i in new_files)
+        return result
+
+    def _uninstall_previous_version(self):
+        dist_name = self._dist_info.stem.split("-")[0]
+        in_folders = chain.from_iterable([i.iterdir() for i in (self._creator.purelib, self._creator.platlib)])
+        paths = (p for p in in_folders if p.stem.split("-")[0] == dist_name and p.suffix == ".dist-info" and p.is_dir())
+        existing_dist = next(paths, None)
+        if existing_dist is not None:
+            self._uninstall_dist(existing_dist)
+
+    @staticmethod
+    def _uninstall_dist(dist):
+        dist_base = dist.parent
+        logging.debug("uninstall existing distribution %s from %s", dist.stem, dist_base)
+
+        top_txt = dist / "top_level.txt"  # add top level packages at folder level
+        paths = (
+            {dist.parent / i.strip() for i in top_txt.read_text(encoding="utf-8").splitlines()}
+            if top_txt.exists()
+            else set()
+        )
+        paths.add(dist)  # add the dist-info folder itself
+
+        base_dirs, record = paths.copy(), dist / "RECORD"  # collect entries in record that we did not register yet
+        for name in (
+            (i.split(",")[0] for i in record.read_text(encoding="utf-8").splitlines()) if record.exists() else ()
+        ):
+            path = dist_base / name
+            if not any(p in base_dirs for p in path.parents):  # only add if not already added as a base dir
+                paths.add(path)
+
+        for path in sorted(paths):  # actually remove stuff in a stable order
+            if path.exists():
+                if path.is_dir() and not path.is_symlink():
+                    safe_delete(path)
+                else:
+                    path.unlink()
+
+    def clear(self):
+        if self._image_dir.exists():
+            safe_delete(self._image_dir)
+
+    def has_image(self):
+        return self._image_dir.exists() and next(self._image_dir.iterdir()) is not None
+
+
+class ScriptMakerCustom(ScriptMaker):
+    def __init__(self, target_dir, version_info, executable, name) -> None:
+        super().__init__(None, str(target_dir))
+        self.clobber = True  # overwrite
+        self.set_mode = True  # ensure they are executable
+        self.executable = enquote_executable(str(executable))
+        self.version_info = version_info.major, version_info.minor
+        self.variants = {"", "X", "X.Y"}
+        self._name = name
+
+    def _write_script(self, names, shebang, script_bytes, filenames, ext):  # noqa: PLR0913
+        names.add(f"{self._name}{self.version_info[0]}.{self.version_info[1]}")
+        super()._write_script(names, shebang, script_bytes, filenames, ext)
+
+
+__all__ = [
+    "PipInstall",
+]
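
The regular expression in _console_scripts strips trailing version suffixes from entry-point names, so that e.g. a pip3.10 entry collapses onto pip; a quick check:

    import re

    for name in ("pip", "pip3", "pip3.10"):
        match = re.match(r"(.*?)-?\d\.?\d*", name)
        print(name, "->", match.groups(1)[0] if match else name)
    # pip -> pip, pip3 -> pip, pip3.10 -> pip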
diff --git a/venv/lib/python3.10/site-packages/virtualenv/seed/embed/via_app_data/pip_install/copy.py b/venv/lib/python3.10/site-packages/virtualenv/seed/embed/via_app_data/pip_install/copy.py
new file mode 100644
index 0000000..b5f01aa
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/seed/embed/via_app_data/pip_install/copy.py
@@ -0,0 +1,40 @@
+from __future__ import annotations
+
+import os
+from pathlib import Path
+
+from virtualenv.util.path import copy
+
+from .base import PipInstall
+
+
+class CopyPipInstall(PipInstall):
+    def _sync(self, src, dst):
+        copy(src, dst)
+
+    def _generate_new_files(self):
+        # create the pyc files
+        new_files = super()._generate_new_files()
+        new_files.update(self._cache_files())
+        return new_files
+
+    def _cache_files(self):
+        version = self._creator.interpreter.version_info
+        py_c_ext = f".{self._creator.interpreter.implementation.lower()}-{version.major}{version.minor}.pyc"
+        for root, dirs, files in os.walk(str(self._image_dir), topdown=True):
+            root_path = Path(root)
+            for name in files:
+                if name.endswith(".py"):
+                    yield root_path / f"{name[:-3]}{py_c_ext}"
+            for name in dirs:
+                yield root_path / name / "__pycache__"
+
+    def _fix_records(self, new_files):
+        extra_record_data_str = self._records_text(new_files)
+        with (self._dist_info / "RECORD").open("ab") as file_handler:
+            file_handler.write(extra_record_data_str.encode("utf-8"))
+
+
+__all__ = [
+    "CopyPipInstall",
+]
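
_cache_files above predicts the bytecode artifacts the copy install will produce so they can be listed in RECORD; the name computation, mirrored for CPython 3.10:

    implementation, major, minor = "CPython", 3, 10  # illustrative interpreter facts
    py_c_ext = f".{implementation.lower()}-{major}{minor}.pyc"
    name = "foo.py"
    print(f"{name[:-3]}{py_c_ext}")  # foo.cpython-310.pyc
    # directories additionally contribute a <dir>/__pycache__ entry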
diff --git a/venv/lib/python3.10/site-packages/virtualenv/seed/embed/via_app_data/pip_install/symlink.py b/venv/lib/python3.10/site-packages/virtualenv/seed/embed/via_app_data/pip_install/symlink.py
new file mode 100644
index 0000000..6bc5e51
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/seed/embed/via_app_data/pip_install/symlink.py
@@ -0,0 +1,59 @@
+from __future__ import annotations
+
+import os
+from stat import S_IREAD, S_IRGRP, S_IROTH
+from subprocess import PIPE, Popen
+
+from virtualenv.util.path import safe_delete, set_tree
+
+from .base import PipInstall
+
+
+class SymlinkPipInstall(PipInstall):
+    def _sync(self, src, dst):
+        os.symlink(str(src), str(dst))
+
+    def _generate_new_files(self):
+        # create the pyc files, as the build image will be R/O
+        cmd = [str(self._creator.exe), "-m", "compileall", str(self._image_dir)]
+        process = Popen(cmd, stdout=PIPE, stderr=PIPE)  # noqa: S603
+        process.communicate()
+        # the root __pycache__ is shared, so we do not symlink it - but still add its pyc files to the RECORD so they are removed on uninstall
+        root_py_cache = self._image_dir / "__pycache__"
+        new_files = set()
+        if root_py_cache.exists():
+            new_files.update(root_py_cache.iterdir())
+            new_files.add(root_py_cache)
+            safe_delete(root_py_cache)
+        core_new_files = super()._generate_new_files()
+        # remove files that are within the image folder deeper than one level (as these will not be linked directly)
+        for file in core_new_files:
+            try:
+                rel = file.relative_to(self._image_dir)
+                if len(rel.parts) > 1:
+                    continue
+            except ValueError:
+                pass
+            new_files.add(file)
+        return new_files
+
+    def _fix_records(self, new_files):
+        new_files.update(i for i in self._image_dir.iterdir())
+        extra_record_data_str = self._records_text(sorted(new_files, key=str))
+        with open(str(self._dist_info / "RECORD"), "wb") as file_handler:
+            file_handler.write(extra_record_data_str.encode("utf-8"))
+
+    def build_image(self):
+        super().build_image()
+        # protect the image by making it read only
+        set_tree(self._image_dir, S_IREAD | S_IRGRP | S_IROTH)
+
+    def clear(self):
+        if self._image_dir.exists():
+            safe_delete(self._image_dir)
+        super().clear()
+
+
+__all__ = [
+    "SymlinkPipInstall",
+]
diff --git a/venv/lib/python3.10/site-packages/virtualenv/seed/embed/via_app_data/via_app_data.py b/venv/lib/python3.10/site-packages/virtualenv/seed/embed/via_app_data/via_app_data.py
new file mode 100644
index 0000000..7e58bfc
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/seed/embed/via_app_data/via_app_data.py
@@ -0,0 +1,144 @@
+"""Bootstrap."""
+
+from __future__ import annotations
+
+import logging
+import sys
+import traceback
+from contextlib import contextmanager
+from pathlib import Path
+from subprocess import CalledProcessError
+from threading import Lock, Thread
+
+from virtualenv.info import fs_supports_symlink
+from virtualenv.seed.embed.base_embed import BaseEmbed
+from virtualenv.seed.wheels import get_wheel
+
+from .pip_install.copy import CopyPipInstall
+from .pip_install.symlink import SymlinkPipInstall
+
+
+class FromAppData(BaseEmbed):
+    def __init__(self, options) -> None:
+        super().__init__(options)
+        self.symlinks = options.symlink_app_data
+
+    @classmethod
+    def add_parser_arguments(cls, parser, interpreter, app_data):
+        super().add_parser_arguments(parser, interpreter, app_data)
+        can_symlink = app_data.transient is False and fs_supports_symlink()
+        sym = "" if can_symlink else "not supported - "
+        parser.add_argument(
+            "--symlink-app-data",
+            dest="symlink_app_data",
+            action="store_true" if can_symlink else "store_false",
+            help=f"{sym} symlink the python packages from the app-data folder (requires seed pip>=19.3)",
+            default=False,
+        )
+
+    def run(self, creator):
+        if not self.enabled:
+            return
+        with self._get_seed_wheels(creator) as name_to_whl:
+            pip_version = name_to_whl["pip"].version_tuple if "pip" in name_to_whl else None
+            installer_class = self.installer_class(pip_version)
+            exceptions = {}
+
+            def _install(name, wheel):
+                try:
+                    logging.debug("install %s from wheel %s via %s", name, wheel, installer_class.__name__)
+                    key = Path(installer_class.__name__) / wheel.path.stem
+                    wheel_img = self.app_data.wheel_image(creator.interpreter.version_release_str, key)
+                    installer = installer_class(wheel.path, creator, wheel_img)
+                    parent = self.app_data.lock / wheel_img.parent
+                    with parent.non_reentrant_lock_for_key(wheel_img.name):
+                        if not installer.has_image():
+                            installer.build_image()
+                    installer.install(creator.interpreter.version_info)
+                except Exception:  # noqa: BLE001
+                    exceptions[name] = sys.exc_info()
+
+            threads = [Thread(target=_install, args=(n, w)) for n, w in name_to_whl.items()]
+            for thread in threads:
+                thread.start()
+            for thread in threads:
+                thread.join()
+            if exceptions:
+                messages = [f"failed to build image {', '.join(exceptions.keys())} because:"]
+                for value in exceptions.values():
+                    exc_type, exc_value, exc_traceback = value
+                    messages.append("".join(traceback.format_exception(exc_type, exc_value, exc_traceback)))
+                raise RuntimeError("\n".join(messages))
+
+    @contextmanager
+    def _get_seed_wheels(self, creator):  # noqa: C901
+        name_to_whl, lock, fail = {}, Lock(), {}
+
+        def _get(distribution, version):
+            for_py_version = creator.interpreter.version_release_str
+            failure, result = None, None
+            # fallback to download in case the exact version is not available
+            for download in [True] if self.download else [False, True]:
+                failure = None
+                try:
+                    result = get_wheel(
+                        distribution=distribution,
+                        version=version,
+                        for_py_version=for_py_version,
+                        search_dirs=self.extra_search_dir,
+                        download=download,
+                        app_data=self.app_data,
+                        do_periodic_update=self.periodic_update,
+                        env=self.env,
+                    )
+                    if result is not None:
+                        break
+                except Exception as exception:
+                    logging.exception("fail")
+                    failure = exception
+            if failure:
+                if isinstance(failure, CalledProcessError):
+                    msg = f"failed to download {distribution}"
+                    if version is not None:
+                        msg += f" version {version}"
+                    msg += f", pip download exit code {failure.returncode}"
+                    output = failure.output + failure.stderr
+                    if output:
+                        msg += "\n"
+                        msg += output
+                else:
+                    msg = repr(failure)
+                logging.error(msg)
+                with lock:
+                    fail[distribution] = version
+            else:
+                with lock:
+                    name_to_whl[distribution] = result
+
+        threads = [
+            Thread(target=_get, args=(distribution, version))
+            for distribution, version in self.distribution_to_versions().items()
+        ]
+        for thread in threads:
+            thread.start()
+        for thread in threads:
+            thread.join()
+        if fail:
+            msg = f"seed failed due to failing to download wheels {', '.join(fail.keys())}"
+            raise RuntimeError(msg)
+        yield name_to_whl
+
+    def installer_class(self, pip_version_tuple):
+        if self.symlinks and pip_version_tuple and pip_version_tuple >= (19, 3):  # symlink support requires pip 19.3+
+            return SymlinkPipInstall
+        return CopyPipInstall
+
+    def __repr__(self) -> str:
+        msg = f", via={'symlink' if self.symlinks else 'copy'}, app_data_dir={self.app_data}"
+        base = super().__repr__()
+        return f"{base[:-1]}{msg}{base[-1]}"
+
+
+__all__ = [
+    "FromAppData",
+]
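
The run method above fans the wheel installs out across threads and funnels worker failures into a shared dict, re-raising them as one RuntimeError at the end. A minimal, self-contained sketch of that pattern; run_all and the tasks mapping are hypothetical names, not virtualenv API:

import sys
import traceback
from threading import Thread

def run_all(tasks):
    """Run zero-argument callables in parallel; collect failures instead of crashing workers."""
    exceptions = {}

    def _run(name, func):
        try:
            func()
        except Exception:  # record the failure, keep the other workers going
            exceptions[name] = sys.exc_info()

    threads = [Thread(target=_run, args=(n, f)) for n, f in tasks.items()]
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()
    if exceptions:
        messages = [f"failed: {', '.join(exceptions)}"]
        for exc_info in exceptions.values():
            messages.append("".join(traceback.format_exception(*exc_info)))
        raise RuntimeError("\n".join(messages))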
diff --git a/venv/lib/python3.10/site-packages/virtualenv/seed/seeder.py b/venv/lib/python3.10/site-packages/virtualenv/seed/seeder.py
new file mode 100644
index 0000000..01f9430
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/seed/seeder.py
@@ -0,0 +1,43 @@
+from __future__ import annotations
+
+from abc import ABCMeta, abstractmethod
+
+
+class Seeder(metaclass=ABCMeta):
+    """A seeder will install some seed packages into a virtual environment."""
+
+    def __init__(self, options, enabled) -> None:
+        """
+        Create.
+
+        :param options: the parsed options as defined within :meth:`add_parser_arguments`
+        :param enabled: a flag indicating whether the seeder is enabled or not
+        """
+        self.enabled = enabled
+        self.env = options.env
+
+    @classmethod
+    def add_parser_arguments(cls, parser, interpreter, app_data):
+        """
+        Add CLI arguments for this seed mechanism.
+
+        :param parser: the CLI parser
+        :param app_data: the application data folder
+        :param interpreter: the interpreter this virtual environment is based on
+        """
+        raise NotImplementedError
+
+    @abstractmethod
+    def run(self, creator):
+        """
+        Perform the seed operation.
+
+        :param creator: the creator (based on :class:`virtualenv.create.creator.Creator`) we used to create this \
+        virtual environment
+        """
+        raise NotImplementedError
+
+
+__all__ = [
+    "Seeder",
+]
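
A minimal sketch of the contract above; the NoopSeeder name and its do-nothing behavior are illustrative, not part of virtualenv:

from virtualenv.seed.seeder import Seeder

class NoopSeeder(Seeder):
    """A seeder that deliberately installs nothing."""

    @classmethod
    def add_parser_arguments(cls, parser, interpreter, app_data):
        pass  # no extra CLI options for this seeder

    def run(self, creator):
        if not self.enabled:
            return
        # a real seeder would install pip/setuptools/wheel into the created environment here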
diff --git a/venv/lib/python3.10/site-packages/virtualenv/seed/wheels/__init__.py b/venv/lib/python3.10/site-packages/virtualenv/seed/wheels/__init__.py
new file mode 100644
index 0000000..94b4cc9
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/seed/wheels/__init__.py
@@ -0,0 +1,11 @@
+from __future__ import annotations
+
+from .acquire import get_wheel, pip_wheel_env_run
+from .util import Version, Wheel
+
+__all__ = [
+    "get_wheel",
+    "pip_wheel_env_run",
+    "Version",
+    "Wheel",
+]
diff --git a/venv/lib/python3.10/site-packages/virtualenv/seed/wheels/acquire.py b/venv/lib/python3.10/site-packages/virtualenv/seed/wheels/acquire.py
new file mode 100644
index 0000000..c5ed731
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/seed/wheels/acquire.py
@@ -0,0 +1,132 @@
+"""Bootstrap."""
+
+from __future__ import annotations
+
+import logging
+import sys
+from operator import eq, lt
+from pathlib import Path
+from subprocess import PIPE, CalledProcessError, Popen
+
+from .bundle import from_bundle
+from .periodic_update import add_wheel_to_update_log
+from .util import Version, Wheel, discover_wheels
+
+
+def get_wheel(  # noqa: PLR0913
+    distribution,
+    version,
+    for_py_version,
+    search_dirs,
+    download,
+    app_data,
+    do_periodic_update,
+    env,
+):
+    """Get a wheel with the given distribution-version-for_py_version trio, by using the extra search dir + download."""
+    # not all wheels are compatible with all python versions, so we need to qualify them by python version
+    wheel = None
+
+    if not download or version != Version.bundle:
+        # 1. acquire from bundle
+        wheel = from_bundle(distribution, version, for_py_version, search_dirs, app_data, do_periodic_update, env)
+
+    if download and wheel is None and version != Version.embed:
+        # 2. download from the internet
+        wheel = download_wheel(
+            distribution=distribution,
+            version_spec=Version.as_version_spec(version),
+            for_py_version=for_py_version,
+            search_dirs=search_dirs,
+            app_data=app_data,
+            to_folder=app_data.house,
+            env=env,
+        )
+        if wheel is not None and app_data.can_update:
+            add_wheel_to_update_log(wheel, for_py_version, app_data)
+
+    return wheel
+
+
+def download_wheel(distribution, version_spec, for_py_version, search_dirs, app_data, to_folder, env):  # noqa: PLR0913
+    to_download = f"{distribution}{version_spec or ''}"
+    logging.debug("download wheel %s %s to %s", to_download, for_py_version, to_folder)
+    cmd = [
+        sys.executable,
+        "-m",
+        "pip",
+        "download",
+        "--progress-bar",
+        "off",
+        "--disable-pip-version-check",
+        "--only-binary=:all:",
+        "--no-deps",
+        "--python-version",
+        for_py_version,
+        "-d",
+        str(to_folder),
+        to_download,
+    ]
+    # pip has no interface in python - must be a new sub-process
+    env = pip_wheel_env_run(search_dirs, app_data, env)
+    process = Popen(cmd, env=env, stdout=PIPE, stderr=PIPE, universal_newlines=True, encoding="utf-8")  # noqa: S603
+    out, err = process.communicate()
+    if process.returncode != 0:
+        kwargs = {"output": out, "stderr": err}
+        raise CalledProcessError(process.returncode, cmd, **kwargs)
+    result = _find_downloaded_wheel(distribution, version_spec, for_py_version, to_folder, out)
+    logging.debug("downloaded wheel %s", result.name)
+    return result
+
+
+def _find_downloaded_wheel(distribution, version_spec, for_py_version, to_folder, out):
+    for line in out.splitlines():
+        stripped_line = line.lstrip()
+        for marker in ("Saved ", "File was already downloaded "):
+            if stripped_line.startswith(marker):
+                return Wheel(Path(stripped_line[len(marker) :]).absolute())
+    # if for some reason the output does not match, fall back to the latest version with that spec
+    return find_compatible_in_house(distribution, version_spec, for_py_version, to_folder)
+
+
+def find_compatible_in_house(distribution, version_spec, for_py_version, in_folder):
+    wheels = discover_wheels(in_folder, distribution, None, for_py_version)
+    start, end = 0, len(wheels)
+    if version_spec is not None and version_spec:
+        if version_spec.startswith("<"):
+            from_pos, op = 1, lt
+        elif version_spec.startswith("=="):
+            from_pos, op = 2, eq
+        else:
+            raise ValueError(version_spec)
+        version = Wheel.as_version_tuple(version_spec[from_pos:])
+        start = next((at for at, w in enumerate(wheels) if op(w.version_tuple, version)), len(wheels))
+
+    return None if start == end else wheels[start]
+
+
+def pip_wheel_env_run(search_dirs, app_data, env):
+    env = env.copy()
+    env.update({"PIP_USE_WHEEL": "1", "PIP_USER": "0", "PIP_NO_INPUT": "1"})
+    wheel = get_wheel(
+        distribution="pip",
+        version=None,
+        for_py_version=f"{sys.version_info.major}.{sys.version_info.minor}",
+        search_dirs=search_dirs,
+        download=False,
+        app_data=app_data,
+        do_periodic_update=False,
+        env=env,
+    )
+    if wheel is None:
+        msg = "could not find the embedded pip"
+        raise RuntimeError(msg)
+    env["PYTHONPATH"] = str(wheel.path)
+    return env
+
+
+__all__ = [
+    "get_wheel",
+    "download_wheel",
+    "pip_wheel_env_run",
+]
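
get_wheel resolves in a fixed order: the bundled or previously cached wheel (plus any extra search dirs) first, then a pip download into the app-data wheelhouse when download is allowed. A hedged usage sketch; the app-data path is illustrative:

import os
from virtualenv.app_data import AppDataDiskFolder
from virtualenv.seed.wheels.acquire import get_wheel

app_data = AppDataDiskFolder("/tmp/virtualenv-app-data")  # illustrative location
wheel = get_wheel(
    distribution="setuptools",
    version=None,            # no pin: take the bundle or the newest acceptable wheel
    for_py_version="3.10",
    search_dirs=[],
    download=False,          # stay offline: bundle and search dirs only
    app_data=app_data,
    do_periodic_update=False,
    env=os.environ,
)
print(None if wheel is None else wheel.name)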
diff --git a/venv/lib/python3.10/site-packages/virtualenv/seed/wheels/bundle.py b/venv/lib/python3.10/site-packages/virtualenv/seed/wheels/bundle.py
new file mode 100644
index 0000000..d54ebcc
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/seed/wheels/bundle.py
@@ -0,0 +1,50 @@
+from __future__ import annotations
+
+from virtualenv.seed.wheels.embed import get_embed_wheel
+
+from .periodic_update import periodic_update
+from .util import Version, Wheel, discover_wheels
+
+
+def from_bundle(distribution, version, for_py_version, search_dirs, app_data, do_periodic_update, env):  # noqa: PLR0913
+    """Load the bundled wheel to a cache directory."""
+    of_version = Version.of_version(version)
+    wheel = load_embed_wheel(app_data, distribution, for_py_version, of_version)
+
+    if version != Version.embed:
+        # 2. check whether a periodic update has found a newer wheel
+        if app_data.can_update:
+            per = do_periodic_update
+            wheel = periodic_update(distribution, of_version, for_py_version, wheel, search_dirs, app_data, per, env)
+
+        # 3. acquire from extra search dir
+        found_wheel = from_dir(distribution, of_version, for_py_version, search_dirs)
+        if found_wheel is not None and (wheel is None or found_wheel.version_tuple > wheel.version_tuple):
+            wheel = found_wheel
+    return wheel
+
+
+def load_embed_wheel(app_data, distribution, for_py_version, version):
+    wheel = get_embed_wheel(distribution, for_py_version)
+    if wheel is not None:
+        version_match = version == wheel.version
+        if version is None or version_match:
+            with app_data.ensure_extracted(wheel.path, lambda: app_data.house) as wheel_path:
+                wheel = Wheel(wheel_path)
+        else:  # if version does not match ignore
+            wheel = None
+    return wheel
+
+
+def from_dir(distribution, version, for_py_version, directories):
+    """Load a compatible wheel from a given folder."""
+    for folder in directories:
+        for wheel in discover_wheels(folder, distribution, version, for_py_version):
+            return wheel
+    return None
+
+
+__all__ = [
+    "load_embed_wheel",
+    "from_bundle",
+]
diff --git a/venv/lib/python3.10/site-packages/virtualenv/seed/wheels/embed/__init__.py b/venv/lib/python3.10/site-packages/virtualenv/seed/wheels/embed/__init__.py
new file mode 100644
index 0000000..0d87e4b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/seed/wheels/embed/__init__.py
@@ -0,0 +1,53 @@
+from __future__ import annotations
+
+from pathlib import Path
+
+from virtualenv.seed.wheels.util import Wheel
+
+BUNDLE_FOLDER = Path(__file__).absolute().parent
+BUNDLE_SUPPORT = {
+    "3.7": {
+        "pip": "pip-23.2.1-py3-none-any.whl",
+        "setuptools": "setuptools-68.0.0-py3-none-any.whl",
+        "wheel": "wheel-0.41.1-py3-none-any.whl",
+    },
+    "3.8": {
+        "pip": "pip-23.2.1-py3-none-any.whl",
+        "setuptools": "setuptools-68.0.0-py3-none-any.whl",
+        "wheel": "wheel-0.41.1-py3-none-any.whl",
+    },
+    "3.9": {
+        "pip": "pip-23.2.1-py3-none-any.whl",
+        "setuptools": "setuptools-68.0.0-py3-none-any.whl",
+        "wheel": "wheel-0.41.1-py3-none-any.whl",
+    },
+    "3.10": {
+        "pip": "pip-23.2.1-py3-none-any.whl",
+        "setuptools": "setuptools-68.0.0-py3-none-any.whl",
+        "wheel": "wheel-0.41.1-py3-none-any.whl",
+    },
+    "3.11": {
+        "pip": "pip-23.2.1-py3-none-any.whl",
+        "setuptools": "setuptools-68.0.0-py3-none-any.whl",
+        "wheel": "wheel-0.41.1-py3-none-any.whl",
+    },
+    "3.12": {
+        "pip": "pip-23.2.1-py3-none-any.whl",
+        "setuptools": "setuptools-68.0.0-py3-none-any.whl",
+        "wheel": "wheel-0.41.1-py3-none-any.whl",
+    },
+}
+MAX = "3.7"
+
+
+def get_embed_wheel(distribution, for_py_version):
+    path = BUNDLE_FOLDER / (BUNDLE_SUPPORT.get(for_py_version, {}) or BUNDLE_SUPPORT[MAX]).get(distribution)
+    return Wheel.from_path(path)
+
+
+__all__ = [
+    "get_embed_wheel",
+    "BUNDLE_SUPPORT",
+    "MAX",
+    "BUNDLE_FOLDER",
+]
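
get_embed_wheel simply maps a python version to the wheel file bundled next to this module, falling back to the MAX entry for unknown versions; for example:

from virtualenv.seed.wheels.embed import get_embed_wheel

wheel = get_embed_wheel("pip", "3.10")
print(wheel.name)           # pip-23.2.1-py3-none-any.whl
print(wheel.version_tuple)  # (23, 2, 1)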
diff --git a/venv/lib/python3.10/site-packages/virtualenv/seed/wheels/embed/pip-23.2.1-py3-none-any.whl b/venv/lib/python3.10/site-packages/virtualenv/seed/wheels/embed/pip-23.2.1-py3-none-any.whl
new file mode 100644
index 0000000..ba28ef0
Binary files /dev/null and b/venv/lib/python3.10/site-packages/virtualenv/seed/wheels/embed/pip-23.2.1-py3-none-any.whl differ
diff --git a/venv/lib/python3.10/site-packages/virtualenv/seed/wheels/embed/setuptools-68.0.0-py3-none-any.whl b/venv/lib/python3.10/site-packages/virtualenv/seed/wheels/embed/setuptools-68.0.0-py3-none-any.whl
new file mode 100644
index 0000000..81f1545
Binary files /dev/null and b/venv/lib/python3.10/site-packages/virtualenv/seed/wheels/embed/setuptools-68.0.0-py3-none-any.whl differ
diff --git a/venv/lib/python3.10/site-packages/virtualenv/seed/wheels/embed/wheel-0.41.1-py3-none-any.whl b/venv/lib/python3.10/site-packages/virtualenv/seed/wheels/embed/wheel-0.41.1-py3-none-any.whl
new file mode 100644
index 0000000..17e32b3
Binary files /dev/null and b/venv/lib/python3.10/site-packages/virtualenv/seed/wheels/embed/wheel-0.41.1-py3-none-any.whl differ
diff --git a/venv/lib/python3.10/site-packages/virtualenv/seed/wheels/periodic_update.py b/venv/lib/python3.10/site-packages/virtualenv/seed/wheels/periodic_update.py
new file mode 100644
index 0000000..3d0239d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/seed/wheels/periodic_update.py
@@ -0,0 +1,428 @@
+"""Periodically update bundled versions."""
+
+
+from __future__ import annotations
+
+import json
+import logging
+import os
+import ssl
+import sys
+from datetime import datetime, timedelta, timezone
+from itertools import groupby
+from pathlib import Path
+from shutil import copy2
+from subprocess import DEVNULL, Popen
+from textwrap import dedent
+from threading import Thread
+from urllib.error import URLError
+from urllib.request import urlopen
+
+from virtualenv.app_data import AppDataDiskFolder
+from virtualenv.seed.wheels.embed import BUNDLE_SUPPORT
+from virtualenv.seed.wheels.util import Wheel
+from virtualenv.util.subprocess import CREATE_NO_WINDOW
+
+GRACE_PERIOD_CI = timedelta(hours=1)  # prevent version switch in the middle of a CI run
+GRACE_PERIOD_MINOR = timedelta(days=28)
+UPDATE_PERIOD = timedelta(days=14)
+UPDATE_ABORTED_DELAY = timedelta(hours=1)
+
+
+def periodic_update(  # noqa: PLR0913
+    distribution,
+    of_version,
+    for_py_version,
+    wheel,
+    search_dirs,
+    app_data,
+    do_periodic_update,
+    env,
+):
+    if do_periodic_update:
+        handle_auto_update(distribution, for_py_version, wheel, search_dirs, app_data, env)
+
+    now = datetime.now(tz=timezone.utc)
+
+    def _update_wheel(ver):
+        updated_wheel = Wheel(app_data.house / ver.filename)
+        logging.debug("using %supdated wheel %s", "periodically " if updated_wheel else "", updated_wheel)
+        return updated_wheel
+
+    u_log = UpdateLog.from_app_data(app_data, distribution, for_py_version)
+    if of_version is None:
+        for _, group in groupby(u_log.versions, key=lambda v: v.wheel.version_tuple[0:2]):
+            # use only the latest patch version per minor release; earlier patches are assumed to be buggy
+            all_patches = list(group)
+            ignore_grace_period_minor = any(version for version in all_patches if version.use(now))
+            for version in all_patches:
+                if wheel is not None and Path(version.filename).name == wheel.name:
+                    return wheel
+                if version.use(now, ignore_grace_period_minor):
+                    return _update_wheel(version)
+    else:
+        for version in u_log.versions:
+            if version.wheel.version == of_version:
+                return _update_wheel(version)
+
+    return wheel
+
+
+def handle_auto_update(distribution, for_py_version, wheel, search_dirs, app_data, env):  # noqa: PLR0913
+    embed_update_log = app_data.embed_update_log(distribution, for_py_version)
+    u_log = UpdateLog.from_dict(embed_update_log.read())
+    if u_log.needs_update:
+        u_log.periodic = True
+        u_log.started = datetime.now(tz=timezone.utc)
+        embed_update_log.write(u_log.to_dict())
+        trigger_update(distribution, for_py_version, wheel, search_dirs, app_data, periodic=True, env=env)
+
+
+def add_wheel_to_update_log(wheel, for_py_version, app_data):
+    embed_update_log = app_data.embed_update_log(wheel.distribution, for_py_version)
+    logging.debug("adding %s information to %s", wheel.name, embed_update_log.file)
+    u_log = UpdateLog.from_dict(embed_update_log.read())
+    if any(version.filename == wheel.name for version in u_log.versions):
+        logging.warning("%s already present in %s", wheel.name, embed_update_log.file)
+        return
+    # we don't need a release date for sources other than "periodic"
+    version = NewVersion(wheel.name, datetime.now(tz=timezone.utc), None, "download")
+    u_log.versions.append(version)  # always write at the end for proper updates
+    embed_update_log.write(u_log.to_dict())
+
+
+DATETIME_FMT = "%Y-%m-%dT%H:%M:%S.%fZ"
+
+
+def dump_datetime(value):
+    return None if value is None else value.strftime(DATETIME_FMT)
+
+
+def load_datetime(value):
+    return None if value is None else datetime.strptime(value, DATETIME_FMT).replace(tzinfo=timezone.utc)
+
+
+class NewVersion:
+    def __init__(self, filename, found_date, release_date, source) -> None:
+        self.filename = filename
+        self.found_date = found_date
+        self.release_date = release_date
+        self.source = source
+
+    @classmethod
+    def from_dict(cls, dictionary):
+        return cls(
+            filename=dictionary["filename"],
+            found_date=load_datetime(dictionary["found_date"]),
+            release_date=load_datetime(dictionary["release_date"]),
+            source=dictionary["source"],
+        )
+
+    def to_dict(self):
+        return {
+            "filename": self.filename,
+            "release_date": dump_datetime(self.release_date),
+            "found_date": dump_datetime(self.found_date),
+            "source": self.source,
+        }
+
+    def use(self, now, ignore_grace_period_minor=False, ignore_grace_period_ci=False):  # noqa: FBT002
+        if self.source == "manual":
+            return True
+        if self.source == "periodic" and (self.found_date < now - GRACE_PERIOD_CI or ignore_grace_period_ci):
+            if not ignore_grace_period_minor:
+                compare_from = self.release_date or self.found_date
+                return now - compare_from >= GRACE_PERIOD_MINOR
+            return True
+        return False
+
+    def __repr__(self) -> str:
+        return (
+            f"{self.__class__.__name__}(filename={self.filename}), found_date={self.found_date}, "
+            f"release_date={self.release_date}, source={self.source})"
+        )
+
+    def __eq__(self, other):
+        return type(self) == type(other) and all(
+            getattr(self, k) == getattr(other, k) for k in ["filename", "release_date", "found_date", "source"]
+        )
+
+    def __ne__(self, other):
+        return not (self == other)
+
+    @property
+    def wheel(self):
+        return Wheel(Path(self.filename))
+
+
+class UpdateLog:
+    def __init__(self, started, completed, versions, periodic) -> None:
+        self.started = started
+        self.completed = completed
+        self.versions = versions
+        self.periodic = periodic
+
+    @classmethod
+    def from_dict(cls, dictionary):
+        if dictionary is None:
+            dictionary = {}
+        return cls(
+            load_datetime(dictionary.get("started")),
+            load_datetime(dictionary.get("completed")),
+            [NewVersion.from_dict(v) for v in dictionary.get("versions", [])],
+            dictionary.get("periodic"),
+        )
+
+    @classmethod
+    def from_app_data(cls, app_data, distribution, for_py_version):
+        raw_json = app_data.embed_update_log(distribution, for_py_version).read()
+        return cls.from_dict(raw_json)
+
+    def to_dict(self):
+        return {
+            "started": dump_datetime(self.started),
+            "completed": dump_datetime(self.completed),
+            "periodic": self.periodic,
+            "versions": [r.to_dict() for r in self.versions],
+        }
+
+    @property
+    def needs_update(self):
+        now = datetime.now(tz=timezone.utc)
+        if self.completed is None:  # never completed
+            return self._check_start(now)
+        if now - self.completed <= UPDATE_PERIOD:
+            return False
+        return self._check_start(now)
+
+    def _check_start(self, now):
+        return self.started is None or now - self.started > UPDATE_ABORTED_DELAY
+
+
+def trigger_update(distribution, for_py_version, wheel, search_dirs, app_data, env, periodic):  # noqa: PLR0913
+    wheel_path = None if wheel is None else str(wheel.path)
+    cmd = [
+        sys.executable,
+        "-c",
+        dedent(
+            """
+        from virtualenv.report import setup_report, MAX_LEVEL
+        from virtualenv.seed.wheels.periodic_update import do_update
+        setup_report(MAX_LEVEL, show_pid=True)
+        do_update({!r}, {!r}, {!r}, {!r}, {!r}, {!r})
+        """,
+        )
+        .strip()
+        .format(distribution, for_py_version, wheel_path, str(app_data), [str(p) for p in search_dirs], periodic),
+    ]
+    debug = env.get("_VIRTUALENV_PERIODIC_UPDATE_INLINE") == "1"
+    pipe = None if debug else DEVNULL
+    kwargs = {"stdout": pipe, "stderr": pipe}
+    if not debug and sys.platform == "win32":
+        kwargs["creationflags"] = CREATE_NO_WINDOW
+    process = Popen(cmd, **kwargs)  # noqa: S603
+    logging.info(
+        "triggered periodic upgrade of %s%s (for python %s) via background process having PID %d",
+        distribution,
+        "" if wheel is None else f"=={wheel.version}",
+        for_py_version,
+        process.pid,
+    )
+    if debug:
+        process.communicate()  # wait in debug mode; otherwise communicate() is skipped on purpose so it stays a background process
+    else:
+        # set the returncode here -> no ResourceWarning on main process exit if the subprocess still runs
+        process.returncode = 0
+
+
+def do_update(distribution, for_py_version, embed_filename, app_data, search_dirs, periodic):  # noqa: PLR0913
+    versions = None
+    try:
+        versions = _run_do_update(app_data, distribution, embed_filename, for_py_version, periodic, search_dirs)
+    finally:
+        logging.debug("done %s %s with %s", distribution, for_py_version, versions)
+    return versions
+
+
+def _run_do_update(  # noqa: C901, PLR0913
+    app_data,
+    distribution,
+    embed_filename,
+    for_py_version,
+    periodic,
+    search_dirs,
+):
+    from virtualenv.seed.wheels import acquire
+
+    wheel_filename = None if embed_filename is None else Path(embed_filename)
+    embed_version = None if wheel_filename is None else Wheel(wheel_filename).version_tuple
+    app_data = AppDataDiskFolder(app_data) if isinstance(app_data, str) else app_data
+    search_dirs = [Path(p) if isinstance(p, str) else p for p in search_dirs]
+    wheelhouse = app_data.house
+    embed_update_log = app_data.embed_update_log(distribution, for_py_version)
+    u_log = UpdateLog.from_dict(embed_update_log.read())
+    now = datetime.now(tz=timezone.utc)
+
+    update_versions, other_versions = [], []
+    for version in u_log.versions:
+        if version.source in {"periodic", "manual"}:
+            update_versions.append(version)
+        else:
+            other_versions.append(version)
+
+    if periodic:
+        source = "periodic"
+    else:
+        source = "manual"
+        # mark the most recent one as source "manual"
+        if update_versions:
+            update_versions[0].source = source
+
+    if wheel_filename is not None:
+        dest = wheelhouse / wheel_filename.name
+        if not dest.exists():
+            copy2(str(wheel_filename), str(wheelhouse))
+    last, last_version, versions, filenames = None, None, [], set()
+    while last is None or not last.use(now, ignore_grace_period_ci=True):
+        download_time = datetime.now(tz=timezone.utc)
+        dest = acquire.download_wheel(
+            distribution=distribution,
+            version_spec=None if last_version is None else f"<{last_version}",
+            for_py_version=for_py_version,
+            search_dirs=search_dirs,
+            app_data=app_data,
+            to_folder=wheelhouse,
+            env=os.environ,
+        )
+        if dest is None or (update_versions and update_versions[0].filename == dest.name):
+            break
+        release_date = release_date_for_wheel_path(dest.path)
+        last = NewVersion(filename=dest.path.name, release_date=release_date, found_date=download_time, source=source)
+        logging.info("detected %s in %s", last, datetime.now(tz=timezone.utc) - download_time)
+        versions.append(last)
+        filenames.add(last.filename)
+        last_wheel = last.wheel
+        last_version = last_wheel.version
+        if embed_version is not None and embed_version >= last_wheel.version_tuple:
+            break  # stop download if we reach the embed version
+    u_log.periodic = periodic
+    if not u_log.periodic:
+        u_log.started = now
+    # update other_versions by removing the versions we just found
+    other_versions = [version for version in other_versions if version.filename not in filenames]
+    u_log.versions = versions + update_versions + other_versions
+    u_log.completed = datetime.now(tz=timezone.utc)
+    embed_update_log.write(u_log.to_dict())
+    return versions
+
+
+def release_date_for_wheel_path(dest):
+    wheel = Wheel(dest)
+    # the most accurate source is PyPI itself - e.g. https://pypi.org/pypi/pip/json,
+    # see https://warehouse.pypa.io/api-reference/json/ for more details
+    content = _pypi_get_distribution_info_cached(wheel.distribution)
+    if content is not None:
+        try:
+            upload_time = content["releases"][wheel.version][0]["upload_time"]
+            return datetime.strptime(upload_time, "%Y-%m-%dT%H:%M:%S").replace(tzinfo=timezone.utc)
+        except Exception as exception:  # noqa: BLE001
+            logging.error("could not load release date %s because %r", content, exception)  # noqa: TRY400
+    return None
+
+
+def _request_context():
+    yield None
+    # fall back to unverified HTTPS (the information we request is not sensitive, so this is acceptable)
+    yield ssl._create_unverified_context()  # noqa: S323, SLF001
+
+
+_PYPI_CACHE = {}
+
+
+def _pypi_get_distribution_info_cached(distribution):
+    if distribution not in _PYPI_CACHE:
+        _PYPI_CACHE[distribution] = _pypi_get_distribution_info(distribution)
+    return _PYPI_CACHE[distribution]
+
+
+def _pypi_get_distribution_info(distribution):
+    content, url = None, f"https://pypi.org/pypi/{distribution}/json"
+    try:
+        for context in _request_context():
+            try:
+                with urlopen(url, context=context) as file_handler:  # noqa: S310
+                    content = json.load(file_handler)
+                break
+            except URLError as exception:  # noqa: PERF203
+                logging.error("failed to access %s because %r", url, exception)  # noqa: TRY400
+    except Exception as exception:  # noqa: BLE001
+        logging.error("failed to access %s because %r", url, exception)  # noqa: TRY400
+    return content
+
+
+def manual_upgrade(app_data, env):
+    threads = []
+
+    for for_py_version, distribution_to_package in BUNDLE_SUPPORT.items():
+        # upgrade each bundled distribution for the given python version
+        for distribution in distribution_to_package:
+            thread = Thread(target=_run_manual_upgrade, args=(app_data, distribution, for_py_version, env))
+            thread.start()
+            threads.append(thread)
+
+    for thread in threads:
+        thread.join()
+
+
+def _run_manual_upgrade(app_data, distribution, for_py_version, env):
+    start = datetime.now(tz=timezone.utc)
+    from .bundle import from_bundle
+
+    current = from_bundle(
+        distribution=distribution,
+        version=None,
+        for_py_version=for_py_version,
+        search_dirs=[],
+        app_data=app_data,
+        do_periodic_update=False,
+        env=env,
+    )
+    logging.warning(
+        "upgrade %s for python %s with current %s",
+        distribution,
+        for_py_version,
+        "" if current is None else current.name,
+    )
+    versions = do_update(
+        distribution=distribution,
+        for_py_version=for_py_version,
+        embed_filename=current.path,
+        app_data=app_data,
+        search_dirs=[],
+        periodic=False,
+    )
+
+    args = [
+        distribution,
+        for_py_version,
+        datetime.now(tz=timezone.utc) - start,
+    ]
+    if versions:
+        args.append("\n".join(f"\t{v}" for v in versions))
+    ver_update = "new entries found:\n%s" if versions else "no new versions found"
+    msg = f"upgraded %s for python %s in %s {ver_update}"
+    logging.warning(msg, *args)
+
+
+__all__ = [
+    "add_wheel_to_update_log",
+    "periodic_update",
+    "do_update",
+    "manual_upgrade",
+    "NewVersion",
+    "UpdateLog",
+    "load_datetime",
+    "dump_datetime",
+    "trigger_update",
+    "release_date_for_wheel_path",
+]
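
The grace-period rules in NewVersion.use can be exercised directly; a small sketch with made-up wheel names and dates:

from datetime import datetime, timedelta, timezone
from virtualenv.seed.wheels.periodic_update import NewVersion

now = datetime.now(tz=timezone.utc)
fresh = NewVersion("pip-99.0-py3-none-any.whl", found_date=now, release_date=now, source="periodic")
old = NewVersion(
    "pip-98.0-py3-none-any.whl",
    found_date=now - timedelta(days=30),
    release_date=now - timedelta(days=30),
    source="periodic",
)
print(fresh.use(now))  # False - still inside the 1-hour CI and 28-day minor grace periods
print(old.use(now))    # True - older than GRACE_PERIOD_MINOR
print(NewVersion("pip-97.0-py3-none-any.whl", now, None, "manual").use(now))  # True - manual always wins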
diff --git a/venv/lib/python3.10/site-packages/virtualenv/seed/wheels/util.py b/venv/lib/python3.10/site-packages/virtualenv/seed/wheels/util.py
new file mode 100644
index 0000000..cfc0098
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/seed/wheels/util.py
@@ -0,0 +1,121 @@
+from __future__ import annotations
+
+from operator import attrgetter
+from zipfile import ZipFile
+
+
+class Wheel:
+    def __init__(self, path) -> None:
+        # https://www.python.org/dev/peps/pep-0427/#file-name-convention
+        # The wheel filename is {distribution}-{version}(-{build tag})?-{python tag}-{abi tag}-{platform tag}.whl
+        self.path = path
+        self._parts = path.stem.split("-")
+
+    @classmethod
+    def from_path(cls, path):
+        if path is not None and path.suffix == ".whl" and len(path.stem.split("-")) >= 5:  # noqa: PLR2004
+            return cls(path)
+        return None
+
+    @property
+    def distribution(self):
+        return self._parts[0]
+
+    @property
+    def version(self):
+        return self._parts[1]
+
+    @property
+    def version_tuple(self):
+        return self.as_version_tuple(self.version)
+
+    @staticmethod
+    def as_version_tuple(version):
+        result = []
+        for part in version.split(".")[0:3]:
+            try:
+                result.append(int(part))
+            except ValueError:  # noqa: PERF203
+                break
+        if not result:
+            raise ValueError(version)
+        return tuple(result)
+
+    @property
+    def name(self):
+        return self.path.name
+
+    def support_py(self, py_version):
+        name = f"{'-'.join(self.path.stem.split('-')[0:2])}.dist-info/METADATA"
+        with ZipFile(str(self.path), "r") as zip_file:
+            metadata = zip_file.read(name).decode("utf-8")
+        marker = "Requires-Python:"
+        requires = next((i[len(marker) :] for i in metadata.splitlines() if i.startswith(marker)), None)
+        if requires is None:  # if the wheel does not specify Requires-Python, assume it is compatible
+            return True
+        py_version_int = tuple(int(i) for i in py_version.split("."))
+        for require in (i.strip() for i in requires.split(",")):
+            # https://www.python.org/dev/peps/pep-0345/#version-specifiers
+            for operator, check in [
+                ("!=", lambda v: py_version_int != v),
+                ("==", lambda v: py_version_int == v),
+                ("<=", lambda v: py_version_int <= v),
+                (">=", lambda v: py_version_int >= v),
+                ("<", lambda v: py_version_int < v),
+                (">", lambda v: py_version_int > v),
+            ]:
+                if require.startswith(operator):
+                    ver_str = require[len(operator) :].strip()
+                    version = tuple((int(i) if i != "*" else None) for i in ver_str.split("."))[0:2]
+                    if not check(version):
+                        return False
+                    break
+        return True
+
+    def __repr__(self) -> str:
+        return f"{self.__class__.__name__}({self.path})"
+
+    def __str__(self) -> str:
+        return str(self.path)
+
+
+def discover_wheels(from_folder, distribution, version, for_py_version):
+    wheels = []
+    for filename in from_folder.iterdir():
+        wheel = Wheel.from_path(filename)
+        if (
+            wheel
+            and wheel.distribution == distribution
+            and (version is None or wheel.version == version)
+            and wheel.support_py(for_py_version)
+        ):
+            wheels.append(wheel)
+    return sorted(wheels, key=attrgetter("version_tuple", "distribution"), reverse=True)
+
+
+class Version:
+    #: the version bundled with virtualenv
+    bundle = "bundle"
+    embed = "embed"
+    #: custom version handlers
+    non_version = (bundle, embed)
+
+    @staticmethod
+    def of_version(value):
+        return None if value in Version.non_version else value
+
+    @staticmethod
+    def as_pip_req(distribution, version):
+        return f"{distribution}{Version.as_version_spec(version)}"
+
+    @staticmethod
+    def as_version_spec(version):
+        of_version = Version.of_version(version)
+        return "" if of_version is None else f"=={of_version}"
+
+
+__all__ = [
+    "discover_wheels",
+    "Version",
+    "Wheel",
+]
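
Wheel only parses the PEP 427 filename, so the file does not have to exist; a quick sketch:

from pathlib import Path
from virtualenv.seed.wheels.util import Version, Wheel

wheel = Wheel.from_path(Path("pip-23.2.1-py3-none-any.whl"))
print(wheel.distribution)   # pip
print(wheel.version)        # 23.2.1
print(wheel.version_tuple)  # (23, 2, 1)
print(Version.as_version_spec("23.2.1"))        # ==23.2.1
print(Version.as_version_spec(Version.bundle))  # empty string - sentinel, no pin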
diff --git a/venv/lib/python3.10/site-packages/virtualenv/util/__init__.py b/venv/lib/python3.10/site-packages/virtualenv/util/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/venv/lib/python3.10/site-packages/virtualenv/util/error.py b/venv/lib/python3.10/site-packages/virtualenv/util/error.py
new file mode 100644
index 0000000..7b23509
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/util/error.py
@@ -0,0 +1,15 @@
+"""Errors."""
+
+
+from __future__ import annotations
+
+
+class ProcessCallFailedError(RuntimeError):
+    """Failed a process call."""
+
+    def __init__(self, code, out, err, cmd) -> None:
+        super().__init__(code, out, err, cmd)
+        self.code = code
+        self.out = out
+        self.err = err
+        self.cmd = cmd
diff --git a/venv/lib/python3.10/site-packages/virtualenv/util/lock.py b/venv/lib/python3.10/site-packages/virtualenv/util/lock.py
new file mode 100644
index 0000000..c15b5f1
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/util/lock.py
@@ -0,0 +1,169 @@
+"""holds locking functionality that works across processes."""
+
+from __future__ import annotations
+
+import logging
+import os
+from abc import ABCMeta, abstractmethod
+from contextlib import contextmanager, suppress
+from pathlib import Path
+from threading import Lock, RLock
+
+from filelock import FileLock, Timeout
+
+
+class _CountedFileLock(FileLock):
+    def __init__(self, lock_file) -> None:
+        parent = os.path.dirname(lock_file)
+        if not os.path.isdir(parent):
+            with suppress(OSError):
+                os.makedirs(parent)
+
+        super().__init__(lock_file)
+        self.count = 0
+        self.thread_safe = RLock()
+
+    def acquire(self, timeout=None, poll_interval=0.05):
+        if not self.thread_safe.acquire(timeout=-1 if timeout is None else timeout):
+            raise Timeout(self.lock_file)
+        if self.count == 0:
+            super().acquire(timeout, poll_interval)
+        self.count += 1
+
+    def release(self, force=False):  # noqa: FBT002
+        with self.thread_safe:
+            if self.count > 0:
+                self.thread_safe.release()
+            if self.count == 1:
+                super().release(force=force)
+            self.count = max(self.count - 1, 0)
+
+
+_lock_store = {}
+_store_lock = Lock()
+
+
+class PathLockBase(metaclass=ABCMeta):
+    def __init__(self, folder) -> None:
+        path = Path(folder)
+        self.path = path.resolve() if path.exists() else path
+
+    def __repr__(self) -> str:
+        return f"{self.__class__.__name__}({self.path})"
+
+    def __div__(self, other):
+        return type(self)(self.path / other)
+
+    def __truediv__(self, other):
+        return self.__div__(other)
+
+    @abstractmethod
+    def __enter__(self):
+        raise NotImplementedError
+
+    @abstractmethod
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        raise NotImplementedError
+
+    @abstractmethod
+    @contextmanager
+    def lock_for_key(self, name, no_block=False):  # noqa: FBT002
+        raise NotImplementedError
+
+    @abstractmethod
+    @contextmanager
+    def non_reentrant_lock_for_key(self, name):
+        raise NotImplementedError
+
+
+class ReentrantFileLock(PathLockBase):
+    def __init__(self, folder) -> None:
+        super().__init__(folder)
+        self._lock = None
+
+    def _create_lock(self, name=""):
+        lock_file = str(self.path / f"{name}.lock")
+        with _store_lock:
+            if lock_file not in _lock_store:
+                _lock_store[lock_file] = _CountedFileLock(lock_file)
+            return _lock_store[lock_file]
+
+    @staticmethod
+    def _del_lock(lock):
+        if lock is not None:
+            with _store_lock, lock.thread_safe:
+                if lock.count == 0:
+                    _lock_store.pop(lock.lock_file, None)
+
+    def __del__(self) -> None:
+        self._del_lock(self._lock)
+
+    def __enter__(self):
+        self._lock = self._create_lock()
+        self._lock_file(self._lock)
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self._release(self._lock)
+        self._del_lock(self._lock)
+        self._lock = None
+
+    def _lock_file(self, lock, no_block=False):  # noqa: FBT002
+        # multiple processes might be racing for the first lock, so we cannot check whether this directory exists
+        # without holding a lock; such a lock could become expensive, and it's not clear where it should live.
+        # Instead we simply ignore the failure if the directory cannot be created.
+        with suppress(OSError):
+            os.makedirs(str(self.path))
+
+        try:
+            lock.acquire(0.0001)
+        except Timeout:
+            if no_block:
+                raise
+            logging.debug("lock file %s present, will block until released", lock.lock_file)
+            lock.release()  # release the acquire try from above
+            lock.acquire()
+
+    @staticmethod
+    def _release(lock):
+        lock.release()
+
+    @contextmanager
+    def lock_for_key(self, name, no_block=False):  # noqa: FBT002
+        lock = self._create_lock(name)
+        try:
+            try:
+                self._lock_file(lock, no_block)
+                yield
+            finally:
+                self._release(lock)
+        finally:
+            self._del_lock(lock)
+            lock = None
+
+    @contextmanager
+    def non_reentrant_lock_for_key(self, name):
+        with _CountedFileLock(str(self.path / f"{name}.lock")):
+            yield
+
+
+class NoOpFileLock(PathLockBase):
+    def __enter__(self):
+        raise NotImplementedError
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        raise NotImplementedError
+
+    @contextmanager
+    def lock_for_key(self, name, no_block=False):  # noqa: ARG002, FBT002
+        yield
+
+    @contextmanager
+    def non_reentrant_lock_for_key(self, name):  # noqa: ARG002
+        yield
+
+
+__all__ = [
+    "NoOpFileLock",
+    "ReentrantFileLock",
+    "Timeout",
+]
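
A usage sketch for ReentrantFileLock with an illustrative folder: the same process may nest acquisitions of the same key, while other processes block on the underlying file lock.

from virtualenv.util.lock import ReentrantFileLock

lock = ReentrantFileLock("/tmp/demo-app-data")  # illustrative folder
with lock.lock_for_key("wheelhouse"):
    with lock.lock_for_key("wheelhouse"):  # re-entry in the same process is fine
        pass  # mutate the shared folder here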
diff --git a/venv/lib/python3.10/site-packages/virtualenv/util/path/__init__.py b/venv/lib/python3.10/site-packages/virtualenv/util/path/__init__.py
new file mode 100644
index 0000000..dc827f3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/util/path/__init__.py
@@ -0,0 +1,16 @@
+from __future__ import annotations
+
+from ._permission import make_exe, set_tree
+from ._sync import copy, copytree, ensure_dir, safe_delete, symlink
+from ._win import get_short_path_name
+
+__all__ = [
+    "ensure_dir",
+    "symlink",
+    "copy",
+    "copytree",
+    "make_exe",
+    "set_tree",
+    "safe_delete",
+    "get_short_path_name",
+]
diff --git a/venv/lib/python3.10/site-packages/virtualenv/util/path/_permission.py b/venv/lib/python3.10/site-packages/virtualenv/util/path/_permission.py
new file mode 100644
index 0000000..f3e9b62
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/util/path/_permission.py
@@ -0,0 +1,30 @@
+from __future__ import annotations
+
+import os
+from stat import S_IXGRP, S_IXOTH, S_IXUSR
+
+
+def make_exe(filename):
+    original_mode = filename.stat().st_mode
+    levels = [S_IXUSR, S_IXGRP, S_IXOTH]
+    for at in range(len(levels), 0, -1):
+        try:
+            mode = original_mode
+            for level in levels[:at]:
+                mode |= level
+            filename.chmod(mode)
+            break
+        except OSError:  # noqa: PERF203
+            continue
+
+
+def set_tree(folder, stat):
+    for root, _, files in os.walk(str(folder)):
+        for filename in files:
+            os.chmod(os.path.join(root, filename), stat)
+
+
+__all__ = (
+    "make_exe",
+    "set_tree",
+)
diff --git a/venv/lib/python3.10/site-packages/virtualenv/util/path/_sync.py b/venv/lib/python3.10/site-packages/virtualenv/util/path/_sync.py
new file mode 100644
index 0000000..c9334ad
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/util/path/_sync.py
@@ -0,0 +1,83 @@
+from __future__ import annotations
+
+import logging
+import os
+import shutil
+import sys
+from stat import S_IWUSR
+
+
+def ensure_dir(path):
+    if not path.exists():
+        logging.debug("create folder %s", str(path))
+        os.makedirs(str(path))
+
+
+def ensure_safe_to_do(src, dest):
+    if src == dest:
+        msg = f"source and destination is the same {src}"
+        raise ValueError(msg)
+    if not dest.exists():
+        return
+    if dest.is_dir() and not dest.is_symlink():
+        logging.debug("remove directory %s", dest)
+        safe_delete(dest)
+    else:
+        logging.debug("remove file %s", dest)
+        dest.unlink()
+
+
+def symlink(src, dest):
+    ensure_safe_to_do(src, dest)
+    logging.debug("symlink %s", _Debug(src, dest))
+    dest.symlink_to(src, target_is_directory=src.is_dir())
+
+
+def copy(src, dest):
+    ensure_safe_to_do(src, dest)
+    is_dir = src.is_dir()
+    method = copytree if is_dir else shutil.copy
+    logging.debug("copy %s", _Debug(src, dest))
+    method(str(src), str(dest))
+
+
+def copytree(src, dest):
+    for root, _, files in os.walk(src):
+        dest_dir = os.path.join(dest, os.path.relpath(root, src))
+        if not os.path.isdir(dest_dir):
+            os.makedirs(dest_dir)
+        for name in files:
+            src_f = os.path.join(root, name)
+            dest_f = os.path.join(dest_dir, name)
+            shutil.copy(src_f, dest_f)
+
+
+def safe_delete(dest):
+    def onerror(func, path, exc_info):  # noqa: ARG001
+        if not os.access(path, os.W_OK):
+            os.chmod(path, S_IWUSR)
+            func(path)
+        else:
+            raise
+
+    kwargs = {"onexc" if sys.version_info >= (3, 12) else "onerror": onerror}
+    shutil.rmtree(str(dest), ignore_errors=True, **kwargs)
+
+
+class _Debug:
+    def __init__(self, src, dest) -> None:
+        self.src = src
+        self.dest = dest
+
+    def __str__(self) -> str:
+        return f"{'directory ' if self.src.is_dir() else ''}{self.src!s} to {self.dest!s}"
+
+
+__all__ = [
+    "ensure_dir",
+    "symlink",
+    "copy",
+    "copytree",
+    "safe_delete",
+]
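
safe_delete picks the callback keyword by interpreter version because shutil.rmtree renamed onerror to onexc in Python 3.12. A standalone sketch of the same chmod-and-retry idea; force_rmtree is a hypothetical name:

import os
import shutil
import sys
from stat import S_IWUSR

def force_rmtree(path):
    def on_error(func, failed_path, exc_info):  # third argument is unused on purpose
        os.chmod(failed_path, S_IWUSR)  # make it writable, then retry the failing call
        func(failed_path)

    kwargs = {"onexc" if sys.version_info >= (3, 12) else "onerror": on_error}
    shutil.rmtree(path, **kwargs)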
diff --git a/venv/lib/python3.10/site-packages/virtualenv/util/path/_win.py b/venv/lib/python3.10/site-packages/virtualenv/util/path/_win.py
new file mode 100644
index 0000000..e738551
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/util/path/_win.py
@@ -0,0 +1,23 @@
+from __future__ import annotations
+
+
+def get_short_path_name(long_name):
+    """Gets the short path name of a given long path - http://stackoverflow.com/a/23598461/200291."""
+    import ctypes
+    from ctypes import wintypes
+
+    _GetShortPathNameW = ctypes.windll.kernel32.GetShortPathNameW  # noqa: N806
+    _GetShortPathNameW.argtypes = [wintypes.LPCWSTR, wintypes.LPWSTR, wintypes.DWORD]
+    _GetShortPathNameW.restype = wintypes.DWORD
+    output_buf_size = 0
+    while True:
+        output_buf = ctypes.create_unicode_buffer(output_buf_size)
+        needed = _GetShortPathNameW(long_name, output_buf, output_buf_size)
+        if output_buf_size >= needed:
+            return output_buf.value
+        output_buf_size = needed
+
+
+__all__ = [
+    "get_short_path_name",
+]
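
get_short_path_name is Windows-only (it calls into kernel32) and follows the classic two-pass buffer-sizing pattern: call once to learn the needed size, then again with a big enough buffer. For example, on a typical install:

from virtualenv.util.path import get_short_path_name

print(get_short_path_name("C:\\Program Files"))  # typically C:\PROGRA~1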
diff --git a/venv/lib/python3.10/site-packages/virtualenv/util/subprocess/__init__.py b/venv/lib/python3.10/site-packages/virtualenv/util/subprocess/__init__.py
new file mode 100644
index 0000000..03e5370
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/util/subprocess/__init__.py
@@ -0,0 +1,30 @@
+from __future__ import annotations
+
+import subprocess
+
+CREATE_NO_WINDOW = 0x80000000
+
+
+def run_cmd(cmd):
+    try:
+        process = subprocess.Popen(
+            cmd,  # noqa: S603
+            universal_newlines=True,
+            stdin=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+            stdout=subprocess.PIPE,
+            encoding="utf-8",
+        )
+        out, err = process.communicate()  # input disabled
+        code = process.returncode
+    except OSError as error:
+        code, out, err = error.errno, "", error.strerror
+        if code == 2 and "file" in err:  # noqa: PLR2004
+            err = str(error)  # FileNotFoundError in Python >= 3.3
+    return code, out, err
+
+
+__all__ = (
+    "run_cmd",
+    "CREATE_NO_WINDOW",
+)
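
run_cmd never raises for a missing binary; an OSError is folded into the returned (code, out, err) triple. For example:

import sys
from virtualenv.util.subprocess import run_cmd

code, out, err = run_cmd([sys.executable, "-c", "print('hi')"])
print(code, out.strip())  # 0 hi

code, out, err = run_cmd(["definitely-not-a-real-binary"])
print(code)  # non-zero errno; err describes the failure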
diff --git a/venv/lib/python3.10/site-packages/virtualenv/util/zipapp.py b/venv/lib/python3.10/site-packages/virtualenv/util/zipapp.py
new file mode 100644
index 0000000..3049b84
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/util/zipapp.py
@@ -0,0 +1,37 @@
+from __future__ import annotations
+
+import logging
+import os
+import zipfile
+
+from virtualenv.info import IS_WIN, ROOT
+
+
+def read(full_path):
+    sub_file = _get_path_within_zip(full_path)
+    with zipfile.ZipFile(ROOT, "r") as zip_file, zip_file.open(sub_file) as file_handler:
+        return file_handler.read().decode("utf-8")
+
+
+def extract(full_path, dest):
+    logging.debug("extract %s to %s", full_path, dest)
+    sub_file = _get_path_within_zip(full_path)
+    with zipfile.ZipFile(ROOT, "r") as zip_file:
+        info = zip_file.getinfo(sub_file)
+        info.filename = dest.name
+        zip_file.extract(info, str(dest.parent))
+
+
+def _get_path_within_zip(full_path):
+    full_path = os.path.abspath(str(full_path))
+    sub_file = full_path[len(ROOT) + 1 :]
+    if IS_WIN:
+        # zip paths always use UNIX separators, even on Windows, though __file__ still follows the platform default
+        sub_file = sub_file.replace(os.sep, "/")
+    return sub_file
+
+
+__all__ = [
+    "read",
+    "extract",
+]
diff --git a/venv/lib/python3.10/site-packages/virtualenv/version.py b/venv/lib/python3.10/site-packages/virtualenv/version.py
new file mode 100644
index 0000000..f022ba7
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/virtualenv/version.py
@@ -0,0 +1,4 @@
+# file generated by setuptools_scm
+# don't change, don't track in version control
+__version__ = version = '20.24.3'
+__version_tuple__ = version_tuple = (20, 24, 3)
diff --git a/venv/lib/python3.10/site-packages/wheel-0.38.4.dist-info/LICENSE.txt b/venv/lib/python3.10/site-packages/wheel-0.38.4.dist-info/LICENSE.txt
deleted file mode 100644
index a31470f..0000000
--- a/venv/lib/python3.10/site-packages/wheel-0.38.4.dist-info/LICENSE.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-MIT License
-
-Copyright (c) 2012 Daniel Holth  and contributors
-
-Permission is hereby granted, free of charge, to any person obtaining a
-copy of this software and associated documentation files (the "Software"),
-to deal in the Software without restriction, including without limitation
-the rights to use, copy, modify, merge, publish, distribute, sublicense,
-and/or sell copies of the Software, and to permit persons to whom the
-Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included
-in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
-OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-OTHER DEALINGS IN THE SOFTWARE.
diff --git a/venv/lib/python3.10/site-packages/wheel-0.38.4.dist-info/METADATA b/venv/lib/python3.10/site-packages/wheel-0.38.4.dist-info/METADATA
deleted file mode 100644
index 7ca74eb..0000000
--- a/venv/lib/python3.10/site-packages/wheel-0.38.4.dist-info/METADATA
+++ /dev/null
@@ -1,62 +0,0 @@
-Metadata-Version: 2.1
-Name: wheel
-Version: 0.38.4
-Summary: A built-package format for Python
-Home-page: https://github.com/pypa/wheel
-Author: Daniel Holth
-Author-email: dholth@fastmail.fm
-Maintainer: Alex Grönholm
-Maintainer-email: alex.gronholm@nextday.fi
-License: MIT
-Project-URL: Documentation, https://wheel.readthedocs.io/
-Project-URL: Changelog, https://wheel.readthedocs.io/en/stable/news.html
-Project-URL: Issue Tracker, https://github.com/pypa/wheel/issues
-Keywords: wheel,packaging
-Classifier: Development Status :: 5 - Production/Stable
-Classifier: Intended Audience :: Developers
-Classifier: Topic :: System :: Archiving :: Packaging
-Classifier: License :: OSI Approved :: MIT License
-Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 3 :: Only
-Classifier: Programming Language :: Python :: 3.7
-Classifier: Programming Language :: Python :: 3.8
-Classifier: Programming Language :: Python :: 3.9
-Classifier: Programming Language :: Python :: 3.10
-Classifier: Programming Language :: Python :: 3.11
-Requires-Python: >=3.7
-License-File: LICENSE.txt
-Provides-Extra: test
-Requires-Dist: pytest (>=3.0.0) ; extra == 'test'
-
-wheel
-=====
-
-This library is the reference implementation of the Python wheel packaging
-standard, as defined in `PEP 427`_.
-
-It has two different roles:
-
-#. A setuptools_ extension for building wheels that provides the
-   ``bdist_wheel`` setuptools command
-#. A command line tool for working with wheel files
-
-It should be noted that wheel is **not** intended to be used as a library, and
-as such there is no stable, public API.
-
-.. _PEP 427: https://www.python.org/dev/peps/pep-0427/
-.. _setuptools: https://pypi.org/project/setuptools/
-
-Documentation
--------------
-
-The documentation_ can be found on Read The Docs.
-
-.. _documentation: https://wheel.readthedocs.io/
-
-Code of Conduct
----------------
-
-Everyone interacting in the wheel project's codebases, issue trackers, chat
-rooms, and mailing lists is expected to follow the `PSF Code of Conduct`_.
-
-.. _PSF Code of Conduct: https://github.com/pypa/.github/blob/main/CODE_OF_CONDUCT.md
diff --git a/venv/lib/python3.10/site-packages/wheel-0.38.4.dist-info/RECORD b/venv/lib/python3.10/site-packages/wheel-0.38.4.dist-info/RECORD
deleted file mode 100644
index 04fecc5..0000000
--- a/venv/lib/python3.10/site-packages/wheel-0.38.4.dist-info/RECORD
+++ /dev/null
@@ -1,51 +0,0 @@
-wheel/__init__.py,sha256=2wJrg-twJVHIbVXveZjxyMtxjelZOVff9bnhTBt3eec,59
-wheel/__main__.py,sha256=NkMUnuTCGcOkgY0IBLgBCVC_BGGcWORx2K8jYGS12UE,455
-wheel/_setuptools_logging.py,sha256=NoCnjJ4DFEZ45Eo-2BdXLsWJCwGkait1tp_17paleVw,746
-wheel/bdist_wheel.py,sha256=k_gee2yY4TfDr_dRODcyCso6ItpidTLj6JZfbvZ93qk,19293
-wheel/macosx_libfile.py,sha256=OXM6OTx1O_ACLGBE2Q9prIHj47uWkZSZSuM_756W89Q,16145
-wheel/metadata.py,sha256=-6n1-hH8YtmUV8zsrRf206iXPJJWyE9tqlqZv5XGpSg,3727
-wheel/util.py,sha256=e0jpnsbbM9QhaaMSyap-_ZgUxcxwpyLDk6RHcrduPLg,621
-wheel/wheelfile.py,sha256=9iWWOWcvVXSx26YdfK9QptRA1OHeRRjqEo7PEb631mI,7536
-wheel/cli/__init__.py,sha256=NVh8x79QGybwZpL5VMQgQv-zwSg_6uKsYVXbIVRkpQ4,2384
-wheel/cli/convert.py,sha256=skUf4TuZcksqG75J-_KUkFXdmYDxTJpP311O16cNJ50,9427
-wheel/cli/pack.py,sha256=zQ1zmJouN8Y86hCJ3lTaKh6g7SbqgA2MuKP1wY-vjfw,3383
-wheel/cli/unpack.py,sha256=QU_OVMCvYWtrjQa18Z5ZKaZwGBImAqJIzQokYt2u3bI,659
-wheel/vendored/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-wheel/vendored/packaging/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-wheel/vendored/packaging/_manylinux.py,sha256=1OWKAD6xtgTgOGVEhujFWqF35JWqMvQLxZEZ2QlHI9g,11489
-wheel/vendored/packaging/_musllinux.py,sha256=k9vZj4tmx0ElF-2y8h7gbz09zCfhEjZ9ZQbeZr5sVds,4374
-wheel/vendored/packaging/tags.py,sha256=M_DQI4zGnPq3hsRV9QWYf5SoMJmBB91gqqG9Jtyv-m8,15612
-wheel-0.38.4.dist-info/LICENSE.txt,sha256=MMI2GGeRCPPo6h0qZYx8pBe9_IkcmO8aifpP8MmChlQ,1107
-wheel-0.38.4.dist-info/METADATA,sha256=3j4KgVZCY7eZyOrwDKYoTuAcfr_gXAbxx1yGhR9DssA,2110
-wheel-0.38.4.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
-wheel-0.38.4.dist-info/entry_points.txt,sha256=krg-iHKefnsk1qvNLDkZP3-4Aq3J0F_zJaathht0JBI,107
-wheel-0.38.4.dist-info/top_level.txt,sha256=HxSBIbgEstMPe4eFawhA66Mq-QYHMopXVoAncfjb_1c,6
-wheel-0.38.4.dist-info/RECORD,,
-wheel/cli/pack.cpython-310.pyc,,
-wheel-0.38.4.dist-info/INSTALLER,,
-../../../bin/wheel-3.10,,
-wheel-0.38.4.dist-info/__pycache__,,
-wheel/cli/__pycache__,,
-wheel/metadata.cpython-310.pyc,,
-../../../bin/wheel,,
-wheel/vendored/packaging/__init__.cpython-310.pyc,,
-wheel/__pycache__,,
-wheel/wheelfile.cpython-310.pyc,,
-wheel/cli/convert.cpython-310.pyc,,
-wheel/_setuptools_logging.cpython-310.pyc,,
-wheel/vendored/packaging/_musllinux.cpython-310.pyc,,
-wheel/util.cpython-310.pyc,,
-wheel/vendored/packaging/__pycache__,,
-wheel-0.38.4.virtualenv,,
-wheel/vendored/__init__.cpython-310.pyc,,
-wheel/vendored/packaging/tags.cpython-310.pyc,,
-wheel/cli/__init__.cpython-310.pyc,,
-wheel/__main__.cpython-310.pyc,,
-../../../bin/wheel3.10,,
-../../../bin/wheel3,,
-wheel/bdist_wheel.cpython-310.pyc,,
-wheel/cli/unpack.cpython-310.pyc,,
-wheel/__init__.cpython-310.pyc,,
-wheel/vendored/packaging/_manylinux.cpython-310.pyc,,
-wheel/macosx_libfile.cpython-310.pyc,,
-wheel/vendored/__pycache__,,
\ No newline at end of file
diff --git a/venv/lib/python3.10/site-packages/wheel-0.38.4.dist-info/entry_points.txt b/venv/lib/python3.10/site-packages/wheel-0.38.4.dist-info/entry_points.txt
deleted file mode 100644
index 251855d..0000000
--- a/venv/lib/python3.10/site-packages/wheel-0.38.4.dist-info/entry_points.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-[console_scripts]
-wheel = wheel.cli:main
-
-[distutils.commands]
-bdist_wheel = wheel.bdist_wheel:bdist_wheel
diff --git a/venv/lib/python3.10/site-packages/wheel-0.38.4.dist-info/top_level.txt b/venv/lib/python3.10/site-packages/wheel-0.38.4.dist-info/top_level.txt
deleted file mode 100644
index 2309722..0000000
--- a/venv/lib/python3.10/site-packages/wheel-0.38.4.dist-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-wheel
diff --git a/venv/lib/python3.10/site-packages/wheel/__init__.py b/venv/lib/python3.10/site-packages/wheel/__init__.py
deleted file mode 100644
index ace3fc6..0000000
--- a/venv/lib/python3.10/site-packages/wheel/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from __future__ import annotations
-
-__version__ = "0.38.4"
diff --git a/venv/lib/python3.10/site-packages/wheel/__main__.py b/venv/lib/python3.10/site-packages/wheel/__main__.py
deleted file mode 100644
index 0be7453..0000000
--- a/venv/lib/python3.10/site-packages/wheel/__main__.py
+++ /dev/null
@@ -1,23 +0,0 @@
-"""
-Wheel command line tool (enable python -m wheel syntax)
-"""
-
-from __future__ import annotations
-
-import sys
-
-
-def main():  # needed for console script
-    if __package__ == "":
-        # To be able to run 'python wheel-0.9.whl/wheel':
-        import os.path
-
-        path = os.path.dirname(os.path.dirname(__file__))
-        sys.path[0:0] = [path]
-    import wheel.cli
-
-    sys.exit(wheel.cli.main())
-
-
-if __name__ == "__main__":
-    sys.exit(main())
diff --git a/venv/lib/python3.10/site-packages/wheel/_setuptools_logging.py b/venv/lib/python3.10/site-packages/wheel/_setuptools_logging.py
deleted file mode 100644
index 006c098..0000000
--- a/venv/lib/python3.10/site-packages/wheel/_setuptools_logging.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# copied from setuptools.logging, omitting monkeypatching
-from __future__ import annotations
-
-import logging
-import sys
-
-
-def _not_warning(record):
-    return record.levelno < logging.WARNING
-
-
-def configure():
-    """
-    Configure logging to emit warning and above to stderr
-    and everything else to stdout. This behavior is provided
-    for compatibility with distutils.log but may change in
-    the future.
-    """
-    err_handler = logging.StreamHandler()
-    err_handler.setLevel(logging.WARNING)
-    out_handler = logging.StreamHandler(sys.stdout)
-    out_handler.addFilter(_not_warning)
-    handlers = err_handler, out_handler
-    logging.basicConfig(
-        format="{message}", style="{", handlers=handlers, level=logging.DEBUG
-    )
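The configure() function above routes log records by level. A minimal self-contained sketch of the same split, assuming nothing beyond the standard library:

import logging
import sys

err_handler = logging.StreamHandler()           # defaults to stderr
err_handler.setLevel(logging.WARNING)           # WARNING and above -> stderr
out_handler = logging.StreamHandler(sys.stdout)
out_handler.addFilter(lambda r: r.levelno < logging.WARNING)  # rest -> stdout
logging.basicConfig(format="{message}", style="{",
                    handlers=[err_handler, out_handler], level=logging.DEBUG)

logging.info("printed on stdout")
logging.warning("printed on stderr")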
diff --git a/venv/lib/python3.10/site-packages/wheel/bdist_wheel.py b/venv/lib/python3.10/site-packages/wheel/bdist_wheel.py
deleted file mode 100644
index 7fcf4a3..0000000
--- a/venv/lib/python3.10/site-packages/wheel/bdist_wheel.py
+++ /dev/null
@@ -1,550 +0,0 @@
-"""
-Create a wheel (.whl) distribution.
-
-A wheel is a built archive format.
-"""
-
-from __future__ import annotations
-
-import os
-import re
-import shutil
-import stat
-import sys
-import sysconfig
-import warnings
-from collections import OrderedDict
-from email.generator import BytesGenerator, Generator
-from email.policy import EmailPolicy
-from glob import iglob
-from io import BytesIO
-from shutil import rmtree
-from zipfile import ZIP_DEFLATED, ZIP_STORED
-
-import pkg_resources
-from setuptools import Command
-
-from . import __version__ as wheel_version
-from .macosx_libfile import calculate_macosx_platform_tag
-from .metadata import pkginfo_to_metadata
-from .util import log
-from .vendored.packaging import tags
-from .wheelfile import WheelFile
-
-safe_name = pkg_resources.safe_name
-safe_version = pkg_resources.safe_version
-setuptools_major_version = int(
-    pkg_resources.get_distribution("setuptools").version.split(".")[0]
-)
-
-PY_LIMITED_API_PATTERN = r"cp3\d"
-
-
-def python_tag():
-    return f"py{sys.version_info[0]}"
-
-
-def get_platform(archive_root):
-    """Return our platform name 'win32', 'linux_x86_64'"""
-    result = sysconfig.get_platform()
-    if result.startswith("macosx") and archive_root is not None:
-        result = calculate_macosx_platform_tag(archive_root, result)
-    elif result == "linux-x86_64" and sys.maxsize == 2147483647:
-        # pip pull request #3497
-        result = "linux-i686"
-
-    return result.replace("-", "_")
-
-
-def get_flag(var, fallback, expected=True, warn=True):
-    """Use a fallback value for determining SOABI flags if the needed config
-    var is unset or unavailable."""
-    val = sysconfig.get_config_var(var)
-    if val is None:
-        if warn:
-            warnings.warn(
-                "Config variable '{}' is unset, Python ABI tag may "
-                "be incorrect".format(var),
-                RuntimeWarning,
-                2,
-            )
-        return fallback
-    return val == expected
-
-
-def get_abi_tag():
-    """Return the ABI tag based on SOABI (if available) or emulate SOABI (PyPy2)."""
-    soabi = sysconfig.get_config_var("SOABI")
-    impl = tags.interpreter_name()
-    if not soabi and impl in ("cp", "pp") and hasattr(sys, "maxunicode"):
-        d = ""
-        m = ""
-        u = ""
-        if get_flag("Py_DEBUG", hasattr(sys, "gettotalrefcount"), warn=(impl == "cp")):
-            d = "d"
-
-        if get_flag(
-            "WITH_PYMALLOC",
-            impl == "cp",
-            warn=(impl == "cp" and sys.version_info < (3, 8)),
-        ) and sys.version_info < (3, 8):
-            m = "m"
-
-        abi = f"{impl}{tags.interpreter_version()}{d}{m}{u}"
-    elif soabi and impl == "cp":
-        abi = "cp" + soabi.split("-")[1]
-    elif soabi and impl == "pp":
-        # we want something like pypy36-pp73
-        abi = "-".join(soabi.split("-")[:2])
-        abi = abi.replace(".", "_").replace("-", "_")
-    elif soabi:
-        abi = soabi.replace(".", "_").replace("-", "_")
-    else:
-        abi = None
-
-    return abi
-
-
-def safer_name(name):
-    return safe_name(name).replace("-", "_")
-
-
-def safer_version(version):
-    return safe_version(version).replace("-", "_")
-
-
-def remove_readonly(func, path, excinfo):
-    print(str(excinfo[1]))
-    os.chmod(path, stat.S_IWRITE)
-    func(path)
-
-
-class bdist_wheel(Command):
-
-    description = "create a wheel distribution"
-
-    supported_compressions = OrderedDict(
-        [("stored", ZIP_STORED), ("deflated", ZIP_DEFLATED)]
-    )
-
-    user_options = [
-        ("bdist-dir=", "b", "temporary directory for creating the distribution"),
-        (
-            "plat-name=",
-            "p",
-            "platform name to embed in generated filenames "
-            "(default: %s)" % get_platform(None),
-        ),
-        (
-            "keep-temp",
-            "k",
-            "keep the pseudo-installation tree around after "
-            + "creating the distribution archive",
-        ),
-        ("dist-dir=", "d", "directory to put final built distributions in"),
-        ("skip-build", None, "skip rebuilding everything (for testing/debugging)"),
-        (
-            "relative",
-            None,
-            "build the archive using relative paths " "(default: false)",
-        ),
-        (
-            "owner=",
-            "u",
-            "Owner name used when creating a tar file" " [default: current user]",
-        ),
-        (
-            "group=",
-            "g",
-            "Group name used when creating a tar file" " [default: current group]",
-        ),
-        ("universal", None, "make a universal wheel" " (default: false)"),
-        (
-            "compression=",
-            None,
-            "zipfile compression (one of: {})"
-            " (default: 'deflated')".format(", ".join(supported_compressions)),
-        ),
-        (
-            "python-tag=",
-            None,
-            "Python implementation compatibility tag"
-            " (default: '%s')" % (python_tag()),
-        ),
-        (
-            "build-number=",
-            None,
-            "Build number for this particular version. "
-            "As specified in PEP-0427, this must start with a digit. "
-            "[default: None]",
-        ),
-        (
-            "py-limited-api=",
-            None,
-            "Python tag (cp32|cp33|cpNN) for abi3 wheel tag" " (default: false)",
-        ),
-    ]
-
-    boolean_options = ["keep-temp", "skip-build", "relative", "universal"]
-
-    def initialize_options(self):
-        self.bdist_dir = None
-        self.data_dir = None
-        self.plat_name = None
-        self.plat_tag = None
-        self.format = "zip"
-        self.keep_temp = False
-        self.dist_dir = None
-        self.egginfo_dir = None
-        self.root_is_pure = None
-        self.skip_build = None
-        self.relative = False
-        self.owner = None
-        self.group = None
-        self.universal = False
-        self.compression = "deflated"
-        self.python_tag = python_tag()
-        self.build_number = None
-        self.py_limited_api = False
-        self.plat_name_supplied = False
-
-    def finalize_options(self):
-        if self.bdist_dir is None:
-            bdist_base = self.get_finalized_command("bdist").bdist_base
-            self.bdist_dir = os.path.join(bdist_base, "wheel")
-
-        self.data_dir = self.wheel_dist_name + ".data"
-        self.plat_name_supplied = self.plat_name is not None
-
-        try:
-            self.compression = self.supported_compressions[self.compression]
-        except KeyError:
-            raise ValueError(f"Unsupported compression: {self.compression}")
-
-        need_options = ("dist_dir", "plat_name", "skip_build")
-
-        self.set_undefined_options("bdist", *zip(need_options, need_options))
-
-        self.root_is_pure = not (
-            self.distribution.has_ext_modules() or self.distribution.has_c_libraries()
-        )
-
-        if self.py_limited_api and not re.match(
-            PY_LIMITED_API_PATTERN, self.py_limited_api
-        ):
-            raise ValueError("py-limited-api must match '%s'" % PY_LIMITED_API_PATTERN)
-
-        # Support legacy [wheel] section for setting universal
-        wheel = self.distribution.get_option_dict("wheel")
-        if "universal" in wheel:
-            # please don't define this in your global configs
-            log.warning(
-                "The [wheel] section is deprecated. Use [bdist_wheel] instead.",
-            )
-            val = wheel["universal"][1].strip()
-            if val.lower() in ("1", "true", "yes"):
-                self.universal = True
-
-        if self.build_number is not None and not self.build_number[:1].isdigit():
-            raise ValueError("Build tag (build-number) must start with a digit.")
-
-    @property
-    def wheel_dist_name(self):
-        """Return distribution full name with - replaced with _"""
-        components = (
-            safer_name(self.distribution.get_name()),
-            safer_version(self.distribution.get_version()),
-        )
-        if self.build_number:
-            components += (self.build_number,)
-        return "-".join(components)
-
-    def get_tag(self):
-        # bdist sets self.plat_name if unset, we should only use it for purepy
-        # wheels if the user supplied it.
-        if self.plat_name_supplied:
-            plat_name = self.plat_name
-        elif self.root_is_pure:
-            plat_name = "any"
-        else:
-            # macosx includes the system version in the platform name, so it needs special handling
-            if self.plat_name and not self.plat_name.startswith("macosx"):
-                plat_name = self.plat_name
-            else:
-                # on macosx always limit the platform name to comply with any
-                # c-extension modules in bdist_dir, since the user can specify
-                # a higher MACOSX_DEPLOYMENT_TARGET via tools like CMake
-
-                # on other platforms, and on macosx if there are no c-extension
-                # modules, use the default platform name.
-                plat_name = get_platform(self.bdist_dir)
-
-            if (
-                plat_name in ("linux-x86_64", "linux_x86_64")
-                and sys.maxsize == 2147483647
-            ):
-                plat_name = "linux_i686"
-
-        plat_name = plat_name.lower().replace("-", "_").replace(".", "_")
-
-        if self.root_is_pure:
-            if self.universal:
-                impl = "py2.py3"
-            else:
-                impl = self.python_tag
-            tag = (impl, "none", plat_name)
-        else:
-            impl_name = tags.interpreter_name()
-            impl_ver = tags.interpreter_version()
-            impl = impl_name + impl_ver
-            # We don't work on CPython 3.1, 3.0.
-            if self.py_limited_api and (impl_name + impl_ver).startswith("cp3"):
-                impl = self.py_limited_api
-                abi_tag = "abi3"
-            else:
-                abi_tag = str(get_abi_tag()).lower()
-            tag = (impl, abi_tag, plat_name)
-            # issue gh-374: allow overriding plat_name
-            supported_tags = [
-                (t.interpreter, t.abi, plat_name) for t in tags.sys_tags()
-            ]
-            assert (
-                tag in supported_tags
-            ), f"would build wheel with unsupported tag {tag}"
-        return tag
-
-    def run(self):
-        build_scripts = self.reinitialize_command("build_scripts")
-        build_scripts.executable = "python"
-        build_scripts.force = True
-
-        build_ext = self.reinitialize_command("build_ext")
-        build_ext.inplace = False
-
-        if not self.skip_build:
-            self.run_command("build")
-
-        install = self.reinitialize_command("install", reinit_subcommands=True)
-        install.root = self.bdist_dir
-        install.compile = False
-        install.skip_build = self.skip_build
-        install.warn_dir = False
-
-        # A wheel without setuptools scripts is more cross-platform.
-        # Use the (undocumented) `no_ep` option to setuptools'
-        # install_scripts command to avoid creating entry point scripts.
-        install_scripts = self.reinitialize_command("install_scripts")
-        install_scripts.no_ep = True
-
-        # Use a custom scheme for the archive, because we have to decide
-        # at installation time which scheme to use.
-        for key in ("headers", "scripts", "data", "purelib", "platlib"):
-            setattr(install, "install_" + key, os.path.join(self.data_dir, key))
-
-        basedir_observed = ""
-
-        if os.name == "nt":
-            # win32 barfs if any of these are ''; could be '.'?
-            # (distutils.command.install:change_roots bug)
-            basedir_observed = os.path.normpath(os.path.join(self.data_dir, ".."))
-            self.install_libbase = self.install_lib = basedir_observed
-
-        setattr(
-            install,
-            "install_purelib" if self.root_is_pure else "install_platlib",
-            basedir_observed,
-        )
-
-        log.info(f"installing to {self.bdist_dir}")
-
-        self.run_command("install")
-
-        impl_tag, abi_tag, plat_tag = self.get_tag()
-        archive_basename = f"{self.wheel_dist_name}-{impl_tag}-{abi_tag}-{plat_tag}"
-        if not self.relative:
-            archive_root = self.bdist_dir
-        else:
-            archive_root = os.path.join(
-                self.bdist_dir, self._ensure_relative(install.install_base)
-            )
-
-        self.set_undefined_options("install_egg_info", ("target", "egginfo_dir"))
-        distinfo_dirname = "{}-{}.dist-info".format(
-            safer_name(self.distribution.get_name()),
-            safer_version(self.distribution.get_version()),
-        )
-        distinfo_dir = os.path.join(self.bdist_dir, distinfo_dirname)
-        self.egg2dist(self.egginfo_dir, distinfo_dir)
-
-        self.write_wheelfile(distinfo_dir)
-
-        # Make the archive
-        if not os.path.exists(self.dist_dir):
-            os.makedirs(self.dist_dir)
-
-        wheel_path = os.path.join(self.dist_dir, archive_basename + ".whl")
-        with WheelFile(wheel_path, "w", self.compression) as wf:
-            wf.write_files(archive_root)
-
-        # Add to 'Distribution.dist_files' so that the "upload" command works
-        getattr(self.distribution, "dist_files", []).append(
-            (
-                "bdist_wheel",
-                "{}.{}".format(*sys.version_info[:2]),  # like 3.7
-                wheel_path,
-            )
-        )
-
-        if not self.keep_temp:
-            log.info(f"removing {self.bdist_dir}")
-            if not self.dry_run:
-                rmtree(self.bdist_dir, onerror=remove_readonly)
-
-    def write_wheelfile(
-        self, wheelfile_base, generator="bdist_wheel (" + wheel_version + ")"
-    ):
-        from email.message import Message
-
-        msg = Message()
-        msg["Wheel-Version"] = "1.0"  # of the spec
-        msg["Generator"] = generator
-        msg["Root-Is-Purelib"] = str(self.root_is_pure).lower()
-        if self.build_number is not None:
-            msg["Build"] = self.build_number
-
-        # Doesn't work for bdist_wininst
-        impl_tag, abi_tag, plat_tag = self.get_tag()
-        for impl in impl_tag.split("."):
-            for abi in abi_tag.split("."):
-                for plat in plat_tag.split("."):
-                    msg["Tag"] = "-".join((impl, abi, plat))
-
-        wheelfile_path = os.path.join(wheelfile_base, "WHEEL")
-        log.info(f"creating {wheelfile_path}")
-        buffer = BytesIO()
-        BytesGenerator(buffer, maxheaderlen=0).flatten(msg)
-        with open(wheelfile_path, "wb") as f:
-            f.write(buffer.getvalue().replace(b"\r\n", b"\r"))
-
-    def _ensure_relative(self, path):
-        # copied from dir_util, deleted
-        drive, path = os.path.splitdrive(path)
-        if path[0:1] == os.sep:
-            path = drive + path[1:]
-        return path
-
-    @property
-    def license_paths(self):
-        if setuptools_major_version >= 57:
-            # Setuptools has resolved any patterns to actual file names
-            return self.distribution.metadata.license_files or ()
-
-        files = set()
-        metadata = self.distribution.get_option_dict("metadata")
-        if setuptools_major_version >= 42:
-            # Setuptools recognizes the license_files option but does not do globbing
-            patterns = self.distribution.metadata.license_files
-        else:
-            # Prior to those, wheel is entirely responsible for handling license files
-            if "license_files" in metadata:
-                patterns = metadata["license_files"][1].split()
-            else:
-                patterns = ()
-
-        if "license_file" in metadata:
-            warnings.warn(
-                'The "license_file" option is deprecated. Use "license_files" instead.',
-                DeprecationWarning,
-            )
-            files.add(metadata["license_file"][1])
-
-        if not files and not patterns and not isinstance(patterns, list):
-            patterns = ("LICEN[CS]E*", "COPYING*", "NOTICE*", "AUTHORS*")
-
-        for pattern in patterns:
-            for path in iglob(pattern):
-                if path.endswith("~"):
-                    log.debug(
-                        f'ignoring license file "{path}" as it looks like a backup'
-                    )
-                    continue
-
-                if path not in files and os.path.isfile(path):
-                    log.info(
-                        f'adding license file "{path}" (matched pattern "{pattern}")'
-                    )
-                    files.add(path)
-
-        return files
-
-    def egg2dist(self, egginfo_path, distinfo_path):
-        """Convert an .egg-info directory into a .dist-info directory"""
-
-        def adios(p):
-            """Appropriately delete directory, file or link."""
-            if os.path.exists(p) and not os.path.islink(p) and os.path.isdir(p):
-                shutil.rmtree(p)
-            elif os.path.exists(p):
-                os.unlink(p)
-
-        adios(distinfo_path)
-
-        if not os.path.exists(egginfo_path):
-            # There is no egg-info. This is probably because the egg-info
-            # file/directory is not named matching the distribution name used
-            # to name the archive file. Check for this case and report
-            # accordingly.
-            import glob
-
-            pat = os.path.join(os.path.dirname(egginfo_path), "*.egg-info")
-            possible = glob.glob(pat)
-            err = f"Egg metadata expected at {egginfo_path} but not found"
-            if possible:
-                alt = os.path.basename(possible[0])
-                err += f" ({alt} found - possible misnamed archive file?)"
-
-            raise ValueError(err)
-
-        if os.path.isfile(egginfo_path):
-            # .egg-info is a single file
-            pkginfo_path = egginfo_path
-            pkg_info = pkginfo_to_metadata(egginfo_path, egginfo_path)
-            os.mkdir(distinfo_path)
-        else:
-            # .egg-info is a directory
-            pkginfo_path = os.path.join(egginfo_path, "PKG-INFO")
-            pkg_info = pkginfo_to_metadata(egginfo_path, pkginfo_path)
-
-            # ignore common egg metadata that is useless to wheel
-            shutil.copytree(
-                egginfo_path,
-                distinfo_path,
-                ignore=lambda x, y: {
-                    "PKG-INFO",
-                    "requires.txt",
-                    "SOURCES.txt",
-                    "not-zip-safe",
-                },
-            )
-
-            # delete dependency_links if it is only whitespace
-            dependency_links_path = os.path.join(distinfo_path, "dependency_links.txt")
-            with open(dependency_links_path) as dependency_links_file:
-                dependency_links = dependency_links_file.read().strip()
-            if not dependency_links:
-                adios(dependency_links_path)
-
-        pkg_info_path = os.path.join(distinfo_path, "METADATA")
-        serialization_policy = EmailPolicy(
-            utf8=True,
-            mangle_from_=False,
-            max_line_length=0,
-        )
-        with open(pkg_info_path, "w", encoding="utf-8") as out:
-            Generator(out, policy=serialization_policy).flatten(pkg_info)
-
-        for license_path in self.license_paths:
-            filename = os.path.basename(license_path)
-            shutil.copy(license_path, os.path.join(distinfo_path, filename))
-
-        adios(egginfo_path)
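get_tag() above assembles the (implementation, abi, platform) triple that names a wheel. A minimal sketch of inspecting the same data for the running interpreter, assuming the packaging library (which wheel vendors) is installed:

from packaging import tags

# sys_tags() yields tags from most to least preferred for this interpreter.
best = next(tags.sys_tags())
print(f"{best.interpreter}-{best.abi}-{best.platform}")
# e.g. cp310-cp310-manylinux_2_17_x86_64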
diff --git a/venv/lib/python3.10/site-packages/wheel/cli/__init__.py b/venv/lib/python3.10/site-packages/wheel/cli/__init__.py
deleted file mode 100644
index c0fb8c4..0000000
--- a/venv/lib/python3.10/site-packages/wheel/cli/__init__.py
+++ /dev/null
@@ -1,96 +0,0 @@
-"""
-Wheel command-line utility.
-"""
-
-from __future__ import annotations
-
-import argparse
-import os
-import sys
-
-
-class WheelError(Exception):
-    pass
-
-
-def unpack_f(args):
-    from .unpack import unpack
-
-    unpack(args.wheelfile, args.dest)
-
-
-def pack_f(args):
-    from .pack import pack
-
-    pack(args.directory, args.dest_dir, args.build_number)
-
-
-def convert_f(args):
-    from .convert import convert
-
-    convert(args.files, args.dest_dir, args.verbose)
-
-
-def version_f(args):
-    from .. import __version__
-
-    print("wheel %s" % __version__)
-
-
-def parser():
-    p = argparse.ArgumentParser()
-    s = p.add_subparsers(help="commands")
-
-    unpack_parser = s.add_parser("unpack", help="Unpack wheel")
-    unpack_parser.add_argument(
-        "--dest", "-d", help="Destination directory", default="."
-    )
-    unpack_parser.add_argument("wheelfile", help="Wheel file")
-    unpack_parser.set_defaults(func=unpack_f)
-
-    repack_parser = s.add_parser("pack", help="Repack wheel")
-    repack_parser.add_argument("directory", help="Root directory of the unpacked wheel")
-    repack_parser.add_argument(
-        "--dest-dir",
-        "-d",
-        default=os.path.curdir,
-        help="Directory to store the wheel (default %(default)s)",
-    )
-    repack_parser.add_argument(
-        "--build-number", help="Build tag to use in the wheel name"
-    )
-    repack_parser.set_defaults(func=pack_f)
-
-    convert_parser = s.add_parser("convert", help="Convert egg or wininst to wheel")
-    convert_parser.add_argument("files", nargs="*", help="Files to convert")
-    convert_parser.add_argument(
-        "--dest-dir",
-        "-d",
-        default=os.path.curdir,
-        help="Directory to store wheels (default %(default)s)",
-    )
-    convert_parser.add_argument("--verbose", "-v", action="store_true")
-    convert_parser.set_defaults(func=convert_f)
-
-    version_parser = s.add_parser("version", help="Print version and exit")
-    version_parser.set_defaults(func=version_f)
-
-    help_parser = s.add_parser("help", help="Show this help")
-    help_parser.set_defaults(func=lambda args: p.print_help())
-
-    return p
-
-
-def main():
-    p = parser()
-    args = p.parse_args()
-    if not hasattr(args, "func"):
-        p.print_help()
-    else:
-        try:
-            args.func(args)
-            return 0
-        except WheelError as e:
-            print(e, file=sys.stderr)
-
-    return 1
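The deleted CLI module relies on argparse's set_defaults(func=...) idiom: each subparser stores its handler, and main() dispatches on it. A minimal standalone sketch of the same pattern with a hypothetical command:

import argparse


def greet_f(args):
    print(f"hello {args.name}")


p = argparse.ArgumentParser()
s = p.add_subparsers(help="commands")
greet = s.add_parser("greet", help="Greet someone")
greet.add_argument("name")
greet.set_defaults(func=greet_f)

args = p.parse_args(["greet", "world"])
args.func(args)  # -> hello world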
diff --git a/venv/lib/python3.10/site-packages/wheel/cli/convert.py b/venv/lib/python3.10/site-packages/wheel/cli/convert.py
deleted file mode 100644
index 1287059..0000000
--- a/venv/lib/python3.10/site-packages/wheel/cli/convert.py
+++ /dev/null
@@ -1,273 +0,0 @@
-from __future__ import annotations
-
-import os.path
-import re
-import shutil
-import tempfile
-import zipfile
-from glob import iglob
-
-from ..bdist_wheel import bdist_wheel
-from ..wheelfile import WheelFile
-from . import WheelError
-
-try:
-    from setuptools import Distribution
-except ImportError:
-    from distutils.dist import Distribution
-
-egg_info_re = re.compile(
-    r"""
-    (?P<name>.+?)-(?P<ver>.+?)
-    (-(?P<pyver>py\d\.\d+)
-     (-(?P<arch>.+?))?
-    )?.egg$""",
-    re.VERBOSE,
-)
-
-
-class _bdist_wheel_tag(bdist_wheel):
-    # allow the client to override the default generated wheel tag
-    # The default bdist_wheel implementation uses python and abi tags
-    # of the running python process. This is not suitable for
-    # generating/repackaging prebuild binaries.
-
-    full_tag_supplied = False
-    full_tag = None  # None or a (pytag, soabitag, plattag) triple
-
-    def get_tag(self):
-        if self.full_tag_supplied and self.full_tag is not None:
-            return self.full_tag
-        else:
-            return bdist_wheel.get_tag(self)
-
-
-def egg2wheel(egg_path: str, dest_dir: str):
-    filename = os.path.basename(egg_path)
-    match = egg_info_re.match(filename)
-    if not match:
-        raise WheelError(f"Invalid egg file name: {filename}")
-
-    egg_info = match.groupdict()
-    dir = tempfile.mkdtemp(suffix="_e2w")
-    if os.path.isfile(egg_path):
-        # assume we have a bdist_egg otherwise
-        with zipfile.ZipFile(egg_path) as egg:
-            egg.extractall(dir)
-    else:
-        # support buildout-style installed eggs directories
-        for pth in os.listdir(egg_path):
-            src = os.path.join(egg_path, pth)
-            if os.path.isfile(src):
-                shutil.copy2(src, dir)
-            else:
-                shutil.copytree(src, os.path.join(dir, pth))
-
-    pyver = egg_info["pyver"]
-    if pyver:
-        pyver = egg_info["pyver"] = pyver.replace(".", "")
-
-    arch = (egg_info["arch"] or "any").replace(".", "_").replace("-", "_")
-
-    # assume all binary eggs are for CPython
-    abi = "cp" + pyver[2:] if arch != "any" else "none"
-
-    root_is_purelib = egg_info["arch"] is None
-    if root_is_purelib:
-        bw = bdist_wheel(Distribution())
-    else:
-        bw = _bdist_wheel_tag(Distribution())
-
-    bw.root_is_pure = root_is_purelib
-    bw.python_tag = pyver
-    bw.plat_name_supplied = True
-    bw.plat_name = egg_info["arch"] or "any"
-    if not root_is_purelib:
-        bw.full_tag_supplied = True
-        bw.full_tag = (pyver, abi, arch)
-
-    dist_info_dir = os.path.join(dir, "{name}-{ver}.dist-info".format(**egg_info))
-    bw.egg2dist(os.path.join(dir, "EGG-INFO"), dist_info_dir)
-    bw.write_wheelfile(dist_info_dir, generator="egg2wheel")
-    wheel_name = "{name}-{ver}-{pyver}-{}-{}.whl".format(abi, arch, **egg_info)
-    with WheelFile(os.path.join(dest_dir, wheel_name), "w") as wf:
-        wf.write_files(dir)
-
-    shutil.rmtree(dir)
-
-
-def parse_wininst_info(wininfo_name, egginfo_name):
-    """Extract metadata from filenames.
-
-    Extracts the 4 metadata items needed (name, version, pyversion, arch) from
-    the installer filename and the name of the egg-info directory embedded in
-    the zipfile (if any).
-
-    The egginfo filename has the format::
-
-        name-ver(-pyver)(-arch).egg-info
-
-    The installer filename has the format::
-
-        name-ver.arch(-pyver).exe
-
-    Some things to note:
-
-    1. The installer filename is not definitive. An installer can be renamed
-       and work perfectly well as an installer. So more reliable data should
-       be used whenever possible.
-    2. The egg-info data should be preferred for the name and version, because
-       these come straight from the distutils metadata, and are mandatory.
-    3. The pyver from the egg-info data should be ignored, as it is
-       constructed from the version of Python used to build the installer,
-       which is irrelevant - the installer filename is correct here (even to
-       the point that when it's not there, any version is implied).
-    4. The architecture must be taken from the installer filename, as it is
-       not included in the egg-info data.
-    5. Architecture-neutral installers still have an architecture because the
-       installer format itself (being executable) is architecture-specific. We
-       should therefore ignore the architecture if the content is pure-python.
-    """
-
-    egginfo = None
-    if egginfo_name:
-        egginfo = egg_info_re.search(egginfo_name)
-        if not egginfo:
-            raise ValueError(f"Egg info filename {egginfo_name} is not valid")
-
-    # Parse the wininst filename
-    # 1. Distribution name (up to the first '-')
-    w_name, sep, rest = wininfo_name.partition("-")
-    if not sep:
-        raise ValueError(f"Installer filename {wininfo_name} is not valid")
-
-    # Strip '.exe'
-    rest = rest[:-4]
-    # 2. Python version (from the last '-', must start with 'py')
-    rest2, sep, w_pyver = rest.rpartition("-")
-    if sep and w_pyver.startswith("py"):
-        rest = rest2
-        w_pyver = w_pyver.replace(".", "")
-    else:
-        # Not version specific - use py2.py3. While it is possible that
-        # pure-Python code is not compatible with both Python 2 and 3, there
-        # is no way of knowing from the wininst format, so we assume the best
-        # here (the user can always manually rename the wheel to be more
-        # restrictive if needed).
-        w_pyver = "py2.py3"
-    # 3. Version and architecture
-    w_ver, sep, w_arch = rest.rpartition(".")
-    if not sep:
-        raise ValueError(f"Installer filename {wininfo_name} is not valid")
-
-    if egginfo:
-        w_name = egginfo.group("name")
-        w_ver = egginfo.group("ver")
-
-    return {"name": w_name, "ver": w_ver, "arch": w_arch, "pyver": w_pyver}
-
-
-def wininst2wheel(path, dest_dir):
-    with zipfile.ZipFile(path) as bdw:
-        # Search for egg-info in the archive
-        egginfo_name = None
-        for filename in bdw.namelist():
-            if ".egg-info" in filename:
-                egginfo_name = filename
-                break
-
-        info = parse_wininst_info(os.path.basename(path), egginfo_name)
-
-        root_is_purelib = True
-        for zipinfo in bdw.infolist():
-            if zipinfo.filename.startswith("PLATLIB"):
-                root_is_purelib = False
-                break
-        if root_is_purelib:
-            paths = {"purelib": ""}
-        else:
-            paths = {"platlib": ""}
-
-        dist_info = "%(name)s-%(ver)s" % info
-        datadir = "%s.data/" % dist_info
-
-        # rewrite paths to trick ZipFile into extracting an egg
-        # XXX grab wininst .ini - between .exe, padding, and first zip file.
-        members = []
-        egginfo_name = ""
-        for zipinfo in bdw.infolist():
-            key, basename = zipinfo.filename.split("/", 1)
-            key = key.lower()
-            basepath = paths.get(key, None)
-            if basepath is None:
-                basepath = datadir + key.lower() + "/"
-            oldname = zipinfo.filename
-            newname = basepath + basename
-            zipinfo.filename = newname
-            del bdw.NameToInfo[oldname]
-            bdw.NameToInfo[newname] = zipinfo
-            # Collect member names, but omit '' (from an entry like "PLATLIB/")
-            if newname:
-                members.append(newname)
-            # Remember egg-info name for the egg2dist call below
-            if not egginfo_name:
-                if newname.endswith(".egg-info"):
-                    egginfo_name = newname
-                elif ".egg-info/" in newname:
-                    egginfo_name, sep, _ = newname.rpartition("/")
-        dir = tempfile.mkdtemp(suffix="_b2w")
-        bdw.extractall(dir, members)
-
-    # egg2wheel
-    abi = "none"
-    pyver = info["pyver"]
-    arch = (info["arch"] or "any").replace(".", "_").replace("-", "_")
-    # Wininst installers always have arch even if they are not
-    # architecture-specific (because the format itself is).
-    # So, assume the content is architecture-neutral if root is purelib.
-    if root_is_purelib:
-        arch = "any"
-    # If the installer is architecture-specific, it's almost certainly also
-    # CPython-specific.
-    if arch != "any":
-        pyver = pyver.replace("py", "cp")
-    wheel_name = "-".join((dist_info, pyver, abi, arch))
-    if root_is_purelib:
-        bw = bdist_wheel(Distribution())
-    else:
-        bw = _bdist_wheel_tag(Distribution())
-
-    bw.root_is_pure = root_is_purelib
-    bw.python_tag = pyver
-    bw.plat_name_supplied = True
-    bw.plat_name = info["arch"] or "any"
-
-    if not root_is_purelib:
-        bw.full_tag_supplied = True
-        bw.full_tag = (pyver, abi, arch)
-
-    dist_info_dir = os.path.join(dir, "%s.dist-info" % dist_info)
-    bw.egg2dist(os.path.join(dir, egginfo_name), dist_info_dir)
-    bw.write_wheelfile(dist_info_dir, generator="wininst2wheel")
-
-    wheel_path = os.path.join(dest_dir, wheel_name)
-    with WheelFile(wheel_path, "w") as wf:
-        wf.write_files(dir)
-
-    shutil.rmtree(dir)
-
-
-def convert(files, dest_dir, verbose):
-    for pat in files:
-        for installer in iglob(pat):
-            if os.path.splitext(installer)[1] == ".egg":
-                conv = egg2wheel
-            else:
-                conv = wininst2wheel
-
-            if verbose:
-                print(f"{installer}... ", flush=True)
-
-            conv(installer, dest_dir)
-            if verbose:
-                print("OK")
diff --git a/venv/lib/python3.10/site-packages/wheel/cli/pack.py b/venv/lib/python3.10/site-packages/wheel/cli/pack.py
deleted file mode 100644
index 1949d4c..0000000
--- a/venv/lib/python3.10/site-packages/wheel/cli/pack.py
+++ /dev/null
@@ -1,90 +0,0 @@
-from __future__ import annotations
-
-import os.path
-import re
-
-from wheel.cli import WheelError
-from wheel.wheelfile import WheelFile
-
-DIST_INFO_RE = re.compile(r"^(?P<namever>(?P<name>.+?)-(?P<ver>\d.*?))\.dist-info$")
-BUILD_NUM_RE = re.compile(rb"Build: (\d\w*)$")
-
-
-def pack(directory: str, dest_dir: str, build_number: str | None):
-    """Repack a previously unpacked wheel directory into a new wheel file.
-
-    The .dist-info/WHEEL file must contain one or more tags so that the target
-    wheel file name can be determined.
-
-    :param directory: The unpacked wheel directory
-    :param dest_dir: Destination directory (defaults to the current directory)
-    """
-    # Find the .dist-info directory
-    dist_info_dirs = [
-        fn
-        for fn in os.listdir(directory)
-        if os.path.isdir(os.path.join(directory, fn)) and DIST_INFO_RE.match(fn)
-    ]
-    if len(dist_info_dirs) > 1:
-        raise WheelError(f"Multiple .dist-info directories found in {directory}")
-    elif not dist_info_dirs:
-        raise WheelError(f"No .dist-info directories found in {directory}")
-
-    # Determine the target wheel filename
-    dist_info_dir = dist_info_dirs[0]
-    name_version = DIST_INFO_RE.match(dist_info_dir).group("namever")
-
-    # Read the tags and the existing build number from .dist-info/WHEEL
-    existing_build_number = None
-    wheel_file_path = os.path.join(directory, dist_info_dir, "WHEEL")
-    with open(wheel_file_path) as f:
-        tags = []
-        for line in f:
-            if line.startswith("Tag: "):
-                tags.append(line.split(" ")[1].rstrip())
-            elif line.startswith("Build: "):
-                existing_build_number = line.split(" ")[1].rstrip()
-
-        if not tags:
-            raise WheelError(
-                "No tags present in {}/WHEEL; cannot determine target wheel "
-                "filename".format(dist_info_dir)
-            )
-
-    # Set the wheel file name and add/replace/remove the Build tag in .dist-info/WHEEL
-    build_number = build_number if build_number is not None else existing_build_number
-    if build_number is not None:
-        if build_number:
-            name_version += "-" + build_number
-
-        if build_number != existing_build_number:
-            replacement = (
-                ("Build: %s\r\n" % build_number).encode("ascii")
-                if build_number
-                else b""
-            )
-            with open(wheel_file_path, "rb+") as f:
-                wheel_file_content = f.read()
-                wheel_file_content, num_replaced = BUILD_NUM_RE.subn(
-                    replacement, wheel_file_content
-                )
-                if not num_replaced:
-                    wheel_file_content += replacement
-
-                f.seek(0)
-                f.truncate()
-                f.write(wheel_file_content)
-
-    # Reassemble the tags for the wheel file
-    impls = sorted({tag.split("-")[0] for tag in tags})
-    abivers = sorted({tag.split("-")[1] for tag in tags})
-    platforms = sorted({tag.split("-")[2] for tag in tags})
-    tagline = "-".join([".".join(impls), ".".join(abivers), ".".join(platforms)])
-
-    # Repack the wheel
-    wheel_path = os.path.join(dest_dir, f"{name_version}-{tagline}.whl")
-    with WheelFile(wheel_path, "w") as wf:
-        print(f"Repacking wheel as {wheel_path}...", end="", flush=True)
-        wf.write_files(directory)
-
-    print("OK")
diff --git a/venv/lib/python3.10/site-packages/wheel/cli/unpack.py b/venv/lib/python3.10/site-packages/wheel/cli/unpack.py
deleted file mode 100644
index c6409d4..0000000
--- a/venv/lib/python3.10/site-packages/wheel/cli/unpack.py
+++ /dev/null
@@ -1,23 +0,0 @@
-from __future__ import annotations
-
-from pathlib import Path
-
-from ..wheelfile import WheelFile
-
-
-def unpack(path: str, dest: str = ".") -> None:
-    """Unpack a wheel.
-
-    Wheel content will be unpacked to {dest}/{name}-{ver}, where {name}
-    is the package name and {ver} its version.
-
-    :param path: The path to the wheel.
-    :param dest: Destination directory (default to current directory).
-    """
-    with WheelFile(path) as wf:
-        namever = wf.parsed_filename.group("namever")
-        destination = Path(dest) / namever
-        print(f"Unpacking to: {destination}...", end="", flush=True)
-        wf.extractall(destination)
-
-    print("OK")
diff --git a/venv/lib/python3.10/site-packages/wheel/macosx_libfile.py b/venv/lib/python3.10/site-packages/wheel/macosx_libfile.py
deleted file mode 100644
index 4d08574..0000000
--- a/venv/lib/python3.10/site-packages/wheel/macosx_libfile.py
+++ /dev/null
@@ -1,471 +0,0 @@
-"""
-This module contains function to analyse dynamic library
-headers to extract system information
-
-Currently only for MacOSX
-
-Library file on macosx system starts with Mach-O or Fat field.
-This can be distinguish by first 32 bites and it is called magic number.
-Proper value of magic number is with suffix _MAGIC. Suffix _CIGAM means
-reversed bytes order.
-Both fields can occur in two types: 32 and 64 bytes.
-
-FAT field inform that this library contains few version of library
-(typically for different types version). It contains
-information where Mach-O headers starts.
-
-Each section started with Mach-O header contains one library
-(So if file starts with this field it contains only one version).
-
-After filed Mach-O there are section fields.
-Each of them starts with two fields:
-cmd - magic number for this command
-cmdsize - total size occupied by this section information.
-
-In this case only sections LC_VERSION_MIN_MACOSX (for macosx 10.13 and earlier)
-and LC_BUILD_VERSION (for macosx 10.14 and newer) are interesting,
-because them contains information about minimal system version.
-
-Important remarks:
-- For fat files this implementation looks for maximum number version.
-  It not check if it is 32 or 64 and do not compare it with currently built package.
-  So it is possible to false report higher version that needed.
-- All structures signatures are taken form macosx header files.
-- I think that binary format will be more stable than `otool` output.
-  and if apple introduce some changes both implementation will need to be updated.
-- The system compile will set the deployment target no lower than
-  11.0 for arm64 builds. For "Universal 2" builds use the x86_64 deployment
-  target when the arm64 target is 11.0.
-"""
-
-from __future__ import annotations
-
-import ctypes
-import os
-import sys
-
-"""here the needed const and struct from mach-o header files"""
-
-FAT_MAGIC = 0xCAFEBABE
-FAT_CIGAM = 0xBEBAFECA
-FAT_MAGIC_64 = 0xCAFEBABF
-FAT_CIGAM_64 = 0xBFBAFECA
-MH_MAGIC = 0xFEEDFACE
-MH_CIGAM = 0xCEFAEDFE
-MH_MAGIC_64 = 0xFEEDFACF
-MH_CIGAM_64 = 0xCFFAEDFE
-
-LC_VERSION_MIN_MACOSX = 0x24
-LC_BUILD_VERSION = 0x32
-
-CPU_TYPE_ARM64 = 0x0100000C
-
-mach_header_fields = [
-    ("magic", ctypes.c_uint32),
-    ("cputype", ctypes.c_int),
-    ("cpusubtype", ctypes.c_int),
-    ("filetype", ctypes.c_uint32),
-    ("ncmds", ctypes.c_uint32),
-    ("sizeofcmds", ctypes.c_uint32),
-    ("flags", ctypes.c_uint32),
-]
-"""
-struct mach_header {
-    uint32_t	magic;		/* mach magic number identifier */
-    cpu_type_t	cputype;	/* cpu specifier */
-    cpu_subtype_t	cpusubtype;	/* machine specifier */
-    uint32_t	filetype;	/* type of file */
-    uint32_t	ncmds;		/* number of load commands */
-    uint32_t	sizeofcmds;	/* the size of all the load commands */
-    uint32_t	flags;		/* flags */
-};
-typedef integer_t cpu_type_t;
-typedef integer_t cpu_subtype_t;
-"""
-
-mach_header_fields_64 = mach_header_fields + [("reserved", ctypes.c_uint32)]
-"""
-struct mach_header_64 {
-    uint32_t	magic;		/* mach magic number identifier */
-    cpu_type_t	cputype;	/* cpu specifier */
-    cpu_subtype_t	cpusubtype;	/* machine specifier */
-    uint32_t	filetype;	/* type of file */
-    uint32_t	ncmds;		/* number of load commands */
-    uint32_t	sizeofcmds;	/* the size of all the load commands */
-    uint32_t	flags;		/* flags */
-    uint32_t	reserved;	/* reserved */
-};
-"""
-
-fat_header_fields = [("magic", ctypes.c_uint32), ("nfat_arch", ctypes.c_uint32)]
-"""
-struct fat_header {
-    uint32_t	magic;		/* FAT_MAGIC or FAT_MAGIC_64 */
-    uint32_t	nfat_arch;	/* number of structs that follow */
-};
-"""
-
-fat_arch_fields = [
-    ("cputype", ctypes.c_int),
-    ("cpusubtype", ctypes.c_int),
-    ("offset", ctypes.c_uint32),
-    ("size", ctypes.c_uint32),
-    ("align", ctypes.c_uint32),
-]
-"""
-struct fat_arch {
-    cpu_type_t	cputype;	/* cpu specifier (int) */
-    cpu_subtype_t	cpusubtype;	/* machine specifier (int) */
-    uint32_t	offset;		/* file offset to this object file */
-    uint32_t	size;		/* size of this object file */
-    uint32_t	align;		/* alignment as a power of 2 */
-};
-"""
-
-fat_arch_64_fields = [
-    ("cputype", ctypes.c_int),
-    ("cpusubtype", ctypes.c_int),
-    ("offset", ctypes.c_uint64),
-    ("size", ctypes.c_uint64),
-    ("align", ctypes.c_uint32),
-    ("reserved", ctypes.c_uint32),
-]
-"""
-struct fat_arch_64 {
-    cpu_type_t	cputype;	/* cpu specifier (int) */
-    cpu_subtype_t	cpusubtype;	/* machine specifier (int) */
-    uint64_t	offset;		/* file offset to this object file */
-    uint64_t	size;		/* size of this object file */
-    uint32_t	align;		/* alignment as a power of 2 */
-    uint32_t	reserved;	/* reserved */
-};
-"""
-
-segment_base_fields = [("cmd", ctypes.c_uint32), ("cmdsize", ctypes.c_uint32)]
-"""base for reading segment info"""
-
-segment_command_fields = [
-    ("cmd", ctypes.c_uint32),
-    ("cmdsize", ctypes.c_uint32),
-    ("segname", ctypes.c_char * 16),
-    ("vmaddr", ctypes.c_uint32),
-    ("vmsize", ctypes.c_uint32),
-    ("fileoff", ctypes.c_uint32),
-    ("filesize", ctypes.c_uint32),
-    ("maxprot", ctypes.c_int),
-    ("initprot", ctypes.c_int),
-    ("nsects", ctypes.c_uint32),
-    ("flags", ctypes.c_uint32),
-]
-"""
-struct segment_command { /* for 32-bit architectures */
-    uint32_t	cmd;		/* LC_SEGMENT */
-    uint32_t	cmdsize;	/* includes sizeof section structs */
-    char		segname[16];	/* segment name */
-    uint32_t	vmaddr;		/* memory address of this segment */
-    uint32_t	vmsize;		/* memory size of this segment */
-    uint32_t	fileoff;	/* file offset of this segment */
-    uint32_t	filesize;	/* amount to map from the file */
-    vm_prot_t	maxprot;	/* maximum VM protection */
-    vm_prot_t	initprot;	/* initial VM protection */
-    uint32_t	nsects;		/* number of sections in segment */
-    uint32_t	flags;		/* flags */
-};
-typedef int vm_prot_t;
-"""
-
-segment_command_fields_64 = [
-    ("cmd", ctypes.c_uint32),
-    ("cmdsize", ctypes.c_uint32),
-    ("segname", ctypes.c_char * 16),
-    ("vmaddr", ctypes.c_uint64),
-    ("vmsize", ctypes.c_uint64),
-    ("fileoff", ctypes.c_uint64),
-    ("filesize", ctypes.c_uint64),
-    ("maxprot", ctypes.c_int),
-    ("initprot", ctypes.c_int),
-    ("nsects", ctypes.c_uint32),
-    ("flags", ctypes.c_uint32),
-]
-"""
-struct segment_command_64 { /* for 64-bit architectures */
-    uint32_t	cmd;		/* LC_SEGMENT_64 */
-    uint32_t	cmdsize;	/* includes sizeof section_64 structs */
-    char		segname[16];	/* segment name */
-    uint64_t	vmaddr;		/* memory address of this segment */
-    uint64_t	vmsize;		/* memory size of this segment */
-    uint64_t	fileoff;	/* file offset of this segment */
-    uint64_t	filesize;	/* amount to map from the file */
-    vm_prot_t	maxprot;	/* maximum VM protection */
-    vm_prot_t	initprot;	/* initial VM protection */
-    uint32_t	nsects;		/* number of sections in segment */
-    uint32_t	flags;		/* flags */
-};
-"""
-
-version_min_command_fields = segment_base_fields + [
-    ("version", ctypes.c_uint32),
-    ("sdk", ctypes.c_uint32),
-]
-"""
-struct version_min_command {
-    uint32_t	cmd;		/* LC_VERSION_MIN_MACOSX or
-                               LC_VERSION_MIN_IPHONEOS or
-                               LC_VERSION_MIN_WATCHOS or
-                               LC_VERSION_MIN_TVOS */
-    uint32_t	cmdsize;	/* sizeof(struct min_version_command) */
-    uint32_t	version;	/* X.Y.Z is encoded in nibbles xxxx.yy.zz */
-    uint32_t	sdk;		/* X.Y.Z is encoded in nibbles xxxx.yy.zz */
-};
-"""
-
-build_version_command_fields = segment_base_fields + [
-    ("platform", ctypes.c_uint32),
-    ("minos", ctypes.c_uint32),
-    ("sdk", ctypes.c_uint32),
-    ("ntools", ctypes.c_uint32),
-]
-"""
-struct build_version_command {
-    uint32_t	cmd;		/* LC_BUILD_VERSION */
-    uint32_t	cmdsize;	/* sizeof(struct build_version_command) plus */
-                                /* ntools * sizeof(struct build_tool_version) */
-    uint32_t	platform;	/* platform */
-    uint32_t	minos;		/* X.Y.Z is encoded in nibbles xxxx.yy.zz */
-    uint32_t	sdk;		/* X.Y.Z is encoded in nibbles xxxx.yy.zz */
-    uint32_t	ntools;		/* number of tool entries following this */
-};
-"""
-
-
-def swap32(x):
-    return (
-        ((x << 24) & 0xFF000000)
-        | ((x << 8) & 0x00FF0000)
-        | ((x >> 8) & 0x0000FF00)
-        | ((x >> 24) & 0x000000FF)
-    )
-
-
-def get_base_class_and_magic_number(lib_file, seek=None):
-    if seek is None:
-        seek = lib_file.tell()
-    else:
-        lib_file.seek(seek)
-    magic_number = ctypes.c_uint32.from_buffer_copy(
-        lib_file.read(ctypes.sizeof(ctypes.c_uint32))
-    ).value
-
-    # Handle wrong byte order
-    if magic_number in [FAT_CIGAM, FAT_CIGAM_64, MH_CIGAM, MH_CIGAM_64]:
-        if sys.byteorder == "little":
-            BaseClass = ctypes.BigEndianStructure
-        else:
-            BaseClass = ctypes.LittleEndianStructure
-
-        magic_number = swap32(magic_number)
-    else:
-        BaseClass = ctypes.Structure
-
-    lib_file.seek(seek)
-    return BaseClass, magic_number
-
-
-def read_data(struct_class, lib_file):
-    return struct_class.from_buffer_copy(lib_file.read(ctypes.sizeof(struct_class)))
-
-
-def extract_macosx_min_system_version(path_to_lib):
-    with open(path_to_lib, "rb") as lib_file:
-        BaseClass, magic_number = get_base_class_and_magic_number(lib_file, 0)
-        if magic_number not in [FAT_MAGIC, FAT_MAGIC_64, MH_MAGIC, MH_MAGIC_64]:
-            return
-
-        if magic_number in [FAT_MAGIC, FAT_CIGAM_64]:
-
-            class FatHeader(BaseClass):
-                _fields_ = fat_header_fields
-
-            fat_header = read_data(FatHeader, lib_file)
-            if magic_number == FAT_MAGIC:
-
-                class FatArch(BaseClass):
-                    _fields_ = fat_arch_fields
-
-            else:
-
-                class FatArch(BaseClass):
-                    _fields_ = fat_arch_64_fields
-
-            fat_arch_list = [
-                read_data(FatArch, lib_file) for _ in range(fat_header.nfat_arch)
-            ]
-
-            versions_list = []
-            for el in fat_arch_list:
-                try:
-                    version = read_mach_header(lib_file, el.offset)
-                    if version is not None:
-                        if el.cputype == CPU_TYPE_ARM64 and len(fat_arch_list) != 1:
-                            # Xcode will not set the deployment target below 11.0.0
-                            # for the arm64 architecture. Ignore the arm64 deployment
-                            # in fat binaries when the target is 11.0.0, that way
-                            # the other architectures can select a lower deployment
-                            # target.
-                            # This is safe because there is no arm64 variant for
-                            # macOS 10.15 or earlier.
-                            if version == (11, 0, 0):
-                                continue
-                        versions_list.append(version)
-                except ValueError:
-                    pass
-
-            if len(versions_list) > 0:
-                return max(versions_list)
-            else:
-                return None
-
-        else:
-            try:
-                return read_mach_header(lib_file, 0)
-            except ValueError:
-                """when some error during read library files"""
-                return None
-
-
-def read_mach_header(lib_file, seek=None):
-    """
-    This function parses a Mach-O header and extracts
-    information about the minimal system version.
-
-    :param lib_file: reference to an opened library file, positioned at the header
-    """
-    if seek is not None:
-        lib_file.seek(seek)
-    base_class, magic_number = get_base_class_and_magic_number(lib_file)
-    arch = "32" if magic_number == MH_MAGIC else "64"
-
-    class SegmentBase(base_class):
-        _fields_ = segment_base_fields
-
-    if arch == "32":
-
-        class MachHeader(base_class):
-            _fields_ = mach_header_fields
-
-    else:
-
-        class MachHeader(base_class):
-            _fields_ = mach_header_fields_64
-
-    mach_header = read_data(MachHeader, lib_file)
-    for _i in range(mach_header.ncmds):
-        pos = lib_file.tell()
-        segment_base = read_data(SegmentBase, lib_file)
-        lib_file.seek(pos)
-        if segment_base.cmd == LC_VERSION_MIN_MACOSX:
-
-            class VersionMinCommand(base_class):
-                _fields_ = version_min_command_fields
-
-            version_info = read_data(VersionMinCommand, lib_file)
-            return parse_version(version_info.version)
-        elif segment_base.cmd == LC_BUILD_VERSION:
-
-            class VersionBuild(base_class):
-                _fields_ = build_version_command_fields
-
-            version_info = read_data(VersionBuild, lib_file)
-            return parse_version(version_info.minos)
-        else:
-            lib_file.seek(pos + segment_base.cmdsize)
-            continue
-
-
-def parse_version(version):
-    x = (version & 0xFFFF0000) >> 16
-    y = (version & 0x0000FF00) >> 8
-    z = version & 0x000000FF
-    return x, y, z
-
-
-def calculate_macosx_platform_tag(archive_root, platform_tag):
-    """
-    Calculate the proper macosx platform tag based on the files included in the wheel
-
-    Example platform tag `macosx-10.14-x86_64`
-    """
-    prefix, base_version, suffix = platform_tag.split("-")
-    base_version = tuple(int(x) for x in base_version.split("."))
-    base_version = base_version[:2]
-    if base_version[0] > 10:
-        base_version = (base_version[0], 0)
-    assert len(base_version) == 2
-    if "MACOSX_DEPLOYMENT_TARGET" in os.environ:
-        deploy_target = tuple(
-            int(x) for x in os.environ["MACOSX_DEPLOYMENT_TARGET"].split(".")
-        )
-        deploy_target = deploy_target[:2]
-        if deploy_target[0] > 10:
-            deploy_target = (deploy_target[0], 0)
-        if deploy_target < base_version:
-            sys.stderr.write(
-                "[WARNING] MACOSX_DEPLOYMENT_TARGET is set to a lower value ({}) than "
-                "the version on which the Python interpreter was compiled ({}), and "
-                "will be ignored.\n".format(
-                    ".".join(str(x) for x in deploy_target),
-                    ".".join(str(x) for x in base_version),
-                )
-            )
-        else:
-            base_version = deploy_target
-
-    assert len(base_version) == 2
-    start_version = base_version
-    versions_dict = {}
-    for (dirpath, _dirnames, filenames) in os.walk(archive_root):
-        for filename in filenames:
-            if filename.endswith(".dylib") or filename.endswith(".so"):
-                lib_path = os.path.join(dirpath, filename)
-                min_ver = extract_macosx_min_system_version(lib_path)
-                if min_ver is not None:
-                    min_ver = min_ver[0:2]
-                    if min_ver[0] > 10:
-                        min_ver = (min_ver[0], 0)
-                    versions_dict[lib_path] = min_ver
-
-    if len(versions_dict) > 0:
-        base_version = max(base_version, max(versions_dict.values()))
-
-    # the macosx platform tag does not support minor bugfix releases
-    fin_base_version = "_".join([str(x) for x in base_version])
-    if start_version < base_version:
-        problematic_files = [k for k, v in versions_dict.items() if v > start_version]
-        if len(problematic_files) == 1:
-            files_form = "this file"
-        else:
-            files_form = "these files"
-        problematic_files = "\n".join(problematic_files)
-        error_message = (
-            "[WARNING] This wheel needs a higher macOS version than {}  "
-            "To silence this warning, set MACOSX_DEPLOYMENT_TARGET to at least "
-            + fin_base_version
-            + " or recreate "
-            + files_form
-            + " with lower "
-            "MACOSX_DEPLOYMENT_TARGET:  \n" + problematic_files
-        )
-
-        if "MACOSX_DEPLOYMENT_TARGET" in os.environ:
-            error_message = error_message.format(
-                "is set in MACOSX_DEPLOYMENT_TARGET variable."
-            )
-        else:
-            error_message = error_message.format(
-                "the version your Python interpreter is compiled against."
-            )
-
-        sys.stderr.write(error_message)
-
-    platform_tag = prefix + "_" + fin_base_version + "_" + suffix
-    return platform_tag
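parse_version() above decodes the version fields that Mach-O stores as packed nibbles: major in the high 16 bits, minor and patch in one byte each. A minimal worked example:

# Encode 10.14.6 the way LC_VERSION_MIN_MACOSX / LC_BUILD_VERSION store it.
version = (10 << 16) | (14 << 8) | 6

x = (version & 0xFFFF0000) >> 16  # major
y = (version & 0x0000FF00) >> 8   # minor
z = version & 0x000000FF          # patch
print((x, y, z))                  # (10, 14, 6)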
diff --git a/venv/lib/python3.10/site-packages/wheel/metadata.py b/venv/lib/python3.10/site-packages/wheel/metadata.py
deleted file mode 100644
index 159ff0a..0000000
--- a/venv/lib/python3.10/site-packages/wheel/metadata.py
+++ /dev/null
@@ -1,109 +0,0 @@
-"""
-Tools for converting old- to new-style metadata.
-"""
-from __future__ import annotations
-
-import os.path
-import textwrap
-from email.message import Message
-from email.parser import Parser
-from typing import Iterator
-
-from pkg_resources import Requirement, safe_extra, split_sections
-
-
-def requires_to_requires_dist(requirement: Requirement) -> str:
-    """Return the version specifier for a requirement in PEP 345/566 fashion."""
-    if getattr(requirement, "url", None):
-        return " @ " + requirement.url
-
-    requires_dist = []
-    for op, ver in requirement.specs:
-        requires_dist.append(op + ver)
-
-    if requires_dist:
-        return " (" + ",".join(sorted(requires_dist)) + ")"
-    else:
-        return ""
-
-
-def convert_requirements(requirements: list[str]) -> Iterator[str]:
-    """Yield Requires-Dist: strings for parsed requirements strings."""
-    for req in requirements:
-        parsed_requirement = Requirement.parse(req)
-        spec = requires_to_requires_dist(parsed_requirement)
-        extras = ",".join(sorted(parsed_requirement.extras))
-        if extras:
-            extras = f"[{extras}]"
-
-        yield parsed_requirement.project_name + extras + spec
-
-
-def generate_requirements(
-    extras_require: dict[str, list[str]]
-) -> Iterator[tuple[str, str]]:
-    """
-    Convert requirements from a setup()-style dictionary to
-    ('Requires-Dist', 'requirement') and ('Provides-Extra', 'extra') tuples.
-
-    extras_require is a dictionary of {extra: [requirements]} as passed to setup(),
-    using the empty extra {'': [requirements]} to hold install_requires.
-    """
-    for extra, depends in extras_require.items():
-        condition = ""
-        extra = extra or ""
-        if ":" in extra:  # setuptools extra:condition syntax
-            extra, condition = extra.split(":", 1)
-
-        extra = safe_extra(extra)
-        if extra:
-            yield "Provides-Extra", extra
-            if condition:
-                condition = "(" + condition + ") and "
-            condition += "extra == '%s'" % extra
-
-        if condition:
-            condition = " ; " + condition
-
-        for new_req in convert_requirements(depends):
-            yield "Requires-Dist", new_req + condition
-
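-# Example (illustrative, not part of the module): a setup()-style mapping
-#     {"": ["requests>=2.0"], "test:python_version<'3.8'": ["pytest"]}
-# yields
-#     ("Requires-Dist", "requests (>=2.0)")
-#     ("Provides-Extra", "test")
-#     ("Requires-Dist", "pytest ; (python_version<'3.8') and extra == 'test'")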
-
-def pkginfo_to_metadata(egg_info_path: str, pkginfo_path: str) -> Message:
-    """
-    Convert .egg-info directory with PKG-INFO to the Metadata 2.1 format
-    """
-    with open(pkginfo_path, encoding="utf-8") as headers:
-        pkg_info = Parser().parse(headers)
-
-    pkg_info.replace_header("Metadata-Version", "2.1")
-    # Those will be regenerated from `requires.txt`.
-    del pkg_info["Provides-Extra"]
-    del pkg_info["Requires-Dist"]
-    requires_path = os.path.join(egg_info_path, "requires.txt")
-    if os.path.exists(requires_path):
-        with open(requires_path) as requires_file:
-            requires = requires_file.read()
-
-        parsed_requirements = sorted(split_sections(requires), key=lambda x: x[0] or "")
-        for extra, reqs in parsed_requirements:
-            for key, value in generate_requirements({extra: reqs}):
-                if (key, value) not in pkg_info.items():
-                    pkg_info[key] = value
-
-    description = pkg_info["Description"]
-    if description:
-        description_lines = pkg_info["Description"].splitlines()
-        dedented_description = "\n".join(
-            # if the first line of long_description is blank,
-            # the first line here will be indented.
-            (
-                description_lines[0].lstrip(),
-                textwrap.dedent("\n".join(description_lines[1:])),
-                "\n",
-            )
-        )
-        pkg_info.set_payload(dedented_description)
-        del pkg_info["Description"]
-
-    return pkg_info
diff --git a/venv/lib/python3.10/site-packages/wheel/util.py b/venv/lib/python3.10/site-packages/wheel/util.py
deleted file mode 100644
index d98d98c..0000000
--- a/venv/lib/python3.10/site-packages/wheel/util.py
+++ /dev/null
@@ -1,26 +0,0 @@
-from __future__ import annotations
-
-import base64
-import logging
-
-log = logging.getLogger("wheel")
-
-# ensure Python logging is configured
-try:
-    __import__("setuptools.logging")
-except ImportError:
-    # setuptools < ??
-    from . import _setuptools_logging
-
-    _setuptools_logging.configure()
-
-
-def urlsafe_b64encode(data: bytes) -> bytes:
-    """urlsafe_b64encode without padding"""
-    return base64.urlsafe_b64encode(data).rstrip(b"=")
-
-
-def urlsafe_b64decode(data: bytes) -> bytes:
-    """urlsafe_b64decode without padding"""
-    pad = b"=" * (4 - (len(data) & 3))
-    return base64.urlsafe_b64decode(data + pad)
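-
-
-# Worked example (illustrative): len(data) & 3 is the number of base64 chars
-# past a 4-char boundary, so 4 - (len(data) & 3) restores the stripped padding.
-#     urlsafe_b64encode(b"wheel")    # b'd2hlZWw'  (the trailing '=' is removed)
-#     urlsafe_b64decode(b"d2hlZWw")  # b'wheel'    (one '=' is restored first)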
diff --git a/venv/lib/python3.10/site-packages/wheel/vendored/packaging/_musllinux.py b/venv/lib/python3.10/site-packages/wheel/vendored/packaging/_musllinux.py
deleted file mode 100644
index 7946c9b..0000000
--- a/venv/lib/python3.10/site-packages/wheel/vendored/packaging/_musllinux.py
+++ /dev/null
@@ -1,138 +0,0 @@
-"""PEP 656 support.
-
-This module implements logic to detect if the currently running Python is
-linked against musl, and what musl version is used.
-"""
-
-from __future__ import annotations
-
-import contextlib
-import functools
-import operator
-import os
-import re
-import struct
-import subprocess
-import sys
-from typing import IO, Iterator, NamedTuple
-
-
-def _read_unpacked(f: IO[bytes], fmt: str) -> tuple[int, ...]:
-    return struct.unpack(fmt, f.read(struct.calcsize(fmt)))
-
-
-def _parse_ld_musl_from_elf(f: IO[bytes]) -> str | None:
-    """Detect musl libc location by parsing the Python executable.
-
-    Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca
-    ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html
-    """
-    f.seek(0)
-    try:
-        ident = _read_unpacked(f, "16B")
-    except struct.error:
-        return None
-    if ident[:4] != tuple(b"\x7fELF"):  # Invalid magic, not ELF.
-        return None
-    f.seek(struct.calcsize("HHI"), 1)  # Skip file type, machine, and version.
-
-    try:
-        # e_fmt: Format for program header.
-        # p_fmt: Format for section header.
-        # p_idx: Indexes to find p_type, p_offset, and p_filesz.
-        e_fmt, p_fmt, p_idx = {
-            1: ("IIIIHHH", "IIIIIIII", (0, 1, 4)),  # 32-bit.
-            2: ("QQQIHHH", "IIQQQQQQ", (0, 2, 5)),  # 64-bit.
-        }[ident[4]]
-    except KeyError:
-        return None
-    else:
-        p_get = operator.itemgetter(*p_idx)
-
-    # Find the interpreter section and return its content.
-    try:
-        _, e_phoff, _, _, _, e_phentsize, e_phnum = _read_unpacked(f, e_fmt)
-    except struct.error:
-        return None
-    for i in range(e_phnum + 1):
-        f.seek(e_phoff + e_phentsize * i)
-        try:
-            p_type, p_offset, p_filesz = p_get(_read_unpacked(f, p_fmt))
-        except struct.error:
-            return None
-        if p_type != 3:  # Not PT_INTERP.
-            continue
-        f.seek(p_offset)
-        interpreter = os.fsdecode(f.read(p_filesz)).strip("\0")
-        if "musl" not in interpreter:
-            return None
-        return interpreter
-    return None
-
-
-class _MuslVersion(NamedTuple):
-    major: int
-    minor: int
-
-
-def _parse_musl_version(output: str) -> _MuslVersion | None:
-    lines = [n for n in (n.strip() for n in output.splitlines()) if n]
-    if len(lines) < 2 or lines[0][:4] != "musl":
-        return None
-    m = re.match(r"Version (\d+)\.(\d+)", lines[1])
-    if not m:
-        return None
-    return _MuslVersion(major=int(m.group(1)), minor=int(m.group(2)))
-
-
-@functools.lru_cache()
-def _get_musl_version(executable: str) -> _MuslVersion | None:
-    """Detect currently-running musl runtime version.
-
-    This is done by checking the specified executable's dynamic linking
-    information, and invoking the loader to parse its output for a version
-    string. If the loader is musl, the output would be something like::
-
-        musl libc (x86_64)
-        Version 1.2.2
-        Dynamic Program Loader
-    """
-    with contextlib.ExitStack() as stack:
-        try:
-            f = stack.enter_context(open(executable, "rb"))
-        except OSError:
-            return None
-        ld = _parse_ld_musl_from_elf(f)
-    if not ld:
-        return None
-    proc = subprocess.run([ld], stderr=subprocess.PIPE, text=True)
-    return _parse_musl_version(proc.stderr)
-
-
-def platform_tags(arch: str) -> Iterator[str]:
-    """Generate musllinux tags compatible to the current platform.
-
-    :param arch: Should be the part of platform tag after the ``linux_``
-        prefix, e.g. ``x86_64``. The ``linux_`` prefix is assumed as a
-        prerequisite for the current platform to be musllinux-compatible.
-
-    :returns: An iterator of compatible musllinux tags.
-    """
-    sys_musl = _get_musl_version(sys.executable)
-    if sys_musl is None:  # Python not dynamically linked against musl.
-        return
-    for minor in range(sys_musl.minor, -1, -1):
-        yield f"musllinux_{sys_musl.major}_{minor}_{arch}"
-
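-# Example (illustrative): linked against musl 1.2, platform_tags("x86_64")
-# yields "musllinux_1_2_x86_64", "musllinux_1_1_x86_64", "musllinux_1_0_x86_64".
-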
-
-if __name__ == "__main__":  # pragma: no cover
-    import sysconfig
-
-    plat = sysconfig.get_platform()
-    assert plat.startswith("linux-"), "not linux"
-
-    print("plat:", plat)
-    print("musl:", _get_musl_version(sys.executable))
-    print("tags:", end=" ")
-    for t in platform_tags(re.sub(r"[.-]", "_", plat.split("-", 1)[-1])):
-        print(t, end="\n      ")
diff --git a/venv/lib/python3.10/site-packages/wheel/wheelfile.py b/venv/lib/python3.10/site-packages/wheel/wheelfile.py
deleted file mode 100644
index 8ae9733..0000000
--- a/venv/lib/python3.10/site-packages/wheel/wheelfile.py
+++ /dev/null
@@ -1,191 +0,0 @@
-from __future__ import annotations
-
-import csv
-import hashlib
-import os.path
-import re
-import stat
-import time
-from collections import OrderedDict
-from io import StringIO, TextIOWrapper
-from zipfile import ZIP_DEFLATED, ZipFile, ZipInfo
-
-from wheel.cli import WheelError
-from wheel.util import log, urlsafe_b64decode, urlsafe_b64encode
-
-# Non-greedy matching of an optional build number may be too clever (more
-# invalid wheel filenames will match). Separate regex for .dist-info?
-WHEEL_INFO_RE = re.compile(
-    r"""^(?P(?P[^\s-]+?)-(?P[^\s-]+?))(-(?P\d[^\s-]*))?
-     -(?P[^\s-]+?)-(?P[^\s-]+?)-(?P\S+)\.whl$""",
-    re.VERBOSE,
-)
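-# Example (illustrative):
-#     m = WHEEL_INFO_RE.match("pip-22.0.4-py3-none-any.whl")
-#     m.group("namever"), m.group("plat")  # ('pip-22.0.4', 'any')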
-MINIMUM_TIMESTAMP = 315532800  # 1980-01-01 00:00:00 UTC
-
-
-def get_zipinfo_datetime(timestamp=None):
-    # Some applications need reproducible .whl files, but they can't do this without
-    # forcing the timestamp of the individual ZipInfo objects. See issue #143.
-    timestamp = int(os.environ.get("SOURCE_DATE_EPOCH", timestamp or time.time()))
-    timestamp = max(timestamp, MINIMUM_TIMESTAMP)
-    return time.gmtime(timestamp)[0:6]
-
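-# Illustrative: with SOURCE_DATE_EPOCH set, rebuilt wheels get identical
-# timestamps.
-#     os.environ["SOURCE_DATE_EPOCH"] = "315532800"
-#     get_zipinfo_datetime()  # (1980, 1, 1, 0, 0, 0)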
-
-class WheelFile(ZipFile):
-    """A ZipFile derivative class that also reads SHA-256 hashes from
-    .dist-info/RECORD and checks any read files against those.
-    """
-
-    _default_algorithm = hashlib.sha256
-
-    def __init__(self, file, mode="r", compression=ZIP_DEFLATED):
-        basename = os.path.basename(file)
-        self.parsed_filename = WHEEL_INFO_RE.match(basename)
-        if not basename.endswith(".whl") or self.parsed_filename is None:
-            raise WheelError(f"Bad wheel filename {basename!r}")
-
-        ZipFile.__init__(self, file, mode, compression=compression, allowZip64=True)
-
-        self.dist_info_path = "{}.dist-info".format(
-            self.parsed_filename.group("namever")
-        )
-        self.record_path = self.dist_info_path + "/RECORD"
-        self._file_hashes = OrderedDict()
-        self._file_sizes = {}
-        if mode == "r":
-            # Ignore RECORD and any embedded wheel signatures
-            self._file_hashes[self.record_path] = None, None
-            self._file_hashes[self.record_path + ".jws"] = None, None
-            self._file_hashes[self.record_path + ".p7s"] = None, None
-
-            # Fill in the expected hashes by reading them from RECORD
-            try:
-                record = self.open(self.record_path)
-            except KeyError:
-                raise WheelError(f"Missing {self.record_path} file")
-
-            with record:
-                for line in csv.reader(
-                    TextIOWrapper(record, newline="", encoding="utf-8")
-                ):
-                    path, hash_sum, size = line
-                    if not hash_sum:
-                        continue
-
-                    algorithm, hash_sum = hash_sum.split("=")
-                    try:
-                        hashlib.new(algorithm)
-                    except ValueError:
-                        raise WheelError(f"Unsupported hash algorithm: {algorithm}")
-
-                    if algorithm.lower() in {"md5", "sha1"}:
-                        raise WheelError(
-                            "Weak hash algorithm ({}) is not permitted by PEP "
-                            "427".format(algorithm)
-                        )
-
-                    self._file_hashes[path] = (
-                        algorithm,
-                        urlsafe_b64decode(hash_sum.encode("ascii")),
-                    )
-
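-    # RECORD rows look like (illustrative):
-    #     wheel/util.py,sha256=<urlsafe-b64-digest-no-padding>,1234
-    # open() below verifies each streamed file against the recorded digest.
-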
-    def open(self, name_or_info, mode="r", pwd=None):
-        def _update_crc(newdata):
-            eof = ef._eof
-            update_crc_orig(newdata)
-            running_hash.update(newdata)
-            if eof and running_hash.digest() != expected_hash:
-                raise WheelError(f"Hash mismatch for file '{ef_name}'")
-
-        ef_name = (
-            name_or_info.filename if isinstance(name_or_info, ZipInfo) else name_or_info
-        )
-        if (
-            mode == "r"
-            and not ef_name.endswith("/")
-            and ef_name not in self._file_hashes
-        ):
-            raise WheelError(f"No hash found for file '{ef_name}'")
-
-        ef = ZipFile.open(self, name_or_info, mode, pwd)
-        if mode == "r" and not ef_name.endswith("/"):
-            algorithm, expected_hash = self._file_hashes[ef_name]
-            if expected_hash is not None:
-                # Monkey patch the _update_crc method to also check for the hash from
-                # RECORD
-                running_hash = hashlib.new(algorithm)
-                update_crc_orig, ef._update_crc = ef._update_crc, _update_crc
-
-        return ef
-
-    def write_files(self, base_dir):
-        log.info(f"creating '{self.filename}' and adding '{base_dir}' to it")
-        deferred = []
-        for root, dirnames, filenames in os.walk(base_dir):
-            # Sort the directory names so that `os.walk` will walk them in a
-            # defined order on the next iteration.
-            dirnames.sort()
-            for name in sorted(filenames):
-                path = os.path.normpath(os.path.join(root, name))
-                if os.path.isfile(path):
-                    arcname = os.path.relpath(path, base_dir).replace(os.path.sep, "/")
-                    if arcname == self.record_path:
-                        pass
-                    elif root.endswith(".dist-info"):
-                        deferred.append((path, arcname))
-                    else:
-                        self.write(path, arcname)
-
-        deferred.sort()
-        for path, arcname in deferred:
-            self.write(path, arcname)
-
-    def write(self, filename, arcname=None, compress_type=None):
-        with open(filename, "rb") as f:
-            st = os.fstat(f.fileno())
-            data = f.read()
-
-        zinfo = ZipInfo(
-            arcname or filename, date_time=get_zipinfo_datetime(st.st_mtime)
-        )
-        zinfo.external_attr = (stat.S_IMODE(st.st_mode) | stat.S_IFMT(st.st_mode)) << 16
-        zinfo.compress_type = compress_type or self.compression
-        self.writestr(zinfo, data, compress_type)
-
-    def writestr(self, zinfo_or_arcname, data, compress_type=None):
-        if isinstance(data, str):
-            data = data.encode("utf-8")
-
-        ZipFile.writestr(self, zinfo_or_arcname, data, compress_type)
-        fname = (
-            zinfo_or_arcname.filename
-            if isinstance(zinfo_or_arcname, ZipInfo)
-            else zinfo_or_arcname
-        )
-        log.info(f"adding '{fname}'")
-        if fname != self.record_path:
-            hash_ = self._default_algorithm(data)
-            self._file_hashes[fname] = (
-                hash_.name,
-                urlsafe_b64encode(hash_.digest()).decode("ascii"),
-            )
-            self._file_sizes[fname] = len(data)
-
-    def close(self):
-        # Write RECORD
-        if self.fp is not None and self.mode == "w" and self._file_hashes:
-            data = StringIO()
-            writer = csv.writer(data, delimiter=",", quotechar='"', lineterminator="\n")
-            writer.writerows(
-                (
-                    (fname, algorithm + "=" + hash_, self._file_sizes[fname])
-                    for fname, (algorithm, hash_) in self._file_hashes.items()
-                )
-            )
-            writer.writerow((format(self.record_path), "", ""))
-            zinfo = ZipInfo(self.record_path, date_time=get_zipinfo_datetime())
-            zinfo.compress_type = self.compression
-            zinfo.external_attr = 0o664 << 16
-            self.writestr(zinfo, data.getvalue())
-
-        ZipFile.close(self)
diff --git a/venv/lib/python3.10/site-packages/xdist/__init__.py b/venv/lib/python3.10/site-packages/xdist/__init__.py
new file mode 100644
index 0000000..031a3d3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/xdist/__init__.py
@@ -0,0 +1,15 @@
+from xdist.plugin import (
+    is_xdist_worker,
+    is_xdist_master,
+    get_xdist_worker_id,
+    is_xdist_controller,
+)
+from xdist._version import version as __version__
+
+__all__ = [
+    "__version__",
+    "is_xdist_worker",
+    "is_xdist_master",
+    "is_xdist_controller",
+    "get_xdist_worker_id",
+]
diff --git a/venv/lib/python3.10/site-packages/xdist/_version.py b/venv/lib/python3.10/site-packages/xdist/_version.py
new file mode 100644
index 0000000..5a223df
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/xdist/_version.py
@@ -0,0 +1,5 @@
+# coding: utf-8
+# file generated by setuptools_scm
+# don't change, don't track in version control
+version = '2.5.0'
+version_tuple = (2, 5, 0)
diff --git a/venv/lib/python3.10/site-packages/xdist/dsession.py b/venv/lib/python3.10/site-packages/xdist/dsession.py
new file mode 100644
index 0000000..2ae3db6
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/xdist/dsession.py
@@ -0,0 +1,449 @@
+import py
+import pytest
+
+from xdist.workermanage import NodeManager
+from xdist.scheduler import (
+    EachScheduling,
+    LoadScheduling,
+    LoadScopeScheduling,
+    LoadFileScheduling,
+    LoadGroupScheduling,
+)
+
+
+from queue import Empty, Queue
+
+
+class Interrupted(KeyboardInterrupt):
+    """signals an immediate interruption."""
+
+
+class DSession:
+    """A pytest plugin which runs a distributed test session
+
+    At the beginning of the test session this creates a NodeManager
+    instance which creates and starts all nodes.  Nodes then emit
+    events processed in the pytest_runtestloop hook using the worker_*
+    methods.
+
+    Once a node is started it will automatically start running the
+    pytest mainloop with some custom hooks.  This means a node
+    automatically starts collecting tests.  Once tests are collected
+    it will wait for instructions.
+    """
+
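+    # Event flow (illustrative): workers put ("workerready", kwargs),
+    # ("collectionfinish", kwargs), ("testreport", kwargs), ... tuples on
+    # self.queue; loop_once() dispatches each to the matching worker_* method.
+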
+    def __init__(self, config):
+        self.config = config
+        self.log = py.log.Producer("dsession")
+        if not config.option.debug:
+            py.log.setconsumer(self.log._keywords, None)
+        self.nodemanager = None
+        self.sched = None
+        self.shuttingdown = False
+        self.countfailures = 0
+        self.maxfail = config.getvalue("maxfail")
+        self.queue = Queue()
+        self._session = None
+        self._failed_collection_errors = {}
+        self._active_nodes = set()
+        self._failed_nodes_count = 0
+        self._max_worker_restart = get_default_max_worker_restart(self.config)
+        # summary message to print at the end of the session
+        self._summary_report = None
+        self.terminal = config.pluginmanager.getplugin("terminalreporter")
+        if self.terminal:
+            self.trdist = TerminalDistReporter(config)
+            config.pluginmanager.register(self.trdist, "terminaldistreporter")
+
+    @property
+    def session_finished(self):
+        """Return True if the distributed session has finished
+
+        This means all nodes have executed all test items.  This is
+        used by pytest_runtestloop to break out of its loop.
+        """
+        return bool(self.shuttingdown and not self._active_nodes)
+
+    def report_line(self, line):
+        if self.terminal and self.config.option.verbose >= 0:
+            self.terminal.write_line(line)
+
+    @pytest.hookimpl(trylast=True)
+    def pytest_sessionstart(self, session):
+        """Creates and starts the nodes.
+
+        The nodes are set up to put their events onto self.queue.  As
+        soon as nodes start they will emit the worker_workerready event.
+        """
+        self.nodemanager = NodeManager(self.config)
+        nodes = self.nodemanager.setup_nodes(putevent=self.queue.put)
+        self._active_nodes.update(nodes)
+        self._session = session
+
+    @pytest.hookimpl
+    def pytest_sessionfinish(self, session):
+        """Shutdown all nodes."""
+        nm = getattr(self, "nodemanager", None)  # if not fully initialized
+        if nm is not None:
+            nm.teardown_nodes()
+        self._session = None
+
+    @pytest.hookimpl
+    def pytest_collection(self):
+        # prohibit collection of test items in controller process
+        return True
+
+    @pytest.hookimpl(trylast=True)
+    def pytest_xdist_make_scheduler(self, config, log):
+        dist = config.getvalue("dist")
+        schedulers = {
+            "each": EachScheduling,
+            "load": LoadScheduling,
+            "loadscope": LoadScopeScheduling,
+            "loadfile": LoadFileScheduling,
+            "loadgroup": LoadGroupScheduling,
+        }
+        return schedulers[dist](config, log)
+
+    @pytest.hookimpl
+    def pytest_runtestloop(self):
+        self.sched = self.config.hook.pytest_xdist_make_scheduler(
+            config=self.config, log=self.log
+        )
+        assert self.sched is not None
+
+        self.shouldstop = False
+        while not self.session_finished:
+            self.loop_once()
+            if self.shouldstop:
+                self.triggershutdown()
+                raise Interrupted(str(self.shouldstop))
+        return True
+
+    def loop_once(self):
+        """Process one callback from one of the workers."""
+        while 1:
+            if not self._active_nodes:
+                # If everything has died stop looping
+                self.triggershutdown()
+                raise RuntimeError("Unexpectedly no active workers available")
+            try:
+                eventcall = self.queue.get(timeout=2.0)
+                break
+            except Empty:
+                continue
+        callname, kwargs = eventcall
+        assert callname, kwargs
+        method = "worker_" + callname
+        call = getattr(self, method)
+        self.log("calling method", method, kwargs)
+        call(**kwargs)
+        if self.sched.tests_finished:
+            self.triggershutdown()
+
+    #
+    # callbacks for processing events from workers
+    #
+
+    def worker_workerready(self, node, workerinfo):
+        """Emitted when a node first starts up.
+
+        This adds the node to the scheduler; nodes continue with
+        collection without any further input.
+        """
+        node.workerinfo = workerinfo
+        node.workerinfo["id"] = node.gateway.id
+        node.workerinfo["spec"] = node.gateway.spec
+
+        self.config.hook.pytest_testnodeready(node=node)
+        if self.shuttingdown:
+            node.shutdown()
+        else:
+            self.sched.add_node(node)
+
+    def worker_workerfinished(self, node):
+        """Emitted when node executes its pytest_sessionfinish hook.
+
+        Removes the node from the scheduler.
+
+        The node might not be in the scheduler if it had not emitted
+        workerready before shutdown was triggered.
+        """
+        self.config.hook.pytest_testnodedown(node=node, error=None)
+        if node.workeroutput["exitstatus"] == 2:  # keyboard-interrupt
+            self.shouldstop = "{} received keyboard-interrupt".format(node)
+            self.worker_errordown(node, "keyboard-interrupt")
+            return
+        if node in self.sched.nodes:
+            crashitem = self.sched.remove_node(node)
+            assert not crashitem, (crashitem, node)
+        self._active_nodes.remove(node)
+
+    def worker_internal_error(self, node, formatted_error):
+        """
+        pytest_internalerror() was called on the worker.
+
+        pytest_internalerror() arguments are an excinfo and an excrepr, which can't
+        be serialized, so we go with a poor man's solution of raising an exception
+        here ourselves using the formatted message.
+        """
+        self._active_nodes.remove(node)
+        try:
+            assert False, formatted_error
+        except AssertionError:
+            from _pytest._code import ExceptionInfo
+
+            excinfo = ExceptionInfo.from_current()
+            excrepr = excinfo.getrepr()
+            self.config.hook.pytest_internalerror(excrepr=excrepr, excinfo=excinfo)
+
+    def worker_errordown(self, node, error):
+        """Emitted by the WorkerController when a node dies."""
+        self.config.hook.pytest_testnodedown(node=node, error=error)
+        try:
+            crashitem = self.sched.remove_node(node)
+        except KeyError:
+            pass
+        else:
+            if crashitem:
+                self.handle_crashitem(crashitem, node)
+
+        self._failed_nodes_count += 1
+        maximum_reached = (
+            self._max_worker_restart is not None
+            and self._failed_nodes_count > self._max_worker_restart
+        )
+        if maximum_reached:
+            if self._max_worker_restart == 0:
+                msg = "worker {} crashed and worker restarting disabled".format(
+                    node.gateway.id
+                )
+            else:
+                msg = "maximum crashed workers reached: %d" % self._max_worker_restart
+            self._summary_report = msg
+            self.report_line("\n" + msg)
+            self.triggershutdown()
+        else:
+            self.report_line("\nreplacing crashed worker %s" % node.gateway.id)
+            self._clone_node(node)
+        self._active_nodes.remove(node)
+
+    @pytest.hookimpl
+    def pytest_terminal_summary(self, terminalreporter):
+        if self.config.option.verbose >= 0 and self._summary_report:
+            terminalreporter.write_sep("=", "xdist: {}".format(self._summary_report))
+
+    def worker_collectionfinish(self, node, ids):
+        """worker has finished test collection.
+
+        This adds the collection for this node to the scheduler.  If
+        the scheduler indicates collection is finished (i.e. all
+        initial nodes have submitted their collections), this tells the
+        scheduler to schedule the collected items.  When initiating
+        scheduling the first time it logs which scheduler is in use.
+        """
+        if self.shuttingdown:
+            return
+        self.config.hook.pytest_xdist_node_collection_finished(node=node, ids=ids)
+        # tell session which items were effectively collected otherwise
+        # the controller node will finish the session with EXIT_NOTESTSCOLLECTED
+        self._session.testscollected = len(ids)
+        self.sched.add_node_collection(node, ids)
+        if self.terminal:
+            self.trdist.setstatus(node.gateway.spec, "[%d]" % (len(ids)))
+        if self.sched.collection_is_completed:
+            if self.terminal and not self.sched.has_pending:
+                self.trdist.ensure_show_status()
+                self.terminal.write_line("")
+                if self.config.option.verbose > 0:
+                    self.terminal.write_line(
+                        "scheduling tests via %s" % (self.sched.__class__.__name__)
+                    )
+            self.sched.schedule()
+
+    def worker_logstart(self, node, nodeid, location):
+        """Emitted when a node calls the pytest_runtest_logstart hook."""
+        self.config.hook.pytest_runtest_logstart(nodeid=nodeid, location=location)
+
+    def worker_logfinish(self, node, nodeid, location):
+        """Emitted when a node calls the pytest_runtest_logfinish hook."""
+        self.config.hook.pytest_runtest_logfinish(nodeid=nodeid, location=location)
+
+    def worker_testreport(self, node, rep):
+        """Emitted when a node calls the pytest_runtest_logreport hook."""
+        rep.node = node
+        self.config.hook.pytest_runtest_logreport(report=rep)
+        self._handlefailures(rep)
+
+    def worker_runtest_protocol_complete(self, node, item_index, duration):
+        """
+        Emitted when a node fires the 'runtest_protocol_complete' event,
+        signalling that a test has completed the runtestprotocol and should be
+        removed from the pending list in the scheduler.
+        """
+        self.sched.mark_test_complete(node, item_index, duration)
+
+    def worker_collectreport(self, node, rep):
+        """Emitted when a node calls the pytest_collectreport hook.
+
+        Because we only need the report when there's a failure/skip, as an optimization
+        we only expect to receive failed/skipped reports from workers (#330).
+        """
+        assert not rep.passed
+        self._failed_worker_collectreport(node, rep)
+
+    def worker_warning_captured(self, warning_message, when, item):
+        """Emitted when a node calls the pytest_warning_captured hook (deprecated in 6.0)."""
+        # This hook has been removed in pytest 7.1, and we can remove support once we only
+        # support pytest >=7.1.
+        kwargs = dict(warning_message=warning_message, when=when, item=item)
+        self.config.hook.pytest_warning_captured.call_historic(kwargs=kwargs)
+
+    def worker_warning_recorded(self, warning_message, when, nodeid, location):
+        """Emitted when a node calls the pytest_warning_recorded hook."""
+        kwargs = dict(
+            warning_message=warning_message, when=when, nodeid=nodeid, location=location
+        )
+        self.config.hook.pytest_warning_recorded.call_historic(kwargs=kwargs)
+
+    def _clone_node(self, node):
+        """Return new node based on an existing one.
+
+        This is normally used when a node dies: it copies the spec
+        of the existing node and creates a new one with a new id.  The
+        new node will have been set up so it will start calling the
+        "worker_*" hooks and do work soon.
+        """
+        spec = node.gateway.spec
+        spec.id = None
+        self.nodemanager.group.allocate_id(spec)
+        node = self.nodemanager.setup_node(spec, self.queue.put)
+        self._active_nodes.add(node)
+        return node
+
+    def _failed_worker_collectreport(self, node, rep):
+        # Check we haven't already seen this report (from
+        # another worker).
+        if rep.longrepr not in self._failed_collection_errors:
+            self._failed_collection_errors[rep.longrepr] = True
+            self.config.hook.pytest_collectreport(report=rep)
+            self._handlefailures(rep)
+
+    def _handlefailures(self, rep):
+        if rep.failed:
+            self.countfailures += 1
+            if self.maxfail and self.countfailures >= self.maxfail:
+                self.shouldstop = "stopping after %d failures" % (self.countfailures)
+
+    def triggershutdown(self):
+        self.log("triggering shutdown")
+        self.shuttingdown = True
+        for node in self.sched.nodes:
+            node.shutdown()
+
+    def handle_crashitem(self, nodeid, worker):
+        # XXX get more reporting info by recording pytest_runtest_logstart?
+        # XXX count no of failures and retry N times
+        runner = self.config.pluginmanager.getplugin("runner")
+        fspath = nodeid.split("::")[0]
+        msg = "worker {!r} crashed while running {!r}".format(worker.gateway.id, nodeid)
+        rep = runner.TestReport(
+            nodeid, (fspath, None, fspath), (), "failed", msg, "???"
+        )
+        rep.node = worker
+
+        self.config.hook.pytest_handlecrashitem(
+            crashitem=nodeid,
+            report=rep,
+            sched=self.sched,
+        )
+        self.config.hook.pytest_runtest_logreport(report=rep)
+
+
+class TerminalDistReporter:
+    def __init__(self, config):
+        self.config = config
+        self.tr = config.pluginmanager.getplugin("terminalreporter")
+        self._status = {}
+        self._lastlen = 0
+        self._isatty = getattr(self.tr, "isatty", self.tr.hasmarkup)
+
+    def write_line(self, msg):
+        self.tr.write_line(msg)
+
+    def ensure_show_status(self):
+        if not self._isatty:
+            self.write_line(self.getstatus())
+
+    def setstatus(self, spec, status, show=True):
+        self._status[spec.id] = status
+        if show and self._isatty:
+            self.rewrite(self.getstatus())
+
+    def getstatus(self):
+        if self.config.option.verbose >= 0:
+            parts = [
+                "{} {}".format(spec.id, self._status[spec.id]) for spec in self._specs
+            ]
+            return " / ".join(parts)
+        else:
+            return "bringing up nodes..."
+
+    def rewrite(self, line, newline=False):
+        pline = line + " " * max(self._lastlen - len(line), 0)
+        if newline:
+            self._lastlen = 0
+            pline += "\n"
+        else:
+            self._lastlen = len(line)
+        self.tr.rewrite(pline, bold=True)
+
+    @pytest.hookimpl
+    def pytest_xdist_setupnodes(self, specs):
+        self._specs = specs
+        for spec in specs:
+            self.setstatus(spec, "I", show=False)
+        self.setstatus(spec, "I", show=True)
+        self.ensure_show_status()
+
+    @pytest.hookimpl
+    def pytest_xdist_newgateway(self, gateway):
+        if self.config.option.verbose > 0:
+            rinfo = gateway._rinfo()
+            version = "%s.%s.%s" % rinfo.version_info[:3]
+            self.rewrite(
+                "[%s] %s Python %s cwd: %s"
+                % (gateway.id, rinfo.platform, version, rinfo.cwd),
+                newline=True,
+            )
+        self.setstatus(gateway.spec, "C")
+
+    @pytest.hookimpl
+    def pytest_testnodeready(self, node):
+        if self.config.option.verbose > 0:
+            d = node.workerinfo
+            infoline = "[{}] Python {}".format(
+                d["id"], d["version"].replace("\n", " -- ")
+            )
+            self.rewrite(infoline, newline=True)
+        self.setstatus(node.gateway.spec, "ok")
+
+    @pytest.hookimpl
+    def pytest_testnodedown(self, node, error):
+        if not error:
+            return
+        self.write_line("[{}] node down: {}".format(node.gateway.id, error))
+
+
+def get_default_max_worker_restart(config):
+    """gets the default value of --max-worker-restart option if it is not provided.
+
+    Use a reasonable default to avoid workers from restarting endlessly due to crashing collections (#226).
+    """
+    result = config.option.maxworkerrestart
+    if result is not None:
+        result = int(result)
+    elif config.option.numprocesses:
+        # if --max-worker-restart was not provided, use a reasonable default (#226)
+        result = config.option.numprocesses * 4
+    return result
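+
+
+# Example (illustrative): with "-n 4" and no --max-worker-restart, up to
+# 4 * 4 == 16 crashed workers are replaced before the session aborts.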
diff --git a/venv/lib/python3.10/site-packages/xdist/looponfail.py b/venv/lib/python3.10/site-packages/xdist/looponfail.py
new file mode 100644
index 0000000..ef4c34f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/xdist/looponfail.py
@@ -0,0 +1,276 @@
+"""
+    Implement -f aka looponfailing for pytest.
+
+    NOTE that we try to avoid loading and depending on application modules
+    within the controlling process (the one that repeatedly starts test
+    processes), because otherwise changes to source code could crash
+    the controlling process, which should ideally never happen.
+"""
+import py
+import pytest
+import sys
+import time
+import execnet
+
+
+@pytest.hookimpl
+def pytest_addoption(parser):
+    group = parser.getgroup("xdist", "distributed and subprocess testing")
+    group._addoption(
+        "-f",
+        "--looponfail",
+        action="store_true",
+        dest="looponfail",
+        default=False,
+        help="run tests in subprocess, wait for modified files "
+        "and re-run failing test set until all pass.",
+    )
+
+
+@pytest.hookimpl
+def pytest_cmdline_main(config):
+    if config.getoption("looponfail"):
+        usepdb = config.getoption("usepdb", False)  # a core option
+        if usepdb:
+            raise pytest.UsageError("--pdb is incompatible with --looponfail.")
+        looponfail_main(config)
+        return 2  # looponfail can only be stopped with ctrl-C anyway
+
+
+def looponfail_main(config):
+    remotecontrol = RemoteControl(config)
+    rootdirs = [py.path.local(root) for root in config.getini("looponfailroots")]
+    statrecorder = StatRecorder(rootdirs)
+    try:
+        while 1:
+            remotecontrol.loop_once()
+            if not remotecontrol.failures and remotecontrol.wasfailing:
+                # the last failures passed, let's immediately rerun all
+                continue
+            repr_pytest_looponfailinfo(
+                failreports=remotecontrol.failures, rootdirs=rootdirs
+            )
+            statrecorder.waitonchange(checkinterval=2.0)
+    except KeyboardInterrupt:
+        print()
+
+
+class RemoteControl:
+    def __init__(self, config):
+        self.config = config
+        self.failures = []
+
+    def trace(self, *args):
+        if self.config.option.debug:
+            msg = " ".join(str(x) for x in args)
+            print("RemoteControl:", msg)
+
+    def initgateway(self):
+        return execnet.makegateway("popen")
+
+    def setup(self, out=None):
+        if out is None:
+            out = py.io.TerminalWriter()
+        if hasattr(self, "gateway"):
+            raise ValueError("already have gateway %r" % self.gateway)
+        self.trace("setting up worker session")
+        self.gateway = self.initgateway()
+        self.channel = channel = self.gateway.remote_exec(
+            init_worker_session,
+            args=self.config.args,
+            option_dict=vars(self.config.option),
+        )
+        remote_outchannel = channel.receive()
+
+        def write(s):
+            out._file.write(s)
+            out._file.flush()
+
+        remote_outchannel.setcallback(write)
+
+    def ensure_teardown(self):
+        if hasattr(self, "channel"):
+            if not self.channel.isclosed():
+                self.trace("closing", self.channel)
+                self.channel.close()
+            del self.channel
+        if hasattr(self, "gateway"):
+            self.trace("exiting", self.gateway)
+            self.gateway.exit()
+            del self.gateway
+
+    def runsession(self):
+        try:
+            self.trace("sending", self.failures)
+            self.channel.send(self.failures)
+            try:
+                return self.channel.receive()
+            except self.channel.RemoteError:
+                e = sys.exc_info()[1]
+                self.trace("ERROR", e)
+                raise
+        finally:
+            self.ensure_teardown()
+
+    def loop_once(self):
+        self.setup()
+        self.wasfailing = self.failures and len(self.failures)
+        result = self.runsession()
+        failures, reports, collection_failed = result
+        if collection_failed:
+            pass  # "Collection failed, keeping previous failure set"
+        else:
+            uniq_failures = []
+            for failure in failures:
+                if failure not in uniq_failures:
+                    uniq_failures.append(failure)
+            self.failures = uniq_failures
+
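+# Protocol sketch (illustrative): each loop_once() spawns a fresh worker over
+# execnet, sends the previous failure node-ids down the channel, and receives
+# (trails, failreports, collection_failed) back from WorkerFailSession.main().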
+
+def repr_pytest_looponfailinfo(failreports, rootdirs):
+    tr = py.io.TerminalWriter()
+    if failreports:
+        tr.sep("#", "LOOPONFAILING", bold=True)
+        for report in failreports:
+            if report:
+                tr.line(report, red=True)
+    tr.sep("#", "waiting for changes", bold=True)
+    for rootdir in rootdirs:
+        tr.line("### Watching:   {}".format(rootdir), bold=True)
+
+
+def init_worker_session(channel, args, option_dict):
+    import os
+    import sys
+
+    outchannel = channel.gateway.newchannel()
+    sys.stdout = sys.stderr = outchannel.makefile("w")
+    channel.send(outchannel)
+    # prune sys.path to not contain relative paths
+    newpaths = []
+    for p in sys.path:
+        if p:
+            if not os.path.isabs(p):
+                p = os.path.abspath(p)
+            newpaths.append(p)
+    sys.path[:] = newpaths
+
+    # fullwidth, hasmarkup = channel.receive()
+    from _pytest.config import Config
+
+    config = Config.fromdictargs(option_dict, list(args))
+    config.args = args
+    from xdist.looponfail import WorkerFailSession
+
+    WorkerFailSession(config, channel).main()
+
+
+class WorkerFailSession:
+    def __init__(self, config, channel):
+        self.config = config
+        self.channel = channel
+        self.recorded_failures = []
+        self.collection_failed = False
+        config.pluginmanager.register(self)
+        config.option.looponfail = False
+        config.option.usepdb = False
+
+    def DEBUG(self, *args):
+        if self.config.option.debug:
+            print(" ".join(map(str, args)))
+
+    @pytest.hookimpl
+    def pytest_collection(self, session):
+        self.session = session
+        self.trails = self.current_command
+        hook = self.session.ihook
+        try:
+            items = session.perform_collect(self.trails or None)
+        except pytest.UsageError:
+            items = session.perform_collect(None)
+        hook.pytest_collection_modifyitems(
+            session=session, config=session.config, items=items
+        )
+        hook.pytest_collection_finish(session=session)
+        return True
+
+    @pytest.hookimpl
+    def pytest_runtest_logreport(self, report):
+        if report.failed:
+            self.recorded_failures.append(report)
+
+    @pytest.hookimpl
+    def pytest_collectreport(self, report):
+        if report.failed:
+            self.recorded_failures.append(report)
+            self.collection_failed = True
+
+    def main(self):
+        self.DEBUG("WORKER: received configuration, waiting for command trails")
+        try:
+            command = self.channel.receive()
+        except KeyboardInterrupt:
+            return  # in the worker we can't do much about this
+        self.DEBUG("received", command)
+        self.current_command = command
+        self.config.hook.pytest_cmdline_main(config=self.config)
+        trails, failreports = [], []
+        for rep in self.recorded_failures:
+            trails.append(rep.nodeid)
+            loc = rep.longrepr
+            loc = str(getattr(loc, "reprcrash", loc))
+            failreports.append(loc)
+        self.channel.send((trails, failreports, self.collection_failed))
+
+
+class StatRecorder:
+    def __init__(self, rootdirlist):
+        self.rootdirlist = rootdirlist
+        self.statcache = {}
+        self.check()  # snapshot state
+
+    def fil(self, p):
+        return p.check(file=1, dotfile=0) and p.ext != ".pyc"
+
+    def rec(self, p):
+        return p.check(dotfile=0)
+
+    def waitonchange(self, checkinterval=1.0):
+        while 1:
+            changed = self.check()
+            if changed:
+                return
+            time.sleep(checkinterval)
+
+    def check(self, removepycfiles=True):  # noqa, too complex
+        changed = False
+        statcache = self.statcache
+        newstat = {}
+        for rootdir in self.rootdirlist:
+            for path in rootdir.visit(self.fil, self.rec):
+                oldstat = statcache.pop(path, None)
+                try:
+                    newstat[path] = curstat = path.stat()
+                except py.error.ENOENT:
+                    if oldstat:
+                        changed = True
+                else:
+                    if oldstat:
+                        if (
+                            oldstat.mtime != curstat.mtime
+                            or oldstat.size != curstat.size
+                        ):
+                            changed = True
+                            print("# MODIFIED", path)
+                            if removepycfiles and path.ext == ".py":
+                                pycfile = path + "c"
+                                if pycfile.check():
+                                    pycfile.remove()
+
+                    else:
+                        changed = True
+        if statcache:
+            changed = True
+        self.statcache = newstat
+        return changed
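+
+
+# Usage sketch (illustrative):
+#     rec = StatRecorder([py.path.local(".")])
+#     rec.waitonchange(checkinterval=2.0)  # returns once a watched file changes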
diff --git a/venv/lib/python3.10/site-packages/xdist/newhooks.py b/venv/lib/python3.10/site-packages/xdist/newhooks.py
new file mode 100644
index 0000000..7776624
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/xdist/newhooks.py
@@ -0,0 +1,91 @@
+"""
+xdist hooks.
+
+Additionally, pytest-xdist will also decorate a few other hooks
+with the worker instance that executed the hook originally:
+
+``pytest_runtest_logreport``: ``rep`` parameter has a ``node`` attribute.
+
+You can use these hooks just as you would use normal pytest hooks, but some care
+must be taken in plugins in case ``xdist`` is not installed. Please see:
+
+    http://pytest.org/en/latest/writing_plugins.html#optionally-using-hooks-from-3rd-party-plugins
+"""
+import pytest
+
+
+@pytest.hookspec()
+def pytest_xdist_setupnodes(config, specs):
+    """called before any remote node is set up."""
+
+
+@pytest.hookspec()
+def pytest_xdist_newgateway(gateway):
+    """called on new raw gateway creation."""
+
+
+@pytest.hookspec()
+def pytest_xdist_rsyncstart(source, gateways):
+    """called before rsyncing a directory to remote gateways takes place."""
+
+
+@pytest.hookspec()
+def pytest_xdist_rsyncfinish(source, gateways):
+    """called after rsyncing a directory to remote gateways takes place."""
+
+
+@pytest.hookspec(firstresult=True)
+def pytest_xdist_getremotemodule():
+    """called when creating remote node"""
+
+
+@pytest.hookspec()
+def pytest_configure_node(node):
+    """configure node information before it gets instantiated."""
+
+
+@pytest.hookspec()
+def pytest_testnodeready(node):
+    """Test Node is ready to operate."""
+
+
+@pytest.hookspec()
+def pytest_testnodedown(node, error):
+    """Test Node is down."""
+
+
+@pytest.hookspec()
+def pytest_xdist_node_collection_finished(node, ids):
+    """called by the controller node when a worker node finishes collecting."""
+
+
+@pytest.hookspec(firstresult=True)
+def pytest_xdist_make_scheduler(config, log):
+    """return a node scheduler implementation"""
+
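+# Illustrative conftest.py override (firstresult, so the first non-None
+# return wins):
+#     def pytest_xdist_make_scheduler(config, log):
+#         from xdist.scheduler import LoadScheduling
+#         return LoadScheduling(config, log)
+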
+
+@pytest.hookspec(firstresult=True)
+def pytest_xdist_auto_num_workers(config):
+    """
+    Return the number of workers to spawn when ``--numprocesses=auto`` is given in the
+    command-line.
+
+    .. versionadded:: 2.1
+    """
+
+
+@pytest.hookspec(firstresult=True)
+def pytest_handlecrashitem(crashitem, report, sched):
+    """
+    Handle a crashitem, modifying the report if necessary.
+
+    The scheduler is provided as a parameter to reschedule the test if desired with
+    `sched.mark_test_pending`.
+
+    def pytest_handlecrashitem(crashitem, report, sched):
+        if should_rerun(crashitem):
+            sched.mark_test_pending(crashitem)
+            report.outcome = "rerun"
+
+    .. versionadded:: 2.2.1
+    """
diff --git a/venv/lib/python3.10/site-packages/xdist/plugin.py b/venv/lib/python3.10/site-packages/xdist/plugin.py
new file mode 100644
index 0000000..85f76e8
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/xdist/plugin.py
@@ -0,0 +1,304 @@
+import os
+import uuid
+import sys
+from pathlib import Path
+
+import py
+import pytest
+
+
+PYTEST_GTE_7 = hasattr(pytest, "version_tuple") and pytest.version_tuple >= (7, 0)  # type: ignore[attr-defined]
+
+_sys_path = list(sys.path)  # freeze a copy of sys.path at interpreter startup
+
+
+@pytest.hookimpl
+def pytest_xdist_auto_num_workers(config):
+    try:
+        import psutil
+    except ImportError:
+        pass
+    else:
+        use_logical = config.option.numprocesses == "logical"
+        count = psutil.cpu_count(logical=use_logical) or psutil.cpu_count()
+        if count:
+            return count
+    try:
+        from os import sched_getaffinity
+
+        def cpu_count():
+            return len(sched_getaffinity(0))
+
+    except ImportError:
+        if os.environ.get("TRAVIS") == "true":
+            # workaround https://bitbucket.org/pypy/pypy/issues/2375
+            return 2
+        try:
+            from os import cpu_count
+        except ImportError:
+            from multiprocessing import cpu_count
+    try:
+        n = cpu_count()
+    except NotImplementedError:
+        return 1
+    return n if n else 1
+
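+# Illustrative: on an 8-core/16-thread machine with psutil installed,
+#     pytest -n auto     -> 8 workers (physical cores)
+#     pytest -n logical  -> 16 workers (logical cores)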
+
+def parse_numprocesses(s):
+    if s in ("auto", "logical"):
+        return s
+    elif s is not None:
+        return int(s)
+
+
+@pytest.hookimpl
+def pytest_addoption(parser):
+    group = parser.getgroup("xdist", "distributed and subprocess testing")
+    group._addoption(
+        "-n",
+        "--numprocesses",
+        dest="numprocesses",
+        metavar="numprocesses",
+        action="store",
+        type=parse_numprocesses,
+        help="Shortcut for '--dist=load --tx=NUM*popen'. With 'auto', attempt "
+        "to detect physical CPU count. With 'logical', detect logical CPU "
+        "count. If physical CPU count cannot be found, falls back to logical "
+        "count. This will be 0 when used with --pdb.",
+    )
+    group.addoption(
+        "--maxprocesses",
+        dest="maxprocesses",
+        metavar="maxprocesses",
+        action="store",
+        type=int,
+        help="limit the maximum number of workers to process the tests when using --numprocesses=auto",
+    )
+    group.addoption(
+        "--max-worker-restart",
+        action="store",
+        default=None,
+        dest="maxworkerrestart",
+        help="maximum number of workers that can be restarted "
+        "when crashed (set to zero to disable this feature)",
+    )
+    group.addoption(
+        "--dist",
+        metavar="distmode",
+        action="store",
+        choices=["each", "load", "loadscope", "loadfile", "loadgroup", "no"],
+        dest="dist",
+        default="no",
+        help=(
+            "set mode for distributing tests to exec environments.\n\n"
+            "each: send each test to all available environments.\n\n"
+            "load: load balance by sending any pending test to any"
+            " available environment.\n\n"
+            "loadscope: load balance by sending pending groups of tests in"
+            " the same scope to any available environment.\n\n"
+            "loadfile: load balance by sending test grouped by file"
+            " to any available environment.\n\n"
+            "loadgroup: like load, but sends tests marked with 'xdist_group' to the same worker.\n\n"
+            "(default) no: run tests inprocess, don't distribute."
+        ),
+    )
+    group.addoption(
+        "--tx",
+        dest="tx",
+        action="append",
+        default=[],
+        metavar="xspec",
+        help=(
+            "add a test execution environment. some examples: "
+            "--tx popen//python=python2.5 --tx socket=192.168.1.102:8888 "
+            "--tx ssh=user@codespeak.net//chdir=testcache"
+        ),
+    )
+    group._addoption(
+        "-d",
+        action="store_true",
+        dest="distload",
+        default=False,
+        help="load-balance tests.  shortcut for '--dist=load'",
+    )
+    group.addoption(
+        "--rsyncdir",
+        action="append",
+        default=[],
+        metavar="DIR",
+        help="add directory for rsyncing to remote tx nodes.",
+    )
+    group.addoption(
+        "--rsyncignore",
+        action="append",
+        default=[],
+        metavar="GLOB",
+        help="add expression for ignores when rsyncing to remote tx nodes.",
+    )
+    group.addoption(
+        "--boxed",
+        action="store_true",
+        help="backward compatibility alias for pytest-forked --forked",
+    )
+    group.addoption(
+        "--testrunuid",
+        action="store",
+        help=(
+            "provide an identifier shared amongst all workers as the value of "
+            "the 'testrun_uid' fixture,\n\n,"
+            "if not provided, 'testrun_uid' is filled with a new unique string "
+            "on every test run."
+        ),
+    )
+
+    parser.addini(
+        "rsyncdirs",
+        "list of (relative) paths to be rsynced for remote distributed testing.",
+        type="paths" if PYTEST_GTE_7 else "pathlist",
+    )
+    parser.addini(
+        "rsyncignore",
+        "list of (relative) glob-style paths to be ignored for rsyncing.",
+        type="paths" if PYTEST_GTE_7 else "pathlist",
+    )
+    parser.addini(
+        "looponfailroots",
+        type="paths" if PYTEST_GTE_7 else "pathlist",
+        help="directories to check for changes",
+        default=[Path.cwd() if PYTEST_GTE_7 else py.path.local()],
+    )
+
+
+# -------------------------------------------------------------------------
+# distributed testing hooks
+# -------------------------------------------------------------------------
+
+
+@pytest.hookimpl
+def pytest_addhooks(pluginmanager):
+    from xdist import newhooks
+
+    pluginmanager.add_hookspecs(newhooks)
+
+
+# -------------------------------------------------------------------------
+# distributed testing initialization
+# -------------------------------------------------------------------------
+
+
+@pytest.hookimpl(trylast=True)
+def pytest_configure(config):
+    if config.getoption("dist") != "no" and not config.getvalue("collectonly"):
+        from xdist.dsession import DSession
+
+        session = DSession(config)
+        config.pluginmanager.register(session, "dsession")
+        tr = config.pluginmanager.getplugin("terminalreporter")
+        if tr:
+            tr.showfspath = False
+    if config.getoption("boxed"):
+        warning = DeprecationWarning(
+            "The --boxed commmand line argument is deprecated. "
+            "Install pytest-forked and use --forked instead. "
+            "pytest-xdist 3.0.0 will remove the --boxed argument and pytest-forked dependency."
+        )
+        config.issue_config_time_warning(warning, 2)
+        config.option.forked = True
+
+    config_line = (
+        "xdist_group: specify group for tests should run in same session."
+        "in relation to one another. " + "Provided by pytest-xdist."
+    )
+    config.addinivalue_line("markers", config_line)
+
+
+@pytest.hookimpl(tryfirst=True)
+def pytest_cmdline_main(config):
+    usepdb = config.getoption("usepdb", False)  # a core option
+    if config.option.numprocesses in ("auto", "logical"):
+        if usepdb:
+            config.option.numprocesses = 0
+            config.option.dist = "no"
+        else:
+            auto_num_cpus = config.hook.pytest_xdist_auto_num_workers(config=config)
+            config.option.numprocesses = auto_num_cpus
+
+    if config.option.numprocesses:
+        if config.option.dist == "no":
+            config.option.dist = "load"
+        numprocesses = config.option.numprocesses
+        if config.option.maxprocesses:
+            numprocesses = min(numprocesses, config.option.maxprocesses)
+        config.option.tx = ["popen"] * numprocesses
+    if config.option.distload:
+        config.option.dist = "load"
+    val = config.getvalue
+    if not val("collectonly") and val("dist") != "no" and usepdb:
+        raise pytest.UsageError(
+            "--pdb is incompatible with distributing tests; try using -n0 or -nauto."
+        )  # noqa: E501
+
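+# Illustrative: "-n 4" is expanded above into the equivalent of
+#     pytest --dist=load --tx popen --tx popen --tx popen --tx popen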
+
+# -------------------------------------------------------------------------
+# fixtures and API to easily know the role of current node
+# -------------------------------------------------------------------------
+
+
+def is_xdist_worker(request_or_session) -> bool:
+    """Return `True` if this is an xdist worker, `False` otherwise
+
+    :param request_or_session: the `pytest` `request` or `session` object
+    """
+    return hasattr(request_or_session.config, "workerinput")
+
+
+def is_xdist_controller(request_or_session) -> bool:
+    """Return `True` if this is the xdist controller, `False` otherwise
+
+    Note: this method also returns `False` when distribution has not been
+    activated at all.
+
+    :param request_or_session: the `pytest` `request` or `session` object
+    """
+    return (
+        not is_xdist_worker(request_or_session)
+        and request_or_session.config.option.dist != "no"
+    )
+
+
+# ALIAS: TODO, deprecate (#592)
+is_xdist_master = is_xdist_controller
+
+
+def get_xdist_worker_id(request_or_session):
+    """Return the id of the current worker ('gw0', 'gw1', etc) or 'master'
+    if running on the controller node.
+
+    If not distributing tests (for example passing `-n0` or not passing `-n` at all)
+    also return 'master'.
+
+    :param request_or_session: the `pytest` `request` or `session` object
+    """
+    if hasattr(request_or_session.config, "workerinput"):
+        return request_or_session.config.workerinput["workerid"]
+    else:
+        # TODO: remove "master", ideally returning None instead
+        return "master"
+
+
+@pytest.fixture(scope="session")
+def worker_id(request):
+    """Return the id of the current worker ('gw0', 'gw1', etc) or 'master'
+    if running on the controller node.
+    """
+    # TODO: remove "master", ideally returning None instead
+    return get_xdist_worker_id(request)
+
+
+@pytest.fixture(scope="session")
+def testrun_uid(request):
+    """Return the unique id of the current test."""
+    if hasattr(request.config, "workerinput"):
+        return request.config.workerinput["testrunuid"]
+    else:
+        return uuid.uuid4().hex
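+
+
+# Editorial usage sketch (not part of xdist): combining the fixtures above to
+# give every worker its own resource for the current run.  The fixture name
+# and the resource-per-worker idea are illustrative assumptions only;
+# tmp_path_factory is the stock pytest fixture.
+@pytest.fixture(scope="session")
+def per_worker_resource(worker_id, testrun_uid, tmp_path_factory):
+    # worker_id is "gw0", "gw1", ... under -n, or "master" without it
+    name = "resource_{}_{}".format(testrun_uid, worker_id)
+    return tmp_path_factory.mktemp("xdist-demo") / name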
diff --git a/venv/lib/python3.10/site-packages/xdist/remote.py b/venv/lib/python3.10/site-packages/xdist/remote.py
new file mode 100644
index 0000000..160b042
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/xdist/remote.py
@@ -0,0 +1,291 @@
+"""
+    This module is executed in remote subprocesses and helps to
+    control a remote testing session and relay back information.
+    It assumes that 'py' is importable and does not have dependencies
+    on the rest of the xdist code.  This means that the xdist plugin
+    need not be installed in remote environments.
+"""
+
+import sys
+import os
+import time
+
+import py
+import pytest
+from execnet.gateway_base import dumps, DumpError
+
+from _pytest.config import _prepareconfig, Config
+
+try:
+    from setproctitle import setproctitle
+except ImportError:
+
+    def setproctitle(title):
+        pass
+
+
+def worker_title(title):
+    try:
+        setproctitle(title)
+    except Exception:
+        # changing the process name is very optional, no errors please
+        pass
+
+
+class WorkerInteractor:
+    def __init__(self, config, channel):
+        self.config = config
+        self.workerid = config.workerinput.get("workerid", "?")
+        self.testrunuid = config.workerinput["testrunuid"]
+        self.log = py.log.Producer("worker-%s" % self.workerid)
+        if not config.option.debug:
+            py.log.setconsumer(self.log._keywords, None)
+        self.channel = channel
+        config.pluginmanager.register(self)
+
+    def sendevent(self, name, **kwargs):
+        self.log("sending", name, kwargs)
+        self.channel.send((name, kwargs))
+
+    @pytest.hookimpl
+    def pytest_internalerror(self, excrepr):
+        formatted_error = str(excrepr)
+        for line in formatted_error.split("\n"):
+            self.log("IERROR>", line)
+        self.sendevent("internal_error", formatted_error=formatted_error)
+
+    @pytest.hookimpl
+    def pytest_sessionstart(self, session):
+        self.session = session
+        workerinfo = getinfodict()
+        self.sendevent("workerready", workerinfo=workerinfo)
+
+    @pytest.hookimpl(hookwrapper=True)
+    def pytest_sessionfinish(self, exitstatus):
+        # in pytest 5.0+, exitstatus is an IntEnum object
+        self.config.workeroutput["exitstatus"] = int(exitstatus)
+        yield
+        self.sendevent("workerfinished", workeroutput=self.config.workeroutput)
+
+    @pytest.hookimpl
+    def pytest_collection(self, session):
+        self.sendevent("collectionstart")
+
+    @pytest.hookimpl
+    def pytest_runtestloop(self, session):
+        self.log("entering main loop")
+        torun = []
+        while True:
+            try:
+                name, kwargs = self.channel.receive()
+            except EOFError:
+                return True
+            self.log("received command", name, kwargs)
+            if name == "runtests":
+                torun.extend(kwargs["indices"])
+            elif name == "runtests_all":
+                torun.extend(range(len(session.items)))
+            self.log("items to run:", torun)
+            # only run if we have an item and a next item
+            while len(torun) >= 2:
+                self.run_one_test(torun)
+            if name == "shutdown":
+                if torun:
+                    self.run_one_test(torun)
+                break
+        return True
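+
+    # Editorial note (not part of xdist): the controller drives this loop by
+    # sending (name, kwargs) tuples over the execnet channel, e.g.
+    #
+    #     channel.send(("runtests", {"indices": [0, 1, 2]}))
+    #     channel.send(("runtests_all", {}))
+    #     channel.send(("shutdown", {}))
+    #
+    # The worker always keeps one item queued ahead so that
+    # pytest_runtest_protocol sees a real `nextitem` and only tears down
+    # session-scoped fixtures on the genuine last test.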
+
+    def run_one_test(self, torun):
+        items = self.session.items
+        self.item_index = torun.pop(0)
+        item = items[self.item_index]
+        if torun:
+            nextitem = items[torun[0]]
+        else:
+            nextitem = None
+
+        worker_title("[pytest-xdist running] %s" % item.nodeid)
+
+        start = time.time()
+        self.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem)
+        duration = time.time() - start
+
+        worker_title("[pytest-xdist idle]")
+
+        self.sendevent(
+            "runtest_protocol_complete", item_index=self.item_index, duration=duration
+        )
+
+    def pytest_collection_modifyitems(self, session, config, items):
+        # add the group name to nodeid as suffix if --dist=loadgroup
+        if config.getvalue("loadgroup"):
+            for item in items:
+                mark = item.get_closest_marker("xdist_group")
+                if not mark:
+                    continue
+                gname = (
+                    mark.args[0]
+                    if len(mark.args) > 0
+                    else mark.kwargs.get("name", "default")
+                )
+                item._nodeid = "{}@{}".format(item.nodeid, gname)
+
+    @pytest.hookimpl
+    def pytest_collection_finish(self, session):
+        try:
+            topdir = str(self.config.rootpath)
+        except AttributeError:  # pytest <= 6.1.0
+            topdir = str(self.config.rootdir)
+
+        self.sendevent(
+            "collectionfinish",
+            topdir=topdir,
+            ids=[item.nodeid for item in session.items],
+        )
+
+    @pytest.hookimpl
+    def pytest_runtest_logstart(self, nodeid, location):
+        self.sendevent("logstart", nodeid=nodeid, location=location)
+
+    @pytest.hookimpl
+    def pytest_runtest_logfinish(self, nodeid, location):
+        self.sendevent("logfinish", nodeid=nodeid, location=location)
+
+    @pytest.hookimpl
+    def pytest_runtest_logreport(self, report):
+        data = self.config.hook.pytest_report_to_serializable(
+            config=self.config, report=report
+        )
+        data["item_index"] = self.item_index
+        data["worker_id"] = self.workerid
+        data["testrun_uid"] = self.testrunuid
+        assert self.session.items[self.item_index].nodeid == report.nodeid
+        self.sendevent("testreport", data=data)
+
+    @pytest.hookimpl
+    def pytest_collectreport(self, report):
+        # send only reports that have not passed to controller as optimization (#330)
+        if not report.passed:
+            data = self.config.hook.pytest_report_to_serializable(
+                config=self.config, report=report
+            )
+            self.sendevent("collectreport", data=data)
+
+    @pytest.hookimpl
+    def pytest_warning_recorded(self, warning_message, when, nodeid, location):
+        self.sendevent(
+            "warning_recorded",
+            warning_message_data=serialize_warning_message(warning_message),
+            when=when,
+            nodeid=nodeid,
+            location=location,
+        )
+
+
+def serialize_warning_message(warning_message):
+    if isinstance(warning_message.message, Warning):
+        message_module = type(warning_message.message).__module__
+        message_class_name = type(warning_message.message).__name__
+        message_str = str(warning_message.message)
+        # check now if we can serialize the warning arguments (#349)
+        # if not, we will just use the exception message on the controller node
+        try:
+            dumps(warning_message.message.args)
+        except DumpError:
+            message_args = None
+        else:
+            message_args = warning_message.message.args
+    else:
+        message_str = warning_message.message
+        message_module = None
+        message_class_name = None
+        message_args = None
+    if warning_message.category:
+        category_module = warning_message.category.__module__
+        category_class_name = warning_message.category.__name__
+    else:
+        category_module = None
+        category_class_name = None
+
+    result = {
+        "message_str": message_str,
+        "message_module": message_module,
+        "message_class_name": message_class_name,
+        "message_args": message_args,
+        "category_module": category_module,
+        "category_class_name": category_class_name,
+    }
+    # access private _WARNING_DETAILS because the attributes vary between Python versions
+    for attr_name in warning_message._WARNING_DETAILS:
+        if attr_name in ("message", "category"):
+            continue
+        attr = getattr(warning_message, attr_name)
+        # Check if we can serialize the warning detail, marking `None` otherwise
+        # Note that we need to define the attr (even as `None`) to allow deserializing
+        try:
+            dumps(attr)
+        except DumpError:
+            result[attr_name] = repr(attr)
+        else:
+            result[attr_name] = attr
+    return result
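+
+
+# Editorial example (not part of xdist): for warnings.warn(UserWarning("boom"))
+# the function above yields roughly
+#
+#     {"message_str": "boom", "message_module": "builtins",
+#      "message_class_name": "UserWarning", "message_args": ("boom",),
+#      "category_module": "builtins", "category_class_name": "UserWarning",
+#      ...}
+#
+# plus the remaining _WARNING_DETAILS entries (filename, lineno, ...),
+# repr()-ed whenever execnet cannot serialize the raw value.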
+
+
+def getinfodict():
+    import platform
+
+    return dict(
+        version=sys.version,
+        version_info=tuple(sys.version_info),
+        sysplatform=sys.platform,
+        platform=platform.platform(),
+        executable=sys.executable,
+        cwd=os.getcwd(),
+    )
+
+
+def remote_initconfig(option_dict, args):
+    option_dict["plugins"].append("no:terminal")
+    return Config.fromdictargs(option_dict, args)
+
+
+def setup_config(config, basetemp):
+    config.option.loadgroup = config.getvalue("dist") == "loadgroup"
+    config.option.looponfail = False
+    config.option.usepdb = False
+    config.option.dist = "no"
+    config.option.distload = False
+    config.option.numprocesses = None
+    config.option.maxprocesses = None
+    config.option.basetemp = basetemp
+
+
+if __name__ == "__channelexec__":
+    channel = channel  # type: ignore[name-defined] # noqa: F821
+    workerinput, args, option_dict, change_sys_path = channel.receive()  # type: ignore[name-defined]
+
+    if change_sys_path is None:
+        importpath = os.getcwd()
+        sys.path.insert(0, importpath)
+        os.environ["PYTHONPATH"] = (
+            importpath + os.pathsep + os.environ.get("PYTHONPATH", "")
+        )
+    else:
+        sys.path = change_sys_path
+
+    os.environ["PYTEST_XDIST_TESTRUNUID"] = workerinput["testrunuid"]
+    os.environ["PYTEST_XDIST_WORKER"] = workerinput["workerid"]
+    os.environ["PYTEST_XDIST_WORKER_COUNT"] = str(workerinput["workercount"])
+
+    if hasattr(Config, "InvocationParams"):
+        config = _prepareconfig(args, None)
+    else:
+        config = remote_initconfig(option_dict, args)
+        config.args = args
+
+    setup_config(config, option_dict.get("basetemp"))
+    config._parser.prog = os.path.basename(workerinput["mainargv"][0])
+    config.workerinput = workerinput  # type: ignore[attr-defined]
+    config.workeroutput = {}  # type: ignore[attr-defined]
+    interactor = WorkerInteractor(config, channel)  # type: ignore[name-defined]
+    config.hook.pytest_cmdline_main(config=config)
diff --git a/venv/lib/python3.10/site-packages/xdist/report.py b/venv/lib/python3.10/site-packages/xdist/report.py
new file mode 100644
index 0000000..02ad30d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/xdist/report.py
@@ -0,0 +1,20 @@
+from difflib import unified_diff
+
+
+def report_collection_diff(from_collection, to_collection, from_id, to_id):
+    """Report the collected test difference between two nodes.
+
+    :returns: detailed message describing the difference between the given
+    collections, or None if they are equal.
+    """
+    if from_collection == to_collection:
+        return None
+
+    diff = unified_diff(from_collection, to_collection, fromfile=from_id, tofile=to_id)
+    error_message = (
+        "Different tests were collected between {from_id} and {to_id}. "
+        "The difference is:\n"
+        "{diff}"
+    ).format(from_id=from_id, to_id=to_id, diff="\n".join(diff))
+    msg = "\n".join(x.rstrip() for x in error_message.split("\n"))
+    return msg
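+
+
+# Editorial usage example (not part of xdist):
+#
+#     report_collection_diff(["test_a", "test_b"], ["test_a"], "gw0", "gw1")
+#
+# returns "Different tests were collected between gw0 and gw1. ..." followed
+# by a unified diff of the two nodeid lists; identical collections yield None.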
diff --git a/venv/lib/python3.10/site-packages/xdist/scheduler/__init__.py b/venv/lib/python3.10/site-packages/xdist/scheduler/__init__.py
new file mode 100644
index 0000000..ab2e830
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/xdist/scheduler/__init__.py
@@ -0,0 +1,5 @@
+from xdist.scheduler.each import EachScheduling  # noqa
+from xdist.scheduler.load import LoadScheduling  # noqa
+from xdist.scheduler.loadfile import LoadFileScheduling  # noqa
+from xdist.scheduler.loadscope import LoadScopeScheduling  # noqa
+from xdist.scheduler.loadgroup import LoadGroupScheduling  # noqa
diff --git a/venv/lib/python3.10/site-packages/xdist/scheduler/each.py b/venv/lib/python3.10/site-packages/xdist/scheduler/each.py
new file mode 100644
index 0000000..cfe99e7
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/xdist/scheduler/each.py
@@ -0,0 +1,140 @@
+from py.log import Producer
+
+from xdist.workermanage import parse_spec_config
+from xdist.report import report_collection_diff
+
+
+class EachScheduling:
+    """Implement scheduling of test items on all nodes
+
+    If a node gets added after the test run is started then it is
+    assumed to replace a node which got removed before it finished
+    its collection.  In this case it will only be used if a node
+    with the same spec got removed earlier.
+
+    Any nodes added after the run is started will only get items
+    assigned if a node with a matching spec was removed before it
+    finished all its pending items.  The new node will then be
+    assigned the remaining items from the removed node.
+    """
+
+    def __init__(self, config, log=None):
+        self.config = config
+        self.numnodes = len(parse_spec_config(config))
+        self.node2collection = {}
+        self.node2pending = {}
+        self._started = []
+        self._removed2pending = {}
+        if log is None:
+            self.log = Producer("eachsched")
+        else:
+            self.log = log.eachsched
+        self.collection_is_completed = False
+
+    @property
+    def nodes(self):
+        """A list of all nodes in the scheduler."""
+        return list(self.node2pending.keys())
+
+    @property
+    def tests_finished(self):
+        if not self.collection_is_completed:
+            return False
+        if self._removed2pending:
+            return False
+        for pending in self.node2pending.values():
+            if len(pending) >= 2:
+                return False
+        return True
+
+    @property
+    def has_pending(self):
+        """Return True if there are pending test items
+
+        This indicates that collection has finished and nodes are
+        still processing test items, so this can be thought of as
+        "the scheduler is active".
+        """
+        for pending in self.node2pending.values():
+            if pending:
+                return True
+        return False
+
+    def add_node(self, node):
+        assert node not in self.node2pending
+        self.node2pending[node] = []
+
+    def add_node_collection(self, node, collection):
+        """Add the collected test items from a node
+
+        Collection is complete once all nodes have submitted their
+        collection.  In this case its pending list is set to an empty
+        list.  When the collection is already completed this
+        submission is from a node which was restarted to replace a
+        dead node.  In this case we already assign the pending items
+        here.  In either case ``.schedule()`` will instruct the
+        node to start running the required tests.
+        """
+        assert node in self.node2pending
+        if not self.collection_is_completed:
+            self.node2collection[node] = list(collection)
+            self.node2pending[node] = []
+            if len(self.node2collection) >= self.numnodes:
+                self.collection_is_completed = True
+        elif self._removed2pending:
+            for deadnode in self._removed2pending:
+                if deadnode.gateway.spec == node.gateway.spec:
+                    dead_collection = self.node2collection[deadnode]
+                    if collection != dead_collection:
+                        msg = report_collection_diff(
+                            dead_collection,
+                            collection,
+                            deadnode.gateway.id,
+                            node.gateway.id,
+                        )
+                        self.log(msg)
+                        return
+                    pending = self._removed2pending.pop(deadnode)
+                    self.node2pending[node] = pending
+                    break
+
+    def mark_test_complete(self, node, item_index, duration=0):
+        self.node2pending[node].remove(item_index)
+
+    def mark_test_pending(self, item):
+        # EachScheduling keeps no global pending list or shared collection
+        # (every node runs everything), so re-queueing a single item is not
+        # supported; the original body referenced attributes that do not
+        # exist on this class.
+        raise NotImplementedError()
+
+    def remove_node(self, node):
+        # KeyError if we didn't get an add_node() yet
+        pending = self.node2pending.pop(node)
+        if not pending:
+            return
+        crashitem = self.node2collection[node][pending.pop(0)]
+        if pending:
+            self._removed2pending[node] = pending
+        return crashitem
+
+    def schedule(self):
+        """Schedule the test items on the nodes
+
+        If the node's pending list is empty it is a new node which
+        needs to run all the tests.  If the pending list is already
+        populated (by ``.add_node_collection()``) then it replaces a
+        dead node and we only need to run those tests.
+        """
+        assert self.collection_is_completed
+        for node, pending in self.node2pending.items():
+            if node in self._started:
+                continue
+            if not pending:
+                pending[:] = range(len(self.node2collection[node]))
+                node.send_runtest_all()
+                node.shutdown()
+            else:
+                node.send_runtest_some(pending)
+            self._started.append(node)
diff --git a/venv/lib/python3.10/site-packages/xdist/scheduler/load.py b/venv/lib/python3.10/site-packages/xdist/scheduler/load.py
new file mode 100644
index 0000000..f32caa5
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/xdist/scheduler/load.py
@@ -0,0 +1,294 @@
+from itertools import cycle
+
+from py.log import Producer
+from _pytest.runner import CollectReport
+
+from xdist.workermanage import parse_spec_config
+from xdist.report import report_collection_diff
+
+
+class LoadScheduling:
+    """Implement load scheduling across nodes.
+
+    This distributes the tests collected across all nodes so each test
+    is run just once.  All nodes collect and submit the test suite and
+    when all collections are received it is verified they are
+    identical collections.  Then the collection gets divided up in
+    chunks and chunks get submitted to nodes.  Whenever a node finishes
+    an item, it calls ``.mark_test_complete()`` which will trigger the
+    scheduler to assign more tests if the number of pending tests for
+    the node falls below a low-watermark.
+
+    When created, ``numnodes`` defines how many nodes are expected to
+    submit a collection. This is used to know when all nodes have
+    finished collection and how large the chunks need to be.
+
+    Attributes:
+
+    :numnodes: The expected number of nodes taking part.  The actual
+       number of nodes will vary during the scheduler's lifetime as
+       nodes are added by the DSession as they are brought up and
+       removed either because of a dead node or normal shutdown.  This
+       number is primarily used to know when the initial collection is
+       completed.
+
+    :node2collection: Map of nodes and their test collection.  All
+       collections should always be identical.
+
+    :node2pending: Map of nodes and the indices of their pending
+       tests.  The indices are an index into ``.collection`` (which is
+       identical to their own collection stored in
+       ``.node2collection``).
+
+    :collection: The one collection once it is validated to be
+       identical between all the nodes.  It is initialised to None
+       until ``.schedule()`` is called.
+
+    :pending: List of indices of globally pending tests.  These are
+       tests which have not yet been allocated to a chunk for a node
+       to process.
+
+    :log: A py.log.Producer instance.
+
+    :config: Config object, used for handling hooks.
+    """
+
+    def __init__(self, config, log=None):
+        self.numnodes = len(parse_spec_config(config))
+        self.node2collection = {}
+        self.node2pending = {}
+        self.pending = []
+        self.collection = None
+        if log is None:
+            self.log = Producer("loadsched")
+        else:
+            self.log = log.loadsched
+        self.config = config
+
+    @property
+    def nodes(self):
+        """A list of all nodes in the scheduler."""
+        return list(self.node2pending.keys())
+
+    @property
+    def collection_is_completed(self):
+        """Boolean indication initial test collection is complete.
+
+        This is a boolean indicating all initial participating nodes
+        have finished collection.  The required number of initial
+        nodes is defined by ``.numnodes``.
+        """
+        return len(self.node2collection) >= self.numnodes
+
+    @property
+    def tests_finished(self):
+        """Return True if all tests have been executed by the nodes."""
+        if not self.collection_is_completed:
+            return False
+        if self.pending:
+            return False
+        for pending in self.node2pending.values():
+            if len(pending) >= 2:
+                return False
+        return True
+
+    @property
+    def has_pending(self):
+        """Return True if there are pending test items
+
+        This indicates that collection has finished and nodes are
+        still processing test items, so this can be thought of as
+        "the scheduler is active".
+        """
+        if self.pending:
+            return True
+        for pending in self.node2pending.values():
+            if pending:
+                return True
+        return False
+
+    def add_node(self, node):
+        """Add a new node to the scheduler.
+
+        From now on the node will be allocated chunks of tests to
+        execute.
+
+        Called by the ``DSession.worker_workerready`` hook when it
+        successfully bootstraps a new node.
+        """
+        assert node not in self.node2pending
+        self.node2pending[node] = []
+
+    def add_node_collection(self, node, collection):
+        """Add the collected test items from a node
+
+        The collection is stored in the ``.node2collection`` map.
+        Called by the ``DSession.worker_collectionfinish`` hook.
+        """
+        assert node in self.node2pending
+        if self.collection_is_completed:
+            # A new node has been added later, perhaps an original one died.
+            # .schedule() should have been called by now
+            assert self.collection
+            if collection != self.collection:
+                other_node = next(iter(self.node2collection.keys()))
+                msg = report_collection_diff(
+                    self.collection, collection, other_node.gateway.id, node.gateway.id
+                )
+                self.log(msg)
+                return
+        self.node2collection[node] = list(collection)
+
+    def mark_test_complete(self, node, item_index, duration=0):
+        """Mark test item as completed by node
+
+        The duration it took to execute the item is used as a hint to
+        the scheduler.
+
+        This is called by the ``DSession.worker_testreport`` hook.
+        """
+        self.node2pending[node].remove(item_index)
+        self.check_schedule(node, duration=duration)
+
+    def mark_test_pending(self, item):
+        self.pending.insert(
+            0,
+            self.collection.index(item),
+        )
+        for node in self.node2pending:
+            self.check_schedule(node)
+
+    def check_schedule(self, node, duration=0):
+        """Maybe schedule new items on the node
+
+        If there are any globally pending nodes left then this will
+        check if the given node should be given any more tests.  The
+        ``duration`` of the last test is optionally used as a
+        heuristic to influence how many tests the node is assigned.
+        """
+        if node.shutting_down:
+            return
+
+        if self.pending:
+            # how many nodes do we have?
+            num_nodes = len(self.node2pending)
+            # if our node goes below a heuristic minimum, fill it out to
+            # heuristic maximum
+            items_per_node_min = max(2, len(self.pending) // num_nodes // 4)
+            items_per_node_max = max(2, len(self.pending) // num_nodes // 2)
+            node_pending = self.node2pending[node]
+            if len(node_pending) < items_per_node_min:
+                if duration >= 0.1 and len(node_pending) >= 2:
+                    # the node seems to be running long tests and still has
+                    # enough items queued, so hold off on sending more for now
+                    return
+                num_send = items_per_node_max - len(node_pending)
+                self._send_tests(node, num_send)
+        else:
+            node.shutdown()
+
+        self.log("num items waiting for node:", len(self.pending))
+
+    def remove_node(self, node):
+        """Remove a node from the scheduler
+
+        This should be called either when the node crashed or at
+        shutdown time.  In the former case any pending items assigned
+        to the node will be re-scheduled.  Called by the
+        ``DSession.worker_workerfinished`` and
+        ``DSession.worker_errordown`` hooks.
+
+        Return the item which was being executed when the node
+        crashed, or None if the node has no more pending items.
+
+        """
+        pending = self.node2pending.pop(node)
+        if not pending:
+            return
+
+        # The node crashed, reassign pending items
+        crashitem = self.collection[pending.pop(0)]
+        self.pending.extend(pending)
+        for node in self.node2pending:
+            self.check_schedule(node)
+        return crashitem
+
+    def schedule(self):
+        """Initiate distribution of the test collection
+
+        Initiate scheduling of the items across the nodes.  If this
+        gets called again later it behaves the same as calling
+        ``.check_schedule()`` on all nodes so that newly added nodes
+        will start to be used.
+
+        This is called by the ``DSession.worker_collectionfinish`` hook
+        if ``.collection_is_completed`` is True.
+        """
+        assert self.collection_is_completed
+
+        # Initial distribution already happened, reschedule on all nodes
+        if self.collection is not None:
+            for node in self.nodes:
+                self.check_schedule(node)
+            return
+
+        # XXX allow nodes to have different collections
+        if not self._check_nodes_have_same_collection():
+            self.log("**Different tests collected, aborting run**")
+            return
+
+        # Collections are identical, create the index of pending items.
+        self.collection = list(self.node2collection.values())[0]
+        self.pending[:] = range(len(self.collection))
+        if not self.collection:
+            return
+
+        # Send a batch of tests to run. If we don't have at least two
+        # tests per node, we have to send them all so that we can send
+        # shutdown signals and get all nodes working.
+        initial_batch = max(len(self.pending) // 4, 2 * len(self.nodes))
+
+        # distribute tests round-robin up to the batch size
+        # (or until we run out)
+        nodes = cycle(self.nodes)
+        for i in range(initial_batch):
+            self._send_tests(next(nodes), 1)
+
+        if not self.pending:
+            # initial distribution sent all tests, start node shutdown
+            for node in self.nodes:
+                node.shutdown()
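+
+    # Editorial worked example (not part of xdist): 100 collected tests on
+    # 4 nodes gives initial_batch = max(100 // 4, 2 * 4) = 25, dealt out
+    # round-robin one at a time; the remaining 75 stay in self.pending for
+    # check_schedule() to hand out as workers drain their queues.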
+
+    def _send_tests(self, node, num):
+        tests_per_node = self.pending[:num]
+        if tests_per_node:
+            del self.pending[:num]
+            self.node2pending[node].extend(tests_per_node)
+            node.send_runtest_some(tests_per_node)
+
+    def _check_nodes_have_same_collection(self):
+        """Return True if all nodes have collected the same items.
+
+        If collections differ, this method returns False while logging
+        the collection differences and posting collection errors to
+        pytest_collectreport hook.
+        """
+        node_collection_items = list(self.node2collection.items())
+        first_node, col = node_collection_items[0]
+        same_collection = True
+        for node, collection in node_collection_items[1:]:
+            msg = report_collection_diff(
+                col, collection, first_node.gateway.id, node.gateway.id
+            )
+            if msg:
+                same_collection = False
+                self.log(msg)
+                if self.config is not None:
+                    rep = CollectReport(
+                        node.gateway.id, "failed", longrepr=msg, result=[]
+                    )
+                    self.config.hook.pytest_collectreport(report=rep)
+
+        return same_collection
diff --git a/venv/lib/python3.10/site-packages/xdist/scheduler/loadfile.py b/venv/lib/python3.10/site-packages/xdist/scheduler/loadfile.py
new file mode 100644
index 0000000..867a94e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/xdist/scheduler/loadfile.py
@@ -0,0 +1,52 @@
+from .loadscope import LoadScopeScheduling
+from py.log import Producer
+
+
+class LoadFileScheduling(LoadScopeScheduling):
+    """Implement load scheduling across nodes, but grouping test test file.
+
+    This distributes the tests collected across all nodes so each test is run
+    just once.  All nodes collect and submit the list of tests and when all
+    collections are received it is verified they are identical collections.
+    Then the collection gets divided up in work units, grouped by test file,
+    and those work units get submitted to nodes.  Whenever a node finishes an
+    item, it calls ``.mark_test_complete()`` which will trigger the scheduler
+    to assign more work units if the number of pending tests for the node falls
+    below a low-watermark.
+
+    When created, ``numnodes`` defines how many nodes are expected to submit a
+    collection. This is used to know when all nodes have finished collection.
+
+    This class behaves very much like LoadScopeScheduling, but with a file-level scope.
+    """
+
+    def __init__(self, config, log=None):
+        super().__init__(config, log)
+        if log is None:
+            self.log = Producer("loadfilesched")
+        else:
+            self.log = log.loadfilesched
+
+    def _split_scope(self, nodeid):
+        """Determine the scope (grouping) of a nodeid.
+
+        There are usually 3 cases for a nodeid::
+
+            example/loadsuite/test/test_beta.py::test_beta0
+            example/loadsuite/test/test_delta.py::Delta1::test_delta0
+            example/loadsuite/epsilon/__init__.py::epsilon.epsilon
+
+        #. Function in a test module.
+        #. Method of a class in a test module.
+        #. Doctest in a function in a package.
+
+        This function will group tests with the scope determined by splitting
+        the first ``::`` from the left. That is, tests will be grouped in a
+        single work unit when they reside in the same file.
+        In the above example, scopes will be::
+
+            example/loadsuite/test/test_beta.py
+            example/loadsuite/test/test_delta.py
+            example/loadsuite/epsilon/__init__.py
+        """
+        return nodeid.split("::", 1)[0]
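+
+    # Editorial examples (not part of xdist):
+    #     _split_scope("tests/test_a.py::test_one")        -> "tests/test_a.py"
+    #     _split_scope("tests/test_a.py::TestX::test_two") -> "tests/test_a.py"
+    # so every test in a file lands on the same worker.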
diff --git a/venv/lib/python3.10/site-packages/xdist/scheduler/loadgroup.py b/venv/lib/python3.10/site-packages/xdist/scheduler/loadgroup.py
new file mode 100644
index 0000000..072f64a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/xdist/scheduler/loadgroup.py
@@ -0,0 +1,54 @@
+from .loadscope import LoadScopeScheduling
+from py.log import Producer
+
+
+class LoadGroupScheduling(LoadScopeScheduling):
+    """Implement load scheduling across nodes, but grouping test by xdist_group mark.
+
+    This class behaves very much like LoadScopeScheduling, but it groups tests
+    by their xdist_group mark instead of by the module or class to which they belong.
+    """
+
+    def __init__(self, config, log=None):
+        super().__init__(config, log)
+        if log is None:
+            self.log = Producer("loadgroupsched")
+        else:
+            self.log = log.loadgroupsched
+
+    def _split_scope(self, nodeid):
+        """Determine the scope (grouping) of a nodeid.
+
+        There are usually 3 cases for a nodeid::
+
+            example/loadsuite/test/test_beta.py::test_beta0
+            example/loadsuite/test/test_delta.py::Delta1::test_delta0
+            example/loadsuite/epsilon/__init__.py::epsilon.epsilon
+
+        #. Function in a test module.
+        #. Method of a class in a test module.
+        #. Doctest in a function in a package.
+
+        With loadgroup, two cases are added::
+
+            example/loadsuite/test/test_beta.py::test_beta0
+            example/loadsuite/test/test_delta.py::Delta1::test_delta0
+            example/loadsuite/epsilon/__init__.py::epsilon.epsilon
+            example/loadsuite/test/test_gamma.py::test_beta0@gname
+            example/loadsuite/test/test_delta.py::Gamma1::test_gamma0@gname
+
+        This function will group tests with the scope determined by splitting the first ``@``
+        from the right. That is, tests will be grouped in a single work unit when they have
+        the same group name. In the above example, scopes will be::
+
+            example/loadsuite/test/test_beta.py::test_beta0
+            example/loadsuite/test/test_delta.py::Delta1::test_delta0
+            example/loadsuite/epsilon/__init__.py::epsilon.epsilon
+            gname
+            gname
+        """
+        if nodeid.rfind("@") > nodeid.rfind("]"):
+            # compare against the last ']' so an '@' inside a parametrize id
+            # (e.g. "test[a@b]") is not mistaken for a group suffix
+            return nodeid.split("@")[-1]
+        else:
+            return nodeid
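+
+    # Editorial examples (not part of xdist):
+    #     "tests/test_a.py::test_1@db"    -> "db"        (group suffix)
+    #     "tests/test_a.py::test_1[x@y]"  -> unchanged   ('@' inside brackets)
+    #     "tests/test_a.py::test_1"       -> unchanged   (no group mark)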
diff --git a/venv/lib/python3.10/site-packages/xdist/scheduler/loadscope.py b/venv/lib/python3.10/site-packages/xdist/scheduler/loadscope.py
new file mode 100644
index 0000000..c25e476
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/xdist/scheduler/loadscope.py
@@ -0,0 +1,412 @@
+from collections import OrderedDict
+
+from _pytest.runner import CollectReport
+from py.log import Producer
+from xdist.report import report_collection_diff
+from xdist.workermanage import parse_spec_config
+
+
+class LoadScopeScheduling:
+    """Implement load scheduling across nodes, but grouping test by scope.
+
+    This distributes the tests collected across all nodes so each test is run
+    just once.  All nodes collect and submit the list of tests and when all
+    collections are received it is verified they are identical collections.
+    Then the collection gets divided up in work units, grouped by test scope,
+    and those work units get submitted to nodes.  Whenever a node finishes an
+    item, it calls ``.mark_test_complete()`` which will trigger the scheduler
+    to assign more work units if the number of pending tests for the node falls
+    below a low-watermark.
+
+    When created, ``numnodes`` defines how many nodes are expected to submit a
+    collection. This is used to know when all nodes have finished collection.
+
+    Attributes:
+
+    :numnodes: The expected number of nodes taking part.  The actual number of
+       nodes will vary during the scheduler's lifetime as nodes are added by
+       the DSession as they are brought up and removed either because of a dead
+       node or normal shutdown.  This number is primarily used to know when the
+       initial collection is completed.
+
+    :collection: The final list of tests collected by all nodes once it is
+       validated to be identical between all the nodes.  It is initialised to
+       None until ``.schedule()`` is called.
+
+    :workqueue: Ordered dictionary that maps all available scopes with their
+       associated tests (nodeid). Nodeids are in turn associated with their
+       completion status. One entry of the workqueue is called a work unit.
+       In turn, a collection of work units is called a workload.
+
+       ::
+
+            workqueue = {
+                '<full>/<path>/<to>/test_module.py': {
+                    '<full>/<path>/<to>/test_module.py::test_case1': False,
+                    '<full>/<path>/<to>/test_module.py::test_case2': False,
+                    (...)
+                },
+                (...)
+            }
+
+    :assigned_work: Ordered dictionary that maps worker nodes with their
+       assigned work units.
+
+       ::
+
+            assigned_work = {
+                '<node A>': {
+                    '<full>/<path>/<to>/test_module.py': {
+                        '<full>/<path>/<to>/test_module.py::test_case1': False,
+                        '<full>/<path>/<to>/test_module.py::test_case2': False,
+                        (...)
+                    },
+                    (...)
+                },
+                (...)
+            }
+
+    :registered_collections: Ordered dictionary that maps worker nodes with
+       their collection of tests gathered during test discovery.
+
+       ::
+
+            registered_collections = {
+                '<node A>': [
+                    '<full>/<path>/<to>/test_module.py::test_case1',
+                    '<full>/<path>/<to>/test_module.py::test_case2',
+                ],
+                (...)
+            }
+
+    :log: A py.log.Producer instance.
+
+    :config: Config object, used for handling hooks.
+    """
+
+    def __init__(self, config, log=None):
+        self.numnodes = len(parse_spec_config(config))
+        self.collection = None
+
+        self.workqueue = OrderedDict()
+        self.assigned_work = OrderedDict()
+        self.registered_collections = OrderedDict()
+
+        if log is None:
+            self.log = Producer("loadscopesched")
+        else:
+            self.log = log.loadscopesched
+
+        self.config = config
+
+    @property
+    def nodes(self):
+        """A list of all active nodes in the scheduler."""
+        return list(self.assigned_work.keys())
+
+    @property
+    def collection_is_completed(self):
+        """Boolean indication initial test collection is complete.
+
+        This is a boolean indicating all initial participating nodes have
+        finished collection.  The required number of initial nodes is defined
+        by ``.numnodes``.
+        """
+        return len(self.registered_collections) >= self.numnodes
+
+    @property
+    def tests_finished(self):
+        """Return True if all tests have been executed by the nodes."""
+        if not self.collection_is_completed:
+            return False
+
+        if self.workqueue:
+            return False
+
+        for assigned_unit in self.assigned_work.values():
+            if self._pending_of(assigned_unit) >= 2:
+                return False
+
+        return True
+
+    @property
+    def has_pending(self):
+        """Return True if there are pending test items.
+
+        This indicates that collection has finished and nodes are still
+        processing test items, so this can be thought of as
+        "the scheduler is active".
+        """
+        if self.workqueue:
+            return True
+
+        for assigned_unit in self.assigned_work.values():
+            if self._pending_of(assigned_unit) > 0:
+                return True
+
+        return False
+
+    def add_node(self, node):
+        """Add a new node to the scheduler.
+
+        From now on the node will be assigned work units to be executed.
+
+        Called by the ``DSession.worker_workerready`` hook when it successfully
+        bootstraps a new node.
+        """
+        assert node not in self.assigned_work
+        self.assigned_work[node] = OrderedDict()
+
+    def remove_node(self, node):
+        """Remove a node from the scheduler.
+
+        This should be called either when the node crashed or at shutdown time.
+        In the former case any pending items assigned to the node will be
+        re-scheduled.
+
+        Called by the hooks:
+
+        - ``DSession.worker_workerfinished``.
+        - ``DSession.worker_errordown``.
+
+        Return the item being executed while the node crashed or None if the
+        node has no more pending items.
+        """
+        workload = self.assigned_work.pop(node)
+        if not self._pending_of(workload):
+            return None
+
+        # The node crashed; identify the test that was running
+        for work_unit in workload.values():
+            for nodeid, completed in work_unit.items():
+                if not completed:
+                    crashitem = nodeid
+                    break
+            else:
+                continue
+            break
+        else:
+            raise RuntimeError(
+                "Unable to identify crashitem on a workload with pending items"
+            )
+
+        # Make the uncompleted work units available again
+        self.workqueue.update(workload)
+
+        for node in self.assigned_work:
+            self._reschedule(node)
+
+        return crashitem
+
+    def add_node_collection(self, node, collection):
+        """Add the collected test items from a node.
+
+        The collection is stored in the ``.registered_collections`` dictionary.
+
+        Called by the hook:
+
+        - ``DSession.worker_collectionfinish``.
+        """
+
+        # Check that add_node() was called on the node before
+        assert node in self.assigned_work
+
+        # A new node has been added later, perhaps an original one died.
+        if self.collection_is_completed:
+
+            # Assert that .schedule() should have been called by now
+            assert self.collection
+
+            # Check that the new collection matches the official collection
+            if collection != self.collection:
+
+                other_node = next(iter(self.registered_collections.keys()))
+
+                msg = report_collection_diff(
+                    self.collection, collection, other_node.gateway.id, node.gateway.id
+                )
+                self.log(msg)
+                return
+
+        self.registered_collections[node] = list(collection)
+
+    def mark_test_complete(self, node, item_index, duration=0):
+        """Mark test item as completed by node.
+
+        Called by the hook:
+
+        - ``DSession.worker_testreport``.
+        """
+        nodeid = self.registered_collections[node][item_index]
+        scope = self._split_scope(nodeid)
+
+        self.assigned_work[node][scope][nodeid] = True
+        self._reschedule(node)
+
+    def mark_test_pending(self, item):
+        raise NotImplementedError()
+
+    def _assign_work_unit(self, node):
+        """Assign a work unit to a node."""
+        assert self.workqueue
+
+        # Grab a unit of work
+        scope, work_unit = self.workqueue.popitem(last=False)
+
+        # Keep track of the assigned work
+        assigned_to_node = self.assigned_work.setdefault(node, default=OrderedDict())
+        assigned_to_node[scope] = work_unit
+
+        # Ask the node to execute the workload
+        worker_collection = self.registered_collections[node]
+        nodeids_indexes = [
+            worker_collection.index(nodeid)
+            for nodeid, completed in work_unit.items()
+            if not completed
+        ]
+
+        node.send_runtest_some(nodeids_indexes)
+
+    def _split_scope(self, nodeid):
+        """Determine the scope (grouping) of a nodeid.
+
+        There are usually 3 cases for a nodeid::
+
+            example/loadsuite/test/test_beta.py::test_beta0
+            example/loadsuite/test/test_delta.py::Delta1::test_delta0
+            example/loadsuite/epsilon/__init__.py::epsilon.epsilon
+
+        #. Function in a test module.
+        #. Method of a class in a test module.
+        #. Doctest in a function in a package.
+
+        This function will group tests with the scope determined by splitting
+        the first ``::`` from the right. That is, classes will be grouped in a
+        single work unit, and functions from a test module will be grouped by
+        their module. In the above example, scopes will be::
+
+            example/loadsuite/test/test_beta.py
+            example/loadsuite/test/test_delta.py::Delta1
+            example/loadsuite/epsilon/__init__.py
+        """
+        return nodeid.rsplit("::", 1)[0]
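+
+    # Editorial examples (not part of xdist):
+    #     "pkg/test_mod.py::test_f"        -> "pkg/test_mod.py"
+    #     "pkg/test_mod.py::TestC::test_m" -> "pkg/test_mod.py::TestC"
+    # i.e. classes form one work unit and module-level tests another.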
+
+    def _pending_of(self, workload):
+        """Return the number of pending tests in a workload."""
+        pending = sum(list(scope.values()).count(False) for scope in workload.values())
+        return pending
+
+    def _reschedule(self, node):
+        """Maybe schedule new items on the node.
+
+        If there are any globally pending work units left then this will check
+        if the given node should be given any more tests.
+        """
+
+        # Do not add more work to a node shutting down
+        if node.shutting_down:
+            return
+
+        # Check that more work is available
+        if not self.workqueue:
+            node.shutdown()
+            return
+
+        self.log("Number of units waiting for node:", len(self.workqueue))
+
+        # Check that the node is almost depleted of work
+        # 2: Heuristic of minimum tests to enqueue more work
+        if self._pending_of(self.assigned_work[node]) > 2:
+            return
+
+        # Pop one unit of work and assign it
+        self._assign_work_unit(node)
+
+    def schedule(self):
+        """Initiate distribution of the test collection.
+
+        Initiate scheduling of the items across the nodes.  If this gets called
+        again later it behaves the same as calling ``._reschedule()`` on all
+        nodes so that newly added nodes will start to be used.
+
+        If ``.collection_is_completed`` is True, this is called by the hook:
+
+        - ``DSession.worker_collectionfinish``.
+        """
+        assert self.collection_is_completed
+
+        # Initial distribution already happened, reschedule on all nodes
+        if self.collection is not None:
+            for node in self.nodes:
+                self._reschedule(node)
+            return
+
+        # Check that all nodes collected the same tests
+        if not self._check_nodes_have_same_collection():
+            self.log("**Different tests collected, aborting run**")
+            return
+
+        # Collections are identical, create the final list of items
+        self.collection = list(next(iter(self.registered_collections.values())))
+        if not self.collection:
+            return
+
+        # Determine chunks of work (scopes)
+        for nodeid in self.collection:
+            scope = self._split_scope(nodeid)
+            work_unit = self.workqueue.setdefault(scope, default=OrderedDict())
+            work_unit[nodeid] = False
+
+        # Avoid having more workers than work
+        extra_nodes = len(self.nodes) - len(self.workqueue)
+
+        if extra_nodes > 0:
+            self.log("Shuting down {} nodes".format(extra_nodes))
+
+            for _ in range(extra_nodes):
+                unused_node, assigned = self.assigned_work.popitem(last=True)
+
+                self.log("Shuting down unused node {}".format(unused_node))
+                unused_node.shutdown()
+
+        # Assign initial workload
+        for node in self.nodes:
+            self._assign_work_unit(node)
+
+        # Ensure nodes start with at least two work units if possible (#277)
+        for node in self.nodes:
+            self._reschedule(node)
+
+        # Initial distribution sent all tests, start node shutdown
+        if not self.workqueue:
+            for node in self.nodes:
+                node.shutdown()
+
+    def _check_nodes_have_same_collection(self):
+        """Return True if all nodes have collected the same items.
+
+        If collections differ, this method returns False while logging
+        the collection differences and posting collection errors to
+        pytest_collectreport hook.
+        """
+        node_collection_items = list(self.registered_collections.items())
+        first_node, col = node_collection_items[0]
+        same_collection = True
+
+        for node, collection in node_collection_items[1:]:
+            msg = report_collection_diff(
+                col, collection, first_node.gateway.id, node.gateway.id
+            )
+            if not msg:
+                continue
+
+            same_collection = False
+            self.log(msg)
+
+            if self.config is None:
+                continue
+
+            rep = CollectReport(node.gateway.id, "failed", longrepr=msg, result=[])
+            self.config.hook.pytest_collectreport(report=rep)
+
+        return same_collection
diff --git a/venv/lib/python3.10/site-packages/xdist/workermanage.py b/venv/lib/python3.10/site-packages/xdist/workermanage.py
new file mode 100644
index 0000000..8d291d4
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/xdist/workermanage.py
@@ -0,0 +1,431 @@
+import fnmatch
+import os
+import re
+import sys
+import uuid
+
+import py
+import pytest
+import execnet
+
+import xdist.remote
+from xdist.plugin import _sys_path
+
+
+def parse_spec_config(config):
+    xspeclist = []
+    for xspec in config.getvalue("tx"):
+        i = xspec.find("*")
+        try:
+            num = int(xspec[:i])
+        except ValueError:
+            xspeclist.append(xspec)
+        else:
+            xspeclist.extend([xspec[i + 1 :]] * num)
+    if not xspeclist:
+        raise pytest.UsageError(
+            "MISSING test execution (tx) nodes: please specify --tx"
+        )
+    return xspeclist
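+
+
+# Editorial example (not part of xdist): a spec with a leading "<num>*"
+# multiplier is expanded, anything else is taken verbatim, e.g.
+#
+#     --tx 3*popen//python=python3.10  ->  ["popen//python=python3.10"] * 3
+#     --tx ssh=myhost//chdir=/tmp      ->  ["ssh=myhost//chdir=/tmp"]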
+
+
+class NodeManager:
+    EXIT_TIMEOUT = 10
+    DEFAULT_IGNORES = [".*", "*.pyc", "*.pyo", "*~"]
+
+    def __init__(self, config, specs=None, defaultchdir="pyexecnetcache"):
+        self.config = config
+        self.trace = self.config.trace.get("nodemanager")
+        self.testrunuid = self.config.getoption("testrunuid")
+        if self.testrunuid is None:
+            self.testrunuid = uuid.uuid4().hex
+        self.group = execnet.Group()
+        if specs is None:
+            specs = self._getxspecs()
+        self.specs = []
+        for spec in specs:
+            if not isinstance(spec, execnet.XSpec):
+                spec = execnet.XSpec(spec)
+            if not spec.chdir and not spec.popen:
+                spec.chdir = defaultchdir
+            self.group.allocate_id(spec)
+            self.specs.append(spec)
+        self.roots = self._getrsyncdirs()
+        self.rsyncoptions = self._getrsyncoptions()
+        self._rsynced_specs = set()
+
+    def rsync_roots(self, gateway):
+        """Rsync the set of roots to the node's gateway cwd."""
+        if self.roots:
+            for root in self.roots:
+                self.rsync(gateway, root, **self.rsyncoptions)
+
+    def setup_nodes(self, putevent):
+        self.config.hook.pytest_xdist_setupnodes(config=self.config, specs=self.specs)
+        self.trace("setting up nodes")
+        return [self.setup_node(spec, putevent) for spec in self.specs]
+
+    def setup_node(self, spec, putevent):
+        gw = self.group.makegateway(spec)
+        self.config.hook.pytest_xdist_newgateway(gateway=gw)
+        self.rsync_roots(gw)
+        node = WorkerController(self, gw, self.config, putevent)
+        gw.node = node  # keep the node alive
+        node.setup()
+        self.trace("started node %r" % node)
+        return node
+
+    def teardown_nodes(self):
+        self.group.terminate(self.EXIT_TIMEOUT)
+
+    def _getxspecs(self):
+        return [execnet.XSpec(x) for x in parse_spec_config(self.config)]
+
+    def _getrsyncdirs(self):
+        for spec in self.specs:
+            if not spec.popen or spec.chdir:
+                break
+        else:
+            return []
+        import pytest
+        import _pytest
+
+        def get_dir(p):
+            """Return the directory path if p is a package or the path to the .py file otherwise."""
+            stripped = p.rstrip("co")
+            if os.path.basename(stripped) == "__init__.py":
+                return os.path.dirname(p)
+            else:
+                return stripped
+
+        pytestpath = get_dir(pytest.__file__)
+        pytestdir = get_dir(_pytest.__file__)
+        config = self.config
+        candidates = [py._pydir, pytestpath, pytestdir]
+        candidates += config.option.rsyncdir
+        rsyncroots = config.getini("rsyncdirs")
+        if rsyncroots:
+            candidates.extend(rsyncroots)
+        roots = []
+        for root in candidates:
+            root = py.path.local(root).realpath()
+            if not root.check():
+                raise pytest.UsageError("rsyncdir doesn't exist: {!r}".format(root))
+            if root not in roots:
+                roots.append(root)
+        return roots
+
+    def _getrsyncoptions(self):
+        """Get options to be passed for rsync."""
+        ignores = list(self.DEFAULT_IGNORES)
+        ignores += [str(path) for path in self.config.option.rsyncignore]
+        ignores += [str(path) for path in self.config.getini("rsyncignore")]
+
+        return {
+            "ignores": ignores,
+            "verbose": getattr(self.config.option, "verbose", 0),
+        }
+
+    def rsync(self, gateway, source, notify=None, verbose=False, ignores=None):
+        """Perform rsync to remote hosts for node."""
+        # XXX This changes the calling behaviour of
+        #     pytest_xdist_rsyncstart and pytest_xdist_rsyncfinish to
+        #     be called once per rsync target.
+        rsync = HostRSync(source, verbose=verbose, ignores=ignores)
+        spec = gateway.spec
+        if spec.popen and not spec.chdir:
+            # XXX This assumes that sources are python-packages
+            #     and that adding the basedir does not hurt.
+            gateway.remote_exec(
+                """
+                import sys ; sys.path.insert(0, %r)
+            """
+                % os.path.dirname(str(source))
+            ).waitclose()
+            return
+        if (spec, source) in self._rsynced_specs:
+            return
+
+        def finished():
+            if notify:
+                notify("rsyncrootready", spec, source)
+
+        rsync.add_target_host(gateway, finished=finished)
+        self._rsynced_specs.add((spec, source))
+        self.config.hook.pytest_xdist_rsyncstart(source=source, gateways=[gateway])
+        rsync.send()
+        self.config.hook.pytest_xdist_rsyncfinish(source=source, gateways=[gateway])
+
+
+class HostRSync(execnet.RSync):
+    """RSyncer that filters out common files"""
+
+    def __init__(self, sourcedir, *args, **kwargs):
+        self._synced = {}
+        ignores = kwargs.pop("ignores", None) or []
+        self._ignores = [
+            re.compile(fnmatch.translate(getattr(x, "strpath", x))) for x in ignores
+        ]
+        super().__init__(sourcedir=sourcedir, **kwargs)
+
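+    # returning False skips the path; both the basename and the full
+    # path are tested, so patterns like "*.pyc" and absolute ignores match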
+    def filter(self, path):
+        path = py.path.local(path)
+        for cre in self._ignores:
+            if cre.match(path.basename) or cre.match(path.strpath):
+                return False
+        return True
+
+    def add_target_host(self, gateway, finished=None):
+        remotepath = os.path.basename(self._sourcedir)
+        super().add_target(gateway, remotepath, finishedcallback=finished, delete=True)
+
+    def _report_send_file(self, gateway, modified_rel_path):
+        if self._verbose > 0:
+            path = os.path.basename(self._sourcedir) + "/" + modified_rel_path
+            remotepath = gateway.spec.chdir
+            print("{}:{} <= {}".format(gateway.spec, remotepath, path))
+
+
+def make_reltoroot(roots, args):
+    # XXX introduce/use public API for splitting pytest args
+    splitcode = "::"
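+    # pytest args may carry test ids, e.g. "tests/test_x.py::TestA::test_b";
+    # only the filesystem part before the first "::" is rewritten relative
+    # to its rsync root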
+    result = []
+    for arg in args:
+        parts = arg.split(splitcode)
+        fspath = py.path.local(parts[0])
+        if not fspath.exists():
+            result.append(arg)
+            continue
+        for root in roots:
+            x = fspath.relto(root)
+            if x or fspath == root:
+                parts[0] = root.basename + "/" + x
+                break
+        else:
+            raise ValueError("arg {} not relative to an rsync root".format(arg))
+        result.append(splitcode.join(parts))
+    return result
+
+
+class WorkerController:
+    ENDMARK = -1
+
+    class RemoteHook:
+        @pytest.hookimpl(trylast=True)
+        def pytest_xdist_getremotemodule(self):
+            return xdist.remote
+
+    def __init__(self, nodemanager, gateway, config, putevent):
+        config.pluginmanager.register(self.RemoteHook())
+        self.nodemanager = nodemanager
+        self.putevent = putevent
+        self.gateway = gateway
+        self.config = config
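+        # handed to the remote side during setup(); exposed there as
+        # config.workerinput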
+        self.workerinput = {
+            "workerid": gateway.id,
+            "workercount": len(nodemanager.specs),
+            "testrunuid": nodemanager.testrunuid,
+            "mainargv": sys.argv,
+        }
+        self._down = False
+        self._shutdown_sent = False
+        self.log = py.log.Producer("workerctl-%s" % gateway.id)
+        if not self.config.option.debug:
+            py.log.setconsumer(self.log._keywords, None)
+
+    def __repr__(self):
+        return "<{} {}>".format(self.__class__.__name__, self.gateway.id)
+
+    @property
+    def shutting_down(self):
+        return self._down or self._shutdown_sent
+
+    def setup(self):
+        self.log("setting up worker session")
+        spec = self.gateway.spec
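+        # pytest >= 5.1 exposes the original command line via
+        # config.invocation_params; older versions fall back to
+        # config.args and the full option dict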
+        if hasattr(self.config, "invocation_params"):
+            args = [str(x) for x in self.config.invocation_params.args or ()]
+            option_dict = {}
+        else:
+            args = self.config.args
+            option_dict = vars(self.config.option)
+        if not spec.popen or spec.chdir:
+            args = make_reltoroot(self.nodemanager.roots, args)
+        if spec.popen:
+            name = "popen-%s" % self.gateway.id
+            if hasattr(self.config, "_tmp_path_factory"):
+                basetemp = self.config._tmp_path_factory.getbasetemp()
+                option_dict["basetemp"] = str(basetemp / name)
+        self.config.hook.pytest_configure_node(node=self)
+
+        remote_module = self.config.hook.pytest_xdist_getremotemodule()
+        self.channel = self.gateway.remote_exec(remote_module)
+        # change sys.path only for remote workers
+        # restore sys.path from a frozen copy for local workers
+        change_sys_path = _sys_path if self.gateway.spec.popen else None
+        self.channel.send((self.workerinput, args, option_dict, change_sys_path))
+
+        if self.putevent:
+            self.channel.setcallback(self.process_from_remote, endmarker=self.ENDMARK)
+
+    def ensure_teardown(self):
+        if hasattr(self, "channel"):
+            if not self.channel.isclosed():
+                self.log("closing", self.channel)
+                self.channel.close()
+            # del self.channel
+        if hasattr(self, "gateway"):
+            self.log("exiting", self.gateway)
+            self.gateway.exit()
+            # del self.gateway
+
+    def send_runtest_some(self, indices):
+        self.sendcommand("runtests", indices=indices)
+
+    def send_runtest_all(self):
+        self.sendcommand("runtests_all")
+
+    def shutdown(self):
+        if not self._down:
+            try:
+                self.sendcommand("shutdown")
+            except OSError:
+                pass
+            self._shutdown_sent = True
+
+    def sendcommand(self, name, **kwargs):
+        """send a named parametrized command to the other side."""
+        self.log("sending command {}(**{})".format(name, kwargs))
+        self.channel.send((name, kwargs))
+
+    def notify_inproc(self, eventname, **kwargs):
+        self.log("queuing {}(**{})".format(eventname, kwargs))
+        self.putevent((eventname, kwargs))
+
+    def process_from_remote(self, eventcall):  # noqa too complex
+        """This gets called for each object we receive from the other
+        side, and once when the channel closes.
+
+        Note that channel callbacks run in the receiver thread of the
+        execnet gateway, so we must avoid raising exceptions or doing
+        heavy work here.
+        """
+        try:
+            if eventcall == self.ENDMARK:
+                err = self.channel._getremoteerror()
+                if not self._down:
+                    if not err or isinstance(err, EOFError):
+                        err = "Not properly terminated"  # lost connection?
+                    self.notify_inproc("errordown", node=self, error=err)
+                    self._down = True
+                return
+            eventname, kwargs = eventcall
+            if eventname in ("collectionstart",):
+                self.log("ignoring {}({})".format(eventname, kwargs))
+            elif eventname == "workerready":
+                self.notify_inproc(eventname, node=self, **kwargs)
+            elif eventname == "internal_error":
+                self.notify_inproc(eventname, node=self, **kwargs)
+            elif eventname == "workerfinished":
+                self._down = True
+                self.workeroutput = kwargs["workeroutput"]
+                self.notify_inproc("workerfinished", node=self)
+            elif eventname in ("logstart", "logfinish"):
+                self.notify_inproc(eventname, node=self, **kwargs)
+            elif eventname in ("testreport", "collectreport", "teardownreport"):
+                item_index = kwargs.pop("item_index", None)
+                rep = self.config.hook.pytest_report_from_serializable(
+                    config=self.config, data=kwargs["data"]
+                )
+                if item_index is not None:
+                    rep.item_index = item_index
+                self.notify_inproc(eventname, node=self, rep=rep)
+            elif eventname == "collectionfinish":
+                self.notify_inproc(eventname, node=self, ids=kwargs["ids"])
+            elif eventname == "runtest_protocol_complete":
+                self.notify_inproc(eventname, node=self, **kwargs)
+            elif eventname == "logwarning":
+                self.notify_inproc(
+                    eventname,
+                    message=kwargs["message"],
+                    code=kwargs["code"],
+                    nodeid=kwargs["nodeid"],
+                    fslocation=kwargs["fslocation"],
+                )
+            elif eventname == "warning_captured":
+                warning_message = unserialize_warning_message(
+                    kwargs["warning_message_data"]
+                )
+                self.notify_inproc(
+                    eventname,
+                    warning_message=warning_message,
+                    when=kwargs["when"],
+                    item=kwargs["item"],
+                )
+            elif eventname == "warning_recorded":
+                warning_message = unserialize_warning_message(
+                    kwargs["warning_message_data"]
+                )
+                self.notify_inproc(
+                    eventname,
+                    warning_message=warning_message,
+                    when=kwargs["when"],
+                    nodeid=kwargs["nodeid"],
+                    location=kwargs["location"],
+                )
+            else:
+                raise ValueError("unknown event: {}".format(eventname))
+        except KeyboardInterrupt:
+            # should not land in receiver-thread
+            raise
+        except:  # noqa
+            from _pytest._code import ExceptionInfo
+
+            excinfo = ExceptionInfo.from_current()
+            print("!" * 20, excinfo)
+            self.config.notify_exception(excinfo)
+            self.shutdown()
+            self.notify_inproc("errordown", node=self, error=excinfo)
+
+
+def unserialize_warning_message(data):
+    import warnings
+    import importlib
+
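+    # rebuild the warnings.WarningMessage from the plain-dict form produced
+    # on the worker side, re-importing message/category classes by name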
+    if data["message_module"]:
+        mod = importlib.import_module(data["message_module"])
+        cls = getattr(mod, data["message_class_name"])
+        message = None
+        if data["message_args"] is not None:
+            try:
+                message = cls(*data["message_args"])
+            except TypeError:
+                pass
+        if message is None:
+            # could not recreate the original warning instance;
+            # create a generic Warning instance with the original
+            # message at least
+            message_text = "{mod}.{cls}: {msg}".format(
+                mod=data["message_module"],
+                cls=data["message_class_name"],
+                msg=data["message_str"],
+            )
+            message = Warning(message_text)
+    else:
+        message = data["message_str"]
+
+    if data["category_module"]:
+        mod = importlib.import_module(data["category_module"])
+        category = getattr(mod, data["category_class_name"])
+    else:
+        category = None
+
+    kwargs = {"message": message, "category": category}
+    # access private _WARNING_DETAILS because the attributes vary between Python versions
+    for attr_name in warnings.WarningMessage._WARNING_DETAILS:  # type: ignore[attr-defined]
+        if attr_name in ("message", "category"):
+            continue
+        kwargs[attr_name] = data[attr_name]
+
+    return warnings.WarningMessage(**kwargs)  # type: ignore[arg-type]
diff --git a/venv/pyvenv.cfg b/venv/pyvenv.cfg
index c388022..0537ffc 100644
--- a/venv/pyvenv.cfg
+++ b/venv/pyvenv.cfg
@@ -1,8 +1,3 @@
 home = /usr/bin
-implementation = CPython
-version_info = 3.10.6.final.0
-virtualenv = 20.16.7
 include-system-site-packages = false
-base-prefix = /usr
-base-exec-prefix = /usr
-base-executable = /usr/bin/python3.10
+version = 3.10.12